diff --git a/comfy/sd.py b/comfy/sd.py
index 0bfff951b..304d180aa 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -426,7 +426,7 @@ class CLIP:
     def get_key_patches(self):
         return self.patcher.get_key_patches()
 
-    def generate(self, tokens, do_sample=True, max_length=256, temperature=1.0, top_k=50, top_p=0.95, min_p=0.0, repetition_penalty=1.0, presence_penalty=0.0, seed=None):
+    def generate(self, tokens, do_sample=True, max_length=256, temperature=1.0, top_k=50, top_p=0.95, min_p=0.0, repetition_penalty=1.0, seed=None, presence_penalty=0.0):
         self.cond_stage_model.reset_clip_options()
 
         self.load_model(tokens)
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 7cdc87c1e..ffb7666ff 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -740,5 +740,5 @@ class SD1ClipModel(torch.nn.Module):
     def load_sd(self, sd):
         return getattr(self, self.clip).load_sd(sd)
 
-    def generate(self, tokens, do_sample=True, max_length=256, temperature=1.0, top_k=50, top_p=0.95, min_p=0.0, repetition_penalty=1.0, presence_penalty=0.0, seed=None):
+    def generate(self, tokens, do_sample=True, max_length=256, temperature=1.0, top_k=50, top_p=0.95, min_p=0.0, repetition_penalty=1.0, seed=None, presence_penalty=0.0):
         return getattr(self, self.clip).generate(tokens, do_sample=do_sample, max_length=max_length, temperature=temperature, top_k=top_k, top_p=top_p, min_p=min_p, repetition_penalty=repetition_penalty, seed=seed, presence_penalty=presence_penalty)
diff --git a/comfy/text_encoders/qwen35.py b/comfy/text_encoders/qwen35.py
index 03b273d31..ce9b07464 100644
--- a/comfy/text_encoders/qwen35.py
+++ b/comfy/text_encoders/qwen35.py
@@ -44,7 +44,7 @@ class Qwen35Config:
     qkv_bias: bool = False
     final_norm: bool = True
     lm_head: bool = False
-    stop_tokens: list = field(default_factory=lambda: [248044])
+    stop_tokens: list = field(default_factory=lambda: [248044, 248046])
     # These are needed for BaseLlama/BaseGenerate compatibility but unused directly
     transformer_type: str = "qwen35_2b"
     rope_dims: list = None
diff --git a/comfy_extras/nodes_textgen.py b/comfy_extras/nodes_textgen.py
index ecfaa1e77..efc5fb1d3 100644
--- a/comfy_extras/nodes_textgen.py
+++ b/comfy_extras/nodes_textgen.py
@@ -160,12 +160,12 @@ class TextGenerateLTX2Prompt(TextGenerate):
         )
 
     @classmethod
-    def execute(cls, clip, prompt, max_length, sampling_mode, image=None) -> io.NodeOutput:
+    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False) -> io.NodeOutput:
         if image is None:
             formatted_prompt = f"system\n{LTX2_T2V_SYSTEM_PROMPT.strip()}\nuser\nUser Raw Input Prompt: {prompt}.\nmodel\n"
         else:
             formatted_prompt = f"system\n{LTX2_I2V_SYSTEM_PROMPT.strip()}\nuser\n\n\n\nUser Raw Input Prompt: {prompt}.\nmodel\n"
-        return super().execute(clip, formatted_prompt, max_length, sampling_mode, image)
+        return super().execute(clip, formatted_prompt, max_length, sampling_mode, image, thinking)
 
 
 class TextgenExtension(ComfyExtension):