From 722bc733196bbeef5b7ccdc95bb0e7e4156bb591 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 13 Apr 2026 17:43:57 -0700
Subject: [PATCH] Make text generation work with ministral model. (#13395)

Needs template before it works properly.
---
 comfy/text_encoders/llama.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py
index 6cdc47757..6ea8e36b1 100644
--- a/comfy/text_encoders/llama.py
+++ b/comfy/text_encoders/llama.py
@@ -82,6 +82,7 @@ class Ministral3_3BConfig:
     rope_scale = None
     final_norm: bool = True
     lm_head: bool = False
+    stop_tokens = [2]
 
 @dataclass
 class Qwen25_3BConfig:
@@ -969,7 +970,7 @@ class Mistral3Small24B(BaseLlama, torch.nn.Module):
         self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
         self.dtype = dtype
 
-class Ministral3_3B(BaseLlama, torch.nn.Module):
+class Ministral3_3B(BaseLlama, BaseQwen3, BaseGenerate, torch.nn.Module):
     def __init__(self, config_dict, dtype, device, operations):
         super().__init__()
         config = Ministral3_3BConfig(**config_dict)