Add MiniMax Chat node for text generation using MiniMax-M2.5 models

Extend the existing MiniMax integration (currently video-only) with a new
MinimaxChatNode that supports text generation via MiniMax-M2.5 and
MiniMax-M2.5-highspeed language models. The node follows the same
ComfyExtension pattern used by OpenAI and Gemini chat nodes.

Changes:
- Add chat API models (request/response) to apis/minimax.py
- Add MinimaxChatNode with system prompt, temperature, and max_tokens support
- Register the new node in MinimaxExtension
This commit is contained in:
octo-patch 2026-03-13 22:42:26 +08:00
parent 4a8cf359fe
commit acf25eeab5
2 changed files with 157 additions and 0 deletions

View File

@ -118,3 +118,43 @@ class MinimaxVideoGenerationResponse(BaseModel):
task_id: str = Field(
..., description='The task ID for the asynchronous video generation task.'
)
class MiniMaxChatModel(str, Enum):
    """Chat-completion model identifiers accepted by the MiniMax API.

    Subclasses ``str`` so members compare equal to (and serialize as) their
    raw model-ID strings, e.g. in request payloads and combo-box options.
    """

    # Standard model.
    M2_5 = 'MiniMax-M2.5'
    # Faster, cheaper variant of the same model family.
    M2_5_highspeed = 'MiniMax-M2.5-highspeed'
class MiniMaxChatMessage(BaseModel):
    """A single message in a MiniMax chat conversation (request or response)."""

    # Expected to be "system", "user", or "assistant" per the description;
    # not constrained beyond being a string here.
    role: str = Field(..., description='The role of the message author (system, user, or assistant).')
    content: str = Field(..., description='The content of the message.')
class MiniMaxChatRequest(BaseModel):
    """Request body for the MiniMax chat-completions endpoint."""

    model: str = Field(..., description='ID of the model to use.')
    messages: list[MiniMaxChatMessage] = Field(..., description='A list of messages comprising the conversation.')
    # None lets the API apply its own default limit.
    max_tokens: Optional[int] = Field(None, description='The maximum number of tokens to generate.')
    # Range (0, 1] is enforced by pydantic via gt/le at validation time.
    temperature: Optional[float] = Field(
        None,
        description='Sampling temperature. Must be between 0 (exclusive) and 1 (inclusive).',
        gt=0.0,
        le=1.0,
    )
class MiniMaxChatChoice(BaseModel):
    """One generated completion candidate in a chat response."""

    index: int = Field(..., description='The index of the choice.')
    message: MiniMaxChatMessage = Field(..., description='The generated message.')
    # e.g. why generation stopped; optional because the API may omit it.
    finish_reason: Optional[str] = Field(None, description='The reason the model stopped generating.')
class MiniMaxChatUsage(BaseModel):
    """Token accounting for a chat request; all counters default to 0."""

    prompt_tokens: int = Field(0, description='Number of tokens in the prompt.')
    completion_tokens: int = Field(0, description='Number of tokens in the generated response.')
    total_tokens: int = Field(0, description='Total number of tokens used.')
class MiniMaxChatResponse(BaseModel):
    """Response body returned by the MiniMax chat-completions endpoint.

    Only ``choices`` is required; ``id`` and ``usage`` are tolerated as
    missing so partial/minimal API responses still parse.
    """

    id: Optional[str] = Field(None, description='A unique identifier for the chat completion.')
    choices: list[MiniMaxChatChoice] = Field(..., description='A list of chat completion choices.')
    usage: Optional[MiniMaxChatUsage] = Field(None, description='Usage statistics for the request.')

View File

@ -5,6 +5,10 @@ from typing_extensions import override
from comfy_api.latest import IO, ComfyExtension
from comfy_api_nodes.apis.minimax import (
MiniMaxChatMessage,
MiniMaxChatModel,
MiniMaxChatRequest,
MiniMaxChatResponse,
MinimaxFileRetrieveResponse,
MiniMaxModel,
MinimaxTaskResultResponse,
@ -437,6 +441,118 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_video_output(file_url))
class MinimaxChatNode(IO.ComfyNode):
    """
    Node to generate text responses using MiniMax language models.

    Builds a single-turn conversation (optional system message plus the user
    prompt), posts it to the MiniMax chat-completions endpoint via the Comfy
    API proxy, and returns the first choice's text.
    """

    @classmethod
    def define_schema(cls) -> IO.Schema:
        # Declarative node schema: inputs, one string output, hidden auth
        # fields, and a price badge that varies with the selected model.
        return IO.Schema(
            node_id="MinimaxChatNode",
            display_name="MiniMax Chat",
            category="api node/text/MiniMax",
            description="Generate text responses using MiniMax language models (MiniMax-M2.5).",
            inputs=[
                IO.String.Input(
                    "prompt",
                    default="",
                    multiline=True,
                    tooltip="Text prompt for the model to respond to.",
                ),
                # Combo options come straight from the MiniMaxChatModel enum;
                # the default is the standard (non-highspeed) model.
                IO.Combo.Input(
                    "model",
                    options=MiniMaxChatModel,
                    default=MiniMaxChatModel.M2_5.value,
                    tooltip="The MiniMax model to use for text generation.",
                ),
                IO.String.Input(
                    "system_prompt",
                    multiline=True,
                    optional=True,
                    tooltip="Optional system instructions to guide the model's behavior.",
                ),
                IO.Int.Input(
                    "max_tokens",
                    default=4096,
                    min=1,
                    max=204800,
                    step=1,
                    tooltip="Maximum number of tokens to generate in the response.",
                    optional=True,
                ),
                # min=0.01 keeps the widget inside the API's (0, 1] range
                # enforced by MiniMaxChatRequest's gt/le constraints.
                IO.Float.Input(
                    "temperature",
                    default=0.7,
                    min=0.01,
                    max=1.0,
                    step=0.01,
                    tooltip="Controls randomness in the response. Higher values produce more creative output.",
                    optional=True,
                ),
            ],
            outputs=[
                IO.String.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            # Badge expression branches on "highspeed" in the model name;
            # highspeed gets the lower per-1K-token price range.
            # NOTE(review): rates are hard-coded here — confirm they match
            # current MiniMax pricing.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model"]),
                expr="""
                (
                    $m := widgets.model;
                    $contains($m, "highspeed") ? {
                        "type": "list_usd",
                        "usd": [0.00004, 0.0002],
                        "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
                    }
                    : {
                        "type": "list_usd",
                        "usd": [0.0001, 0.0006],
                        "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
                    }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: str = MiniMaxChatModel.M2_5.value,
        system_prompt: Optional[str] = None,
        max_tokens: int = 4096,
        temperature: float = 0.7,
    ) -> IO.NodeOutput:
        # Fail fast on an invalid prompt before any network traffic
        # (presumably validate_string rejects empty input — TODO confirm
        # its exact checks against its definition).
        validate_string(prompt, field_name="prompt")

        # Assemble the conversation: optional system message first, then the
        # user's prompt. A falsy (None or empty) system_prompt is skipped.
        messages: list[MiniMaxChatMessage] = []
        if system_prompt:
            messages.append(MiniMaxChatMessage(role="system", content=system_prompt))
        messages.append(MiniMaxChatMessage(role="user", content=prompt))

        # Single round-trip through the Comfy API proxy; the raw JSON reply
        # is parsed into MiniMaxChatResponse via response_model.
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/minimax/chat/completions", method="POST"),
            response_model=MiniMaxChatResponse,
            data=MiniMaxChatRequest(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
            ),
        )

        # Output the first choice's text; if the API returned an empty
        # choices list, emit a fixed placeholder string instead of raising.
        if response.choices:
            return IO.NodeOutput(response.choices[0].message.content)
        return IO.NodeOutput("No response generated by MiniMax model.")
class MinimaxExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@ -445,6 +561,7 @@ class MinimaxExtension(ComfyExtension):
MinimaxImageToVideoNode,
# MinimaxSubjectToVideoNode,
MinimaxHailuoVideoNode,
MinimaxChatNode,
]