convert Gemini API nodes to V3 schema (#10476)

commit e86b79ab9e
parent 426cde37f1
Author: Alexander Piskun, 2025-10-26 00:35:30 +03:00 (committed by GitHub)
4 changed files with 282 additions and 400 deletions

File 1 of 4:

@@ -3,8 +3,6 @@ import aiohttp
 import mimetypes
 from typing import Optional, Union
 from comfy.utils import common_upscale
-from comfy_api.util import VideoContainer, VideoCodec
-from comfy_api.input.video_types import VideoInput
 from comfy_api_nodes.apis.client import (
     ApiClient,
     ApiEndpoint,
@@ -209,30 +207,6 @@ async def upload_file_to_comfyapi(
     return response.download_url


-def video_to_base64_string(
-    video: VideoInput,
-    container_format: VideoContainer = None,
-    codec: VideoCodec = None
-) -> str:
-    """
-    Converts a video input to a base64 string.
-
-    Args:
-        video: The video input to convert
-        container_format: Optional container format to use (defaults to video.container if available)
-        codec: Optional codec to use (defaults to video.codec if available)
-    """
-    video_bytes_io = BytesIO()
-    # Use provided format/codec if specified, otherwise use video's own if available
-    format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4)
-    codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264)
-    video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use)
-    video_bytes_io.seek(0)
-    return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8")
-
-
 async def upload_images_to_comfyapi(
     image: torch.Tensor,
     max_images=8,
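
Note: video_to_base64_string is moved rather than deleted; it reappears under comfy_api_nodes/util (files 3 and 4 below). A minimal call-site sketch against the new location, assuming only what this commit shows:

    from comfy_api.util import VideoCodec, VideoContainer
    from comfy_api_nodes.util import video_to_base64_string

    # video is an Input.Video object; the Gemini nodes pass MP4/H.264 explicitly.
    b64 = video_to_base64_string(video, container_format=VideoContainer.MP4, codec=VideoCodec.H264)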

File 2 of 4:

@@ -2,42 +2,47 @@
 API Nodes for Gemini Multimodal LLM Usage via Remote API
 See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference
 """
 from __future__ import annotations

-import json
-import time
-import os
-import uuid
 import base64
-from io import BytesIO
+import json
+import os
+import time
+import uuid
 from enum import Enum
-from typing import Optional, Literal
+from io import BytesIO
+from typing import Literal, Optional

 import torch
+from typing_extensions import override

 import folder_paths
-from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict
-from server import PromptServer
+from comfy_api.latest import IO, ComfyExtension, Input
+from comfy_api.util import VideoCodec, VideoContainer
 from comfy_api_nodes.apis import (
     GeminiContent,
     GeminiGenerateContentRequest,
     GeminiGenerateContentResponse,
     GeminiInlineData,
-    GeminiPart,
     GeminiMimeType,
+    GeminiPart,
 )
-from comfy_api_nodes.apis.gemini_api import GeminiImageGenerationConfig, GeminiImageGenerateContentRequest, GeminiImageConfig
-from comfy_api_nodes.apis.client import (
+from comfy_api_nodes.apis.gemini_api import (
+    GeminiImageConfig,
+    GeminiImageGenerateContentRequest,
+    GeminiImageGenerationConfig,
+)
+from comfy_api_nodes.util import (
     ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-)
-from comfy_api_nodes.apinode_utils import (
+    audio_to_base64_string,
+    bytesio_to_image_tensor,
+    sync_op,
+    tensor_to_base64_string,
+    validate_string,
     video_to_base64_string,
 )
-from comfy_api_nodes.util import validate_string, tensor_to_base64_string, bytesio_to_image_tensor, audio_to_base64_string
-from comfy_api.util import VideoContainer, VideoCodec
+from server import PromptServer

 GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini"
 GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024  # 20 MB
@@ -63,50 +68,6 @@ class GeminiImageModel(str, Enum):
     gemini_2_5_flash_image = "gemini-2.5-flash-image"


-def get_gemini_endpoint(
-    model: GeminiModel,
-) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]:
-    """
-    Get the API endpoint for a given Gemini model.
-
-    Args:
-        model: The Gemini model to use, either as enum or string value.
-
-    Returns:
-        ApiEndpoint configured for the specific Gemini model.
-    """
-    if isinstance(model, str):
-        model = GeminiModel(model)
-    return ApiEndpoint(
-        path=f"{GEMINI_BASE_ENDPOINT}/{model.value}",
-        method=HttpMethod.POST,
-        request_model=GeminiGenerateContentRequest,
-        response_model=GeminiGenerateContentResponse,
-    )
-
-
-def get_gemini_image_endpoint(
-    model: GeminiImageModel,
-) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]:
-    """
-    Get the API endpoint for a given Gemini model.
-
-    Args:
-        model: The Gemini model to use, either as enum or string value.
-
-    Returns:
-        ApiEndpoint configured for the specific Gemini model.
-    """
-    if isinstance(model, str):
-        model = GeminiImageModel(model)
-    return ApiEndpoint(
-        path=f"{GEMINI_BASE_ENDPOINT}/{model.value}",
-        method=HttpMethod.POST,
-        request_model=GeminiImageGenerateContentRequest,
-        response_model=GeminiGenerateContentResponse,
-    )
-
-
 def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]:
     """
     Convert image tensor input to Gemini API compatible parts.
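
Note: both endpoint factories collapse into their call sites. The pattern used later in this diff builds the ApiEndpoint inline and runs it through sync_op; a sketch (argument names as they appear in this commit, sync_op's runtime behavior assumed from context):

    response = await sync_op(
        cls,  # the node class; presumably supplies the hidden auth fields to the client
        endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"),
        data=GeminiGenerateContentRequest(contents=[GeminiContent(role="user", parts=parts)]),
        response_model=GeminiGenerateContentResponse,
    )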
@@ -119,9 +80,7 @@ def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]:
     """
     image_parts: list[GeminiPart] = []
     for image_index in range(image_input.shape[0]):
-        image_as_b64 = tensor_to_base64_string(
-            image_input[image_index].unsqueeze(0)
-        )
+        image_as_b64 = tensor_to_base64_string(image_input[image_index].unsqueeze(0))
         image_parts.append(
             GeminiPart(
                 inlineData=GeminiInlineData(
@@ -133,37 +92,7 @@ def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]:
     return image_parts


-def create_text_part(text: str) -> GeminiPart:
-    """
-    Create a text part for the Gemini API request.
-
-    Args:
-        text: The text content to include in the request.
-
-    Returns:
-        A GeminiPart object with the text content.
-    """
-    return GeminiPart(text=text)
-
-
-def get_parts_from_response(
-    response: GeminiGenerateContentResponse
-) -> list[GeminiPart]:
-    """
-    Extract all parts from the Gemini API response.
-
-    Args:
-        response: The API response from Gemini.
-
-    Returns:
-        List of response parts from the first candidate.
-    """
-    return response.candidates[0].content.parts
-
-
-def get_parts_by_type(
-    response: GeminiGenerateContentResponse, part_type: Literal["text"] | str
-) -> list[GeminiPart]:
+def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Literal["text"] | str) -> list[GeminiPart]:
     """
     Filter response parts by their type.
@@ -175,14 +104,10 @@ def get_parts_by_type(
         List of response parts matching the requested type.
     """
     parts = []
-    for part in get_parts_from_response(response):
+    for part in response.candidates[0].content.parts:
         if part_type == "text" and hasattr(part, "text") and part.text:
             parts.append(part)
-        elif (
-            hasattr(part, "inlineData")
-            and part.inlineData
-            and part.inlineData.mimeType == part_type
-        ):
+        elif hasattr(part, "inlineData") and part.inlineData and part.inlineData.mimeType == part_type:
             parts.append(part)
         # Skip parts that don't match the requested type
     return parts
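
Note: with get_parts_from_response gone, get_parts_by_type is the single response filter. Illustrative calls (the MIME string is an example, not taken from this diff):

    text_parts = get_parts_by_type(response, "text")       # parts with a non-empty .text
    png_parts = get_parts_by_type(response, "image/png")   # matched against part.inlineData.mimeType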
@@ -210,11 +135,11 @@ def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Te
         returned_image = bytesio_to_image_tensor(BytesIO(image_data))
         image_tensors.append(returned_image)
     if len(image_tensors) == 0:
-        return torch.zeros((1,1024,1024,4))
+        return torch.zeros((1, 1024, 1024, 4))
     return torch.cat(image_tensors, dim=0)


-class GeminiNode(ComfyNodeABC):
+class GeminiNode(IO.ComfyNode):
     """
     Node to generate text responses from a Gemini model.
@@ -225,96 +150,79 @@ class GeminiNode(ComfyNodeABC):
     """

     @classmethod
-    def INPUT_TYPES(cls) -> InputTypeDict:
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Text inputs to the model, used to generate a response. You can include detailed instructions, questions, or context for the model.",
-                    },
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="GeminiNode",
+            display_name="Google Gemini",
+            category="api node/text/Gemini",
+            description="Generate text responses with Google's Gemini AI model. "
+            "You can provide multiple types of inputs (text, images, audio, video) "
+            "as context for generating more relevant and meaningful responses.",
+            inputs=[
+                IO.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Text inputs to the model, used to generate a response. "
+                    "You can include detailed instructions, questions, or context for the model.",
                 ),
-                "model": (
-                    IO.COMBO,
-                    {
-                        "tooltip": "The Gemini model to use for generating responses.",
-                        "options": [model.value for model in GeminiModel],
-                        "default": GeminiModel.gemini_2_5_pro.value,
-                    },
+                IO.Combo.Input(
+                    "model",
+                    options=GeminiModel,
+                    default=GeminiModel.gemini_2_5_pro,
+                    tooltip="The Gemini model to use for generating responses.",
                 ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 42,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.",
-                    },
+                IO.Int.Input(
+                    "seed",
+                    default=42,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="When seed is fixed to a specific value, the model makes a best effort to provide "
+                    "the same response for repeated requests. Deterministic output isn't guaranteed. "
+                    "Also, changing the model or parameter settings, such as the temperature, "
+                    "can cause variations in the response even when you use the same seed value. "
+                    "By default, a random seed value is used.",
                 ),
-            },
-            "optional": {
-                "images": (
-                    IO.IMAGE,
-                    {
-                        "default": None,
-                        "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.",
-                    },
+                IO.Image.Input(
+                    "images",
+                    optional=True,
+                    tooltip="Optional image(s) to use as context for the model. "
+                    "To include multiple images, you can use the Batch Images node.",
                 ),
-                "audio": (
-                    IO.AUDIO,
-                    {
-                        "tooltip": "Optional audio to use as context for the model.",
-                        "default": None,
-                    },
+                IO.Audio.Input(
+                    "audio",
+                    optional=True,
+                    tooltip="Optional audio to use as context for the model.",
                 ),
-                "video": (
-                    IO.VIDEO,
-                    {
-                        "tooltip": "Optional video to use as context for the model.",
-                        "default": None,
-                    },
+                IO.Video.Input(
+                    "video",
+                    optional=True,
+                    tooltip="Optional video to use as context for the model.",
                 ),
-                "files": (
-                    "GEMINI_INPUT_FILES",
-                    {
-                        "default": None,
-                        "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node.",
-                    },
+                IO.Custom("GEMINI_INPUT_FILES").Input(
+                    "files",
+                    optional=True,
+                    tooltip="Optional file(s) to use as context for the model. "
+                    "Accepts inputs from the Gemini Generate Content Input Files node.",
                 ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    DESCRIPTION = "Generate text responses with Google's Gemini AI model. You can provide multiple types of inputs (text, images, audio, video) as context for generating more relevant and meaningful responses."
-    RETURN_TYPES = ("STRING",)
-    FUNCTION = "api_call"
-    CATEGORY = "api node/text/Gemini"
-    API_NODE = True
-
-    def create_video_parts(self, video_input: IO.VIDEO, **kwargs) -> list[GeminiPart]:
-        """
-        Convert video input to Gemini API compatible parts.
-
-        Args:
-            video_input: Video tensor from ComfyUI.
-            **kwargs: Additional arguments to pass to the conversion function.
-
-        Returns:
-            List of GeminiPart objects containing the encoded video.
-        """
-        base_64_string = video_to_base64_string(
-            video_input,
-            container_format=VideoContainer.MP4,
-            codec=VideoCodec.H264
+            ],
+            outputs=[
+                IO.String.Output(),
+            ],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
         )
+
+    @classmethod
+    def create_video_parts(cls, video_input: Input.Video) -> list[GeminiPart]:
+        """Convert video input to Gemini API compatible parts."""
+        base_64_string = video_to_base64_string(video_input, container_format=VideoContainer.MP4, codec=VideoCodec.H264)
         return [
             GeminiPart(
                 inlineData=GeminiInlineData(
@@ -324,7 +232,8 @@ class GeminiNode(ComfyNodeABC):
             )
         ]

-    def create_audio_parts(self, audio_input: IO.AUDIO) -> list[GeminiPart]:
+    @classmethod
+    def create_audio_parts(cls, audio_input: Input.Audio) -> list[GeminiPart]:
         """
         Convert audio input to Gemini API compatible parts.
@@ -337,10 +246,10 @@
         audio_parts: list[GeminiPart] = []
         for batch_index in range(audio_input["waveform"].shape[0]):
             # Recreate an IO.AUDIO object for the given batch dimension index
-            audio_at_index = {
-                "waveform": audio_input["waveform"][batch_index].unsqueeze(0),
-                "sample_rate": audio_input["sample_rate"],
-            }
+            audio_at_index = Input.Audio(
+                waveform=audio_input["waveform"][batch_index].unsqueeze(0),
+                sample_rate=audio_input["sample_rate"],
+            )
             # Convert to MP3 format for compatibility with Gemini API
             audio_bytes = audio_to_base64_string(
                 audio_at_index,
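
Note: the bare dict becomes an Input.Audio. Since the loop above still indexes audio_input["waveform"], Input.Audio is evidently dict-compatible (e.g. a TypedDict); a sketch under that assumption:

    audio = Input.Audio(waveform=waveform, sample_rate=44100)  # waveform: torch.Tensor, illustrative
    audio["waveform"]  # same tensor, accessed dict-style as in the code above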
@@ -357,38 +266,38 @@
             )
         return audio_parts

-    async def api_call(
-        self,
+    @classmethod
+    async def execute(
+        cls,
         prompt: str,
-        model: GeminiModel,
-        images: Optional[IO.IMAGE] = None,
-        audio: Optional[IO.AUDIO] = None,
-        video: Optional[IO.VIDEO] = None,
+        model: str,
+        seed: int,
+        images: Optional[torch.Tensor] = None,
+        audio: Optional[Input.Audio] = None,
+        video: Optional[Input.Video] = None,
         files: Optional[list[GeminiPart]] = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ) -> tuple[str]:
-        # Validate inputs
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=False)

         # Create parts list with text prompt as the first part
-        parts: list[GeminiPart] = [create_text_part(prompt)]
+        parts: list[GeminiPart] = [GeminiPart(text=prompt)]

         # Add other modal parts
         if images is not None:
             image_parts = create_image_parts(images)
             parts.extend(image_parts)
         if audio is not None:
-            parts.extend(self.create_audio_parts(audio))
+            parts.extend(cls.create_audio_parts(audio))
         if video is not None:
-            parts.extend(self.create_video_parts(video))
+            parts.extend(cls.create_video_parts(video))
         if files is not None:
             parts.extend(files)

         # Create response
-        response = await SynchronousOperation(
-            endpoint=get_gemini_endpoint(model),
-            request=GeminiGenerateContentRequest(
+        response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"),
+            data=GeminiGenerateContentRequest(
                 contents=[
                     GeminiContent(
                         role="user",
@@ -396,15 +305,15 @@ class GeminiNode(ComfyNodeABC):
                     )
                 ]
             ),
-            auth_kwargs=kwargs,
-        ).execute()
+            response_model=GeminiGenerateContentResponse,
+        )

         # Get result output
         output_text = get_text_from_response(response)
-        if unique_id and output_text:
+        if output_text:
             # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button.
             render_spec = {
-                "node_id": unique_id,
+                "node_id": cls.hidden.unique_id,
                 "component": "ChatHistoryWidget",
                 "props": {
                     "history": json.dumps(
@@ -424,10 +333,10 @@ class GeminiNode(ComfyNodeABC):
                 render_spec,
             )

-        return (output_text or "Empty response from Gemini model...",)
+        return IO.NodeOutput(output_text or "Empty response from Gemini model...")


-class GeminiInputFiles(ComfyNodeABC):
+class GeminiInputFiles(IO.ComfyNode):
     """
     Loads and formats input files for use with the Gemini API.
@@ -438,7 +347,7 @@ class GeminiInputFiles(ComfyNodeABC):
     """

     @classmethod
-    def INPUT_TYPES(cls) -> InputTypeDict:
+    def define_schema(cls):
         """
         For details about the supported file input types, see:
         https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference
@@ -453,39 +362,37 @@
         ]
         input_files = sorted(input_files, key=lambda x: x.name)
         input_files = [f.name for f in input_files]
-        return {
-            "required": {
-                "file": (
-                    IO.COMBO,
-                    {
-                        "tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.",
-                        "options": input_files,
-                        "default": input_files[0] if input_files else None,
-                    },
+        return IO.Schema(
+            node_id="GeminiInputFiles",
+            display_name="Gemini Input Files",
+            category="api node/text/Gemini",
+            description="Loads and prepares input files to include as inputs for Gemini LLM nodes. "
+            "The files will be read by the Gemini model when generating a response. "
+            "The contents of the text file count toward the token limit. "
+            "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
+            inputs=[
+                IO.Combo.Input(
+                    "file",
+                    options=input_files,
+                    default=input_files[0] if input_files else None,
+                    tooltip="Input files to include as context for the model. "
+                    "Only accepts text (.txt) and PDF (.pdf) files for now.",
                 ),
-            },
-            "optional": {
-                "GEMINI_INPUT_FILES": (
+                IO.Custom("GEMINI_INPUT_FILES").Input(
                     "GEMINI_INPUT_FILES",
-                    {
-                        "tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.",
-                        "default": None,
-                    },
+                    optional=True,
+                    tooltip="An optional additional file(s) to batch together with the file loaded from this node. "
+                    "Allows chaining of input files so that a single message can include multiple input files.",
                 ),
-            },
-        }
-
-    DESCRIPTION = "Loads and prepares input files to include as inputs for Gemini LLM nodes. The files will be read by the Gemini model when generating a response. The contents of the text file count toward the token limit. 🛈 TIP: Can be chained together with other Gemini Input File nodes."
-    RETURN_TYPES = ("GEMINI_INPUT_FILES",)
-    FUNCTION = "prepare_files"
-    CATEGORY = "api node/text/Gemini"
-
-    def create_file_part(self, file_path: str) -> GeminiPart:
-        mime_type = (
-            GeminiMimeType.application_pdf
-            if file_path.endswith(".pdf")
-            else GeminiMimeType.text_plain
+            ],
+            outputs=[
+                IO.Custom("GEMINI_INPUT_FILES").Output(),
+            ],
         )
+
+    @classmethod
+    def create_file_part(cls, file_path: str) -> GeminiPart:
+        mime_type = GeminiMimeType.application_pdf if file_path.endswith(".pdf") else GeminiMimeType.text_plain
         # Use base64 string directly, not the data URI
         with open(file_path, "rb") as f:
             file_content = f.read()
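
Note: create_file_part is now a classmethod, so it can be used without instantiating the node. A hypothetical call (the path is illustrative):

    part = GeminiInputFiles.create_file_part("/tmp/report.pdf")
    # part.inlineData.mimeType == GeminiMimeType.application_pdf
    # part.inlineData.data is the raw base64 payload (no "data:" URI prefix)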
@@ -498,120 +405,95 @@ class GeminiInputFiles(ComfyNodeABC):
             )
         )

-    def prepare_files(
-        self, file: str, GEMINI_INPUT_FILES: list[GeminiPart] = []
-    ) -> tuple[list[GeminiPart]]:
-        """
-        Loads and formats input files for Gemini API.
-        """
-        file_path = folder_paths.get_annotated_filepath(file)
-        input_file_content = self.create_file_part(file_path)
-        files = [input_file_content] + GEMINI_INPUT_FILES
-        return (files,)
-
-
-class GeminiImage(ComfyNodeABC):
-    """
-    Node to generate text and image responses from a Gemini model.
-
-    This node allows users to interact with Google's Gemini AI models, providing
-    multimodal inputs (text, images, files) to generate coherent
-    text and image responses. The node works with the latest Gemini models, handling the
-    API communication and response parsing.
-    """
-
     @classmethod
-    def INPUT_TYPES(cls) -> InputTypeDict:
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Text prompt for generation",
-                    },
-                ),
-                "model": (
-                    IO.COMBO,
-                    {
-                        "tooltip": "The Gemini model to use for generating responses.",
-                        "options": [model.value for model in GeminiImageModel],
-                        "default": GeminiImageModel.gemini_2_5_flash_image.value,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 42,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.",
-                    },
-                ),
-            },
-            "optional": {
-                "images": (
-                    IO.IMAGE,
-                    {
-                        "default": None,
-                        "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.",
-                    },
-                ),
-                "files": (
-                    "GEMINI_INPUT_FILES",
-                    {
-                        "default": None,
-                        "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node.",
-                    },
-                ),
-                # TODO: later we can add this parameter later
-                # "n": (
-                #     IO.INT,
-                #     {
-                #         "default": 1,
-                #         "min": 1,
-                #         "max": 8,
-                #         "step": 1,
-                #         "display": "number",
-                #         "tooltip": "How many images to generate",
-                #     },
-                # ),
-                "aspect_ratio": (
-                    IO.COMBO,
-                    {
-                        "tooltip": "Defaults to matching the output image size to that of your input image, or otherwise generates 1:1 squares.",
-                        "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
-                        "default": "auto",
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    RETURN_TYPES = (IO.IMAGE, IO.STRING)
-    FUNCTION = "api_call"
-    CATEGORY = "api node/image/Gemini"
-    DESCRIPTION = "Edit images synchronously via Google API."
-    API_NODE = True
-
-    async def api_call(
-        self,
+    def execute(cls, file: str, GEMINI_INPUT_FILES: Optional[list[GeminiPart]] = None) -> IO.NodeOutput:
+        """Loads and formats input files for Gemini API."""
+        if GEMINI_INPUT_FILES is None:
+            GEMINI_INPUT_FILES = []
+        file_path = folder_paths.get_annotated_filepath(file)
+        input_file_content = cls.create_file_part(file_path)
+        return IO.NodeOutput([input_file_content] + GEMINI_INPUT_FILES)
+
+
+class GeminiImage(IO.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="GeminiImageNode",
+            display_name="Google Gemini Image",
+            category="api node/image/Gemini",
+            description="Edit images synchronously via Google API.",
+            inputs=[
+                IO.String.Input(
+                    "prompt",
+                    multiline=True,
+                    tooltip="Text prompt for generation",
+                    default="",
+                ),
+                IO.Combo.Input(
+                    "model",
+                    options=GeminiImageModel,
+                    default=GeminiImageModel.gemini_2_5_flash_image,
+                    tooltip="The Gemini model to use for generating responses.",
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=42,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="When seed is fixed to a specific value, the model makes a best effort to provide "
+                    "the same response for repeated requests. Deterministic output isn't guaranteed. "
+                    "Also, changing the model or parameter settings, such as the temperature, "
+                    "can cause variations in the response even when you use the same seed value. "
+                    "By default, a random seed value is used.",
+                ),
+                IO.Image.Input(
+                    "images",
+                    optional=True,
+                    tooltip="Optional image(s) to use as context for the model. "
+                    "To include multiple images, you can use the Batch Images node.",
+                ),
+                IO.Custom("GEMINI_INPUT_FILES").Input(
+                    "files",
+                    optional=True,
+                    tooltip="Optional file(s) to use as context for the model. "
+                    "Accepts inputs from the Gemini Generate Content Input Files node.",
+                ),
+                IO.Combo.Input(
+                    "aspect_ratio",
+                    options=["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
+                    default="auto",
+                    tooltip="Defaults to matching the output image size to that of your input image, "
+                    "or otherwise generates 1:1 squares.",
+                    optional=True,
+                ),
+            ],
+            outputs=[
+                IO.Image.Output(),
+                IO.String.Output(),
+            ],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
         prompt: str,
-        model: GeminiImageModel,
-        images: Optional[IO.IMAGE] = None,
+        model: str,
+        seed: int,
+        images: Optional[torch.Tensor] = None,
         files: Optional[list[GeminiPart]] = None,
-        n=1,
         aspect_ratio: str = "auto",
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
-        parts: list[GeminiPart] = [create_text_part(prompt)]
+        parts: list[GeminiPart] = [GeminiPart(text=prompt)]

         if not aspect_ratio:
             aspect_ratio = "auto"  # for backward compatability with old workflows; to-do remove this in December
@@ -623,29 +505,27 @@ class GeminiImage(ComfyNodeABC):
         if files is not None:
             parts.extend(files)

-        response = await SynchronousOperation(
-            endpoint=get_gemini_image_endpoint(model),
-            request=GeminiImageGenerateContentRequest(
+        response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"),
+            data=GeminiImageGenerateContentRequest(
                 contents=[
-                    GeminiContent(
-                        role="user",
-                        parts=parts,
-                    ),
+                    GeminiContent(role="user", parts=parts),
                 ],
                 generationConfig=GeminiImageGenerationConfig(
-                    responseModalities=["TEXT","IMAGE"],
+                    responseModalities=["TEXT", "IMAGE"],
                     imageConfig=None if aspect_ratio == "auto" else image_config,
-                )
+                ),
             ),
-            auth_kwargs=kwargs,
-        ).execute()
+            response_model=GeminiGenerateContentResponse,
+        )

         output_image = get_image_from_response(response)
         output_text = get_text_from_response(response)
-        if unique_id and output_text:
+        if output_text:
             # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button.
             render_spec = {
-                "node_id": unique_id,
+                "node_id": cls.hidden.unique_id,
                 "component": "ChatHistoryWidget",
                 "props": {
                     "history": json.dumps(
@@ -666,17 +546,18 @@
         )

         output_text = output_text or "Empty response from Gemini model..."
-        return (output_image, output_text,)
+        return IO.NodeOutput(output_image, output_text)


-NODE_CLASS_MAPPINGS = {
-    "GeminiNode": GeminiNode,
-    "GeminiImageNode": GeminiImage,
-    "GeminiInputFiles": GeminiInputFiles,
-}
+class GeminiExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
+        return [
+            GeminiNode,
+            GeminiImage,
+            GeminiInputFiles,
+        ]

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "GeminiNode": "Google Gemini",
-    "GeminiImageNode": "Google Gemini Image",
-    "GeminiInputFiles": "Gemini Input Files",
-}
+
+async def comfy_entrypoint() -> GeminiExtension:
+    return GeminiExtension()
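
Note: V3 drops the module-level NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dicts; nodes are registered through a ComfyExtension returned from comfy_entrypoint(), and display names move into each node's IO.Schema. A minimal sketch of the pattern this file now follows (MyNode/MyExtension are placeholders):

    from typing_extensions import override
    from comfy_api.latest import IO, ComfyExtension

    class MyExtension(ComfyExtension):
        @override
        async def get_node_list(self) -> list[type[IO.ComfyNode]]:
            return [MyNode]  # display_name comes from each node's define_schema()

    async def comfy_entrypoint() -> MyExtension:
        return MyExtension()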

File 3 of 4:

@@ -18,6 +18,7 @@ from .conversions import (
     tensor_to_bytesio,
     tensor_to_pil,
     trim_video,
+    video_to_base64_string,
 )
 from .download_helpers import (
     download_url_as_bytesio,
@@ -73,6 +74,7 @@ __all__ = [
     "tensor_to_bytesio",
     "tensor_to_pil",
     "trim_video",
+    "video_to_base64_string",
     # Validation utilities
     "get_number_of_images",
     "validate_aspect_ratio_closeness",

File 4 of 4:

@@ -12,6 +12,7 @@ from PIL import Image

 from comfy.utils import common_upscale
 from comfy_api.latest import Input, InputImpl
+from comfy_api.util import VideoContainer, VideoCodec

 from ._helpers import mimetype_to_extension
@@ -173,6 +174,30 @@ def audio_to_base64_string(audio: Input.Audio, container_format: str = "mp4", co
     return base64.b64encode(audio_bytes).decode("utf-8")


+def video_to_base64_string(
+    video: Input.Video,
+    container_format: VideoContainer = None,
+    codec: VideoCodec = None
+) -> str:
+    """
+    Converts a video input to a base64 string.
+
+    Args:
+        video: The video input to convert
+        container_format: Optional container format to use (defaults to video.container if available)
+        codec: Optional codec to use (defaults to video.codec if available)
+    """
+    video_bytes_io = BytesIO()
+    # Use provided format/codec if specified, otherwise use video's own if available
+    format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4)
+    codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264)
+    video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use)
+    video_bytes_io.seek(0)
+    return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8")
+
+
 def audio_ndarray_to_bytesio(
     audio_data_np: np.ndarray,
     sample_rate: int,
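
Note: a usage sketch for the relocated helper (everything except the function itself is illustrative). With no explicit arguments it falls back to the video's own container/codec, else to MP4/H.264 via the getattr defaults above:

    b64_default = video_to_base64_string(clip)
    b64_mp4 = video_to_base64_string(clip, container_format=VideoContainer.MP4, codec=VideoCodec.H264)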