Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-31 17:21:03 +08:00)
api-nodes: use new custom endpoint for Nano Banana (#11311)
Some checks are pending
Python Linting / Run Ruff (push) Waiting to run
Python Linting / Run Pylint (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Waiting to run
Execution Tests / test (macos-latest) (push) Waiting to run
Execution Tests / test (ubuntu-latest) (push) Waiting to run
Execution Tests / test (windows-latest) (push) Waiting to run
Test server launches without errors / test (push) Waiting to run
Unit Tests / test (macos-latest) (push) Waiting to run
Unit Tests / test (ubuntu-latest) (push) Waiting to run
Unit Tests / test (windows-2022) (push) Waiting to run
This commit is contained in:
Parent: 33aa808713
Commit: f4f44bb807
@@ -133,6 +133,7 @@ class GeminiImageGenerateContentRequest(BaseModel):
     systemInstruction: GeminiSystemInstructionContent | None = Field(None)
     tools: list[GeminiTool] | None = Field(None)
     videoMetadata: GeminiVideoMetadata | None = Field(None)
+    uploadImagesToStorage: bool = Field(True)
 
 
 class GeminiGenerateContentRequest(BaseModel):
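The first hunk adds an uploadImagesToStorage flag to the request model. A minimal sketch of the idea (not the actual ComfyUI model), using plain dict stand-ins for the Gemini sub-models so it runs on its own, pydantic v2 assumed:

# Trimmed-down request model showing a camelCase boolean flag that
# defaults to True, mirroring the uploadImagesToStorage addition.
from pydantic import BaseModel, Field


class ImageGenerateContentRequestSketch(BaseModel):
    # Stand-ins for the optional fields shown in the hunk; the real model
    # uses GeminiSystemInstructionContent, GeminiTool, GeminiVideoMetadata.
    systemInstruction: dict | None = Field(None)
    tools: list[dict] | None = Field(None)
    videoMetadata: dict | None = Field(None)
    # New flag from this commit: callers that never set it keep the default.
    uploadImagesToStorage: bool = Field(True)


req = ImageGenerateContentRequestSketch()
print(req.uploadImagesToStorage)          # True
print(req.model_dump(exclude_none=True))  # {'uploadImagesToStorage': True}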
@@ -34,6 +34,7 @@ from comfy_api_nodes.util import (
     ApiEndpoint,
     audio_to_base64_string,
     bytesio_to_image_tensor,
+    download_url_to_image_tensor,
     get_number_of_images,
     sync_op,
     tensor_to_base64_string,
@@ -141,9 +142,11 @@ def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Litera
         )
     parts = []
     for part in response.candidates[0].content.parts:
-        if part_type == "text" and hasattr(part, "text") and part.text:
+        if part_type == "text" and part.text:
             parts.append(part)
-        elif hasattr(part, "inlineData") and part.inlineData and part.inlineData.mimeType == part_type:
+        elif part.inlineData and part.inlineData.mimeType == part_type:
+            parts.append(part)
+        elif part.fileData and part.fileData.mimeType == part_type:
             parts.append(part)
         # Skip parts that don't match the requested type
     return parts
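With the change above, a part now matches the requested mime type through either inlineData or fileData. A self-contained sketch of that three-way branch, using stand-in dataclasses rather than the real Gemini response models:

from dataclasses import dataclass


@dataclass
class Blob:
    mimeType: str
    data: str = ""


@dataclass
class FileRef:
    mimeType: str
    fileUri: str = ""


@dataclass
class Part:
    text: str | None = None
    inlineData: Blob | None = None
    fileData: FileRef | None = None


def filter_parts(parts: list[Part], part_type: str) -> list[Part]:
    matched = []
    for part in parts:
        if part_type == "text" and part.text:
            matched.append(part)
        elif part.inlineData and part.inlineData.mimeType == part_type:
            matched.append(part)
        elif part.fileData and part.fileData.mimeType == part_type:
            matched.append(part)
        # Parts that do not match the requested type are skipped.
    return matched


parts = [
    Part(text="caption"),
    Part(inlineData=Blob(mimeType="image/png", data="...base64...")),
    Part(fileData=FileRef(mimeType="image/png", fileUri="https://example.com/img.png")),
]
print(len(filter_parts(parts, "image/png")))  # 2: one inlineData part, one fileData part
print(len(filter_parts(parts, "text")))       # 1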
@@ -163,12 +166,15 @@ def get_text_from_response(response: GeminiGenerateContentResponse) -> str:
     return "\n".join([part.text for part in parts])
 
 
-def get_image_from_response(response: GeminiGenerateContentResponse) -> Input.Image:
+async def get_image_from_response(response: GeminiGenerateContentResponse) -> Input.Image:
     image_tensors: list[Input.Image] = []
     parts = get_parts_by_type(response, "image/png")
     for part in parts:
-        image_data = base64.b64decode(part.inlineData.data)
-        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
+        if part.inlineData:
+            image_data = base64.b64decode(part.inlineData.data)
+            returned_image = bytesio_to_image_tensor(BytesIO(image_data))
+        else:
+            returned_image = await download_url_to_image_tensor(part.fileData.fileUri)
         image_tensors.append(returned_image)
     if len(image_tensors) == 0:
         return torch.zeros((1, 1024, 1024, 4))
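get_image_from_response becomes a coroutine because fileData parts only carry a URI that has to be downloaded, while inlineData parts are base64 payloads that can be decoded locally. A runnable sketch of that branch, with a dummy downloader standing in for ComfyUI's download_url_to_image_tensor and raw bytes standing in for image tensors:

import asyncio
import base64
from io import BytesIO


async def fake_download(uri: str) -> bytes:
    # Placeholder for an async URL -> image fetch (an assumption, not the
    # real helper); here it just returns dummy bytes.
    return b"downloaded:" + uri.encode()


async def image_bytes_from_part(inline_b64: str | None, file_uri: str | None) -> bytes:
    if inline_b64:
        # Inline data arrives base64-encoded in the response body.
        return BytesIO(base64.b64decode(inline_b64)).read()
    # Otherwise the response only carries a URI and the image must be fetched.
    return await fake_download(file_uri)


async def main():
    print(await image_bytes_from_part(base64.b64encode(b"png-bytes").decode(), None))
    print(await image_bytes_from_part(None, "https://example.com/out.png"))


asyncio.run(main())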
@@ -596,7 +602,7 @@ class GeminiImage(IO.ComfyNode):
 
         response = await sync_op(
             cls,
-            endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"),
+            ApiEndpoint(path=f"/proxy/vertexai/gemini/{model}", method="POST"),
             data=GeminiImageGenerateContentRequest(
                 contents=[
                     GeminiContent(role=GeminiRole.user, parts=parts),
@@ -610,7 +616,7 @@ class GeminiImage(IO.ComfyNode):
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
+        return IO.NodeOutput(await get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiImage2(IO.ComfyNode):
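Both GeminiImage and GeminiImage2 (the two hunks below repeat the same edit) switch from a path derived from GEMINI_BASE_ENDPOINT to the fixed /proxy/vertexai/gemini/{model} prefix, and both now await the image helper inside their already-async execution path. A trivial sketch of the path change; the model id below is a placeholder and GEMINI_BASE_ENDPOINT's value is not shown in this diff:

def old_path(base_endpoint: str, model: str) -> str:
    # Previous behaviour: path built from the generic Gemini base endpoint.
    return f"{base_endpoint}/{model}"


def new_path(model: str) -> str:
    # New behaviour: custom Vertex AI proxy endpoint used by the image nodes.
    return f"/proxy/vertexai/gemini/{model}"


model = "some-gemini-image-model"  # placeholder, not a real model id
print(old_path("<GEMINI_BASE_ENDPOINT>", model))
print(new_path(model))  # /proxy/vertexai/gemini/some-gemini-image-model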
@@ -729,7 +735,7 @@ class GeminiImage2(IO.ComfyNode):
 
         response = await sync_op(
             cls,
-            ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"),
+            ApiEndpoint(path=f"/proxy/vertexai/gemini/{model}", method="POST"),
             data=GeminiImageGenerateContentRequest(
                 contents=[
                     GeminiContent(role=GeminiRole.user, parts=parts),
@@ -743,7 +749,7 @@ class GeminiImage2(IO.ComfyNode):
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
+        return IO.NodeOutput(await get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiExtension(ComfyExtension):