Mirror of https://github.com/comfyanonymous/ComfyUI.git
Synced 2026-01-30 16:20:17 +08:00
Compare commits (2 commits): a39884aa58 ... a46eb66556
| Author | SHA1 | Date |
|---|---|---|
|  | a46eb66556 |  |
|  | 606052119c |  |
comfy_api_nodes/apis/openai.py (deleted file)
@@ -1,170 +0,0 @@
-from pydantic import BaseModel, Field
-
-
-class Datum2(BaseModel):
-    b64_json: str | None = Field(None, description="Base64 encoded image data")
-    revised_prompt: str | None = Field(None, description="Revised prompt")
-    url: str | None = Field(None, description="URL of the image")
-
-
-class InputTokensDetails(BaseModel):
-    image_tokens: int | None = Field(None)
-    text_tokens: int | None = Field(None)
-
-
-class Usage(BaseModel):
-    input_tokens: int | None = Field(None)
-    input_tokens_details: InputTokensDetails | None = Field(None)
-    output_tokens: int | None = Field(None)
-    total_tokens: int | None = Field(None)
-
-
-class OpenAIImageGenerationResponse(BaseModel):
-    data: list[Datum2] | None = Field(None)
-    usage: Usage | None = Field(None)
-
-
-class OpenAIImageEditRequest(BaseModel):
-    background: str | None = Field(None, description="Background transparency")
-    model: str = Field(...)
-    moderation: str | None = Field(None)
-    n: int | None = Field(None, description="The number of images to generate")
-    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
-    output_format: str | None = Field(None)
-    prompt: str = Field(...)
-    quality: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
-    size: str | None = Field(None, description="Size of the output image")
-
-
-class OpenAIImageGenerationRequest(BaseModel):
-    background: str | None = Field(None, description="Background transparency")
-    model: str | None = Field(None)
-    moderation: str | None = Field(None)
-    n: int | None = Field(
-        None,
-        description="The number of images to generate.",
-    )
-    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
-    output_format: str | None = Field(None)
-    prompt: str = Field(...)
-    quality: str | None = Field(None, description="The quality of the generated image")
-    size: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
-    style: str | None = Field(None, description="Style of the image (only for dall-e-3)")
-
-
-class ModelResponseProperties(BaseModel):
-    instructions: str | None = Field(None)
-    max_output_tokens: int | None = Field(None)
-    model: str | None = Field(None)
-    temperature: float | None = Field(1, description="Controls randomness in the response", ge=0.0, le=2.0)
-    top_p: float | None = Field(
-        1,
-        description="Controls diversity of the response via nucleus sampling",
-        ge=0.0,
-        le=1.0,
-    )
-    truncation: str | None = Field("disabled", description="Allowed values: 'auto' or 'disabled'")
-
-
-class ResponseProperties(BaseModel):
-    instructions: str | None = Field(None)
-    max_output_tokens: int | None = Field(None)
-    model: str | None = Field(None)
-    previous_response_id: str | None = Field(None)
-    truncation: str | None = Field("disabled", description="Allowed values: 'auto' or 'disabled'")
-
-
-class ResponseError(BaseModel):
-    code: str = Field(...)
-    message: str = Field(...)
-
-
-class OutputTokensDetails(BaseModel):
-    reasoning_tokens: int = Field(..., description="The number of reasoning tokens.")
-
-
-class CachedTokensDetails(BaseModel):
-    cached_tokens: int = Field(
-        ...,
-        description="The number of tokens that were retrieved from the cache.",
-    )
-
-
-class ResponseUsage(BaseModel):
-    input_tokens: int = Field(..., description="The number of input tokens.")
-    input_tokens_details: CachedTokensDetails = Field(...)
-    output_tokens: int = Field(..., description="The number of output tokens.")
-    output_tokens_details: OutputTokensDetails = Field(...)
-    total_tokens: int = Field(..., description="The total number of tokens used.")
-
-
-class InputTextContent(BaseModel):
-    text: str = Field(..., description="The text input to the model.")
-    type: str = Field("input_text")
-
-
-class OutputContent(BaseModel):
-    type: str = Field(..., description="The type of output content")
-    text: str | None = Field(None, description="The text content")
-    data: str | None = Field(None, description="Base64-encoded audio data")
-    transcript: str | None = Field(None, description="Transcript of the audio")
-
-
-class OutputMessage(BaseModel):
-    type: str = Field(..., description="The type of output item")
-    content: list[OutputContent] | None = Field(None, description="The content of the message")
-    role: str | None = Field(None, description="The role of the message")
-
-
-class OpenAIResponse(ModelResponseProperties, ResponseProperties):
-    created_at: float | None = Field(
-        None,
-        description="Unix timestamp (in seconds) of when this Response was created.",
-    )
-    error: ResponseError | None = Field(None)
-    id: str | None = Field(None, description="Unique identifier for this Response.")
-    object: str | None = Field(None, description="The object type of this resource - always set to `response`.")
-    output: list[OutputMessage] | None = Field(None)
-    parallel_tool_calls: bool | None = Field(True)
-    status: str | None = Field(
-        None,
-        description="One of `completed`, `failed`, `in_progress`, or `incomplete`.",
-    )
-    usage: ResponseUsage | None = Field(None)
-
-
-class InputImageContent(BaseModel):
-    detail: str = Field(..., description="One of `high`, `low`, or `auto`. Defaults to `auto`.")
-    file_id: str | None = Field(None)
-    image_url: str | None = Field(None)
-    type: str = Field(..., description="The type of the input item. Always `input_image`.")
-
-
-class InputFileContent(BaseModel):
-    file_data: str | None = Field(None)
-    file_id: str | None = Field(None)
-    filename: str | None = Field(None, description="The name of the file to be sent to the model.")
-    type: str = Field(..., description="The type of the input item. Always `input_file`.")
-
-
-class InputMessage(BaseModel):
-    content: list[InputTextContent | InputImageContent | InputFileContent] = Field(
-        ...,
-        description="A list of one or many input items to the model, containing different content types.",
-    )
-    role: str | None = Field(None)
-    type: str | None = Field(None)
-
-
-class OpenAICreateResponse(ModelResponseProperties, ResponseProperties):
-    include: str | None = Field(None)
-    input: list[InputMessage] = Field(...)
-    parallel_tool_calls: bool | None = Field(
-        True, description="Whether to allow the model to run tool calls in parallel."
-    )
-    store: bool | None = Field(
-        True,
-        description="Whether to store the generated model response for later retrieval via API.",
-    )
-    stream: bool | None = Field(False)
-    usage: ResponseUsage | None = Field(None)
comfy_api_nodes/apis/openai_api.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+from pydantic import BaseModel, Field
+
+
+class Datum2(BaseModel):
+    b64_json: str | None = Field(None, description="Base64 encoded image data")
+    revised_prompt: str | None = Field(None, description="Revised prompt")
+    url: str | None = Field(None, description="URL of the image")
+
+
+class InputTokensDetails(BaseModel):
+    image_tokens: int | None = None
+    text_tokens: int | None = None
+
+
+class Usage(BaseModel):
+    input_tokens: int | None = None
+    input_tokens_details: InputTokensDetails | None = None
+    output_tokens: int | None = None
+    total_tokens: int | None = None
+
+
+class OpenAIImageGenerationResponse(BaseModel):
+    data: list[Datum2] | None = None
+    usage: Usage | None = None
+
+
+class OpenAIImageEditRequest(BaseModel):
+    background: str | None = Field(None, description="Background transparency")
+    model: str = Field(...)
+    moderation: str | None = Field(None)
+    n: int | None = Field(None, description="The number of images to generate")
+    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
+    output_format: str | None = Field(None)
+    prompt: str = Field(...)
+    quality: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
+    size: str | None = Field(None, description="Size of the output image")
+
+
+class OpenAIImageGenerationRequest(BaseModel):
+    background: str | None = Field(None, description="Background transparency")
+    model: str | None = Field(None)
+    moderation: str | None = Field(None)
+    n: int | None = Field(
+        None,
+        description="The number of images to generate.",
+    )
+    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
+    output_format: str | None = Field(None)
+    prompt: str = Field(...)
+    quality: str | None = Field(None, description="The quality of the generated image")
+    size: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
+    style: str | None = Field(None, description="Style of the image (only for dall-e-3)")
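Note (not part of the diff): the new module's models are plain pydantic v2 classes, so a request serializes with model_dump and a response parses with model_validate. A minimal sketch assuming only the definitions above; the model name and payload here are made up for illustration:

```python
from comfy_api_nodes.apis.openai_api import (
    OpenAIImageGenerationRequest,
    OpenAIImageGenerationResponse,
)

# Build a request body; exclude_none keeps the JSON payload minimal.
req = OpenAIImageGenerationRequest(model="gpt-image-1", prompt="a red cube", n=1)
payload = req.model_dump(exclude_none=True)

# Parse a fabricated API response; every field on these models is optional.
resp = OpenAIImageGenerationResponse.model_validate(
    {"data": [{"b64_json": "aGVsbG8="}], "usage": {"total_tokens": 42}}
)
first_image_b64 = resp.data[0].b64_json if resp.data else None
```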
@@ -41,7 +41,7 @@ class Resolution(BaseModel):
     height: int = Field(...)
 
 
-class CreateVideoRequestSource(BaseModel):
+class CreateCreateVideoRequestSource(BaseModel):
     container: str = Field(...)
     size: int = Field(..., description="Size of the video file in bytes")
     duration: int = Field(..., description="Duration of the video file in seconds")
@@ -89,7 +89,7 @@ class Overrides(BaseModel):
 
 
 class CreateVideoRequest(BaseModel):
-    source: CreateVideoRequestSource = Field(...)
+    source: CreateCreateVideoRequestSource = Field(...)
     filters: list[Union[VideoFrameInterpolationFilter, VideoEnhancementFilter]] = Field(...)
     output: OutputInformationVideo = Field(...)
     overrides: Overrides = Field(Overrides(isPaidDiffusion=True))
@@ -3,7 +3,7 @@ from pydantic import BaseModel
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension, Input
-from comfy_api_nodes.apis.bfl import (
+from comfy_api_nodes.apis.bfl_api import (
     BFLFluxExpandImageRequest,
     BFLFluxFillImageRequest,
     BFLFluxKontextProGenerateRequest,
@@ -5,7 +5,7 @@ import torch
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension, Input
-from comfy_api_nodes.apis.bytedance import (
+from comfy_api_nodes.apis.bytedance_api import (
     RECOMMENDED_PRESETS,
     RECOMMENDED_PRESETS_SEEDREAM_4,
     VIDEO_TASKS_EXECUTION_TIME,
@@ -14,7 +14,7 @@ from typing_extensions import override
 
 import folder_paths
 from comfy_api.latest import IO, ComfyExtension, Input, Types
-from comfy_api_nodes.apis.gemini import (
+from comfy_api_nodes.apis.gemini_api import (
     GeminiContent,
     GeminiFileData,
     GeminiGenerateContentRequest,
@@ -49,7 +49,7 @@ from comfy_api_nodes.apis import (
     KlingCharacterEffectModelName,
     KlingSingleImageEffectModelName,
 )
-from comfy_api_nodes.apis.kling import (
+from comfy_api_nodes.apis.kling_api import (
     ImageToVideoWithAudioRequest,
     MotionControlRequest,
     OmniImageParamImage,
@@ -4,7 +4,7 @@ import torch
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension
-from comfy_api_nodes.apis.luma import (
+from comfy_api_nodes.apis.luma_api import (
     LumaAspectRatio,
     LumaCharacterRef,
     LumaConceptChain,
@@ -4,7 +4,7 @@ import torch
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension
-from comfy_api_nodes.apis.minimax import (
+from comfy_api_nodes.apis.minimax_api import (
     MinimaxFileRetrieveResponse,
     MiniMaxModel,
     MinimaxTaskResultResponse,
@@ -10,18 +10,24 @@ from typing_extensions import override
 
 import folder_paths
 from comfy_api.latest import IO, ComfyExtension, Input
-from comfy_api_nodes.apis.openai import (
+from comfy_api_nodes.apis import (
+    CreateModelResponseProperties,
+    Detail,
+    InputContent,
     InputFileContent,
     InputImageContent,
     InputMessage,
+    InputMessageContentList,
     InputTextContent,
-    ModelResponseProperties,
+    Item,
     OpenAICreateResponse,
+    OpenAIResponse,
+    OutputContent,
+)
+from comfy_api_nodes.apis.openai_api import (
     OpenAIImageEditRequest,
     OpenAIImageGenerationRequest,
     OpenAIImageGenerationResponse,
-    OpenAIResponse,
-    OutputContent,
 )
 from comfy_api_nodes.util import (
     ApiEndpoint,
@@ -260,7 +266,7 @@ class OpenAIDalle3(IO.ComfyNode):
                     "seed",
                     default=0,
                     min=0,
-                    max=2**31 - 1,
+                    max=2 ** 31 - 1,
                     step=1,
                     display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
@@ -378,7 +384,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
                     "seed",
                     default=0,
                     min=0,
-                    max=2**31 - 1,
+                    max=2 ** 31 - 1,
                     step=1,
                     display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
@@ -494,8 +500,8 @@ class OpenAIGPTImage1(IO.ComfyNode):
         files = []
         batch_size = image.shape[0]
         for i in range(batch_size):
-            single_image = image[i : i + 1]
-            scaled_image = downscale_image_tensor(single_image, total_pixels=2048 * 2048).squeeze()
+            single_image = image[i: i + 1]
+            scaled_image = downscale_image_tensor(single_image, total_pixels=2048*2048).squeeze()
 
             image_np = (scaled_image.numpy() * 255).astype(np.uint8)
             img = Image.fromarray(image_np)
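Note (illustrative, not from the diff): both variants above slice with a range, image[i : i + 1], rather than image[i], because ranged slicing keeps the batch dimension that downscale_image_tensor and the later .squeeze() expect:

```python
import torch

batch = torch.rand(4, 512, 512, 3)  # ComfyUI image batches are BHWC
dropped = batch[0]                  # shape (512, 512, 3): batch dim lost
kept = batch[0:1]                   # shape (1, 512, 512, 3): batch dim kept
assert kept.shape == (1, 512, 512, 3) and dropped.shape == (512, 512, 3)
```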
@@ -517,7 +523,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
             rgba_mask = torch.zeros(height, width, 4, device="cpu")
             rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()
 
-            scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0), total_pixels=2048 * 2048).squeeze()
+            scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0), total_pixels=2048*2048).squeeze()
 
             mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
             mask_img = Image.fromarray(mask_np)
@@ -690,23 +696,29 @@ class OpenAIChatNode(IO.ComfyNode):
         )
 
     @classmethod
-    def get_message_content_from_response(cls, response: OpenAIResponse) -> list[OutputContent]:
+    def get_message_content_from_response(
+        cls, response: OpenAIResponse
+    ) -> list[OutputContent]:
         """Extract message content from the API response."""
         for output in response.output:
-            if output.type == "message":
-                return output.content
+            if output.root.type == "message":
+                return output.root.content
         raise TypeError("No output message found in response")
 
     @classmethod
-    def get_text_from_message_content(cls, message_content: list[OutputContent]) -> str:
+    def get_text_from_message_content(
+        cls, message_content: list[OutputContent]
+    ) -> str:
         """Extract text content from message content."""
         for content_item in message_content:
-            if content_item.type == "output_text":
-                return str(content_item.text)
+            if content_item.root.type == "output_text":
+                return str(content_item.root.text)
         return "No text output found in response"
 
     @classmethod
-    def tensor_to_input_image_content(cls, image: torch.Tensor, detail_level: str = "auto") -> InputImageContent:
+    def tensor_to_input_image_content(
+        cls, image: torch.Tensor, detail_level: Detail = "auto"
+    ) -> InputImageContent:
         """Convert a tensor to an input image content object."""
         return InputImageContent(
             detail=detail_level,
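Note (not part of the diff): the switch from output.type to output.root.type reflects that the generated models in comfy_api_nodes.apis wrap union-typed items in pydantic RootModel containers, so the payload sits under .root. A standalone sketch of that pattern, not the generated code itself:

```python
from pydantic import BaseModel, RootModel


class OutputMessage(BaseModel):
    type: str
    text: str | None = None


class OutputItem(RootModel[OutputMessage]):
    """A RootModel wrapper, analogous to the generated Item/OutputContent classes."""


item = OutputItem(root=OutputMessage(type="message", text="hi"))
assert item.root.type == "message"  # access goes through .root
```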
@@ -720,9 +732,9 @@ class OpenAIChatNode(IO.ComfyNode):
         prompt: str,
         image: torch.Tensor | None = None,
         files: list[InputFileContent] | None = None,
-    ) -> list[InputTextContent | InputImageContent | InputFileContent]:
+    ) -> InputMessageContentList:
         """Create a list of input message contents from prompt and optional image."""
-        content_list: list[InputTextContent | InputImageContent | InputFileContent] = [
+        content_list: list[InputContent | InputTextContent | InputImageContent | InputFileContent] = [
             InputTextContent(text=prompt, type="input_text"),
         ]
         if image is not None:
@@ -734,9 +746,13 @@ class OpenAIChatNode(IO.ComfyNode):
                         type="input_image",
                     )
                 )
 
         if files is not None:
            content_list.extend(files)
-        return content_list
+
+        return InputMessageContentList(
+            root=content_list,
+        )
 
     @classmethod
     async def execute(
@@ -746,7 +762,7 @@ class OpenAIChatNode(IO.ComfyNode):
         model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value,
         images: torch.Tensor | None = None,
         files: list[InputFileContent] | None = None,
-        advanced_options: ModelResponseProperties | None = None,
+        advanced_options: CreateModelResponseProperties | None = None,
     ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
 
@@ -757,28 +773,36 @@ class OpenAIChatNode(IO.ComfyNode):
             response_model=OpenAIResponse,
             data=OpenAICreateResponse(
                 input=[
-                    InputMessage(
-                        content=cls.create_input_message_contents(prompt, images, files),
-                        role="user",
+                    Item(
+                        root=InputMessage(
+                            content=cls.create_input_message_contents(
+                                prompt, images, files
+                            ),
+                            role="user",
+                        )
                     ),
                 ],
                 store=True,
                 stream=False,
                 model=model,
                 previous_response_id=None,
-                **(advanced_options.model_dump(exclude_none=True) if advanced_options else {}),
+                **(
+                    advanced_options.model_dump(exclude_none=True)
+                    if advanced_options
+                    else {}
+                ),
             ),
         )
         response_id = create_response.id
 
         # Get result output
         result_response = await poll_op(
-            cls,
-            ApiEndpoint(path=f"{RESPONSES_ENDPOINT}/{response_id}"),
-            response_model=OpenAIResponse,
-            status_extractor=lambda response: response.status,
-            completed_statuses=["incomplete", "completed"],
-        )
+            cls,
+            ApiEndpoint(path=f"{RESPONSES_ENDPOINT}/{response_id}"),
+            response_model=OpenAIResponse,
+            status_extractor=lambda response: response.status,
+            completed_statuses=["incomplete", "completed"]
+        )
         return IO.NodeOutput(cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)))
 
 
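Note (not part of the diff): the execute body above follows a create-then-poll pattern: sync_op submits the request, then poll_op re-fetches the response until its status is terminal (here both "completed" and "incomplete" end the loop). A schematic of that control flow, with hypothetical stand-ins for the real helpers in comfy_api_nodes.util:

```python
import asyncio


async def create_task() -> str:
    return "resp_123"  # stand-in: pretend the create call returned a response id


async def fetch_status(task_id: str) -> str:
    return "completed"  # stand-in: pretend the status endpoint finished at once


async def poll(task_id: str, interval: float = 1.0) -> str:
    # Loop until a terminal status, mirroring completed_statuses above.
    while True:
        status = await fetch_status(task_id)
        if status in ("completed", "incomplete"):
            return status
        await asyncio.sleep(interval)


async def main() -> None:
    task_id = await create_task()
    print(await poll(task_id))


asyncio.run(main())
```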
@@ -899,7 +923,7 @@ class OpenAIChatConfig(IO.ComfyNode):
         remove depending on model choice.
         """
         return IO.NodeOutput(
-            ModelResponseProperties(
+            CreateModelResponseProperties(
                 instructions=instructions,
                 truncation=truncation,
                 max_output_tokens=max_output_tokens,
@@ -1,7 +1,7 @@
 import torch
 from typing_extensions import override
 from comfy_api.latest import IO, ComfyExtension
-from comfy_api_nodes.apis.pixverse import (
+from comfy_api_nodes.apis.pixverse_api import (
     PixverseTextVideoRequest,
     PixverseImageVideoRequest,
     PixverseTransitionVideoRequest,
@@ -8,7 +8,7 @@ from typing_extensions import override
 
 from comfy.utils import ProgressBar
 from comfy_api.latest import IO, ComfyExtension
-from comfy_api_nodes.apis.recraft import (
+from comfy_api_nodes.apis.recraft_api import (
     RecraftColor,
     RecraftColorChain,
     RecraftControls,
@@ -14,7 +14,7 @@ from typing import Optional
 from io import BytesIO
 from typing_extensions import override
 from PIL import Image
-from comfy_api_nodes.apis.rodin import (
+from comfy_api_nodes.apis.rodin_api import (
     Rodin3DGenerateRequest,
     Rodin3DGenerateResponse,
     Rodin3DCheckStatusRequest,
@@ -3,7 +3,7 @@ from typing import Optional
 from typing_extensions import override
 
 from comfy_api.latest import ComfyExtension, Input, IO
-from comfy_api_nodes.apis.stability import (
+from comfy_api_nodes.apis.stability_api import (
     StabilityUpscaleConservativeRequest,
     StabilityUpscaleCreativeRequest,
     StabilityAsyncResponse,
@@ -5,24 +5,7 @@ import aiohttp
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension, Input
-from comfy_api_nodes.apis.topaz import (
-    CreateVideoRequest,
-    CreateVideoRequestSource,
-    CreateVideoResponse,
-    ImageAsyncTaskResponse,
-    ImageDownloadResponse,
-    ImageEnhanceRequest,
-    ImageStatusResponse,
-    OutputInformationVideo,
-    Resolution,
-    VideoAcceptResponse,
-    VideoCompleteUploadRequest,
-    VideoCompleteUploadRequestPart,
-    VideoCompleteUploadResponse,
-    VideoEnhancementFilter,
-    VideoFrameInterpolationFilter,
-    VideoStatusResponse,
-)
+from comfy_api_nodes.apis import topaz_api
 from comfy_api_nodes.util import (
     ApiEndpoint,
     download_url_to_image_tensor,
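Note (not part of the diff): the Topaz nodes drop the name-by-name import in favor of importing the module as a namespace, so every model below is qualified with topaz_api. A sketch of the resulting call style; the width field is an assumption, since the diff context only shows Resolution.height:

```python
# Before: from comfy_api_nodes.apis.topaz import Resolution, ...
# After: one namespace import; members are qualified at each use site.
from comfy_api_nodes.apis import topaz_api

res = topaz_api.Resolution(width=1920, height=1080)  # width assumed
```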
@@ -170,13 +153,13 @@ class TopazImageEnhance(IO.ComfyNode):
         if get_number_of_images(image) != 1:
             raise ValueError("Only one input image is supported.")
         download_url = await upload_images_to_comfyapi(
-            cls, image, max_images=1, mime_type="image/png", total_pixels=4096 * 4096
+            cls, image, max_images=1, mime_type="image/png", total_pixels=4096*4096
         )
         initial_response = await sync_op(
             cls,
             ApiEndpoint(path="/proxy/topaz/image/v1/enhance-gen/async", method="POST"),
-            response_model=ImageAsyncTaskResponse,
-            data=ImageEnhanceRequest(
+            response_model=topaz_api.ImageAsyncTaskResponse,
+            data=topaz_api.ImageEnhanceRequest(
                 model=model,
                 prompt=prompt,
                 subject_detection=subject_detection,
@@ -198,7 +181,7 @@ class TopazImageEnhance(IO.ComfyNode):
         await poll_op(
             cls,
             poll_endpoint=ApiEndpoint(path=f"/proxy/topaz/image/v1/status/{initial_response.process_id}"),
-            response_model=ImageStatusResponse,
+            response_model=topaz_api.ImageStatusResponse,
             status_extractor=lambda x: x.status,
             progress_extractor=lambda x: getattr(x, "progress", 0),
             price_extractor=lambda x: x.credits * 0.08,
@@ -210,7 +193,7 @@ class TopazImageEnhance(IO.ComfyNode):
         results = await sync_op(
             cls,
             ApiEndpoint(path=f"/proxy/topaz/image/v1/download/{initial_response.process_id}"),
-            response_model=ImageDownloadResponse,
+            response_model=topaz_api.ImageDownloadResponse,
             monitor_progress=False,
         )
         return IO.NodeOutput(await download_url_to_image_tensor(results.download_url))
@@ -348,7 +331,7 @@ class TopazVideoEnhance(IO.ComfyNode):
         if target_height % 2 != 0:
             target_height += 1
         filters.append(
-            VideoEnhancementFilter(
+            topaz_api.VideoEnhancementFilter(
                 model=UPSCALER_MODELS_MAP[upscaler_model],
                 creativity=(upscaler_creativity if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None),
                 isOptimizedMode=(True if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None),
@@ -357,7 +340,7 @@ class TopazVideoEnhance(IO.ComfyNode):
         if interpolation_enabled:
             target_frame_rate = interpolation_frame_rate
             filters.append(
-                VideoFrameInterpolationFilter(
+                topaz_api.VideoFrameInterpolationFilter(
                     model=interpolation_model,
                     slowmo=interpolation_slowmo,
                     fps=interpolation_frame_rate,
@@ -368,19 +351,19 @@ class TopazVideoEnhance(IO.ComfyNode):
         initial_res = await sync_op(
             cls,
             ApiEndpoint(path="/proxy/topaz/video/", method="POST"),
-            response_model=CreateVideoResponse,
-            data=CreateVideoRequest(
-                source=CreateVideoRequestSource(
+            response_model=topaz_api.CreateVideoResponse,
+            data=topaz_api.CreateVideoRequest(
+                source=topaz_api.CreateCreateVideoRequestSource(
                     container="mp4",
                     size=get_fs_object_size(src_video_stream),
                     duration=int(duration_sec),
                     frameCount=video.get_frame_count(),
                     frameRate=src_frame_rate,
-                    resolution=Resolution(width=src_width, height=src_height),
+                    resolution=topaz_api.Resolution(width=src_width, height=src_height),
                 ),
                 filters=filters,
-                output=OutputInformationVideo(
-                    resolution=Resolution(width=target_width, height=target_height),
+                output=topaz_api.OutputInformationVideo(
+                    resolution=topaz_api.Resolution(width=target_width, height=target_height),
                     frameRate=target_frame_rate,
                     audioCodec="AAC",
                     audioTransfer="Copy",
@@ -396,7 +379,7 @@ class TopazVideoEnhance(IO.ComfyNode):
                 path=f"/proxy/topaz/video/{initial_res.requestId}/accept",
                 method="PATCH",
             ),
-            response_model=VideoAcceptResponse,
+            response_model=topaz_api.VideoAcceptResponse,
             wait_label="Preparing upload",
             final_label_on_success="Upload started",
         )
@@ -419,10 +402,10 @@ class TopazVideoEnhance(IO.ComfyNode):
                 path=f"/proxy/topaz/video/{initial_res.requestId}/complete-upload",
                 method="PATCH",
             ),
-            response_model=VideoCompleteUploadResponse,
-            data=VideoCompleteUploadRequest(
+            response_model=topaz_api.VideoCompleteUploadResponse,
+            data=topaz_api.VideoCompleteUploadRequest(
                 uploadResults=[
-                    VideoCompleteUploadRequestPart(
+                    topaz_api.VideoCompleteUploadRequestPart(
                         partNum=1,
                         eTag=upload_etag,
                     ),
@@ -434,7 +417,7 @@ class TopazVideoEnhance(IO.ComfyNode):
         final_response = await poll_op(
             cls,
             ApiEndpoint(path=f"/proxy/topaz/video/{initial_res.requestId}/status"),
-            response_model=VideoStatusResponse,
+            response_model=topaz_api.VideoStatusResponse,
             status_extractor=lambda x: x.status,
             progress_extractor=lambda x: getattr(x, "progress", 0),
             price_extractor=lambda x: (x.estimates.cost[0] * 0.08 if x.estimates and x.estimates.cost[0] else None),
@@ -5,7 +5,7 @@ import torch
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension
-from comfy_api_nodes.apis.tripo import (
+from comfy_api_nodes.apis.tripo_api import (
     TripoAnimateRetargetRequest,
     TripoAnimateRigRequest,
     TripoConvertModelRequest,
@@ -4,7 +4,7 @@ from io import BytesIO
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
-from comfy_api_nodes.apis.veo import (
+from comfy_api_nodes.apis.veo_api import (
     VeoGenVidPollRequest,
     VeoGenVidPollResponse,
     VeoGenVidRequest,