mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-12-18 10:33:01 +08:00
Some checks are pending
Python Linting / Run Ruff (push) Waiting to run
Python Linting / Run Pylint (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Waiting to run
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Waiting to run
Execution Tests / test (macos-latest) (push) Waiting to run
Execution Tests / test (ubuntu-latest) (push) Waiting to run
Execution Tests / test (windows-latest) (push) Waiting to run
Test server launches without errors / test (push) Waiting to run
Unit Tests / test (macos-latest) (push) Waiting to run
Unit Tests / test (ubuntu-latest) (push) Waiting to run
Unit Tests / test (windows-2022) (push) Waiting to run
53 lines
2.1 KiB
Python
from pydantic import BaseModel, Field
|
|
|
|
|
|
class Datum2(BaseModel):
    """One image entry in an OpenAI image API response.

    The API may deliver the image either inline (``b64_json``) or by
    reference (``url``); both are optional here.
    """

    # Inline image payload, base64-encoded, when returned.
    b64_json: str | None = Field(default=None, description="Base64 encoded image data")
    # Prompt text after any server-side revision, when returned.
    revised_prompt: str | None = Field(default=None, description="Revised prompt")
    # Hosted location of the image, when returned by URL.
    url: str | None = Field(default=None, description="URL of the image")
|
|
|
|
|
|
class InputTokensDetails(BaseModel):
    """Per-modality breakdown of input token usage."""

    # Token count attributed to image input (presumably; field name from the API).
    image_tokens: int | None = Field(default=None)
    # Token count attributed to text input.
    text_tokens: int | None = Field(default=None)
|
|
|
|
|
|
class Usage(BaseModel):
    """Token accounting attached to an image API response."""

    # Total tokens consumed by the request input.
    input_tokens: int | None = Field(default=None)
    # Optional per-modality breakdown of the input tokens.
    input_tokens_details: InputTokensDetails | None = Field(default=None)
    # Tokens produced in the response.
    output_tokens: int | None = Field(default=None)
    # Sum of input and output tokens, as reported by the API.
    total_tokens: int | None = Field(default=None)
|
|
|
|
|
|
class OpenAIImageGenerationResponse(BaseModel):
    """Top-level response envelope for an image generation call."""

    # Generated images; each entry carries inline data and/or a URL.
    data: list[Datum2] | None = Field(default=None)
    # Token-usage accounting, when the API reports it.
    usage: Usage | None = Field(default=None)
|
|
|
|
|
|
class OpenAIImageEditRequest(BaseModel):
    """Request body for the OpenAI image *edit* endpoint.

    Only ``model`` and ``prompt`` are required; all other fields are
    optional and omitted from the request when left as ``None``.
    """

    background: str | None = Field(None, description="Background transparency")
    # Required: which image model to use for the edit.
    model: str = Field(...)
    moderation: str | None = Field(None)
    n: int | None = Field(None, description="The number of images to generate")
    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
    output_format: str | None = Field(None)
    # Required: the edit instruction for the model.
    prompt: str = Field(...)
    # Fixed: description previously duplicated the `size` description
    # ("Size of the image ..."); aligned with OpenAIImageGenerationRequest.
    quality: str | None = Field(None, description="The quality of the generated image")
    size: str | None = Field(None, description="Size of the output image")
|
|
|
|
|
|
class OpenAIImageGenerationRequest(BaseModel):
    """Request body for the OpenAI image *generation* endpoint.

    ``prompt`` is the only required field; everything else is optional
    and omitted from the request when left as ``None``.
    """

    # Background transparency setting, if supported by the model.
    background: str | None = Field(default=None, description="Background transparency")
    # Model selection; optional here, unlike the edit request.
    model: str | None = Field(default=None)
    moderation: str | None = Field(default=None)
    # How many images to produce in one call.
    n: int | None = Field(default=None, description="The number of images to generate.")
    output_compression: int | None = Field(default=None, description="Compression level for JPEG or WebP (0-100)")
    output_format: str | None = Field(default=None)
    # Required: the text prompt driving generation.
    prompt: str = Field(...)
    quality: str | None = Field(default=None, description="The quality of the generated image")
    size: str | None = Field(default=None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
    # Only honored by dall-e-3.
    style: str | None = Field(default=None, description="Style of the image (only for dall-e-3)")
|