Mirror of https://github.com/comfyanonymous/ComfyUI.git

commit 3c000c1de4
Merge branch 'master' into dr-support-pip-cm
@@ -18,7 +18,7 @@ from comfy_api_nodes.apis.client import (
     UploadResponse,
 )
 from server import PromptServer
+from comfy.cli_args import args
 
 import numpy as np
 from PIL import Image
@@ -30,7 +30,9 @@ from io import BytesIO
 import av
 
 
-async def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFromFile:
+async def download_url_to_video_output(
+    video_url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None
+) -> VideoFromFile:
     """Downloads a video from a URL and returns a `VIDEO` output.
 
     Args:
@@ -39,7 +41,7 @@ async def download_url_to_video_output(video_url: str, timeout: int = None) -> V
     Returns:
         A Comfy node `VIDEO` output.
     """
-    video_io = await download_url_to_bytesio(video_url, timeout)
+    video_io = await download_url_to_bytesio(video_url, timeout, auth_kwargs=auth_kwargs)
     if video_io is None:
         error_msg = f"Failed to download video from {video_url}"
         logging.error(error_msg)
@@ -164,7 +166,9 @@ def mimetype_to_extension(mime_type: str) -> str:
     return mime_type.split("/")[-1].lower()
 
 
-async def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO:
+async def download_url_to_bytesio(
+    url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None
+) -> BytesIO:
     """Downloads content from a URL using requests and returns it as BytesIO.
 
     Args:
@@ -174,9 +178,18 @@ async def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO:
     Returns:
         BytesIO object containing the downloaded content.
     """
+    headers = {}
+    if url.startswith("/proxy/"):
+        url = str(args.comfy_api_base).rstrip("/") + url
+        auth_token = auth_kwargs.get("auth_token")
+        comfy_api_key = auth_kwargs.get("comfy_api_key")
+        if auth_token:
+            headers["Authorization"] = f"Bearer {auth_token}"
+        elif comfy_api_key:
+            headers["X-API-KEY"] = comfy_api_key
     timeout_cfg = aiohttp.ClientTimeout(total=timeout) if timeout else None
     async with aiohttp.ClientSession(timeout=timeout_cfg) as session:
-        async with session.get(url) as resp:
+        async with session.get(url, headers=headers) as resp:
             resp.raise_for_status()  # Raises HTTPError for bad responses (4XX or 5XX)
             return BytesIO(await resp.read())
 
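For context, a minimal usage sketch (not part of this commit) of the updated helper: a node that holds the Comfy hidden auth inputs can forward them, so a relative /proxy/ URL gets args.comfy_api_base prepended and the matching auth header attached.

# Illustrative sketch only; the surrounding node code and the path are assumed,
# while download_url_to_video_output and the auth_kwargs keys come from this change.
auth = {
    "auth_token": cls.hidden.auth_token_comfy_org,
    "comfy_api_key": cls.hidden.api_key_comfy_org,
}
video = await download_url_to_video_output(
    "/proxy/some-provider/videos/<video_id>/content",  # hypothetical path; <video_id> is a placeholder
    timeout=300,                                       # assumed value
    auth_kwargs=auth,
)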
@@ -220,13 +220,16 @@ class ApiClient:
         if multipart_parser and data:
             data = multipart_parser(data)
 
-        form = aiohttp.FormData(default_to_multipart=True)
-        if data:  # regular text fields
-            for k, v in data.items():
-                if v is None:
-                    continue  # aiohttp fails to serialize "None" values
-                # aiohttp expects strings or bytes; convert enums etc.
-                form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v)
+        if isinstance(data, aiohttp.FormData):
+            form = data  # If the parser already returned a FormData, pass it through
+        else:
+            form = aiohttp.FormData(default_to_multipart=True)
+            if data:  # regular text fields
+                for k, v in data.items():
+                    if v is None:
+                        continue  # aiohttp fails to serialize "None" values
+                    # aiohttp expects strings or bytes; convert enums etc.
+                    form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v)
 
         if files:
             file_iter = files if isinstance(files, list) else files.items()
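A short sketch of why the pass-through matters, assuming a custom multipart_parser (hypothetical function, not from this commit) that already builds an aiohttp.FormData: ApiClient now uses that form as-is instead of re-serializing it as plain text fields.

import aiohttp

def example_multipart_parser(data: dict) -> aiohttp.FormData:
    # Hypothetical parser: returns a ready-made FormData, which the new
    # isinstance(data, aiohttp.FormData) branch passes through unchanged.
    form = aiohttp.FormData(default_to_multipart=True)
    for key, value in data.items():
        if value is None:
            continue  # mirror ApiClient: skip None instead of sending the string "None"
        if isinstance(value, list):
            for item in value:
                form.add_field(key, str(item))  # repeated fields rather than a JSON-encoded list
        else:
            form.add_field(key, str(value))
    return form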
@@ -35,6 +35,7 @@ from server import PromptServer
 import torch
 from io import BytesIO
 from PIL import UnidentifiedImageError
+import aiohttp
 
 
 async def handle_recraft_file_request(
@@ -82,10 +83,16 @@ async def handle_recraft_file_request(
     return all_bytesio
 
 
-def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, converted_to_check: list[list]=None, is_list=False) -> dict:
+def recraft_multipart_parser(
+    data,
+    parent_key=None,
+    formatter: callable = None,
+    converted_to_check: list[list] = None,
+    is_list: bool = False,
+    return_mode: str = "formdata"  # "dict" | "formdata"
+) -> dict | aiohttp.FormData:
     """
-    Formats data such that multipart/form-data will work with requests library
-    when both files and data are present.
+    Formats data such that multipart/form-data will work with aiohttp library when both files and data are present.
 
     The OpenAI client that Recraft uses has a bizarre way of serializing lists:
 
@@ -103,19 +110,19 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
     # Modification of a function that handled a different type of multipart parsing, big ups:
     # https://gist.github.com/kazqvaizer/4cebebe5db654a414132809f9f88067b
 
-    def handle_converted_lists(data, parent_key, lists_to_check=tuple[list]):
+    def handle_converted_lists(item, parent_key, lists_to_check=tuple[list]):
         # if list already exists exists, just extend list with data
         for check_list in lists_to_check:
             for conv_tuple in check_list:
                 if conv_tuple[0] == parent_key and isinstance(conv_tuple[1], list):
-                    conv_tuple[1].append(formatter(data))
+                    conv_tuple[1].append(formatter(item))
                     return True
         return False
 
     if converted_to_check is None:
         converted_to_check = []
 
+    effective_mode = return_mode if parent_key is None else "dict"
     if formatter is None:
         formatter = lambda v: v  # Multipart representation of value
 
@@ -145,6 +152,15 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
         else:
             converted.append((current_key, formatter(value)))
 
+    if effective_mode == "formdata":
+        fd = aiohttp.FormData()
+        for k, v in dict(converted).items():
+            if isinstance(v, list):
+                for item in v:
+                    fd.add_field(k, str(item))
+            else:
+                fd.add_field(k, str(v))
+        return fd
     return dict(converted)
 
 
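A hedged usage sketch of the new return_mode (illustrative payload; assumes recraft_multipart_parser and aiohttp are importable in the calling module):

payload = {"prompt": "a red fox", "n": 2}  # illustrative values

as_dict = recraft_multipart_parser(payload, return_mode="dict")  # dict-shaped output, as before
as_form = recraft_multipart_parser(payload)                      # aiohttp.FormData is now the default

# Either shape can be handed to ApiClient; a ready-made FormData is used as-is
# thanks to the new isinstance(data, aiohttp.FormData) check above.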
comfy_api_nodes/nodes_sora.py (new file, 175 lines)
@@ -0,0 +1,175 @@
+from typing import Optional
+from typing_extensions import override
+
+import torch
+from pydantic import BaseModel, Field
+from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api_nodes.apis.client import (
+    ApiEndpoint,
+    HttpMethod,
+    SynchronousOperation,
+    PollingOperation,
+    EmptyRequest,
+)
+from comfy_api_nodes.util.validation_utils import get_number_of_images
+
+from comfy_api_nodes.apinode_utils import (
+    download_url_to_video_output,
+    tensor_to_bytesio,
+)
+
+class Sora2GenerationRequest(BaseModel):
+    prompt: str = Field(...)
+    model: str = Field(...)
+    seconds: str = Field(...)
+    size: str = Field(...)
+
+
+class Sora2GenerationResponse(BaseModel):
+    id: str = Field(...)
+    error: Optional[dict] = Field(None)
+    status: Optional[str] = Field(None)
+
+
+class OpenAIVideoSora2(comfy_io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return comfy_io.Schema(
+            node_id="OpenAIVideoSora2",
+            display_name="OpenAI Sora - Video",
+            category="api node/video/Sora",
+            description="OpenAI video and audio generation.",
+            inputs=[
+                comfy_io.Combo.Input(
+                    "model",
+                    options=["sora-2", "sora-2-pro"],
+                    default="sora-2",
+                ),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Guiding text; may be empty if an input image is present.",
+                ),
+                comfy_io.Combo.Input(
+                    "size",
+                    options=[
+                        "720x1280",
+                        "1280x720",
+                        "1024x1792",
+                        "1792x1024",
+                    ],
+                    default="1280x720",
+                ),
+                comfy_io.Combo.Input(
+                    "duration",
+                    options=[4, 8, 12],
+                    default=8,
+                ),
+                comfy_io.Image.Input(
+                    "image",
+                    optional=True,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    step=1,
+                    display_mode=comfy_io.NumberDisplay.number,
+                    control_after_generate=True,
+                    optional=True,
+                    tooltip="Seed to determine if node should re-run; "
+                            "actual results are nondeterministic regardless of seed.",
+                ),
+            ],
+            outputs=[
+                comfy_io.Video.Output(),
+            ],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        model: str,
+        prompt: str,
+        size: str = "1280x720",
+        duration: int = 8,
+        seed: int = 0,
+        image: Optional[torch.Tensor] = None,
+    ):
+        if model == "sora-2" and size not in ("720x1280", "1280x720"):
+            raise ValueError("Invalid size for sora-2 model, only 720x1280 and 1280x720 are supported.")
+        files_input = None
+        if image is not None:
+            if get_number_of_images(image) != 1:
+                raise ValueError("Currently only one input image is supported.")
+            files_input = {"input_reference": ("image.png", tensor_to_bytesio(image), "image/png")}
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        payload = Sora2GenerationRequest(
+            model=model,
+            prompt=prompt,
+            seconds=str(duration),
+            size=size,
+        )
+        initial_operation = SynchronousOperation(
+            endpoint=ApiEndpoint(
+                path="/proxy/openai/v1/videos",
+                method=HttpMethod.POST,
+                request_model=Sora2GenerationRequest,
+                response_model=Sora2GenerationResponse
+            ),
+            request=payload,
+            files=files_input,
+            auth_kwargs=auth,
+            content_type="multipart/form-data",
+        )
+        initial_response = await initial_operation.execute()
+        if initial_response.error:
+            raise Exception(initial_response.error.message)
+
+        model_time_multiplier = 1 if model == "sora-2" else 2
+        poll_operation = PollingOperation(
+            poll_endpoint=ApiEndpoint(
+                path=f"/proxy/openai/v1/videos/{initial_response.id}",
+                method=HttpMethod.GET,
+                request_model=EmptyRequest,
+                response_model=Sora2GenerationResponse
+            ),
+            completed_statuses=["completed"],
+            failed_statuses=["failed"],
+            status_extractor=lambda x: x.status,
+            auth_kwargs=auth,
+            poll_interval=8.0,
+            max_poll_attempts=160,
+            node_id=cls.hidden.unique_id,
+            estimated_duration=45 * (duration / 4) * model_time_multiplier,
+        )
+        await poll_operation.execute()
+        return comfy_io.NodeOutput(
+            await download_url_to_video_output(
+                f"/proxy/openai/v1/videos/{initial_response.id}/content",
+                auth_kwargs=auth,
+            )
+        )
+
+
+class OpenAISoraExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            OpenAIVideoSora2,
+        ]
+
+
+async def comfy_entrypoint() -> OpenAISoraExtension:
+    return OpenAISoraExtension()
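The polling configuration above implies a rough time budget; a small arithmetic sketch (values taken from the node, the helper function is only for illustration):

poll_interval = 8.0         # seconds between status checks
max_poll_attempts = 160     # 8.0 * 160 = 1280 s, i.e. up to ~21 minutes of polling

def estimated_duration(duration_s: int, model: str) -> float:
    # Same formula as the node: ~45 s per 4 s of video, doubled for sora-2-pro.
    model_time_multiplier = 1 if model == "sora-2" else 2
    return 45 * (duration_s / 4) * model_time_multiplier

print(estimated_duration(8, "sora-2"))       # 90.0
print(estimated_duration(12, "sora-2-pro"))  # 270.0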
@@ -34,6 +34,7 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
         latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=comfy.model_management.intermediate_device())
         return io.NodeOutput({"samples": latent})
 
+    generate = execute  # TODO: remove
 
 class LTXVImgToVideo(io.ComfyNode):
     @classmethod
@@ -77,6 +78,8 @@ class LTXVImgToVideo(io.ComfyNode):
 
         return io.NodeOutput(positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask})
 
+    generate = execute  # TODO: remove
+
 
 def conditioning_get_any_value(conditioning, key, default=None):
     for t in conditioning:
@@ -264,6 +267,8 @@ class LTXVAddGuide(io.ComfyNode):
 
         return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})
 
+    generate = execute  # TODO: remove
+
 
 class LTXVCropGuides(io.ComfyNode):
     @classmethod
@@ -300,6 +305,8 @@ class LTXVCropGuides(io.ComfyNode):
 
         return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})
 
+    crop = execute  # TODO: remove
+
 
 class LTXVConditioning(io.ComfyNode):
     @classmethod
@@ -498,6 +505,7 @@ class LTXVPreprocess(io.ComfyNode):
             output_images.append(preprocess(image[i], img_compression))
         return io.NodeOutput(torch.stack(output_images))
 
+    preprocess = execute  # TODO: remove
 
 class LtxvExtension(ComfyExtension):
     @override
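The repeated generate = execute / crop = execute / preprocess = execute lines are plain class-attribute aliases that keep the old entry-point names callable; a generic sketch of the pattern (not LTXV code):

class ExampleNode:
    @classmethod
    def execute(cls, x: int) -> int:
        return x * 2

    generate = execute  # legacy name stays callable until call sites are migrated

assert ExampleNode.generate(3) == ExampleNode.execute(3) == 6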
nodes.py
@@ -2366,6 +2366,7 @@ async def init_builtin_api_nodes():
         "nodes_stability.py",
         "nodes_pika.py",
         "nodes_runway.py",
+        "nodes_sora.py",
         "nodes_tripo.py",
         "nodes_moonvalley.py",
         "nodes_rodin.py",
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.27.7
+comfyui-frontend-package==1.27.10
 comfyui-workflow-templates==0.1.93
 comfyui-embedded-docs==0.2.6
 comfyui_manager==4.0.2