Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-17 10:02:59 +08:00)

Merge branch 'master' into dr-support-pip-cm

This commit is contained in commit de357a01f8.
@@ -0,0 +1,2 @@
+..\python_embeded\python.exe -s ..\ComfyUI\main.py --windows-standalone-build --disable-api-nodes
+pause
.github/workflows/release-stable-all.yml (vendored, 4 changed lines)
@@ -18,9 +18,9 @@ jobs:
     uses: ./.github/workflows/stable-release.yml
     with:
       git_tag: ${{ inputs.git_tag }}
-      cache_tag: "cu129"
+      cache_tag: "cu130"
       python_minor: "13"
-      python_patch: "6"
+      python_patch: "9"
       rel_name: "nvidia"
       rel_extra_name: ""
       test_release: true
@@ -176,6 +176,8 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 If you have trouble extracting it, right click the file -> properties -> unblock
 
+Update your Nvidia drivers if it doesn't start.
+
 #### Alternative Downloads:
 
 [Experimental portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z)
comfy_api_nodes/nodes_ltxv.py (new file, 191 lines)
@@ -0,0 +1,191 @@
from io import BytesIO
from typing import Optional

import torch
from pydantic import BaseModel, Field
from typing_extensions import override

from comfy_api.input_impl import VideoFromFile
from comfy_api.latest import IO, ComfyExtension
from comfy_api_nodes.util import (
    ApiEndpoint,
    get_number_of_images,
    sync_op_raw,
    upload_images_to_comfyapi,
    validate_string,
)

MODELS_MAP = {
    "LTX-2 (Pro)": "ltx-2-pro",
    "LTX-2 (Fast)": "ltx-2-fast",
}


class ExecuteTaskRequest(BaseModel):
    prompt: str = Field(...)
    model: str = Field(...)
    duration: int = Field(...)
    resolution: str = Field(...)
    fps: Optional[int] = Field(25)
    generate_audio: Optional[bool] = Field(True)
    image_uri: Optional[str] = Field(None)


class TextToVideoNode(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="LtxvApiTextToVideo",
            display_name="LTXV Text To Video",
            category="api node/video/LTXV",
            description="Professional-quality videos with customizable duration and resolution.",
            inputs=[
                IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                ),
                IO.Combo.Input("duration", options=[6, 8, 10], default=8),
                IO.Combo.Input(
                    "resolution",
                    options=[
                        "1920x1080",
                        "2560x1440",
                        "3840x2160",
                    ],
                ),
                IO.Combo.Input("fps", options=[25, 50], default=25),
                IO.Boolean.Input(
                    "generate_audio",
                    default=False,
                    optional=True,
                    tooltip="When true, the generated video will include AI-generated audio matching the scene.",
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        duration: int,
        resolution: str,
        fps: int = 25,
        generate_audio: bool = False,
    ) -> IO.NodeOutput:
        validate_string(prompt, min_length=1, max_length=10000)
        response = await sync_op_raw(
            cls,
            ApiEndpoint("/proxy/ltx/v1/text-to-video", "POST"),
            data=ExecuteTaskRequest(
                prompt=prompt,
                model=MODELS_MAP[model],
                duration=duration,
                resolution=resolution,
                fps=fps,
                generate_audio=generate_audio,
            ),
            as_binary=True,
            max_retries=1,
        )
        return IO.NodeOutput(VideoFromFile(BytesIO(response)))


class ImageToVideoNode(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="LtxvApiImageToVideo",
            display_name="LTXV Image To Video",
            category="api node/video/LTXV",
            description="Professional-quality videos with customizable duration and resolution based on start image.",
            inputs=[
                IO.Image.Input("image", tooltip="First frame to be used for the video."),
                IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                ),
                IO.Combo.Input("duration", options=[6, 8, 10], default=8),
                IO.Combo.Input(
                    "resolution",
                    options=[
                        "1920x1080",
                        "2560x1440",
                        "3840x2160",
                    ],
                ),
                IO.Combo.Input("fps", options=[25, 50], default=25),
                IO.Boolean.Input(
                    "generate_audio",
                    default=False,
                    optional=True,
                    tooltip="When true, the generated video will include AI-generated audio matching the scene.",
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        image: torch.Tensor,
        model: str,
        prompt: str,
        duration: int,
        resolution: str,
        fps: int = 25,
        generate_audio: bool = False,
    ) -> IO.NodeOutput:
        validate_string(prompt, min_length=1, max_length=10000)
        if get_number_of_images(image) != 1:
            raise ValueError("Currently only one input image is supported.")
        response = await sync_op_raw(
            cls,
            ApiEndpoint("/proxy/ltx/v1/image-to-video", "POST"),
            data=ExecuteTaskRequest(
                image_uri=(await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0],
                prompt=prompt,
                model=MODELS_MAP[model],
                duration=duration,
                resolution=resolution,
                fps=fps,
                generate_audio=generate_audio,
            ),
            as_binary=True,
            max_retries=1,
        )
        return IO.NodeOutput(VideoFromFile(BytesIO(response)))


class LtxvApiExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            TextToVideoNode,
            ImageToVideoNode,
        ]


async def comfy_entrypoint() -> LtxvApiExtension:
    return LtxvApiExtension()
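For orientation, below is a minimal standalone sketch of the JSON body these nodes send to the LTX proxy endpoint. The ExecuteTaskRequest model is copied from the file above so the snippet runs on its own; the sample prompt text, the pydantic-v2 model_dump(exclude_none=True) call, and the assumption that None fields such as image_uri are omitted from the request are illustrative only, since the real serialization happens inside sync_op_raw and is not shown in this diff.

# Standalone sketch: the payload shape for POST /proxy/ltx/v1/text-to-video,
# using the same pydantic model as nodes_ltxv.py (duplicated here for isolation).
from typing import Optional

from pydantic import BaseModel, Field  # assumes pydantic v2


class ExecuteTaskRequest(BaseModel):
    prompt: str = Field(...)
    model: str = Field(...)
    duration: int = Field(...)
    resolution: str = Field(...)
    fps: Optional[int] = Field(25)
    generate_audio: Optional[bool] = Field(True)
    image_uri: Optional[str] = Field(None)


req = ExecuteTaskRequest(
    prompt="A slow pan across a foggy harbor at dawn",  # sample text, not from the diff
    model="ltx-2-pro",                                   # MODELS_MAP["LTX-2 (Pro)"]
    duration=8,
    resolution="1920x1080",
    fps=25,
    generate_audio=False,
)
# Assumption: None fields (image_uri for text-to-video) are dropped before sending.
print(req.model_dump(exclude_none=True))
# -> {'prompt': '...', 'model': 'ltx-2-pro', 'duration': 8,
#     'resolution': '1920x1080', 'fps': 25, 'generate_audio': False}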
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.66"
+__version__ = "0.3.67"
@@ -1116,7 +1116,7 @@ class PromptQueue:
         messages: List[str]
 
     def task_done(self, item_id, history_result,
-                  status: Optional['PromptQueue.ExecutionStatus']):
+                  status: Optional['PromptQueue.ExecutionStatus'], process_item=None):
         with self.mutex:
             prompt = self.currently_running.pop(item_id)
             if len(self.history) > MAXIMUM_HISTORY_SIZE:
@@ -1126,10 +1126,8 @@ class PromptQueue:
             if status is not None:
                 status_dict = copy.deepcopy(status._asdict())
 
-            # Remove sensitive data from extra_data before storing in history
-            for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
-                if sensitive_val in prompt[3]:
-                    prompt[3].pop(sensitive_val)
+            if process_item is not None:
+                prompt = process_item(prompt)
 
             self.history[prompt[1]] = {
                 "prompt": prompt,
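The net effect is that task_done no longer knows about SENSITIVE_EXTRA_DATA_KEYS; the caller supplies a process_item callback that sanitizes the queue item before it is written to history. A toy sketch follows, assuming the 6-tuple layout that server.py queues in this same commit (number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive); the dict standing in for the history store and the secret key name are illustrative only.

# Toy sketch of the new process_item hook: the callback strips the 6th tuple
# element (the sensitive dict) before the item lands in history.
history = {}

queued_item = (
    0,                                   # number
    "prompt-id-123",                     # prompt_id
    {"1": {"class_type": "KSampler"}},   # prompt graph
    {"client_id": "abc"},                # extra_data (already scrubbed by server.py)
    ["9"],                               # outputs_to_execute
    {"api_key_comfy_org": "secret"},     # sensitive (key name illustrative)
)

def remove_sensitive(prompt):
    # Mirrors main.py's lambda: drop index 5, keep everything else.
    return prompt[:5] + prompt[6:]

def task_done(item, process_item=None):
    if process_item is not None:
        item = process_item(item)
    history[item[1]] = {"prompt": item}

task_done(queued_item, process_item=remove_sensitive)
print(history["prompt-id-123"]["prompt"])  # a 5-tuple, no secrets stored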
main.py (11 changed lines)
@@ -218,14 +218,21 @@ def prompt_worker(q, server_instance):
             prompt_id = item[1]
             server_instance.last_prompt_id = prompt_id
 
-            e.execute(item[2], prompt_id, item[3], item[4])
+            sensitive = item[5]
+            extra_data = item[3].copy()
+            for k in sensitive:
+                extra_data[k] = sensitive[k]
+
+            e.execute(item[2], prompt_id, extra_data, item[4])
             need_gc = True
 
+            remove_sensitive = lambda prompt: prompt[:5] + prompt[6:]
             q.task_done(item_id,
                         e.history_result,
                         status=execution.PromptQueue.ExecutionStatus(
                             status_str='success' if e.success else 'error',
                             completed=e.success,
-                            messages=e.status_messages))
+                            messages=e.status_messages), process_item=remove_sensitive)
             if server_instance.client_id is not None:
                 server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id)
 
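In other words, the worker folds the sensitive values back into a copy of extra_data only for the duration of execution, so the item kept on the queue (and later handed to history) never carries them. A small sketch under the same assumed 6-tuple layout; the key name is borrowed from the hidden inputs used by the API nodes above and is only illustrative.

# Sketch of the worker-side merge in prompt_worker.
item = (
    0, "prompt-id-123", {"1": {"class_type": "KSampler"}},
    {"client_id": "abc"},                # extra_data as queued (scrubbed)
    ["9"],
    {"api_key_comfy_org": "secret"},     # sensitive values kept separately
)

sensitive = item[5]
extra_data = item[3].copy()              # copy: the queued dict stays clean
for k in sensitive:
    extra_data[k] = sensitive[k]

print(extra_data)  # {'client_id': 'abc', 'api_key_comfy_org': 'secret'} -> passed to e.execute
print(item[3])     # {'client_id': 'abc'} -> what the queue/history keep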
nodes.py (1 changed line)
@@ -2358,6 +2358,7 @@ async def init_builtin_api_nodes():
         "nodes_kling.py",
         "nodes_bfl.py",
         "nodes_bytedance.py",
+        "nodes_ltxv.py",
         "nodes_luma.py",
         "nodes_recraft.py",
         "nodes_pixverse.py",
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.66"
+version = "0.3.67"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
@@ -1,5 +1,5 @@
-comfyui-frontend-package==1.28.7
-comfyui-workflow-templates==0.2.2
+comfyui-frontend-package==1.28.8
+comfyui-workflow-templates==0.2.4
 comfyui-embedded-docs==0.3.0
 comfyui_manager==4.0.3b1
 torch
server.py (11 changed lines)
@@ -697,8 +697,9 @@ class PromptServer():
         async def get_queue(request):
             queue_info = {}
             current_queue = self.prompt_queue.get_current_queue_volatile()
-            queue_info['queue_running'] = current_queue[0]
-            queue_info['queue_pending'] = current_queue[1]
+            remove_sensitive = lambda queue: [x[:5] for x in queue]
+            queue_info['queue_running'] = remove_sensitive(current_queue[0])
+            queue_info['queue_pending'] = remove_sensitive(current_queue[1])
             return web.json_response(queue_info)
 
         @routes.post("/prompt")
@@ -734,7 +735,11 @@ class PromptServer():
                 extra_data["client_id"] = json_data["client_id"]
             if valid[0]:
                 outputs_to_execute = valid[2]
-                self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
+                sensitive = {}
+                for sensitive_val in execution.SENSITIVE_EXTRA_DATA_KEYS:
+                    if sensitive_val in extra_data:
+                        sensitive[sensitive_val] = extra_data.pop(sensitive_val)
+                self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive))
                 response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
                 return web.json_response(response)
             else:
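Taken together with the /queue change above, the intake path now splits secrets out of extra_data before the item is queued, and the queue endpoint only ever serializes the first five tuple fields. A minimal sketch follows; the contents of SENSITIVE_EXTRA_DATA_KEYS are not shown in this diff, so the two key names below are assumptions based on the hidden inputs used by the API nodes.

# Sketch of the intake-side split in POST /prompt and the resulting GET /queue view.
SENSITIVE_EXTRA_DATA_KEYS = ["auth_token_comfy_org", "api_key_comfy_org"]  # assumed contents

extra_data = {"client_id": "abc", "auth_token_comfy_org": "secret"}

sensitive = {}
for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
    if sensitive_val in extra_data:
        sensitive[sensitive_val] = extra_data.pop(sensitive_val)

queued = (0, "prompt-id-123", {"1": {"class_type": "KSampler"}},
          extra_data, ["9"], sensitive)

# What GET /queue reports after the change: the sensitive dict is sliced away.
remove_sensitive = lambda queue: [x[:5] for x in queue]
print(remove_sensitive([queued]))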