Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-10 06:10:50 +08:00)

commit f507bec91a (parent: 5727217c2f)

    fix issue with queue retrieval in distributed environment, fix text progress,
    fix folder paths being aggressively resolved, fix ideogram seed
@@ -50,7 +50,11 @@ def _resolve_path_with_compatibility(path: Path | str) -> PurePosixPath | Path:
     if isinstance(path, PurePosixPath) and path.is_absolute():
         return path
     if not path.is_absolute():
-        return Path.resolve(_base_path() / path)
+        base_path_to_path = _base_path() / path
+        if base_path_to_path.is_absolute():
+            return base_path_to_path
+        else:
+            return Path.resolve(_base_path() / path)
     return Path(path).resolve()
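Note on the hunk above: the new branch keeps a joined path that is already absolute instead of calling resolve() on it, which is what the commit message means by folder paths no longer being "aggressively resolved" (resolve() follows symlinks and normalizes the path). A self-contained sketch of the new behavior on a posix filesystem, with a stand-in _base_path() in place of the module's real helper:

    from pathlib import Path, PurePosixPath

    def _base_path() -> Path:
        # stand-in for the module's helper; assume the ComfyUI working directory
        return Path("/opt/comfyui")

    def resolve_with_compatibility(path: Path | str) -> PurePosixPath | Path:
        # absolute virtual posix paths pass through untouched
        if isinstance(path, PurePosixPath) and path.is_absolute():
            return path
        path = Path(path)
        if not path.is_absolute():
            base_path_to_path = _base_path() / path
            if base_path_to_path.is_absolute():
                # already absolute after joining the base: return it as-is
                # instead of resolving symlinks out of it
                return base_path_to_path
            # a still-relative join needs resolve() to become absolute
            return (_base_path() / path).resolve()
        return path.resolve()

    # a relative model path now stays under the base directory verbatim:
    assert resolve_with_compatibility("models/checkpoints") == Path("/opt/comfyui/models/checkpoints")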
@@ -44,6 +44,7 @@ from ..client.client_types import FileOutput
 from ..cmd import execution
 from ..cmd import folder_paths
 from ..component_model.abstract_prompt_queue import AbstractPromptQueue, AsyncAbstractPromptQueue
+from ..component_model.encode_text_for_progress import encode_text_for_progress
 from ..component_model.executor_types import ExecutorToClientProgress, StatusMessage, QueueInfo, ExecInfo
 from ..component_model.file_output_path import file_output_path
 from ..component_model.queue_types import QueueItem, HistoryEntry, BinaryEventTypes, TaskInvocation, ExecutionError, \
@@ -1139,11 +1140,6 @@ class PromptServer(ExecutorToClientProgress):
     def send_progress_text(
             self, text: Union[bytes, bytearray, str], node_id: str, sid=None
     ):
-        if isinstance(text, str):
-            text = text.encode("utf-8")
-        node_id_bytes = str(node_id).encode("utf-8")
-
-        # Pack the node_id length as a 4-byte unsigned integer, followed by the node_id bytes
-        message = struct.pack(">I", len(node_id_bytes)) + node_id_bytes + text
+        message = encode_text_for_progress(node_id, text)

         self.send_sync(BinaryEventTypes.TEXT, message, sid)
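The hunk above swaps the inline struct-packing for the shared encode_text_for_progress helper introduced later in this commit; the wire behavior is unchanged. A hypothetical call site, where server is assumed to be the running PromptServer instance:

    # stream a status string to the client session `sid`, tagged with the
    # id of the node that emitted it
    server.send_progress_text("decoding latents...", node_id="12", sid=sid)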
@@ -3,7 +3,7 @@ from __future__ import annotations
 import typing
 from abc import ABCMeta, abstractmethod

-from .queue_types import QueueTuple, HistoryEntry, QueueItem, Flags, ExecutionStatus, TaskInvocation
+from .queue_types import QueueTuple, HistoryEntry, QueueItem, Flags, ExecutionStatus, TaskInvocation, AbstractPromptQueueGetCurrentQueueItems


 class AbstractPromptQueue(metaclass=ABCMeta):
@@ -54,7 +54,7 @@ class AbstractPromptQueue(metaclass=ABCMeta):
         pass

     @abstractmethod
-    def get_current_queue(self) -> typing.Tuple[typing.List[QueueTuple], typing.List[QueueTuple]]:
+    def get_current_queue(self) -> AbstractPromptQueueGetCurrentQueueItems:
         """
         Gets the current state of the queue
         :return: A tuple containing (the currently running items, the items awaiting execution)
@@ -119,6 +119,13 @@ class AbstractPromptQueue(metaclass=ABCMeta):
         """
         pass

+    def get_current_queue_volatile(self) -> AbstractPromptQueueGetCurrentQueueItems:
+        """
+        A workaround to "improve performance with large number of queued prompts",
+        :return: A tuple containing (the currently running items, the items awaiting execution)
+        """
+        return self.get_current_queue()
+

 class AsyncAbstractPromptQueue(metaclass=ABCMeta):
     @abstractmethod
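A minimal sketch of how a concrete queue satisfies the retyped contract; InMemoryPromptQueue, _running, and _pending are hypothetical names, and the other abstract methods are elided:

    class InMemoryPromptQueue(AbstractPromptQueue):
        # ... other abstract methods elided ...

        def get_current_queue(self) -> AbstractPromptQueueGetCurrentQueueItems:
            # (currently running items, items awaiting execution),
            # both as lists of QueueTuple
            return list(self._running), list(self._pending)

Because get_current_queue_volatile() now has a default that delegates to get_current_queue(), an implementation (such as a distributed queue) only overrides it when it can serve a cheaper, possibly stale snapshot.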
comfy/component_model/encode_text_for_progress.py (new file, 12 lines)
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+import struct
+
+
+def encode_text_for_progress(node_id, text):
+    if isinstance(text, str):
+        text = text.encode("utf-8")
+    node_id_bytes = str(node_id).encode("utf-8")
+    # Pack the node_id length as a 4-byte unsigned integer, followed by the node_id bytes
+    message = struct.pack(">I", len(node_id_bytes)) + node_id_bytes + text
+    return message
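The new helper pins the wire format in one place: a 4-byte big-endian length prefix, the UTF-8 node id, then the text payload. A hypothetical inverse, shown only to document the framing (the real client-side decoder lives in the frontend):

    import struct

    def decode_text_for_progress(message: bytes) -> tuple[str, bytes]:
        # read the 4-byte big-endian node_id length, then split the rest
        (node_id_len,) = struct.unpack_from(">I", message, 0)
        node_id = message[4:4 + node_id_len].decode("utf-8")
        return node_id, message[4 + node_id_len:]

    assert decode_text_for_progress(encode_text_for_progress(7, "50%")) == ("7", b"50%")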
@@ -7,6 +7,7 @@ from enum import Enum
 from typing import Optional, Literal, Protocol, Union, NamedTuple, List
 from typing_extensions import NotRequired, TypedDict

+from .encode_text_for_progress import encode_text_for_progress
 from .outputs_types import OutputsDict
 from .queue_types import BinaryEventTypes
 from ..cli_args_types import Configuration
@@ -121,7 +122,9 @@ class ExecutorToClientProgress(Protocol):
         pass

     def send_progress_text(self, text: Union[bytes, bytearray, str], node_id: str, sid=None):
-        pass
+        message = encode_text_for_progress(node_id, text)
+
+        self.send_sync(BinaryEventTypes.TEXT, message, sid)

     def queue_updated(self, queue_remaining: Optional[int] = None):
         """
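With this hunk, send_progress_text is no longer a no-op stub on the protocol: any class that explicitly inherits ExecutorToClientProgress and implements send_sync gets working text progress for free. A minimal sketch, where MinimalProgress is a hypothetical name:

    class MinimalProgress(ExecutorToClientProgress):
        def send_sync(self, event, data, sid=None):
            # a real implementation would forward this to a websocket or queue
            print(event, len(data) if isinstance(data, (bytes, bytearray)) else data, sid)

    MinimalProgress().send_progress_text("sampling...", node_id="3")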
@@ -148,3 +148,6 @@ class ExecutorToClientMessage(TypedDict, total=False):
     node: str
     prompt_id: str
     output: NotRequired[str]
+
+
+AbstractPromptQueueGetCurrentQueueItems = tuple[list[QueueTuple], list[QueueTuple]]
@@ -5,7 +5,7 @@ import base64
 from asyncio import AbstractEventLoop
 from enum import Enum
 from functools import partial
-from typing import Optional, Dict, Any
+from typing import Optional, Dict, Any, Union

 from aio_pika.patterns import RPC

@@ -68,10 +68,6 @@ class DistributedExecutorToClientProgress(ExecutorToClientProgress):
         # these can gracefully expire
         pass

-    def send_progress_text(self, text: str, node_id: str = None):
-        # todo: we'll fill this out later
-        pass
-
     def send_sync(self,
                   event: SendSyncEvent,
                   data: SendSyncData,
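Deleting the stub is the actual fix for text progress in the distributed setup: DistributedExecutorToClientProgress now falls through to the protocol default shown earlier, which encodes the message and hands it to this class's send_sync for delivery. Sketch of the effect (constructor arguments elided):

    progress = DistributedExecutorToClientProgress(...)
    # previously a silent no-op; now emits a BinaryEventTypes.TEXT frame via send_sync
    progress.send_progress_text("step 3/20", node_id="5")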
@@ -31,9 +31,6 @@ RESOLUTION_ENUM = [f"RESOLUTION_{w}_{h}" for w, h in IDEOGRAM_RESOLUTIONS]
 # New enum for v3 rendering speed
 RENDERING_SPEED_ENUM = ["DEFAULT", "TURBO", "QUALITY"]


 # --- Helper Functions ---

 def to_v3_resolution(resolution: str) -> str:
     return resolution[len("RESOLUTION_"):].replace("_", "x")
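The helper simply reshapes an enum label into the v3 API's size string:

    # strip the "RESOLUTION_" prefix and join width/height with "x"
    assert to_v3_resolution("RESOLUTION_1024_1024") == "1024x1024"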
@@ -44,9 +41,6 @@ def api_key_in_env_or_workflow(api_key_from_workflow: str):
         return api_key_from_workflow
     return os.environ.get("IDEOGRAM_API_KEY", args.ideogram_api_key)


 # --- Custom Nodes ---

 class IdeogramGenerate(CustomNode):
     @classmethod
     def INPUT_TYPES(cls) -> Dict[str, Any]:
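Assuming a truthiness guard just above this hunk's visible context (an `if api_key_from_workflow:` line guarding the first return), the precedence is: workflow-supplied key, then the IDEOGRAM_API_KEY environment variable, then the args.ideogram_api_key fallback:

    os.environ["IDEOGRAM_API_KEY"] = "env-key"
    assert api_key_in_env_or_workflow("wf-key") == "wf-key"   # workflow key wins
    assert api_key_in_env_or_workflow("") == "env-key"        # falls back to env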
@@ -138,13 +132,16 @@ class IdeogramEdit(CustomNode):
     def INPUT_TYPES(cls) -> Dict[str, Any]:
         return {
             "required": {
-                "images": ("IMAGE",), "masks": ("MASK",), "prompt": ("STRING", {"multiline": True}),
+                "images": ("IMAGE",),
+                "masks": ("MASK",),
+                "prompt": ("STRING", {"multiline": True}),
                 "model": (MODELS_ENUM, {"default": MODELS_ENUM[-1]}),
             },
             "optional": {
                 "api_key": ("STRING", {"default": ""}),
                 "magic_prompt_option": (AUTO_PROMPT_ENUM, {"default": AUTO_PROMPT_ENUM[0]}),
-                "num_images": ("INT", {"default": 1, "min": 1, "max": 8}), "seed": ("INT", {"default": 0}),
+                "num_images": ("INT", {"default": 1, "min": 1, "max": 8}),
+                "seed": Seed,
                 "style_type": (STYLES_ENUM, {}),
                 # New v3 optional args
                 "rendering_speed": (RENDERING_SPEED_ENUM, {"default": "DEFAULT"}),
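This is the ideogram seed fix from the commit message: the inline ("INT", {"default": 0}) spec is replaced with the shared Seed input. Seed is defined elsewhere in this fork and its exact definition is not part of this diff; for comparison, a conventional ComfyUI seed spec looks like:

    # assumption: illustrative only, not necessarily this fork's definition
    Seed = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})

which exposes the full 64-bit seed range instead of a bare INT widget.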
@@ -220,7 +217,8 @@ class IdeogramRemix(CustomNode):
                 "magic_prompt_option": (AUTO_PROMPT_ENUM, {"default": AUTO_PROMPT_ENUM[0]}),
                 "negative_prompt": ("STRING", {"multiline": True}),
                 "num_images": ("INT", {"default": 1, "min": 1, "max": 8}),
-                "seed": ("INT", {"default": 0}), "style_type": (STYLES_ENUM, {}),
+                "seed": Seed,
+                "style_type": (STYLES_ENUM, {}),
                 # New v3 optional args
                 "rendering_speed": (RENDERING_SPEED_ENUM, {"default": "DEFAULT"}),
                 "aspect_ratio": (ASPECT_RATIO_V3_ENUM, {"default": "disabled"}),