From 0aa414388564c3a36b84aaa7c2430d92cd969408 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Tue, 29 Apr 2025 12:00:31 -0700 Subject: [PATCH 01/10] remove main.py --- comfy/cmd/main.py | 276 ---------------------------------------------- 1 file changed, 276 deletions(-) delete mode 100644 comfy/cmd/main.py diff --git a/comfy/cmd/main.py b/comfy/cmd/main.py deleted file mode 100644 index ae3c3d80d..000000000 --- a/comfy/cmd/main.py +++ /dev/null @@ -1,276 +0,0 @@ -import asyncio -import contextvars -import gc -import itertools -import logging -import os -import shutil -import threading -import time -from pathlib import Path -from typing import Optional - -from .extra_model_paths import load_extra_path_config -# main_pre must be the earliest import since it suppresses some spurious warnings -from .main_pre import args -from .. import model_management -from ..analytics.analytics import initialize_event_tracking -from ..cmd import cuda_malloc -from ..cmd import folder_paths -from ..cmd import server as server_module -from ..component_model.abstract_prompt_queue import AbstractPromptQueue -from ..component_model.queue_types import ExecutionStatus -from ..distributed.distributed_prompt_queue import DistributedPromptQueue -from ..distributed.server_stub import ServerStub -from ..nodes.package import import_all_nodes_in_workspace - -logger = logging.getLogger(__name__) - - -def prompt_worker(q: AbstractPromptQueue, _server: server_module.PromptServer): - from ..cmd.execution import PromptExecutor, CacheType - cache_type = CacheType.CLASSIC - if args.cache_lru > 0: - cache_type = CacheType.LRU - elif args.cache_none: - cache_type = CacheType.DEPENDENCY_AWARE - - - e = PromptExecutor(_server, cache_type=cache_type, cache_size=args.cache_lru) - last_gc_collect = 0 - need_gc = False - gc_collect_interval = 10.0 - current_time = 0.0 - while True: - timeout = 1000.0 - if need_gc: - timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0) - - queue_item = q.get(timeout=timeout) - if queue_item is not None: - item, item_id = queue_item - execution_start_time = time.perf_counter() - prompt_id = item[1] - _server.last_prompt_id = prompt_id - - e.execute(item[2], prompt_id, item[3], item[4]) - need_gc = True - q.task_done(item_id, - e.history_result, - status=ExecutionStatus( - status_str='success' if e.success else 'error', - completed=e.success, - messages=e.status_messages)) - if _server.client_id is not None: - _server.send_sync("executing", {"node": None, "prompt_id": prompt_id}, _server.client_id) - - current_time = time.perf_counter() - execution_time = current_time - execution_start_time - logger.debug("Prompt executed in {:.2f} seconds".format(execution_time)) - - flags = q.get_flags() - free_memory = flags.get("free_memory", False) - - if flags.get("unload_models", free_memory): - model_management.unload_all_models() - need_gc = True - last_gc_collect = 0 - - if free_memory: - e.reset() - need_gc = True - last_gc_collect = 0 - - if need_gc: - current_time = time.perf_counter() - if (current_time - last_gc_collect) > gc_collect_interval: - gc.collect() - model_management.soft_empty_cache() - last_gc_collect = current_time - need_gc = False - - -async def run(server, address='', port=8188, verbose=True, call_on_start=None): - addresses = [] - for addr in address.split(","): - addresses.append((addr, port)) - await asyncio.gather(server.start_multi_address(addresses, call_on_start), server.publish_loop()) - - -def cleanup_temp(): - try: - folder_paths.get_temp_directory() - 
temp_dir = folder_paths.get_temp_directory() - if os.path.exists(temp_dir): - shutil.rmtree(temp_dir, ignore_errors=True) - except NameError: - # __file__ was not defined - pass - - -def cuda_malloc_warning(): - device = model_management.get_torch_device() - device_name = model_management.get_torch_device_name(device) - cuda_malloc_warning = False - if "cudaMallocAsync" in device_name: - for b in cuda_malloc.blacklist: - if b in device_name: - cuda_malloc_warning = True - if cuda_malloc_warning: - logger.warning( - "\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") - - -async def main(from_script_dir: Optional[Path] = None): - """ - Runs ComfyUI's frontend and backend like upstream. - :param from_script_dir: when set to a path, assumes that you are running ComfyUI's legacy main.py entrypoint at the root of the git repository located at the path - """ - if not from_script_dir: - os_getcwd = os.getcwd() - else: - os_getcwd = str(from_script_dir) - - if args.temp_directory: - temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp") - logger.debug(f"Setting temp directory to: {temp_dir}") - folder_paths.set_temp_directory(temp_dir) - cleanup_temp() - - if args.user_directory: - user_dir = os.path.abspath(args.user_directory) - logger.info(f"Setting user directory to: {user_dir}") - folder_paths.set_user_directory(user_dir) - - # configure extra model paths earlier - try: - extra_model_paths_config_path = os.path.join(os_getcwd, "extra_model_paths.yaml") - if os.path.isfile(extra_model_paths_config_path): - load_extra_path_config(extra_model_paths_config_path) - except NameError: - pass - - if args.extra_model_paths_config: - for config_path in itertools.chain(*args.extra_model_paths_config): - load_extra_path_config(config_path) - - # always create directories when started interactively - folder_paths.create_directories() - if args.create_directories: - import_all_nodes_in_workspace(raise_on_failure=False) - folder_paths.create_directories() - exit(0) - - if args.windows_standalone_build: - folder_paths.create_directories() - try: - from . import new_updater - new_updater.update_windows_updater() - except: - pass - - loop = asyncio.get_event_loop() - server = server_module.PromptServer(loop) - if args.external_address is not None: - server.external_address = args.external_address - - # at this stage, it's safe to import nodes - server.nodes = import_all_nodes_in_workspace() - # as a side effect, this also populates the nodes for execution - - if args.distributed_queue_connection_uri is not None: - distributed = True - q = DistributedPromptQueue( - caller_server=server if args.distributed_queue_frontend else None, - connection_uri=args.distributed_queue_connection_uri, - is_caller=args.distributed_queue_frontend, - is_callee=args.distributed_queue_worker, - loop=loop, - queue_name=args.distributed_queue_name - ) - await q.init() - else: - distributed = False - from .execution import PromptQueue - q = PromptQueue(server) - server.prompt_queue = q - - server.add_routes() - cuda_malloc_warning() - - # in a distributed setting, the default prompt worker will not be able to send execution events via the websocket - worker_thread_server = server if not distributed else ServerStub() - if not distributed or args.distributed_queue_worker: - if distributed: - logger.warning(f"Distributed workers started in the default thread loop cannot notify clients of progress updates. 
Instead of comfyui or main.py, use comfyui-worker.") - # todo: this should really be using an executor instead of doing things this jankilicious way - ctx = contextvars.copy_context() - threading.Thread(target=lambda _q, _worker_thread_server: ctx.run(prompt_worker, _q, _worker_thread_server), daemon=True, args=(q, worker_thread_server,)).start() - - # server has been imported and things should be looking good - initialize_event_tracking(loop) - - if args.output_directory: - output_dir = os.path.abspath(args.output_directory) - logger.debug(f"Setting output directory to: {output_dir}") - folder_paths.set_output_directory(output_dir) - - # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes - folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints")) - folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip")) - folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae")) - folder_paths.add_model_folder_path("diffusion_models", os.path.join(folder_paths.get_output_directory(), "diffusion_models")) - folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras")) - - if args.input_directory: - input_dir = os.path.abspath(args.input_directory) - logger.debug(f"Setting input directory to: {input_dir}") - folder_paths.set_input_directory(input_dir) - - if args.quick_test_for_ci: - # for CI purposes, try importing all the nodes - import_all_nodes_in_workspace(raise_on_failure=True) - exit(0) - else: - # we no longer lazily load nodes. we'll do it now for the sake of creating directories - import_all_nodes_in_workspace(raise_on_failure=False) - # now that nodes are loaded, create more directories if appropriate - folder_paths.create_directories() - - call_on_start = None - if args.auto_launch: - def startup_server(address, port): - import webbrowser - if os.name == 'nt' and address == '0.0.0.0' or address == '': - address = '127.0.0.1' - if ':' in address: - address = "[{}]".format(address) - webbrowser.open(f"http://{address}:{port}") - - call_on_start = startup_server - - first_listen_addr = args.listen.split(',')[0] if ',' in args.listen else args.listen - server.address = first_listen_addr - server.port = args.port - - try: - await server.setup() - await run(server, address=first_listen_addr, port=args.port, verbose=not args.dont_print_server, - call_on_start=call_on_start) - except (asyncio.CancelledError, KeyboardInterrupt): - logger.debug("\nStopped server") - finally: - if distributed: - await q.close() - cleanup_temp() - - -def entrypoint(): - try: - asyncio.run(main()) - except KeyboardInterrupt: - logger.info(f"Gracefully shutting down due to KeyboardInterrupt") - - -if __name__ == "__main__": - entrypoint() From df6a3cf8cb52b5bcb3a846480d7faa85f0400a2a Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Tue, 29 Apr 2025 12:01:34 -0700 Subject: [PATCH 02/10] main.py from upstream --- main.py | 327 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 315 insertions(+), 12 deletions(-) diff --git a/main.py b/main.py index 002c8926d..f3f56597a 100644 --- a/main.py +++ b/main.py @@ -1,16 +1,319 @@ -import asyncio -import warnings -from pathlib import Path +import comfy.options +comfy.options.enable_args_parsing() -from comfy.component_model.folder_path_types import FolderNames +import os +import importlib.util +import 
folder_paths +import time +from comfy.cli_args import args +from app.logger import setup_logger +import itertools +import utils.extra_config +import logging +import sys if __name__ == "__main__": - warnings.warn("main.py is deprecated. Start comfyui by installing the package through the instructions in the README, not by cloning the repository.", DeprecationWarning) - this_file_parent_dir = Path(__file__).parent - from comfy.cmd.main import main - from comfy.cmd.folder_paths import folder_names_and_paths # type: FolderNames - fn: FolderNames = folder_names_and_paths - fn.base_paths.clear() - fn.base_paths.append(this_file_parent_dir) + #NOTE: These do not do anything on core ComfyUI which should already have no communication with the internet, they are for custom nodes. + os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1' + os.environ['DO_NOT_TRACK'] = '1' - asyncio.run(main(from_script_dir=this_file_parent_dir)) + +setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) + +def apply_custom_paths(): + # extra model paths + extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml") + if os.path.isfile(extra_model_paths_config_path): + utils.extra_config.load_extra_path_config(extra_model_paths_config_path) + + if args.extra_model_paths_config: + for config_path in itertools.chain(*args.extra_model_paths_config): + utils.extra_config.load_extra_path_config(config_path) + + # --output-directory, --input-directory, --user-directory + if args.output_directory: + output_dir = os.path.abspath(args.output_directory) + logging.info(f"Setting output directory to: {output_dir}") + folder_paths.set_output_directory(output_dir) + + # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. 
nodes + folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints")) + folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip")) + folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae")) + folder_paths.add_model_folder_path("diffusion_models", + os.path.join(folder_paths.get_output_directory(), "diffusion_models")) + folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras")) + + if args.input_directory: + input_dir = os.path.abspath(args.input_directory) + logging.info(f"Setting input directory to: {input_dir}") + folder_paths.set_input_directory(input_dir) + + if args.user_directory: + user_dir = os.path.abspath(args.user_directory) + logging.info(f"Setting user directory to: {user_dir}") + folder_paths.set_user_directory(user_dir) + + +def execute_prestartup_script(): + def execute_script(script_path): + module_name = os.path.splitext(script_path)[0] + try: + spec = importlib.util.spec_from_file_location(module_name, script_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return True + except Exception as e: + logging.error(f"Failed to execute startup-script: {script_path} / {e}") + return False + + if args.disable_all_custom_nodes: + return + + node_paths = folder_paths.get_folder_paths("custom_nodes") + for custom_node_path in node_paths: + possible_modules = os.listdir(custom_node_path) + node_prestartup_times = [] + + for possible_module in possible_modules: + module_path = os.path.join(custom_node_path, possible_module) + if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__": + continue + + script_path = os.path.join(module_path, "prestartup_script.py") + if os.path.exists(script_path): + time_before = time.perf_counter() + success = execute_script(script_path) + node_prestartup_times.append((time.perf_counter() - time_before, module_path, success)) + if len(node_prestartup_times) > 0: + logging.info("\nPrestartup times for custom nodes:") + for n in sorted(node_prestartup_times): + if n[2]: + import_message = "" + else: + import_message = " (PRESTARTUP FAILED)" + logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) + logging.info("") + +apply_custom_paths() +execute_prestartup_script() + + +# Main code +import asyncio +import shutil +import threading +import gc + + +if os.name == "nt": + logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) + +if __name__ == "__main__": + if args.cuda_device is not None: + os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) + os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) + logging.info("Set cuda device to: {}".format(args.cuda_device)) + + if args.oneapi_device_selector is not None: + os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector + logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) + + if args.deterministic: + if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" + + import cuda_malloc + +if args.windows_standalone_build: + try: + from fix_torch import fix_pytorch_libomp + fix_pytorch_libomp() + except: + pass + +import comfy.utils + +import execution +import server +from server import BinaryEventTypes +import nodes +import comfy.model_management +import comfyui_version 
+import app.logger +import hook_breaker_ac10a0 + +def cuda_malloc_warning(): + device = comfy.model_management.get_torch_device() + device_name = comfy.model_management.get_torch_device_name(device) + cuda_malloc_warning = False + if "cudaMallocAsync" in device_name: + for b in cuda_malloc.blacklist: + if b in device_name: + cuda_malloc_warning = True + if cuda_malloc_warning: + logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") + + +def prompt_worker(q, server_instance): + current_time: float = 0.0 + cache_type = execution.CacheType.CLASSIC + if args.cache_lru > 0: + cache_type = execution.CacheType.LRU + elif args.cache_none: + cache_type = execution.CacheType.DEPENDENCY_AWARE + + e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru) + last_gc_collect = 0 + need_gc = False + gc_collect_interval = 10.0 + + while True: + timeout = 1000.0 + if need_gc: + timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0) + + queue_item = q.get(timeout=timeout) + if queue_item is not None: + item, item_id = queue_item + execution_start_time = time.perf_counter() + prompt_id = item[1] + server_instance.last_prompt_id = prompt_id + + e.execute(item[2], prompt_id, item[3], item[4]) + need_gc = True + q.task_done(item_id, + e.history_result, + status=execution.PromptQueue.ExecutionStatus( + status_str='success' if e.success else 'error', + completed=e.success, + messages=e.status_messages)) + if server_instance.client_id is not None: + server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id) + + current_time = time.perf_counter() + execution_time = current_time - execution_start_time + logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) + + flags = q.get_flags() + free_memory = flags.get("free_memory", False) + + if flags.get("unload_models", free_memory): + comfy.model_management.unload_all_models() + need_gc = True + last_gc_collect = 0 + + if free_memory: + e.reset() + need_gc = True + last_gc_collect = 0 + + if need_gc: + current_time = time.perf_counter() + if (current_time - last_gc_collect) > gc_collect_interval: + gc.collect() + comfy.model_management.soft_empty_cache() + last_gc_collect = current_time + need_gc = False + hook_breaker_ac10a0.restore_functions() + + +async def run(server_instance, address='', port=8188, verbose=True, call_on_start=None): + addresses = [] + for addr in address.split(","): + addresses.append((addr, port)) + await asyncio.gather( + server_instance.start_multi_address(addresses, call_on_start, verbose), server_instance.publish_loop() + ) + + +def hijack_progress(server_instance): + def hook(value, total, preview_image): + comfy.model_management.throw_exception_if_processing_interrupted() + progress = {"value": value, "max": total, "prompt_id": server_instance.last_prompt_id, "node": server_instance.last_node_id} + + server_instance.send_sync("progress", progress, server_instance.client_id) + if preview_image is not None: + server_instance.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server_instance.client_id) + + comfy.utils.set_progress_bar_global_hook(hook) + + +def cleanup_temp(): + temp_dir = folder_paths.get_temp_directory() + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir, ignore_errors=True) + + +def start_comfyui(asyncio_loop=None): + """ + Starts the ComfyUI server using the provided asyncio event 
loop or creates a new one. + Returns the event loop, server instance, and a function to start the server asynchronously. + """ + if args.temp_directory: + temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp") + logging.info(f"Setting temp directory to: {temp_dir}") + folder_paths.set_temp_directory(temp_dir) + cleanup_temp() + + if args.windows_standalone_build: + try: + import new_updater + new_updater.update_windows_updater() + except: + pass + + if not asyncio_loop: + asyncio_loop = asyncio.new_event_loop() + asyncio.set_event_loop(asyncio_loop) + prompt_server = server.PromptServer(asyncio_loop) + q = execution.PromptQueue(prompt_server) + + hook_breaker_ac10a0.save_functions() + nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes) + hook_breaker_ac10a0.restore_functions() + + cuda_malloc_warning() + + prompt_server.add_routes() + hijack_progress(prompt_server) + + threading.Thread(target=prompt_worker, daemon=True, args=(q, prompt_server,)).start() + + if args.quick_test_for_ci: + exit(0) + + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + call_on_start = None + if args.auto_launch: + def startup_server(scheme, address, port): + import webbrowser + if os.name == 'nt' and address == '0.0.0.0': + address = '127.0.0.1' + if ':' in address: + address = "[{}]".format(address) + webbrowser.open(f"{scheme}://{address}:{port}") + call_on_start = startup_server + + async def start_all(): + await prompt_server.setup() + await run(prompt_server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start) + + # Returning these so that other code can integrate with the ComfyUI loop and server + return asyncio_loop, prompt_server, start_all + + +if __name__ == "__main__": + # Running directly, just start ComfyUI. + logging.info("Python version: {}".format(sys.version)) + logging.info("ComfyUI version: {}".format(comfyui_version.__version__)) + + event_loop, _, start_all_func = start_comfyui() + try: + x = start_all_func() + app.logger.print_startup_warnings() + event_loop.run_until_complete(x) + except KeyboardInterrupt: + logging.info("\nStopped server") + + cleanup_temp() From b5e376a3d08f83ddde02ad85a2823a47321a0680 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Tue, 29 Apr 2025 12:01:55 -0700 Subject: [PATCH 03/10] move main.py --- main.py => comfy/cmd/main.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename main.py => comfy/cmd/main.py (100%) diff --git a/main.py b/comfy/cmd/main.py similarity index 100% rename from main.py rename to comfy/cmd/main.py From 84ec6a53f20a3e953484e57a1e69775fa79e3012 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Tue, 29 Apr 2025 12:02:27 -0700 Subject: [PATCH 04/10] main.py stub --- main.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 main.py diff --git a/main.py b/main.py new file mode 100644 index 000000000..002c8926d --- /dev/null +++ b/main.py @@ -0,0 +1,16 @@ +import asyncio +import warnings +from pathlib import Path + +from comfy.component_model.folder_path_types import FolderNames + +if __name__ == "__main__": + warnings.warn("main.py is deprecated. 
Start comfyui by installing the package through the instructions in the README, not by cloning the repository.", DeprecationWarning) + this_file_parent_dir = Path(__file__).parent + from comfy.cmd.main import main + from comfy.cmd.folder_paths import folder_names_and_paths # type: FolderNames + fn: FolderNames = folder_names_and_paths + fn.base_paths.clear() + fn.base_paths.append(this_file_parent_dir) + + asyncio.run(main(from_script_dir=this_file_parent_dir)) From aa11a31717d80ddaaf7e51245b44c680aded09c8 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Wed, 30 Apr 2025 11:52:20 -0700 Subject: [PATCH 05/10] hook breaker --- hook_breaker_ac10a0.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 hook_breaker_ac10a0.py diff --git a/hook_breaker_ac10a0.py b/hook_breaker_ac10a0.py new file mode 100644 index 000000000..c3e1c0633 --- /dev/null +++ b/hook_breaker_ac10a0.py @@ -0,0 +1,17 @@ +# Prevent custom nodes from hooking anything important +import comfy.model_management + +HOOK_BREAK = [(comfy.model_management, "cast_to")] + + +SAVED_FUNCTIONS = [] + + +def save_functions(): + for f in HOOK_BREAK: + SAVED_FUNCTIONS.append((f[0], f[1], getattr(f[0], f[1]))) + + +def restore_functions(): + for f in SAVED_FUNCTIONS: + setattr(f[0], f[1], f[2]) From eed79e210ea094b7905b2ba522c86a5afea78457 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Wed, 30 Apr 2025 11:52:33 -0700 Subject: [PATCH 06/10] move hook breaker --- hook_breaker_ac10a0.py => comfy/cmd/hook_breaker_ac10a0.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename hook_breaker_ac10a0.py => comfy/cmd/hook_breaker_ac10a0.py (100%) diff --git a/hook_breaker_ac10a0.py b/comfy/cmd/hook_breaker_ac10a0.py similarity index 100% rename from hook_breaker_ac10a0.py rename to comfy/cmd/hook_breaker_ac10a0.py From da2cbf7c91325d8c9f1a7c87341d3b55191f34fd Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Wed, 30 Apr 2025 11:59:59 -0700 Subject: [PATCH 07/10] move and fix main --- comfy/cmd/hook_breaker_ac10a0.py | 4 +- comfy/cmd/main.py | 375 ++++++++++++++----------------- main.py | 4 +- 3 files changed, 177 insertions(+), 206 deletions(-) diff --git a/comfy/cmd/hook_breaker_ac10a0.py b/comfy/cmd/hook_breaker_ac10a0.py index c3e1c0633..261ffba52 100644 --- a/comfy/cmd/hook_breaker_ac10a0.py +++ b/comfy/cmd/hook_breaker_ac10a0.py @@ -1,7 +1,7 @@ # Prevent custom nodes from hooking anything important -import comfy.model_management +from .. import model_management -HOOK_BREAK = [(comfy.model_management, "cast_to")] +HOOK_BREAK = [(model_management, "cast_to")] SAVED_FUNCTIONS = [] diff --git a/comfy/cmd/main.py b/comfy/cmd/main.py index f3f56597a..ea9736efc 100644 --- a/comfy/cmd/main.py +++ b/comfy/cmd/main.py @@ -1,162 +1,49 @@ -import comfy.options -comfy.options.enable_args_parsing() - -import os -import importlib.util -import folder_paths -import time -from comfy.cli_args import args -from app.logger import setup_logger -import itertools -import utils.extra_config -import logging -import sys - -if __name__ == "__main__": - #NOTE: These do not do anything on core ComfyUI which should already have no communication with the internet, they are for custom nodes. 
- os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1' - os.environ['DO_NOT_TRACK'] = '1' - - -setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) - -def apply_custom_paths(): - # extra model paths - extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml") - if os.path.isfile(extra_model_paths_config_path): - utils.extra_config.load_extra_path_config(extra_model_paths_config_path) - - if args.extra_model_paths_config: - for config_path in itertools.chain(*args.extra_model_paths_config): - utils.extra_config.load_extra_path_config(config_path) - - # --output-directory, --input-directory, --user-directory - if args.output_directory: - output_dir = os.path.abspath(args.output_directory) - logging.info(f"Setting output directory to: {output_dir}") - folder_paths.set_output_directory(output_dir) - - # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes - folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints")) - folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip")) - folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae")) - folder_paths.add_model_folder_path("diffusion_models", - os.path.join(folder_paths.get_output_directory(), "diffusion_models")) - folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras")) - - if args.input_directory: - input_dir = os.path.abspath(args.input_directory) - logging.info(f"Setting input directory to: {input_dir}") - folder_paths.set_input_directory(input_dir) - - if args.user_directory: - user_dir = os.path.abspath(args.user_directory) - logging.info(f"Setting user directory to: {user_dir}") - folder_paths.set_user_directory(user_dir) - - -def execute_prestartup_script(): - def execute_script(script_path): - module_name = os.path.splitext(script_path)[0] - try: - spec = importlib.util.spec_from_file_location(module_name, script_path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return True - except Exception as e: - logging.error(f"Failed to execute startup-script: {script_path} / {e}") - return False - - if args.disable_all_custom_nodes: - return - - node_paths = folder_paths.get_folder_paths("custom_nodes") - for custom_node_path in node_paths: - possible_modules = os.listdir(custom_node_path) - node_prestartup_times = [] - - for possible_module in possible_modules: - module_path = os.path.join(custom_node_path, possible_module) - if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__": - continue - - script_path = os.path.join(module_path, "prestartup_script.py") - if os.path.exists(script_path): - time_before = time.perf_counter() - success = execute_script(script_path) - node_prestartup_times.append((time.perf_counter() - time_before, module_path, success)) - if len(node_prestartup_times) > 0: - logging.info("\nPrestartup times for custom nodes:") - for n in sorted(node_prestartup_times): - if n[2]: - import_message = "" - else: - import_message = " (PRESTARTUP FAILED)" - logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) - logging.info("") - -apply_custom_paths() -execute_prestartup_script() - - -# Main code import asyncio +import contextvars +import gc +import itertools +import logging +import os import shutil import 
threading -import gc +import time +from pathlib import Path +from typing import Optional +# main_pre must be the earliest import since it suppresses some spurious warnings +from .main_pre import args +from . import hook_breaker_ac10a0 +from .extra_model_paths import load_extra_path_config +from .. import model_management +from ..analytics.analytics import initialize_event_tracking +from ..cmd import cuda_malloc +from ..cmd import folder_paths +from ..cmd import server as server_module +from ..component_model.abstract_prompt_queue import AbstractPromptQueue +from ..distributed.distributed_prompt_queue import DistributedPromptQueue +from ..distributed.server_stub import ServerStub +from ..nodes.package import import_all_nodes_in_workspace -if os.name == "nt": - logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) +logger = logging.getLogger(__name__) -if __name__ == "__main__": - if args.cuda_device is not None: - os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) - os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) - logging.info("Set cuda device to: {}".format(args.cuda_device)) - - if args.oneapi_device_selector is not None: - os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector - logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) - - if args.deterministic: - if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" - - import cuda_malloc - -if args.windows_standalone_build: - try: - from fix_torch import fix_pytorch_libomp - fix_pytorch_libomp() - except: - pass - -import comfy.utils - -import execution -import server -from server import BinaryEventTypes -import nodes -import comfy.model_management -import comfyui_version -import app.logger -import hook_breaker_ac10a0 def cuda_malloc_warning(): - device = comfy.model_management.get_torch_device() - device_name = comfy.model_management.get_torch_device_name(device) + device = model_management.get_torch_device() + device_name = model_management.get_torch_device_name(device) cuda_malloc_warning = False if "cudaMallocAsync" in device_name: for b in cuda_malloc.blacklist: if b in device_name: cuda_malloc_warning = True if cuda_malloc_warning: - logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") + logger.warning( + "\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") -def prompt_worker(q, server_instance): - current_time: float = 0.0 +def prompt_worker(q: AbstractPromptQueue, server_instance: server_module.PromptServer): + from ..cmd import execution + from ..component_model import queue_types + from .. 
import model_management cache_type = execution.CacheType.CLASSIC if args.cache_lru > 0: cache_type = execution.CacheType.LRU @@ -167,7 +54,7 @@ def prompt_worker(q, server_instance): last_gc_collect = 0 need_gc = False gc_collect_interval = 10.0 - + current_time = 0.0 while True: timeout = 1000.0 if need_gc: @@ -184,22 +71,23 @@ def prompt_worker(q, server_instance): need_gc = True q.task_done(item_id, e.history_result, - status=execution.PromptQueue.ExecutionStatus( + status=queue_types.ExecutionStatus( status_str='success' if e.success else 'error', completed=e.success, messages=e.status_messages)) if server_instance.client_id is not None: - server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id) + server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, + server_instance.client_id) current_time = time.perf_counter() execution_time = current_time - execution_start_time - logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) + logger.debug("Prompt executed in {:.2f} seconds".format(execution_time)) flags = q.get_flags() free_memory = flags.get("free_memory", False) if flags.get("unload_models", free_memory): - comfy.model_management.unload_all_models() + model_management.unload_all_models() need_gc = True last_gc_collect = 0 @@ -212,7 +100,7 @@ def prompt_worker(q, server_instance): current_time = time.perf_counter() if (current_time - last_gc_collect) > gc_collect_interval: gc.collect() - comfy.model_management.soft_empty_cache() + model_management.soft_empty_cache() last_gc_collect = current_time need_gc = False hook_breaker_ac10a0.restore_functions() @@ -222,98 +110,181 @@ async def run(server_instance, address='', port=8188, verbose=True, call_on_star addresses = [] for addr in address.split(","): addresses.append((addr, port)) - await asyncio.gather( - server_instance.start_multi_address(addresses, call_on_start, verbose), server_instance.publish_loop() - ) - - -def hijack_progress(server_instance): - def hook(value, total, preview_image): - comfy.model_management.throw_exception_if_processing_interrupted() - progress = {"value": value, "max": total, "prompt_id": server_instance.last_prompt_id, "node": server_instance.last_node_id} - - server_instance.send_sync("progress", progress, server_instance.client_id) - if preview_image is not None: - server_instance.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server_instance.client_id) - - comfy.utils.set_progress_bar_global_hook(hook) + await asyncio.gather(server_instance.start_multi_address(addresses, call_on_start), server_instance.publish_loop()) def cleanup_temp(): - temp_dir = folder_paths.get_temp_directory() - if os.path.exists(temp_dir): - shutil.rmtree(temp_dir, ignore_errors=True) + try: + folder_paths.get_temp_directory() + temp_dir = folder_paths.get_temp_directory() + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir, ignore_errors=True) + except NameError: + # __file__ was not defined + pass -def start_comfyui(asyncio_loop=None): +def start_comfyui(asyncio_loop: asyncio.AbstractEventLoop = None): + asyncio_loop = asyncio_loop or asyncio.get_event_loop() + asyncio_loop.run_until_complete(_start_comfyui()) + + +async def _start_comfyui(from_script_dir: Optional[Path] = None): """ - Starts the ComfyUI server using the provided asyncio event loop or creates a new one. - Returns the event loop, server instance, and a function to start the server asynchronously. + Runs ComfyUI's frontend and backend like upstream. 
+ :param from_script_dir: when set to a path, assumes that you are running ComfyUI's legacy main.py entrypoint at the root of the git repository located at the path """ + if not from_script_dir: + os_getcwd = os.getcwd() + else: + os_getcwd = str(from_script_dir) + if args.temp_directory: temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp") - logging.info(f"Setting temp directory to: {temp_dir}") + logger.debug(f"Setting temp directory to: {temp_dir}") folder_paths.set_temp_directory(temp_dir) cleanup_temp() + if args.user_directory: + user_dir = os.path.abspath(args.user_directory) + logger.info(f"Setting user directory to: {user_dir}") + folder_paths.set_user_directory(user_dir) + + # configure extra model paths earlier + try: + extra_model_paths_config_path = os.path.join(os_getcwd, "extra_model_paths.yaml") + if os.path.isfile(extra_model_paths_config_path): + load_extra_path_config(extra_model_paths_config_path) + except NameError: + pass + + if args.extra_model_paths_config: + for config_path in itertools.chain(*args.extra_model_paths_config): + load_extra_path_config(config_path) + + # always create directories when started interactively + folder_paths.create_directories() + if args.create_directories: + import_all_nodes_in_workspace(raise_on_failure=False) + folder_paths.create_directories() + exit(0) + if args.windows_standalone_build: + folder_paths.create_directories() try: - import new_updater + from . import new_updater new_updater.update_windows_updater() except: pass - if not asyncio_loop: - asyncio_loop = asyncio.new_event_loop() - asyncio.set_event_loop(asyncio_loop) - prompt_server = server.PromptServer(asyncio_loop) - q = execution.PromptQueue(prompt_server) + loop = asyncio.get_event_loop() + server = server_module.PromptServer(loop) + if args.external_address is not None: + server.external_address = args.external_address + # at this stage, it's safe to import nodes hook_breaker_ac10a0.save_functions() - nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes) + server.nodes = import_all_nodes_in_workspace() hook_breaker_ac10a0.restore_functions() + # as a side effect, this also populates the nodes for execution + if args.distributed_queue_connection_uri is not None: + distributed = True + q = DistributedPromptQueue( + caller_server=server if args.distributed_queue_frontend else None, + connection_uri=args.distributed_queue_connection_uri, + is_caller=args.distributed_queue_frontend, + is_callee=args.distributed_queue_worker, + loop=loop, + queue_name=args.distributed_queue_name + ) + await q.init() + else: + distributed = False + from .execution import PromptQueue + q = PromptQueue(server) + server.prompt_queue = q + + server.add_routes() cuda_malloc_warning() - prompt_server.add_routes() - hijack_progress(prompt_server) + # in a distributed setting, the default prompt worker will not be able to send execution events via the websocket + worker_thread_server = server if not distributed else ServerStub() + if not distributed or args.distributed_queue_worker: + if distributed: + logger.warning( + f"Distributed workers started in the default thread loop cannot notify clients of progress updates. 
Instead of comfyui or main.py, use comfyui-worker.") + # todo: this should really be using an executor instead of doing things this jankilicious way + ctx = contextvars.copy_context() + threading.Thread(target=lambda _q, _worker_thread_server: ctx.run(prompt_worker, _q, _worker_thread_server), + daemon=True, args=(q, worker_thread_server,)).start() - threading.Thread(target=prompt_worker, daemon=True, args=(q, prompt_server,)).start() + # server has been imported and things should be looking good + initialize_event_tracking(loop) + + if args.output_directory: + output_dir = os.path.abspath(args.output_directory) + logger.debug(f"Setting output directory to: {output_dir}") + folder_paths.set_output_directory(output_dir) + + # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes + folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints")) + folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip")) + folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae")) + folder_paths.add_model_folder_path("diffusion_models", + os.path.join(folder_paths.get_output_directory(), "diffusion_models")) + folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras")) + + if args.input_directory: + input_dir = os.path.abspath(args.input_directory) + logger.debug(f"Setting input directory to: {input_dir}") + folder_paths.set_input_directory(input_dir) if args.quick_test_for_ci: + # for CI purposes, try importing all the nodes + import_all_nodes_in_workspace(raise_on_failure=True) exit(0) + else: + # we no longer lazily load nodes. 
we'll do it now for the sake of creating directories + import_all_nodes_in_workspace(raise_on_failure=False) + # now that nodes are loaded, create more directories if appropriate + folder_paths.create_directories() - os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + # replaced my folder_paths.create_directories call_on_start = None if args.auto_launch: - def startup_server(scheme, address, port): + def startup_server(scheme="http", address="localhost", port=8188): import webbrowser - if os.name == 'nt' and address == '0.0.0.0': + if os.name == 'nt' and address == '0.0.0.0' or address == '': address = '127.0.0.1' if ':' in address: address = "[{}]".format(address) webbrowser.open(f"{scheme}://{address}:{port}") + call_on_start = startup_server - async def start_all(): - await prompt_server.setup() - await run(prompt_server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start) + first_listen_addr = args.listen.split(',')[0] if ',' in args.listen else args.listen + server.address = first_listen_addr + server.port = args.port - # Returning these so that other code can integrate with the ComfyUI loop and server - return asyncio_loop, prompt_server, start_all + try: + await server.setup() + await run(server, address=first_listen_addr, port=args.port, verbose=not args.dont_print_server, + call_on_start=call_on_start) + except (asyncio.CancelledError, KeyboardInterrupt): + logger.debug("\nStopped server") + finally: + if distributed: + await q.close() + cleanup_temp() + + +def entrypoint(): + try: + asyncio.run(_start_comfyui()) + except KeyboardInterrupt: + logger.info(f"Gracefully shutting down due to KeyboardInterrupt") if __name__ == "__main__": - # Running directly, just start ComfyUI. - logging.info("Python version: {}".format(sys.version)) - logging.info("ComfyUI version: {}".format(comfyui_version.__version__)) - - event_loop, _, start_all_func = start_comfyui() - try: - x = start_all_func() - app.logger.print_startup_warnings() - event_loop.run_until_complete(x) - except KeyboardInterrupt: - logging.info("\nStopped server") - - cleanup_temp() + entrypoint() diff --git a/main.py b/main.py index 002c8926d..96aa2fae8 100644 --- a/main.py +++ b/main.py @@ -7,10 +7,10 @@ from comfy.component_model.folder_path_types import FolderNames if __name__ == "__main__": warnings.warn("main.py is deprecated. 
Start comfyui by installing the package through the instructions in the README, not by cloning the repository.", DeprecationWarning) this_file_parent_dir = Path(__file__).parent - from comfy.cmd.main import main + from comfy.cmd.main import _start_comfyui from comfy.cmd.folder_paths import folder_names_and_paths # type: FolderNames fn: FolderNames = folder_names_and_paths fn.base_paths.clear() fn.base_paths.append(this_file_parent_dir) - asyncio.run(main(from_script_dir=this_file_parent_dir)) + asyncio.run(_start_comfyui(from_script_dir=this_file_parent_dir)) From b6d3f1fb08b5e6541579ece82cbedb08c27e88a7 Mon Sep 17 00:00:00 2001 From: Benjamin Berman Date: Wed, 7 May 2025 14:53:39 -0700 Subject: [PATCH 08/10] Accept workflows from the command line --- README.md | 244 ++++++++++-------- comfy/app/logger.py | 5 +- comfy/cli_args.py | 36 +-- comfy/cli_args_types.py | 21 +- comfy/client/client_types.py | 5 +- comfy/client/embedded_comfy_client.py | 25 +- comfy/cmd/extra_model_paths.py | 4 +- comfy/cmd/folder_paths.py | 6 +- comfy/cmd/folder_paths.pyi | 3 +- comfy/cmd/main.py | 10 +- comfy/cmd/main_pre.py | 22 +- comfy/cmd/worker.py | 68 ----- comfy/component_model/asyncio_files.py | 41 +++ comfy/component_model/entrypoints_common.py | 41 +++ comfy/component_model/suppress_stdout.py | 14 + .../distributed/distributed_prompt_worker.py | 6 +- comfy/distributed/distributed_types.py | 4 +- comfy/entrypoints/__init__.py | 0 comfy/entrypoints/worker.py | 34 +++ comfy/entrypoints/workflow.py | 46 ++++ comfy/execution_ext.py | 3 +- comfy/extra_config.py | 7 +- comfy/ldm/modules/attention.py | 12 +- comfy/model_management.py | 31 ++- comfy/nodes/vanilla_node_importing.py | 18 +- comfy/vendor/appdirs.py | 16 +- comfy_extras/nodes/nodes_nf4.py | 5 +- .../script_examples/basic_api_example.py | 4 +- pyproject.toml | 5 +- tests/distributed/test_distributed_queue.py | 4 +- tests/inference/test_execution.py | 6 +- tests/inference/test_workflows.py | 8 +- tests/issues/__test_25_respect_cwd_param.py | 4 +- tests/library/test_embedded_client.py | 10 +- tests/unit/test_panics.py | 10 +- 35 files changed, 459 insertions(+), 319 deletions(-) delete mode 100644 comfy/cmd/worker.py create mode 100644 comfy/component_model/asyncio_files.py create mode 100644 comfy/component_model/entrypoints_common.py create mode 100644 comfy/component_model/suppress_stdout.py create mode 100644 comfy/entrypoints/__init__.py create mode 100644 comfy/entrypoints/worker.py create mode 100644 comfy/entrypoints/workflow.py diff --git a/README.md b/README.md index 7304ded09..48b8e78c6 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,6 @@ ComfyUI LTS A vanilla, up-to-date fork of [ComfyUI](https://github.com/comfyanonymous/comfyui) intended for long term support (LTS) from [AppMana](https://appmana.com) and [Hidden Switch](https://hiddenswitch.com). - ### New Features - To run, just type `comfyui` in your command line and press enter. @@ -25,42 +24,46 @@ ComfyUI lets you design and execute advanced stable diffusion pipelines using a ## Get Started #### [Desktop Application](https://www.comfy.org/download) -- The easiest way to get started. + +- The easiest way to get started. - Available on Windows & macOS. #### [Windows Portable Package](#installing) + - Get the latest commits and completely portable. - Available on Windows. #### [Manual Install](#manual-install-windows-linux) + Supports all operating systems and GPU types (NVIDIA, AMD, Intel, Apple Silicon, Ascend). 
## [Examples](https://comfyanonymous.github.io/ComfyUI_examples/) + See what ComfyUI can do with the [example workflows](https://comfyanonymous.github.io/ComfyUI_examples/). - ## Upstream Features + - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. - Image Models - - SD1.x, SD2.x, - - [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/) - - [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/) - - [SD3 and SD3.5](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) - - Pixart Alpha and Sigma - - [AuraFlow](https://comfyanonymous.github.io/ComfyUI_examples/aura_flow/) - - [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/) - - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/) - - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) + - SD1.x, SD2.x, + - [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/) + - [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/) + - [SD3 and SD3.5](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) + - Pixart Alpha and Sigma + - [AuraFlow](https://comfyanonymous.github.io/ComfyUI_examples/aura_flow/) + - [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/) + - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/) + - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) + - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - Video Models - - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) - - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/) - - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/) - - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) - - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/) + - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) + - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) + - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/) + - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/) + - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) + - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/) - 3D Models - - [Hunyuan3D 2.0](https://docs.comfy.org/tutorials/3d/hunyuan3D-2) + - [Hunyuan3D 2.0](https://docs.comfy.org/tutorials/3d/hunyuan3D-2) - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/) - Asynchronous Queue system - Many optimizations: Only re-executes the parts of the workflow that changes between executions. @@ -327,7 +330,6 @@ For models compatible with Cambricon Extension for PyTorch (`torch_mlu`). Here's 2. Next, install the PyTorch (`torch_mlu`) extension following the instructions on the [Installation](https://www.cambricon.com/docs/sdk_1.15.0/cambricon_pytorch_1.17.0/user_guide_1.9/index.html) 3. Launch ComfyUI by running `python main.py` - ## Manual Install (Windows, Linux, macOS) For Development 1. 
Clone this repo: @@ -434,7 +436,7 @@ Improve the performance of your Mochi model video generation using **Sage Attent |--------|---------------|---------------|--------------------------| | A5000 | 7.52s/it | 5.81s/it | 5.00s/it (but corrupted) | -[Use the default Mochi Workflow.](https://github.com/comfyanonymous/ComfyUI_examples/raw/refs/heads/master/mochi/mochi_text_to_video_example.webp) This does not require any custom nodes or any change to your workflow. +[Use the default Mochi Workflow.](https://github.com/comfyanonymous/ComfyUI_examples/raw/refs/heads/master/mochi/mochi_text_to_video_example.webp) This does not require any custom nodes or any change to your workflow. Install the dependencies for Windows or Linux using the `withtriton` component, or install the specific dependencies you need from [requirements-triton.txt](./requirements-triton.txt): @@ -491,6 +493,7 @@ To use the Cosmos upsampler, install the prerequisites: uv pip install loguru pynvml uv pip install --no-deps git+https://github.com/NVIDIA/Cosmos.git ``` + Then, use the workflow embedded in the upsampled prompt by dragging and dropping the upsampled animation into your workspace. The Cosmos upsampler ought to improve any text-to-image video generation pipeline. Use the `Video2World` upsampler nodes to download Pixtral-12b and upsample for an image to video workflow using NVIDIA's default prompt. Since Pixtral is not fine tuned, the improvement may not be significant over using another LLM. @@ -539,6 +542,7 @@ some_directory/some_code.py Then, if your `NODE_CLASS_MAPPINGS` are declared in `__init__.py`, use the following as a `pyproject.toml`, substituting your actual project name: **pyproject.toml** + ```toml [project] name = "my_comfyui_nodes" @@ -832,38 +836,38 @@ The default installation includes a fast latent preview method that's low-resolu ## Keyboard Shortcuts -| Keybind | Explanation | -|------------------------------------|--------------------------------------------------------------------------------------------------------------------| -| `Ctrl` + `Enter` | Queue up current graph for generation | -| `Ctrl` + `Shift` + `Enter` | Queue up current graph as first for generation | -| `Ctrl` + `Alt` + `Enter` | Cancel current generation | -| `Ctrl` + `Z`/`Ctrl` + `Y` | Undo/Redo | -| `Ctrl` + `S` | Save workflow | -| `Ctrl` + `O` | Load workflow | -| `Ctrl` + `A` | Select all nodes | -| `Alt `+ `C` | Collapse/uncollapse selected nodes | -| `Ctrl` + `M` | Mute/unmute selected nodes | +| Keybind | Explanation | +|----------------------------------------|--------------------------------------------------------------------------------------------------------------------| +| `Ctrl` + `Enter` | Queue up current graph for generation | +| `Ctrl` + `Shift` + `Enter` | Queue up current graph as first for generation | +| `Ctrl` + `Alt` + `Enter` | Cancel current generation | +| `Ctrl` + `Z`/`Ctrl` + `Y` | Undo/Redo | +| `Ctrl` + `S` | Save workflow | +| `Ctrl` + `O` | Load workflow | +| `Ctrl` + `A` | Select all nodes | +| `Alt `+ `C` | Collapse/uncollapse selected nodes | +| `Ctrl` + `M` | Mute/unmute selected nodes | | `Ctrl` + `B` | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) | | `Delete`/`Backspace` | Delete selected nodes | | `Ctrl` + `Backspace` | Delete the current graph | -| `Space` | Move the canvas around when held and moving the cursor | -| `Ctrl`/`Shift` + `Click` | Add clicked node to selection | -| `Ctrl` + `C`/`Ctrl` + `V` | Copy and paste 
selected nodes (without maintaining connections to outputs of unselected nodes) | -| `Ctrl` + `C`/`Ctrl` + `Shift` + `V` | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) | +| `Space` | Move the canvas around when held and moving the cursor | +| `Ctrl`/`Shift` + `Click` | Add clicked node to selection | +| `Ctrl` + `C`/`Ctrl` + `V` | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) | +| `Ctrl` + `C`/`Ctrl` + `Shift` + `V` | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) | | `Shift` + `Drag` | Move multiple selected nodes at the same time | | `Ctrl` + `D` | Load default graph | -| `Alt` + `+` | Canvas Zoom in | -| `Alt` + `-` | Canvas Zoom out | +| `Alt` + `+` | Canvas Zoom in | +| `Alt` + `-` | Canvas Zoom out | | `Ctrl` + `Shift` + LMB + Vertical drag | Canvas Zoom in/out | -| `P` | Pin/Unpin selected nodes | +| `P` | Pin/Unpin selected nodes | | `Ctrl` + `G` | Group selected nodes | -| `Q` | Toggle visibility of the queue | -| `H` | Toggle visibility of history | -| `R` | Refresh graph | -| `F` | Show/Hide menu | -| `.` | Fit view to selection (Whole graph when nothing is selected) | -| Double-Click LMB | Open node quick search palette | -| `Shift` + Drag | Move multiple wires at once | +| `Q` | Toggle visibility of the queue | +| `H` | Toggle visibility of history | +| `R` | Refresh graph | +| `F` | Show/Hide menu | +| `.` | Fit view to selection (Whole graph when nothing is selected) | +| Double-Click LMB | Open node quick search palette | +| `Shift` + Drag | Move multiple wires at once | | `Ctrl` + `Alt` + LMB | Disconnect all wires from clicked slot | `Ctrl` can also be replaced with `Cmd` instead for macOS users @@ -887,31 +891,26 @@ You can pass additional extra model path configurations with one or more copies ### Command Line Arguments ``` -usage: comfyui.exe [-h] [-c CONFIG_FILE] [--write-out-config-file CONFIG_OUTPUT_PATH] [-w CWD] [--base-paths BASE_PATHS [BASE_PATHS ...]] [-H [IP]] [--port PORT] - [--enable-cors-header [ORIGIN]] [--max-upload-size MAX_UPLOAD_SIZE] [--extra-model-paths-config PATH [PATH ...]] - [--output-directory OUTPUT_DIRECTORY] [--temp-directory TEMP_DIRECTORY] [--input-directory INPUT_DIRECTORY] [--auto-launch] [--disable-auto-launch] - [--cuda-device DEVICE_ID] [--cuda-malloc | --disable-cuda-malloc] [--force-fp32 | --force-fp16 | --force-bf16] - [--bf16-unet | --fp16-unet | --fp8_e4m3fn-unet | --fp8_e5m2-unet] [--fp16-vae | --fp32-vae | --bf16-vae] [--cpu-vae] - [--fp8_e4m3fn-text-enc | --fp8_e5m2-text-enc | --fp16-text-enc | --fp32-text-enc] [--directml [DIRECTML_DEVICE]] [--disable-ipex-optimize] - [--preview-method [none,auto,latent2rgb,taesd]] [--preview-size PREVIEW_SIZE] [--cache-lru CACHE_LRU] - [--use-split-cross-attention | --use-quad-cross-attention | --use-pytorch-cross-attention] [--disable-xformers] [--disable-flash-attn] - [--disable-sage-attention] [--force-upcast-attention | --dont-upcast-attention] - [--gpu-only | --highvram | --normalvram | --lowvram | --novram | --cpu] [--reserve-vram RESERVE_VRAM] - [--default-hashing-function {md5,sha1,sha256,sha512}] [--disable-smart-memory] [--deterministic] [--fast] [--dont-print-server] - [--quick-test-for-ci] [--windows-standalone-build] [--disable-metadata] [--disable-all-custom-nodes] [--multi-user] [--create-directories] - [--plausible-analytics-base-url PLAUSIBLE_ANALYTICS_BASE_URL] [--plausible-analytics-domain 
PLAUSIBLE_ANALYTICS_DOMAIN] - [--analytics-use-identity-provider] [--distributed-queue-connection-uri DISTRIBUTED_QUEUE_CONNECTION_URI] [--distributed-queue-worker] - [--distributed-queue-frontend] [--distributed-queue-name DISTRIBUTED_QUEUE_NAME] [--external-address EXTERNAL_ADDRESS] - [--logging-level {DEBUG,INFO,WARNING,ERROR,CRITICAL}] [--disable-known-models] [--max-queue-size MAX_QUEUE_SIZE] - [--otel-service-name OTEL_SERVICE_NAME] [--otel-service-version OTEL_SERVICE_VERSION] [--otel-exporter-otlp-endpoint OTEL_EXPORTER_OTLP_ENDPOINT] - [--force-channels-last] [--force-hf-local-dir-mode] [--front-end-version FRONT_END_VERSION] [--front-end-root FRONT_END_ROOT] - [--executor-factory EXECUTOR_FACTORY] [--openai-api-key OPENAI_API_KEY] [--user-directory USER_DIRECTORY] [--blip-model-url BLIP_MODEL_URL] - [--blip-model-vqa-url BLIP_MODEL_VQA_URL] [--sam-model-vith-url SAM_MODEL_VITH_URL] [--sam-model-vitl-url SAM_MODEL_VITL_URL] - [--sam-model-vitb-url SAM_MODEL_VITB_URL] [--history-display-limit HISTORY_DISPLAY_LIMIT] [--ffmpeg-bin-path FFMPEG_BIN_PATH] - [--ffmpeg-extra-codecs FFMPEG_EXTRA_CODECS] [--wildcards-path WILDCARDS_PATH] [--wildcard-api WILDCARD_API] [--photoprism-host PHOTOPRISM_HOST] - [--immich-host IMMICH_HOST] [--ideogram-session-cookie IDEOGRAM_SESSION_COOKIE] [--annotator-ckpts-path ANNOTATOR_CKPTS_PATH] [--use-symlinks] - [--ort-providers ORT_PROVIDERS] [--vfi-ops-backend VFI_OPS_BACKEND] [--dependency-version DEPENDENCY_VERSION] [--mmdet-skip] [--sam-editor-cpu] - [--sam-editor-model SAM_EDITOR_MODEL] [--custom-wildcards CUSTOM_WILDCARDS] [--disable-gpu-opencv] +usage: comfyui [-h] [-c CONFIG_FILE] [--write-out-config-file CONFIG_OUTPUT_PATH] [-w CWD] [--base-paths BASE_PATHS [BASE_PATHS ...]] [-H [IP]] [--port PORT] + [--enable-cors-header [ORIGIN]] [--max-upload-size MAX_UPLOAD_SIZE] [--base-directory BASE_DIRECTORY] [--extra-model-paths-config PATH [PATH ...]] + [--output-directory OUTPUT_DIRECTORY] [--temp-directory TEMP_DIRECTORY] [--input-directory INPUT_DIRECTORY] [--auto-launch] [--disable-auto-launch] + [--cuda-device DEVICE_ID] [--cuda-malloc | --disable-cuda-malloc] [--force-fp32 | --force-fp16 | --force-bf16] + [--fp32-unet | --fp64-unet | --bf16-unet | --fp16-unet | --fp8_e4m3fn-unet | --fp8_e5m2-unet] [--fp16-vae | --fp32-vae | --bf16-vae] [--cpu-vae] + [--fp8_e4m3fn-text-enc | --fp8_e5m2-text-enc | --fp16-text-enc | --fp32-text-enc | --bf16-text-enc] [--directml [DIRECTML_DEVICE]] + [--oneapi-device-selector SELECTOR_STRING] [--disable-ipex-optimize] [--preview-method [none,auto,latent2rgb,taesd]] [--preview-size PREVIEW_SIZE] + [--cache-classic | --cache-lru CACHE_LRU | --cache-none] + [--use-split-cross-attention | --use-quad-cross-attention | --use-pytorch-cross-attention | --use-sage-attention | --use-flash-attention] [--disable-xformers] + [--force-upcast-attention | --dont-upcast-attention] [--gpu-only | --highvram | --normalvram | --lowvram | --novram | --cpu] [--reserve-vram RESERVE_VRAM] + [--default-hashing-function {md5,sha1,sha256,sha512}] [--disable-smart-memory] [--deterministic] [--fast [FAST ...]] [--dont-print-server] [--quick-test-for-ci] + [--windows-standalone-build] [--disable-metadata] [--disable-all-custom-nodes] [--multi-user] [--create-directories] [--log-stdout] + [--plausible-analytics-base-url PLAUSIBLE_ANALYTICS_BASE_URL] [--plausible-analytics-domain PLAUSIBLE_ANALYTICS_DOMAIN] [--analytics-use-identity-provider] + [--distributed-queue-connection-uri DISTRIBUTED_QUEUE_CONNECTION_URI] [--distributed-queue-worker] 
[--distributed-queue-frontend] + [--distributed-queue-name DISTRIBUTED_QUEUE_NAME] [--external-address EXTERNAL_ADDRESS] [--logging-level {DEBUG,INFO,WARNING,ERROR,CRITICAL}] + [--disable-known-models] [--max-queue-size MAX_QUEUE_SIZE] [--otel-service-name OTEL_SERVICE_NAME] [--otel-service-version OTEL_SERVICE_VERSION] + [--otel-exporter-otlp-endpoint OTEL_EXPORTER_OTLP_ENDPOINT] [--force-channels-last] [--force-hf-local-dir-mode] [--front-end-version FRONT_END_VERSION] + [--panic-when PANIC_WHEN] [--front-end-root FRONT_END_ROOT] [--executor-factory EXECUTOR_FACTORY] [--openai-api-key OPENAI_API_KEY] + [--ideogram-api-key IDEOGRAM_API_KEY] [--anthropic-api-key ANTHROPIC_API_KEY] [--user-directory USER_DIRECTORY] [--enable-compress-response-body] + [--workflows WORKFLOWS [WORKFLOWS ...]] options: -h, --help show this help message and exit @@ -919,27 +918,28 @@ options: config file path --write-out-config-file CONFIG_OUTPUT_PATH takes the current command line args and writes them out to a config file at the given path, then exits - -w CWD, --cwd CWD Specify the working directory. If not set, this is the current working directory. models/, input/, output/ and other directories will be - located here by default. [env var: COMFYUI_CWD] + -w CWD, --cwd CWD Specify the working directory. If not set, this is the current working directory. models/, input/, output/ and other directories will be located here by + default. [env var: COMFYUI_CWD] --base-paths BASE_PATHS [BASE_PATHS ...] Additional base paths for custom nodes, models and inputs. [env var: COMFYUI_BASE_PATHS] -H [IP], --listen [IP] - Specify the IP address to listen on (default: 127.0.0.1). You can give a list of ip addresses by separating them with a comma like: - 127.2.2.2,127.3.3.3 If --listen is provided without an argument, it defaults to 0.0.0.0,:: (listens on all ipv4 and ipv6) [env var: - COMFYUI_LISTEN] + Specify the IP address to listen on (default: 127.0.0.1). You can give a list of ip addresses by separating them with a comma like: 127.2.2.2,127.3.3.3 + If --listen is provided without an argument, it defaults to 0.0.0.0,:: (listens on all ipv4 and ipv6) [env var: COMFYUI_LISTEN] --port PORT Set the listen port. [env var: COMFYUI_PORT] --enable-cors-header [ORIGIN] Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'. [env var: COMFYUI_ENABLE_CORS_HEADER] --max-upload-size MAX_UPLOAD_SIZE Set the maximum upload size in MB. [env var: COMFYUI_MAX_UPLOAD_SIZE] + --base-directory BASE_DIRECTORY + Set the ComfyUI base directory for models, custom_nodes, input, output, temp, and user directories. [env var: COMFYUI_BASE_DIRECTORY] --extra-model-paths-config PATH [PATH ...] Load one or more extra_model_paths.yaml files. [env var: COMFYUI_EXTRA_MODEL_PATHS_CONFIG] --output-directory OUTPUT_DIRECTORY - Set the ComfyUI output directory. [env var: COMFYUI_OUTPUT_DIRECTORY] + Set the ComfyUI output directory. Overrides --base-directory. [env var: COMFYUI_OUTPUT_DIRECTORY] --temp-directory TEMP_DIRECTORY - Set the ComfyUI temp directory (default is in the ComfyUI directory). [env var: COMFYUI_TEMP_DIRECTORY] + Set the ComfyUI temp directory (default is in the ComfyUI directory). Overrides --base-directory. [env var: COMFYUI_TEMP_DIRECTORY] --input-directory INPUT_DIRECTORY - Set the ComfyUI input directory. [env var: COMFYUI_INPUT_DIRECTORY] + Set the ComfyUI input directory. Overrides --base-directory. 
[env var: COMFYUI_INPUT_DIRECTORY] --auto-launch Automatically launch ComfyUI in the default browser. [env var: COMFYUI_AUTO_LAUNCH] --disable-auto-launch Disable auto launching the browser. [env var: COMFYUI_DISABLE_AUTO_LAUNCH] @@ -951,8 +951,10 @@ options: --force-fp32 Force fp32 (If this makes your GPU work better please report it). [env var: COMFYUI_FORCE_FP32] --force-fp16 Force fp16. [env var: COMFYUI_FORCE_FP16] --force-bf16 Force bf16. [env var: COMFYUI_FORCE_BF16] - --bf16-unet Run the UNET in bf16. This should only be used for testing stuff. [env var: COMFYUI_BF16_UNET] - --fp16-unet Store unet weights in fp16. [env var: COMFYUI_FP16_UNET] + --fp32-unet Run the diffusion model in fp32. [env var: COMFYUI_FP32_UNET] + --fp64-unet Run the diffusion model in fp64. [env var: COMFYUI_FP64_UNET] + --bf16-unet Run the diffusion model in bf16. [env var: COMFYUI_BF16_UNET] + --fp16-unet Run the diffusion model in fp16 [env var: COMFYUI_FP16_UNET] --fp8_e4m3fn-unet Store unet weights in fp8_e4m3fn. [env var: COMFYUI_FP8_E4M3FN_UNET] --fp8_e5m2-unet Store unet weights in fp8_e5m2. [env var: COMFYUI_FP8_E5M2_UNET] --fp16-vae Run the VAE in fp16, might cause black images. [env var: COMFYUI_FP16_VAE] @@ -964,26 +966,31 @@ options: --fp8_e5m2-text-enc Store text encoder weights in fp8 (e5m2 variant). [env var: COMFYUI_FP8_E5M2_TEXT_ENC] --fp16-text-enc Store text encoder weights in fp16. [env var: COMFYUI_FP16_TEXT_ENC] --fp32-text-enc Store text encoder weights in fp32. [env var: COMFYUI_FP32_TEXT_ENC] + --bf16-text-enc Store text encoder weights in bf16. [env var: COMFYUI_BF16_TEXT_ENC] --directml [DIRECTML_DEVICE] Use torch-directml. [env var: COMFYUI_DIRECTML] + --oneapi-device-selector SELECTOR_STRING + Sets the oneAPI device(s) this instance will use. [env var: COMFYUI_ONEAPI_DEVICE_SELECTOR] --disable-ipex-optimize - Disables ipex.optimize when loading models with Intel GPUs. [env var: COMFYUI_DISABLE_IPEX_OPTIMIZE] + Disables ipex.optimize default when loading models with Intel's Extension for Pytorch. [env var: COMFYUI_DISABLE_IPEX_OPTIMIZE] --preview-method [none,auto,latent2rgb,taesd] Default preview method for sampler nodes. [env var: COMFYUI_PREVIEW_METHOD] --preview-size PREVIEW_SIZE Sets the maximum preview size for sampler nodes. [env var: COMFYUI_PREVIEW_SIZE] + --cache-classic WARNING: Unused. Use the old style (aggressive) caching. [env var: COMFYUI_CACHE_CLASSIC] --cache-lru CACHE_LRU Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM. [env var: COMFYUI_CACHE_LRU] + --cache-none Reduced RAM/VRAM usage at the expense of executing every node for each run. [env var: COMFYUI_CACHE_NONE] --use-split-cross-attention Use the split cross attention optimization. Ignored when xformers is used. [env var: COMFYUI_USE_SPLIT_CROSS_ATTENTION] --use-quad-cross-attention Use the sub-quadratic cross attention optimization . Ignored when xformers is used. [env var: COMFYUI_USE_QUAD_CROSS_ATTENTION] --use-pytorch-cross-attention - Use the new pytorch 2.0 cross attention function. [env var: COMFYUI_USE_PYTORCH_CROSS_ATTENTION] + Use the new pytorch 2.0 cross attention function (default). [env var: COMFYUI_USE_PYTORCH_CROSS_ATTENTION] + --use-sage-attention Use sage attention. [env var: COMFYUI_USE_SAGE_ATTENTION] + --use-flash-attention + Use FlashAttention. [env var: COMFYUI_USE_FLASH_ATTENTION] --disable-xformers Disable xformers. 
[env var: COMFYUI_DISABLE_XFORMERS] - --disable-flash-attn Disable Flash Attention [env var: COMFYUI_DISABLE_FLASH_ATTN] - --disable-sage-attention - Disable Sage Attention [env var: COMFYUI_DISABLE_SAGE_ATTENTION] --force-upcast-attention Force enable attention upcasting, please report if it fixes black images. [env var: COMFYUI_FORCE_UPCAST_ATTENTION] --dont-upcast-attention @@ -995,8 +1002,8 @@ options: --novram When lowvram isn't enough. [env var: COMFYUI_NOVRAM] --cpu To use the CPU for everything (slow). [env var: COMFYUI_CPU] --reserve-vram RESERVE_VRAM - Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS. - [env var: COMFYUI_RESERVE_VRAM] + Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS. [env var: + COMFYUI_RESERVE_VRAM] --default-hashing-function {md5,sha1,sha256,sha512} Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256. [env var: COMFYUI_DEFAULT_HASHING_FUNCTION] @@ -1004,17 +1011,19 @@ options: Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can. [env var: COMFYUI_DISABLE_SMART_MEMORY] --deterministic Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases. [env var: COMFYUI_DETERMINISTIC] - --fast Enable some untested and potentially quality deteriorating optimizations. [env var: COMFYUI_FAST] + --fast [FAST ...] Enable some untested and potentially quality deteriorating optimizations. Pass a list specific optimizations if you only want to enable specific ones. + Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops [env var: COMFYUI_FAST] --dont-print-server Don't print server output. [env var: COMFYUI_DONT_PRINT_SERVER] --quick-test-for-ci Quick test for CI. Raises an error if nodes cannot be imported, [env var: COMFYUI_QUICK_TEST_FOR_CI] --windows-standalone-build - Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening - the page on startup). [env var: COMFYUI_WINDOWS_STANDALONE_BUILD] + Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page + on startup). [env var: COMFYUI_WINDOWS_STANDALONE_BUILD] --disable-metadata Disable saving prompt metadata in files. [env var: COMFYUI_DISABLE_METADATA] --disable-all-custom-nodes Disable loading all custom nodes. [env var: COMFYUI_DISABLE_ALL_CUSTOM_NODES] --multi-user Enables per-user storage. [env var: COMFYUI_MULTI_USER] --create-directories Creates the default models/, input/, output/ and temp/ directories, then exits. [env var: COMFYUI_CREATE_DIRECTORIES] + --log-stdout Send normal process output to stdout instead of stderr (default). [env var: COMFYUI_LOG_STDOUT] --plausible-analytics-base-url PLAUSIBLE_ANALYTICS_BASE_URL Enables server-side analytics events sent to the provided URL. [env var: COMFYUI_PLAUSIBLE_ANALYTICS_BASE_URL] --plausible-analytics-domain PLAUSIBLE_ANALYTICS_DOMAIN @@ -1022,15 +1031,15 @@ options: --analytics-use-identity-provider Uses platform identifiers for unique visitor analytics. 
[env var: COMFYUI_ANALYTICS_USE_IDENTITY_PROVIDER] --distributed-queue-connection-uri DISTRIBUTED_QUEUE_CONNECTION_URI - EXAMPLE: "amqp://guest:guest@127.0.0.1" - Servers and clients will connect to this AMPQ URL to form a distributed queue and exchange prompt - execution requests and progress updates. [env var: COMFYUI_DISTRIBUTED_QUEUE_CONNECTION_URI] + EXAMPLE: "amqp://guest:guest@127.0.0.1" - Servers and clients will connect to this AMPQ URL to form a distributed queue and exchange prompt execution + requests and progress updates. [env var: COMFYUI_DISTRIBUTED_QUEUE_CONNECTION_URI] --distributed-queue-worker Workers will pull requests off the AMQP URL. [env var: COMFYUI_DISTRIBUTED_QUEUE_WORKER] --distributed-queue-frontend Frontends will start the web UI and connect to the provided AMQP URL to submit prompts. [env var: COMFYUI_DISTRIBUTED_QUEUE_FRONTEND] --distributed-queue-name DISTRIBUTED_QUEUE_NAME - This name will be used by the frontends and workers to exchange prompt requests and replies. Progress updates will be prefixed by the queue - name, followed by a '.', then the user ID [env var: COMFYUI_DISTRIBUTED_QUEUE_NAME] + This name will be used by the frontends and workers to exchange prompt requests and replies. Progress updates will be prefixed by the queue name, + followed by a '.', then the user ID [env var: COMFYUI_DISTRIBUTED_QUEUE_NAME] --external-address EXTERNAL_ADDRESS Specifies a base URL for external addresses reported by the API, such as for image paths. [env var: COMFYUI_EXTERNAL_ADDRESS] --logging-level {DEBUG,INFO,WARNING,ERROR,CRITICAL} @@ -1044,28 +1053,41 @@ options: --otel-service-version OTEL_SERVICE_VERSION The version of the service or application that is generating telemetry data. [env var: OTEL_SERVICE_VERSION] --otel-exporter-otlp-endpoint OTEL_EXPORTER_OTLP_ENDPOINT - A base endpoint URL for any signal type, with an optionally-specified port number. Helpful for when you're sending more than one signal to the - same endpoint and want one environment variable to control the endpoint. [env var: OTEL_EXPORTER_OTLP_ENDPOINT] + A base endpoint URL for any signal type, with an optionally-specified port number. Helpful for when you're sending more than one signal to the same + endpoint and want one environment variable to control the endpoint. [env var: OTEL_EXPORTER_OTLP_ENDPOINT] --force-channels-last Force channels last format when inferencing the models. [env var: COMFYUI_FORCE_CHANNELS_LAST] --force-hf-local-dir-mode - Download repos from huggingface.co to the models/huggingface directory with the "local_dir" argument instead of models/huggingface_cache with - the "cache_dir" argument, recreating the traditional file structure. [env var: COMFYUI_FORCE_HF_LOCAL_DIR_MODE] + Download repos from huggingface.co to the models/huggingface directory with the "local_dir" argument instead of models/huggingface_cache with the + "cache_dir" argument, recreating the traditional file structure. [env var: COMFYUI_FORCE_HF_LOCAL_DIR_MODE] --front-end-version FRONT_END_VERSION - Specifies the version of the frontend to be used. This command needs internet connectivity to query and download available frontend - implementations from GitHub releases. The version string should be in the format of: [repoOwner]/[repoName]@[version] where version is one of: - "latest" or a valid version number (e.g. "1.0.0") [env var: COMFYUI_FRONT_END_VERSION] + Specifies the version of the frontend to be used. 
This command needs internet connectivity to query and download available frontend implementations from + GitHub releases. The version string should be in the format of: [repoOwner]/[repoName]@[version] where version is one of: "latest" or a valid version + number (e.g. "1.0.0") [env var: COMFYUI_FRONT_END_VERSION] + --panic-when PANIC_WHEN + List of fully qualified exception class names to panic (sys.exit(1)) when a workflow raises it. Example: --panic-when=torch.cuda.OutOfMemoryError. Can + be specified multiple times or as a comma-separated list. [env var: COMFYUI_PANIC_WHEN] --front-end-root FRONT_END_ROOT The local filesystem path to the directory where the frontend is located. Overrides --front-end-version. [env var: COMFYUI_FRONT_END_ROOT] --executor-factory EXECUTOR_FACTORY - When running ComfyUI as a distributed worker, this specifies the kind of executor that should be used to run the actual ComfyUI workflow - worker. A ThreadPoolExecutor is the default. A ProcessPoolExecutor results in better memory management, since the process will be closed and - large, contiguous blocks of CUDA memory can be freed. [env var: COMFYUI_EXECUTOR_FACTORY] + When running ComfyUI as a distributed worker, this specifies the kind of executor that should be used to run the actual ComfyUI workflow worker. A + ThreadPoolExecutor is the default. A ProcessPoolExecutor results in better memory management, since the process will be closed and large, contiguous + blocks of CUDA memory can be freed. [env var: COMFYUI_EXECUTOR_FACTORY] --openai-api-key OPENAI_API_KEY - Configures the OpenAI API Key for the OpenAI nodes [env var: OPENAI_API_KEY] + Configures the OpenAI API Key for the OpenAI nodes. Visit https://platform.openai.com/api-keys to create this key. [env var: OPENAI_API_KEY] + --ideogram-api-key IDEOGRAM_API_KEY + Configures the Ideogram API Key for the Ideogram nodes. Visit https://ideogram.ai/manage-api to create this key. [env var: IDEOGRAM_API_KEY] + --anthropic-api-key ANTHROPIC_API_KEY + Configures the Anthropic API key for its nodes related to Claude functionality. Visit https://console.anthropic.com/settings/keys to create this key. + [env var: ANTHROPIC_API_KEY] --user-directory USER_DIRECTORY - Set the ComfyUI user directory with an absolute path. [env var: COMFYUI_USER_DIRECTORY] - + Set the ComfyUI user directory with an absolute path. Overrides --base-directory. [env var: COMFYUI_USER_DIRECTORY] + --enable-compress-response-body + Enable compressing response body. [env var: COMFYUI_ENABLE_COMPRESS_RESPONSE_BODY] + --workflows WORKFLOWS [WORKFLOWS ...] + Execute the API workflow(s) specified in the provided files. For each workflow, its outputs will be printed to a line to standard out. Application + logging will be redirected to standard error. Use `-` to signify standard in. [env var: COMFYUI_WORKFLOWS] + Args that start with '--' can also be set in a config file (config.yaml or config.json or specified via -c). Config file syntax allows: key=value, flag=true, stuff=[a,b,c] (for details, see syntax at https://goo.gl/R74nmi). In general, command-line values override environment variables which override config file values which override defaults. ``` @@ -1079,9 +1101,9 @@ There are multiple ways to use this ComfyUI package to run workflows programmati Start ComfyUI by creating an ordinary Python object. This does not create a web server. 
It runs ComfyUI as a library, like any other package you are familiar with: ```python -from comfy.client.embedded_comfy_client import EmbeddedComfyClient +from comfy.client.embedded_comfy_client import Comfy -async with EmbeddedComfyClient() as client: +async with Comfy() as client: # This will run your prompt # To get the prompt JSON, visit the ComfyUI interface, design your workflow and click **Save (API Format)**. This JSON is what you will use as your workflow. outputs = await client.queue_prompt(prompt) diff --git a/comfy/app/logger.py b/comfy/app/logger.py index 355e22198..fa5a4599e 100644 --- a/comfy/app/logger.py +++ b/comfy/app/logger.py @@ -74,7 +74,10 @@ def setup_logger(log_level: str = 'INFO', capacity: int = 300, use_stdout: bool logger.setLevel(log_level) stream_handler = logging.StreamHandler() - stream_handler.setFormatter(logging.Formatter("%(message)s")) + stream_handler.setFormatter(logging.Formatter( + "%(asctime)s [%(name)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S" + )) if use_stdout: # Only errors and critical to stderr diff --git a/comfy/cli_args.py b/comfy/cli_args.py index bb7ed1e30..ae37538cc 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -1,18 +1,16 @@ from __future__ import annotations import logging -import os import sys from importlib.metadata import entry_points from types import ModuleType -from typing import Optional, List +from typing import Optional import configargparse as argparse -from watchdog.observers import Observer from . import __version__ from . import options -from .cli_args_types import LatentPreviewMethod, Configuration, ConfigurationExtender, ConfigChangeHandler, EnumAction, \ +from .cli_args_types import LatentPreviewMethod, Configuration, ConfigurationExtender, EnumAction, \ EnhancedConfigArgParser, PerformanceFeature, is_valid_directory # todo: move this @@ -104,7 +102,7 @@ def _create_parser() -> EnhancedConfigArgParser: attn_group.add_argument("--use-quad-cross-attention", action="store_true", help="Use the sub-quadratic cross attention optimization . Ignored when xformers is used.") attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", - help="Use the new pytorch 2.0 cross attention function.") + help="Use the new pytorch 2.0 cross attention function (default).", default=True) attn_group.add_argument("--use-sage-attention", action="store_true", help="Use sage attention.") attn_group.add_argument("--use-flash-attention", action="store_true", help="Use FlashAttention.") @@ -250,6 +248,8 @@ def _create_parser() -> EnhancedConfigArgParser: parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.") + parser.add_argument("--workflows", type=str, nargs='+', default=[], help="Execute the API workflow(s) specified in the provided files. For each workflow, its outputs will be printed to a line to standard out. Application logging will be redirected to standard error. 
Use `-` to signify standard in.") + # now give plugins a chance to add configuration for entry_point in entry_points().select(group='comfyui.custom_config'): try: @@ -288,35 +288,11 @@ def _parse_args(parser: Optional[argparse.ArgumentParser] = None, args_parsing: configuration_obj = Configuration(**vars(args)) configuration_obj.config_files = config_files assert all(isinstance(config_file, str) for config_file in config_files) - # we always have to set up a watcher, even when there are no existing files - if len(config_files) > 0: - _setup_config_file_watcher(configuration_obj, parser, config_files) return configuration_obj -def _setup_config_file_watcher(config: Configuration, parser: EnhancedConfigArgParser, config_files: List[str]): - def update_config(): - new_args, _, _ = parser.parse_known_args() - new_config = vars(new_args) - config.update(new_config) - - handler = ConfigChangeHandler(config_files, update_config) - observer = Observer() - - for config_file in config_files: - config_dir = os.path.dirname(config_file) or '.' - observer.schedule(handler, path=config_dir, recursive=False) - - observer.start() - - # Ensure the observer is stopped when the program exits - import atexit - atexit.register(observer.stop) - atexit.register(observer.join) - - def default_configuration() -> Configuration: return _parse_args(_create_parser()) -args = _parse_args(args_parsing=options.args_parsing) +args = _parse_args(args_parsing=options.args_parsing) \ No newline at end of file diff --git a/comfy/cli_args_types.py b/comfy/cli_args_types.py index 3168b21fa..c68827254 100644 --- a/comfy/cli_args_types.py +++ b/comfy/cli_args_types.py @@ -6,7 +6,6 @@ from typing import Optional, List, Callable, Any, Union, Mapping, NamedTuple import configargparse import configargparse as argparse -from watchdog.events import FileSystemEventHandler ConfigurationExtender = Callable[[argparse.ArgParser], Optional[argparse.ArgParser]] @@ -18,16 +17,6 @@ class LatentPreviewMethod(enum.Enum): TAESD = "taesd" -class ConfigChangeHandler(FileSystemEventHandler): - def __init__(self, config_file_paths: List[str], update_callback: Callable[[], None]): - self.config_file_paths = config_file_paths - self.update_callback = update_callback - - def on_modified(self, event): - if not event.is_directory and event.src_path in self.config_file_paths: - self.update_callback() - - ConfigObserver = Callable[[str, Any], None] @@ -142,6 +131,7 @@ class Configuration(dict): log_stdout (bool): Send normal process output to stdout instead of stderr (default) panic_when (list[str]): List of fully qualified exception class names to panic (sys.exit(1)) when a workflow raises it. enable_compress_response_body (bool): Enable compressing response body. + workflows (list[str]): Execute the API workflow(s) specified in the provided files. For each workflow, its outputs will be printed to a line to standard out. Application logging will be redirected to standard error. Use `-` to signify standard in. 
""" def __init__(self, **kwargs): @@ -235,15 +225,16 @@ class Configuration(dict): self.otel_service_name: str = "comfyui" self.otel_service_version: str = "0.0.1" self.otel_exporter_otlp_endpoint: Optional[str] = None - for key, value in kwargs.items(): - self[key] = value - self.executor_factory: str = "ThreadPoolExecutor" self.openai_api_key: Optional[str] = None self.ideogram_api_key: Optional[str] = None self.anthropic_api_key: Optional[str] = None self.user_directory: Optional[str] = None self.panic_when: list[str] = [] + self.workflows: list[str] = [] + for key, value in kwargs.items(): + self[key] = value + # this must always be last def __getattr__(self, item): if item not in self: @@ -287,8 +278,8 @@ class Configuration(dict): return state def __setstate__(self, state): - self.update(state) self._observers = [] + self.update(state) @property def verbose(self) -> str: diff --git a/comfy/client/client_types.py b/comfy/client/client_types.py index 0cd67e2f1..f7336d7fd 100644 --- a/comfy/client/client_types.py +++ b/comfy/client/client_types.py @@ -13,8 +13,9 @@ class FileOutput(TypedDict, total=False): class Output(TypedDict, total=False): - latents: NotRequired[List[FileOutput]] - images: NotRequired[List[FileOutput]] + latents: NotRequired[list[FileOutput]] + images: NotRequired[list[FileOutput]] + videos: NotRequired[list[FileOutput]] @dataclasses.dataclass diff --git a/comfy/client/embedded_comfy_client.py b/comfy/client/embedded_comfy_client.py index 3994c3e58..e6e2fa63a 100644 --- a/comfy/client/embedded_comfy_client.py +++ b/comfy/client/embedded_comfy_client.py @@ -1,6 +1,7 @@ from __future__ import annotations import asyncio +import copy import gc import json import threading @@ -35,6 +36,7 @@ def _execute_prompt( span_context: dict, progress_handler: ExecutorToClientProgress | None, configuration: Configuration | None) -> dict: + configuration = copy.deepcopy(configuration) if configuration is not None else None execution_context = current_execution_context() if len(execution_context.folder_names_and_paths) == 0 or configuration is not None: init_default_paths(execution_context.folder_names_and_paths, configuration, replace_existing=True) @@ -59,7 +61,7 @@ async def __execute_prompt( from ..cmd.execution import PromptExecutor progress_handler = progress_handler or ServerStub() - + prompt_executor: PromptExecutor = None try: prompt_executor: PromptExecutor = _prompt_executor.executor except (LookupError, AttributeError): @@ -121,11 +123,9 @@ def _cleanup(): pass -class EmbeddedComfyClient: +class Comfy: """ - Embedded client for comfy executing prompts as a library. - - This client manages a single-threaded executor to run long-running or blocking tasks + This manages a single-threaded executor to run long-running or blocking workflows asynchronously without blocking the asyncio event loop. It initializes a PromptExecutor in a dedicated thread for executing prompts and handling server-stub communications. Example usage: @@ -186,7 +186,17 @@ class EmbeddedComfyClient: self._is_running = False async def queue_prompt_api(self, - prompt: PromptDict) -> V1QueuePromptResponse: + prompt: PromptDict | str | dict) -> V1QueuePromptResponse: + """ + Queues a prompt for execution, returning the output when it is complete. 
+ :param prompt: a PromptDict, string or dictionary containing a so-called Workflow API prompt + :return: a response of URLs for Save-related nodes and the node outputs + """ + if isinstance(prompt, str): + prompt = json.loads(prompt) + if isinstance(prompt, dict): + from comfy.api.components.schema.prompt import Prompt + prompt = Prompt.validate(prompt) outputs = await self.queue_prompt(prompt) return V1QueuePromptResponse(urls=[], outputs=outputs) @@ -217,3 +227,6 @@ class EmbeddedComfyClient: finally: with self._task_count_lock: self._task_count -= 1 + + +EmbeddedComfyClient = Comfy diff --git a/comfy/cmd/extra_model_paths.py b/comfy/cmd/extra_model_paths.py index 564e6df99..6e3160e3d 100644 --- a/comfy/cmd/extra_model_paths.py +++ b/comfy/cmd/extra_model_paths.py @@ -1,4 +1,4 @@ -def load_extra_path_config(yaml_path): +def load_extra_path_config(yaml_path, folder_names=None): from ..extra_config import load_extra_path_config - return load_extra_path_config(yaml_path) + return load_extra_path_config(yaml_path, folder_names) diff --git a/comfy/cmd/folder_paths.py b/comfy/cmd/folder_paths.py index e83e7cdad..38418db1f 100644 --- a/comfy/cmd/folder_paths.py +++ b/comfy/cmd/folder_paths.py @@ -67,7 +67,7 @@ def init_default_paths(folder_names_and_paths: FolderNames, configuration: Optio configuration = configuration or args if base_paths_from_configuration: - base_paths = [Path(configuration.cwd) if configuration.cwd is not None else None] + [Path(configuration.base_directory) if configuration.base_directory is not None else None] + configuration.base_paths + base_paths = [Path(configuration.cwd) if configuration.cwd is not None else None] + [Path(configuration.base_directory) if configuration.base_directory is not None else None] + (configuration.base_paths or []) base_paths = [Path(path) for path in base_paths if path is not None] if len(base_paths) == 0: base_paths = [Path(os.getcwd())] @@ -250,7 +250,7 @@ def exists_annotated_filepath(name): return os.path.exists(filepath) -def add_model_folder_path(folder_name, full_folder_path: Optional[str] = None, extensions: Optional[set[str] | frozenset[str]] = None, is_default: bool = False) -> str: +def add_model_folder_path(folder_name, full_folder_path: Optional[str] = None, extensions: Optional[set[str] | frozenset[str]] = None, is_default: bool = False, folder_names_and_paths: Optional[FolderNames] = None) -> str: """ Registers a model path for the given canonical name. :param folder_name: the folder name @@ -258,7 +258,7 @@ def add_model_folder_path(folder_name, full_folder_path: Optional[str] = None, e :param extensions: supported file extensions :return: the folder path """ - folder_names_and_paths = _folder_names_and_paths() + folder_names_and_paths = folder_names_and_paths or _folder_names_and_paths() if full_folder_path is None: if folder_name not in folder_names_and_paths: folder_names_and_paths.add(ModelPaths(folder_names=[folder_name], supported_extensions=set(extensions) if extensions is not None else _supported_pt_extensions())) diff --git a/comfy/cmd/folder_paths.pyi b/comfy/cmd/folder_paths.pyi index ba5233ba6..4278d9dca 100644 --- a/comfy/cmd/folder_paths.pyi +++ b/comfy/cmd/folder_paths.pyi @@ -64,7 +64,8 @@ def add_model_folder_path( folder_name: str, full_folder_path: Optional[str] = ..., extensions: Optional[Union[set[str], frozenset[str]]] = ..., - is_default: bool = ... + is_default: bool = ..., + folder_names_and_paths: Optional[FolderNames] = ..., ) -> str: ... 
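For reference, a minimal sketch of driving the renamed client programmatically, based on the `Comfy` context manager and the `queue_prompt_api` signature added above. The `workflow_api.json` path is a placeholder for a prompt exported from the UI with **Save (API Format)**:

```python
import asyncio
from pathlib import Path

# The old EmbeddedComfyClient name is kept as an alias of Comfy, so existing imports still work.
from comfy.client.embedded_comfy_client import Comfy


async def main() -> None:
    # Placeholder path: export a Workflow API prompt from the UI via "Save (API Format)".
    prompt_json = Path("workflow_api.json").read_text(encoding="utf-8")

    async with Comfy() as client:
        # queue_prompt_api accepts a PromptDict, a plain dict, or a raw JSON string;
        # strings are parsed and validated before execution.
        response = await client.queue_prompt_api(prompt_json)
        print(response.outputs)


if __name__ == "__main__":
    asyncio.run(main())
```
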
diff --git a/comfy/cmd/main.py b/comfy/cmd/main.py index ea9736efc..c6588d9a3 100644 --- a/comfy/cmd/main.py +++ b/comfy/cmd/main.py @@ -10,6 +10,7 @@ import time from pathlib import Path from typing import Optional +from comfy.component_model.entrypoints_common import configure_application_paths, executor_from_args # main_pre must be the earliest import since it suppresses some spurious warnings from .main_pre import args from . import hook_breaker_ac10a0 @@ -243,13 +244,20 @@ async def _start_comfyui(from_script_dir: Optional[Path] = None): if args.quick_test_for_ci: # for CI purposes, try importing all the nodes import_all_nodes_in_workspace(raise_on_failure=True) - exit(0) + return else: # we no longer lazily load nodes. we'll do it now for the sake of creating directories import_all_nodes_in_workspace(raise_on_failure=False) # now that nodes are loaded, create more directories if appropriate folder_paths.create_directories() + if len(args.workflows) > 0: + configure_application_paths(args) + executor = await executor_from_args(args) + from ..entrypoints.workflow import run_workflows + await run_workflows(executor, args.workflows) + return + # replaced my folder_paths.create_directories call_on_start = None if args.auto_launch: diff --git a/comfy/cmd/main_pre.py b/comfy/cmd/main_pre.py index e61129f40..8cb0e507a 100644 --- a/comfy/cmd/main_pre.py +++ b/comfy/cmd/main_pre.py @@ -7,10 +7,17 @@ Use this instead of cli_args to import the args: It will enable command line argument parsing. If this isn't desired, you must author your own implementation of these fixes. """ +import os + +os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1" +os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +os.environ["TORCHINDUCTOR_FX_GRAPH_CACHE"] = "1" +os.environ["TORCHINDUCTOR_AUTOGRAD_CACHE"] = "1" +os.environ["BITSANDBYTES_NOWELCOME"] = "1" + import ctypes import importlib.util import logging -import os import shutil import sys import warnings @@ -43,6 +50,7 @@ warnings.filterwarnings("ignore", message="Importing from timm.models.registry i warnings.filterwarnings("ignore", message="Importing from timm.models.layers is deprecated, please import via timm.layers", category=FutureWarning) warnings.filterwarnings("ignore", message="Inheritance class _InstrumentedApplication from web.Application is discouraged", category=DeprecationWarning) warnings.filterwarnings("ignore", message="Please import `gaussian_filter` from the `scipy.ndimage` namespace; the `scipy.ndimage.filters` namespace is deprecated", category=DeprecationWarning) +warnings.filterwarnings("ignore", message="The installed version of bitsandbytes was compiled without GPU support") from ..cli_args import args @@ -64,11 +72,6 @@ try: except Exception: pass -os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1" -os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -os.environ["TORCHINDUCTOR_FX_GRAPH_CACHE"] = "1" -os.environ["TORCHINDUCTOR_AUTOGRAD_CACHE"] = "1" - def _fix_pytorch_240(): """Fixes pytorch 2.4.0""" @@ -108,13 +111,10 @@ def _create_tracer(): sampler = ProgressSpanSampler() provider = TracerProvider(resource=resource, sampler=sampler) - is_debugging = hasattr(sys, 'gettrace') and sys.gettrace() is not None has_endpoint = args.otel_exporter_otlp_endpoint is not None if has_endpoint: otlp_exporter = OTLPSpanExporter() - # elif is_debugging: - # otlp_exporter = ConsoleSpanExporter("comfyui") else: otlp_exporter = SpanExporter() @@ -133,8 +133,8 @@ def _create_tracer(): def _configure_logging(): logging_level = args.logging_level - if args.distributed_queue_worker 
or args.distributed_queue_frontend or args.distributed_queue_connection_uri is not None: - logging.basicConfig(level=logging_level) + if len(args.workflows) > 0 or args.distributed_queue_worker or args.distributed_queue_frontend or args.distributed_queue_connection_uri is not None: + logging.basicConfig(level=logging_level, stream=sys.stderr) else: logger.setup_logger(logging_level) diff --git a/comfy/cmd/worker.py b/comfy/cmd/worker.py deleted file mode 100644 index 2cebd747f..000000000 --- a/comfy/cmd/worker.py +++ /dev/null @@ -1,68 +0,0 @@ -import asyncio -import itertools -import logging -import os - -from .extra_model_paths import load_extra_path_config -from .main_pre import args -from ..distributed.executors import ContextVarExecutor, ContextVarProcessPoolExecutor - - -async def main(): - # assume we are a worker - args.distributed_queue_worker = True - args.distributed_queue_frontend = False - assert args.distributed_queue_connection_uri is not None, "Set the --distributed-queue-connection-uri argument to your RabbitMQ server" - - # configure paths - if args.output_directory: - output_dir = os.path.abspath(args.output_directory) - logging.info(f"Setting output directory to: {output_dir}") - from ..cmd import folder_paths - - folder_paths.set_output_directory(output_dir) - - if args.input_directory: - input_dir = os.path.abspath(args.input_directory) - logging.info(f"Setting input directory to: {input_dir}") - from ..cmd import folder_paths - - folder_paths.set_input_directory(input_dir) - - if args.temp_directory: - temp_dir = os.path.abspath(args.temp_directory) - logging.info(f"Setting temp directory to: {temp_dir}") - from ..cmd import folder_paths - - folder_paths.set_temp_directory(temp_dir) - - if args.extra_model_paths_config: - for config_path in itertools.chain(*args.extra_model_paths_config): - load_extra_path_config(config_path) - - from ..distributed.distributed_prompt_worker import DistributedPromptWorker - - if args.executor_factory in ("ThreadPoolExecutor", "ContextVarExecutor"): - executor = ContextVarExecutor() - elif args.executor_factory in ("ProcessPoolExecutor", "ContextVarProcessPoolExecutor"): - executor = ContextVarProcessPoolExecutor() - else: - # default executor - executor = ContextVarExecutor() - - async with DistributedPromptWorker(connection_uri=args.distributed_queue_connection_uri, - queue_name=args.distributed_queue_name, - executor=executor): - stop = asyncio.Event() - try: - await stop.wait() - except asyncio.CancelledError: - pass - - -def entrypoint(): - asyncio.run(main()) - - -if __name__ == "__main__": - entrypoint() diff --git a/comfy/component_model/asyncio_files.py b/comfy/component_model/asyncio_files.py new file mode 100644 index 000000000..0b1ae12d7 --- /dev/null +++ b/comfy/component_model/asyncio_files.py @@ -0,0 +1,41 @@ +import asyncio + +try: + from collections.abc import Buffer +except ImportError: + from typing_extensions import Buffer +from io import BytesIO +from typing import Literal, AsyncGenerator + +import ijson +import aiofiles +import sys +import shlex + + +async def stream_json_objects(source_path_or_stdin: str | Literal["-"]) -> AsyncGenerator[dict, None]: + """ + Asynchronously yields JSON objects from a given source. + The source can be a file path or "-" for stdin. + Assumes the input stream contains concatenated JSON objects (e.g., {}{}{}). 
+ """ + if source_path_or_stdin is None or len(source_path_or_stdin) == 0: + return + elif source_path_or_stdin == "-": + async for obj in ijson.items_async(aiofiles.stdin_bytes, '', multiple_values=True): + yield obj + else: + # Handle file path or literal JSON + if "{" in source_path_or_stdin[:2]: + # literal string + encode: Buffer = source_path_or_stdin.encode("utf-8") + source_path_or_stdin = BytesIO(encode) + for obj in ijson.items(source_path_or_stdin, '', multiple_values=True): + yield obj + else: + async with aiofiles.open(source_path_or_stdin, mode='rb') as f: + # 'rb' mode is important as ijson expects byte streams. + # The prefix '' targets root-level objects. + # multiple_values=True allows parsing of multiple top-level JSON values. + async for obj in ijson.items_async(f, '', multiple_values=True): + yield obj diff --git a/comfy/component_model/entrypoints_common.py b/comfy/component_model/entrypoints_common.py new file mode 100644 index 000000000..484d73808 --- /dev/null +++ b/comfy/component_model/entrypoints_common.py @@ -0,0 +1,41 @@ +from typing import Optional + +from ..cli_args_types import Configuration +from ..cmd.extra_model_paths import load_extra_path_config +from .folder_path_types import FolderNames +from ..component_model.platform_path import construct_path +import itertools +import os + +from ..distributed.executors import ContextVarExecutor, ContextVarProcessPoolExecutor + + +def configure_application_paths(args: Configuration, folder_names: Optional[FolderNames] = None): + if folder_names is None: + from ..cmd import folder_paths + folder_names = folder_paths.folder_names_and_paths + # configure paths + if args.output_directory: + folder_names.application_paths.output_directory = construct_path(args.output_directory) + if args.input_directory: + folder_names.application_paths.input_directory = construct_path(args.input_directory) + if args.temp_directory: + folder_names.application_paths.temp_directory = construct_path(args.temp_directory) + if args.extra_model_paths_config: + for config_path in itertools.chain(*args.extra_model_paths_config): + load_extra_path_config(config_path, folder_names=folder_names) + + +async def executor_from_args(configuration:Optional[Configuration]=None): + if configuration is None: + from ..cli_args import args + configuration = args + + if configuration.executor_factory in ("ThreadPoolExecutor", "ContextVarExecutor"): + executor = ContextVarExecutor() + elif configuration.executor_factory in ("ProcessPoolExecutor", "ContextVarProcessPoolExecutor"): + executor = ContextVarProcessPoolExecutor() + else: + # default executor + executor = ContextVarExecutor() + return executor diff --git a/comfy/component_model/suppress_stdout.py b/comfy/component_model/suppress_stdout.py new file mode 100644 index 000000000..3916ef048 --- /dev/null +++ b/comfy/component_model/suppress_stdout.py @@ -0,0 +1,14 @@ +import contextlib +import io +import sys + + +@contextlib.contextmanager +def suppress_stdout_stderr(): + new_stdout, new_stderr = io.StringIO(), io.StringIO() + old_stdout, old_stderr = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = new_stdout, new_stderr + yield + finally: + sys.stdout, sys.stderr = old_stdout, old_stderr diff --git a/comfy/distributed/distributed_prompt_worker.py b/comfy/distributed/distributed_prompt_worker.py index 974f5f8d6..a5e52754c 100644 --- a/comfy/distributed/distributed_prompt_worker.py +++ b/comfy/distributed/distributed_prompt_worker.py @@ -14,7 +14,7 @@ from .executors import ContextVarExecutor 
from .distributed_progress import DistributedExecutorToClientProgress from .distributed_types import RpcRequest, RpcReply from .process_pool_executor import ProcessPoolExecutor -from ..client.embedded_comfy_client import EmbeddedComfyClient +from ..client.embedded_comfy_client import Comfy from ..cmd.main_pre import tracer from ..component_model.queue_types import ExecutionStatus @@ -24,7 +24,7 @@ class DistributedPromptWorker: A distributed prompt worker. """ - def __init__(self, embedded_comfy_client: Optional[EmbeddedComfyClient] = None, + def __init__(self, embedded_comfy_client: Optional[Comfy] = None, connection_uri: str = "amqp://localhost:5672/", queue_name: str = "comfyui", health_check_port: int = 9090, @@ -124,7 +124,7 @@ class DistributedPromptWorker: self._rpc = await JsonRPC.create(channel=self._channel, auto_delete=True, durable=False) if self._embedded_comfy_client is None: - self._embedded_comfy_client = EmbeddedComfyClient(progress_handler=DistributedExecutorToClientProgress(self._rpc, self._queue_name, self._loop), executor=self._executor) + self._embedded_comfy_client = Comfy(progress_handler=DistributedExecutorToClientProgress(self._rpc, self._queue_name, self._loop), executor=self._executor) if not self._embedded_comfy_client.is_running: await self._exit_stack.enter_async_context(self._embedded_comfy_client) diff --git a/comfy/distributed/distributed_types.py b/comfy/distributed/distributed_types.py index 2ecde40f0..35c420d66 100644 --- a/comfy/distributed/distributed_types.py +++ b/comfy/distributed/distributed_types.py @@ -1,12 +1,10 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Tuple, Literal, List, Callable +from typing import Tuple, Literal, List from ..api.components.schema.prompt import PromptDict, Prompt from ..auth.permissions import ComfyJwt, jwt_decode -from ..cli_args_types import Configuration -from ..component_model.executor_types import ExecutorToClientProgress from ..component_model.queue_types import NamedQueueTuple, TaskInvocation, ExecutionStatus diff --git a/comfy/entrypoints/__init__.py b/comfy/entrypoints/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/comfy/entrypoints/worker.py b/comfy/entrypoints/worker.py new file mode 100644 index 000000000..9e180b251 --- /dev/null +++ b/comfy/entrypoints/worker.py @@ -0,0 +1,34 @@ +import asyncio + +from ..cmd.main_pre import args +from ..component_model.entrypoints_common import configure_application_paths, executor_from_args +from ..distributed.executors import ContextVarExecutor, ContextVarProcessPoolExecutor + + +async def main(): + # assume we are a worker + from ..distributed.distributed_prompt_worker import DistributedPromptWorker + + args.distributed_queue_worker = True + args.distributed_queue_frontend = False + assert args.distributed_queue_connection_uri is not None, "Set the --distributed-queue-connection-uri argument to your RabbitMQ server" + + configure_application_paths(args) + executor = await executor_from_args(args) + + async with DistributedPromptWorker(connection_uri=args.distributed_queue_connection_uri, + queue_name=args.distributed_queue_name, + executor=executor): + stop = asyncio.Event() + try: + await stop.wait() + except asyncio.CancelledError: + pass + + +def entrypoint(): + asyncio.run(main()) + + +if __name__ == "__main__": + entrypoint() diff --git a/comfy/entrypoints/workflow.py b/comfy/entrypoints/workflow.py new file mode 100644 index 000000000..b8bdee4e4 --- /dev/null +++ 
b/comfy/entrypoints/workflow.py @@ -0,0 +1,46 @@ +import asyncio +import json +import logging +from typing import Optional, Literal + +import typer + +from ..cmd.main_pre import args +from ..cli_args_types import Configuration +from ..component_model.asyncio_files import stream_json_objects +from ..client.embedded_comfy_client import Comfy +from ..component_model.entrypoints_common import configure_application_paths, executor_from_args + +logger = logging.getLogger(__name__) + + +async def main(): + workflows = args.workflows + assert len(workflows) > 0, "specify at least one path to a workflow, a literal workflow json starting with `{` or `-` (for standard in) using --workflows cli arg" + configure_application_paths(args) + executor = await executor_from_args(args) + + await run_workflows(executor, workflows) + + +async def run_workflows(executor, workflows: list[str | Literal["-"]], configuration: Optional[Configuration] = None): + if configuration is None: + configuration = args + async with Comfy(executor=executor, configuration=configuration) as comfy: + for workflow in workflows: + obj: dict + async for obj in stream_json_objects(workflow): + try: + res = await comfy.queue_prompt_api(obj) + typer.echo(json.dumps(res.outputs)) + except asyncio.CancelledError: + logger.info("Exiting gracefully.") + break + + +def entrypoint(): + asyncio.run(main()) + + +if __name__ == "__main__": + entrypoint() diff --git a/comfy/execution_ext.py b/comfy/execution_ext.py index 24012337e..88a50fd95 100644 --- a/comfy/execution_ext.py +++ b/comfy/execution_ext.py @@ -1,4 +1,5 @@ import importlib +import sys def import_exception_class(fqn: str): @@ -49,7 +50,7 @@ def should_panic_on_exception(exc: Exception, panic_classes: list[str]) -> bool: exception_types = [import_exception_class(name) for name in expanded_classes if name] except ValueError as e: - print(f"Warning: {str(e)}") + print(f"Warning: {str(e)}", file=sys.stderr) return False # Check if exception matches any of the specified types diff --git a/comfy/extra_config.py b/comfy/extra_config.py index 72a242af6..462105b6f 100644 --- a/comfy/extra_config.py +++ b/comfy/extra_config.py @@ -1,12 +1,13 @@ import logging import os +from typing import Optional import yaml +from .component_model.folder_path_types import FolderNames -def load_extra_path_config(yaml_path): +def load_extra_path_config(yaml_path, folder_names: Optional[FolderNames] = None): from .cmd import folder_paths - with open(yaml_path, 'r', encoding='utf-8') as stream: config = yaml.safe_load(stream) yaml_dir = os.path.dirname(os.path.abspath(yaml_path)) @@ -34,4 +35,4 @@ def load_extra_path_config(yaml_path): full_path = os.path.abspath(os.path.join(yaml_dir, y)) normalized_path = os.path.normpath(full_path) logging.info("Adding extra search path {} {}".format(x, normalized_path)) - folder_paths.add_model_folder_path(x, normalized_path, is_default=is_default) + folder_paths.add_model_folder_path(x, normalized_path, is_default=is_default, folder_names_and_paths=folder_names) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 92fc0899f..de0dea230 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -600,23 +600,23 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape optimized_attention = attention_basic if model_management.sage_attention_enabled(): - logger.info("Using sage attention") + logger.debug("Using sage attention") optimized_attention = attention_sage elif 
model_management.xformers_enabled(): - logger.info("Using xformers attention") + logger.debug("Using xformers attention") optimized_attention = attention_xformers elif model_management.flash_attention_enabled(): - logging.info("Using Flash Attention") + logging.debug("Using Flash Attention") optimized_attention = attention_flash elif model_management.pytorch_attention_enabled(): - logger.info("Using pytorch attention") + logger.debug("Using pytorch attention") optimized_attention = attention_pytorch else: if args.use_split_cross_attention: - logger.info("Using split optimization for attention") + logger.debug("Using split optimization for attention") optimized_attention = attention_split else: - logger.info("Using sub quadratic optimization for attention, if you have memory or speed issues try using: --use-split-cross-attention") + logger.debug("Using sub quadratic optimization for attention, if you have memory or speed issues try using: --use-split-cross-attention") optimized_attention = attention_sub_quad optimized_attention_masked = optimized_attention diff --git a/comfy/model_management.py b/comfy/model_management.py index 0bdb8ce04..12db8b038 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -263,7 +263,7 @@ try: logger.debug("pytorch version: {}".format(torch_version)) mac_ver = mac_version() if mac_ver is not None: - logging.info("Mac Version {}".format(mac_ver)) + logger.debug("Mac Version {}".format(mac_ver)) except: pass @@ -343,7 +343,7 @@ except: try: if is_amd(): arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName - logging.info("AMD arch: {}".format(arch)) + logger.info("AMD arch: {}".format(arch)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx1100", "gfx1101"]): # TODO: more arches @@ -361,7 +361,7 @@ try: if is_nvidia() and PerformanceFeature.Fp16Accumulation in args.fast: torch.backends.cuda.matmul.allow_fp16_accumulation = True PRIORITIZE_FP16 = True # TODO: limit to cards where it actually boosts performance - logging.info("Enabled fp16 accumulation.") + logger.info("Enabled fp16 accumulation.") except: pass @@ -369,7 +369,7 @@ try: if torch_version_numeric[0] == 2 and torch_version_numeric[1] >= 5: torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True) # pylint: disable=no-member except: - logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp") + logger.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp") if args.lowvram: set_vram_to = VRAMState.LOW_VRAM @@ -386,8 +386,8 @@ if args.force_fp32: logger.info("Forcing FP32, if this improves things please report it.") FORCE_FP32 = True -if args.force_fp16 or cpu_state == CPUState.MPS: - logger.info("Forcing FP16.") +if args.force_fp16: + logger.debug("Forcing FP16.") FORCE_FP16 = True if args.force_bf16: @@ -1256,7 +1256,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma return True if (device is not None and is_device_mps(device)) or mps_mode(): - return True + return not bfloat16_support_mps(device) if cpu_mode(): return False @@ -1327,9 +1327,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False if (device is not None and is_device_mps(device)) or mps_mode(): - if mac_version() < (14,): - return False - return True + return bfloat16_support_mps(device) if 
cpu_mode(): return False @@ -1368,6 +1366,19 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False +def bfloat16_support_mps(device): + # test bfloat 16 + try: + x = torch.ones(1, dtype=torch.bfloat16, device=device) + x = x + 1.0 + _ = repr(x) + supported = True + del x + except: + supported = False + return supported + + def supports_fp8_compute(device=None): if not is_nvidia(): return False diff --git a/comfy/nodes/vanilla_node_importing.py b/comfy/nodes/vanilla_node_importing.py index 8c31591a8..6a50e724e 100644 --- a/comfy/nodes/vanilla_node_importing.py +++ b/comfy/nodes/vanilla_node_importing.py @@ -30,7 +30,7 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str]) spec.loader.exec_module(module) return True except Exception as e: - print(f"Failed to execute startup-script: {script_path} / {e}") + print(f"Failed to execute startup-script: {script_path} / {e}", file=sys.stderr) return False node_prestartup_times = [] @@ -52,14 +52,14 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str]) success = execute_script(script_path) node_prestartup_times.append((time.perf_counter() - time_before, module_path, success)) if len(node_prestartup_times) > 0: - print("\nPrestartup times for custom nodes:") + print("\nPrestartup times for custom nodes:", file=sys.stderr) for n in sorted(node_prestartup_times): if n[2]: import_message = "" else: import_message = " (PRESTARTUP FAILED)" - print("{:6.1f} seconds{}:".format(n[0], import_message), n[1]) - print() + print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr) + print("\n", file=sys.stderr) @contextmanager @@ -118,12 +118,12 @@ def _vanilla_load_custom_nodes_1(module_path, ignore=set()) -> ExportedNodes: exported_nodes.NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) return exported_nodes else: - print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") + print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.", file=sys.stderr) return exported_nodes except Exception as e: import traceback print(traceback.format_exc()) - print(f"Cannot import {module_path} module for custom nodes:", e) + print(f"Cannot import {module_path} module for custom nodes:", e, file=sys.stderr) return exported_nodes @@ -151,14 +151,14 @@ def _vanilla_load_custom_nodes_2(node_paths: Iterable[str]) -> ExportedNodes: exported_nodes.update(possible_exported_nodes) if len(node_import_times) > 0: - print("\nImport times for custom nodes:") + print("\nImport times for custom nodes:", file=sys.stderr) for n in sorted(node_import_times): if n[2]: import_message = "" else: import_message = " (IMPORT FAILED)" - print("{:6.1f} seconds{}:".format(n[0], import_message), n[1]) - print() + print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr) + print("\n", file=sys.stderr) return exported_nodes diff --git a/comfy/vendor/appdirs.py b/comfy/vendor/appdirs.py index 32e0db762..c1d9079fe 100644 --- a/comfy/vendor/appdirs.py +++ b/comfy/vendor/appdirs.py @@ -535,24 +535,24 @@ if __name__ == "__main__": "site_data_dir", "site_config_dir") - print("-- app dirs %s --" % __version__) + print("-- app dirs %s --" % __version__, file=sys.stderr) - print("-- app dirs (with optional 'version')") + print("-- app dirs (with optional 'version')", file=sys.stderr) dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: print("%s: %s" % (prop, getattr(dirs, 
 
-    print("\n-- app dirs (without optional 'version')")
+    print("\n-- app dirs (without optional 'version')", file=sys.stderr)
     dirs = AppDirs(appname, appauthor)
     for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
+        print("%s: %s" % (prop, getattr(dirs, prop)), file=sys.stderr)
 
-    print("\n-- app dirs (without optional 'appauthor')")
+    print("\n-- app dirs (without optional 'appauthor')", file=sys.stderr)
     dirs = AppDirs(appname)
     for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
+        print("%s: %s" % (prop, getattr(dirs, prop)), file=sys.stderr)
 
-    print("\n-- app dirs (with disabled 'appauthor')")
+    print("\n-- app dirs (with disabled 'appauthor')", file=sys.stderr)
     dirs = AppDirs(appname, appauthor=False)
     for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
\ No newline at end of file
+        print("%s: %s" % (prop, getattr(dirs, prop)), file=sys.stderr)
\ No newline at end of file
diff --git a/comfy_extras/nodes/nodes_nf4.py b/comfy_extras/nodes/nodes_nf4.py
index 2f10a6007..c5735c93f 100644
--- a/comfy_extras/nodes/nodes_nf4.py
+++ b/comfy_extras/nodes/nodes_nf4.py
@@ -1,8 +1,11 @@
 import dataclasses
 from typing import Any
 
+from comfy.component_model.suppress_stdout import suppress_stdout_stderr
+
 try:
-    import bitsandbytes as bnb
+    with suppress_stdout_stderr():
+        import bitsandbytes as bnb
     from bitsandbytes.nn.modules import Params4bit, QuantState
 
     has_bitsandbytes = True
diff --git a/docs/examples/script_examples/basic_api_example.py b/docs/examples/script_examples/basic_api_example.py
index 8758322e6..924161d4a 100644
--- a/docs/examples/script_examples/basic_api_example.py
+++ b/docs/examples/script_examples/basic_api_example.py
@@ -136,8 +136,8 @@ async def main():
     # configuration.cwd = os.path.dirname(__file__)
     configuration = Configuration()
 
-    from comfy.client.embedded_comfy_client import EmbeddedComfyClient
-    async with EmbeddedComfyClient(configuration=configuration) as client:
+    from comfy.client.embedded_comfy_client import Comfy
+    async with Comfy(configuration=configuration) as client:
         # This will run your prompt
         outputs = await client.queue_prompt(prompt)
 
diff --git a/pyproject.toml b/pyproject.toml
index 800a9f95e..9046fddac 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,6 +96,8 @@ dependencies = [
     "jax",
     "colour",
     "av",
+    "typer",
+    "ijson",
 ]
 
 [build-system]
@@ -157,7 +159,8 @@ withtriton = ["comfyui[cuda, triton]"] # Depends on 'cuda' and 'triton' extras
 
 [project.scripts]
 comfyui = "comfy.cmd.main:entrypoint"
-comfyui-worker = "comfy.cmd.worker:entrypoint"
+comfyui-worker = "comfy.entrypoints.worker:entrypoint"
+comfyui-workflow = "comfy.entrypoints.workflow:entrypoint"
 
 [project.urls]
 Homepage = "https://github.com/comfyanonymous/ComfyUI" # Example
diff --git a/tests/distributed/test_distributed_queue.py b/tests/distributed/test_distributed_queue.py
index 81f969f81..0d3083cdf 100644
--- a/tests/distributed/test_distributed_queue.py
+++ b/tests/distributed/test_distributed_queue.py
@@ -12,7 +12,7 @@
 from aiohttp import ClientSession
 from testcontainers.rabbitmq import RabbitMqContainer
 from comfy.client.aio_client import AsyncRemoteComfyClient
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 from comfy.client.sdxl_with_refiner_workflow import sdxl_workflow_with_refiner
 from comfy.component_model.executor_types import Executor
 from comfy.component_model.make_mutable import make_mutable
@@ -86,7 +86,7 @@ async def test_distributed_prompt_queues_same_process():
     assert incoming is not None
     incoming_named = NamedQueueTuple(incoming)
     assert incoming_named.prompt_id == incoming_prompt_id
-    async with EmbeddedComfyClient() as embedded_comfy_client:
+    async with Comfy() as embedded_comfy_client:
         outputs = await embedded_comfy_client.queue_prompt(incoming_named.prompt, incoming_named.prompt_id)
         worker.task_done(incoming_named.prompt_id, outputs, ExecutionStatus("success", True, []))
 
diff --git a/tests/inference/test_execution.py b/tests/inference/test_execution.py
index a33f20e62..6b3992282 100644
--- a/tests/inference/test_execution.py
+++ b/tests/inference/test_execution.py
@@ -9,7 +9,7 @@
 from PIL import Image
 from pytest import fixture
 from comfy.cli_args import default_configuration
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 from comfy.component_model.executor_types import SendSyncEvent, SendSyncData, ExecutingMessage, ExecutionErrorMessage, \
     DependencyCycleError
 from comfy.distributed.server_stub import ServerStub
@@ -62,7 +62,7 @@ class _ProgressHandler(ServerStub):
 
 
 class ComfyClient:
-    def __init__(self, embedded_client: EmbeddedComfyClient, progress_handler: _ProgressHandler):
+    def __init__(self, embedded_client: Comfy, progress_handler: _ProgressHandler):
         self.embedded_client = embedded_client
         self.progress_handler = progress_handler
 
@@ -116,7 +116,7 @@ class TestExecution:
         configuration.cache_lru = lru_size
         progress_handler = _ProgressHandler()
         with context_add_custom_nodes(ExportedNodes(NODE_CLASS_MAPPINGS=NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS=NODE_DISPLAY_NAME_MAPPINGS)):
-            async with EmbeddedComfyClient(configuration, progress_handler=progress_handler) as embedded_client:
+            async with Comfy(configuration, progress_handler=progress_handler) as embedded_client:
                 yield ComfyClient(embedded_client, progress_handler)
 
     @fixture
diff --git a/tests/inference/test_workflows.py b/tests/inference/test_workflows.py
index 98417e70f..d20948707 100644
--- a/tests/inference/test_workflows.py
+++ b/tests/inference/test_workflows.py
@@ -5,7 +5,7 @@
 from importlib.abc import Traversable
 import pytest
 from comfy.api.components.schema.prompt import Prompt
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 from comfy.model_downloader import add_known_models, KNOWN_LORAS
 from comfy.model_downloader_types import CivitFile, HuggingFile
 from comfy_extras.nodes.nodes_audio import TorchAudioNotFoundError
@@ -14,8 +14,8 @@ from . import workflows
 
 
 @pytest.fixture(scope="module", autouse=False)
-async def client(tmp_path_factory) -> EmbeddedComfyClient:
-    async with EmbeddedComfyClient() as client:
+async def client(tmp_path_factory) -> Comfy:
+    async with Comfy() as client:
         yield client
 
 
@@ -28,7 +28,7 @@ def _prepare_for_workflows() -> dict[str, Traversable]:
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("workflow_name, workflow_file", _prepare_for_workflows().items())
-async def test_workflow(workflow_name: str, workflow_file: Traversable, has_gpu: bool, client: EmbeddedComfyClient):
+async def test_workflow(workflow_name: str, workflow_file: Traversable, has_gpu: bool, client: Comfy):
     if not has_gpu:
         pytest.skip("requires gpu")
 
diff --git a/tests/issues/__test_25_respect_cwd_param.py b/tests/issues/__test_25_respect_cwd_param.py
index de94e6914..5270c7bc0 100644
--- a/tests/issues/__test_25_respect_cwd_param.py
+++ b/tests/issues/__test_25_respect_cwd_param.py
@@ -6,7 +6,7 @@
 import pytest
 from comfy.api.components.schema.prompt import Prompt
 from comfy.cli_args_types import Configuration
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 
 _TEST_WORKFLOW = {
     "0": {
@@ -29,7 +29,7 @@ async def test_respect_cwd_param():
     from comfy.cmd.folder_paths import models_dir
     assert os.path.commonpath([os.getcwd(), models_dir]) == os.getcwd(), "at the time models_dir is accessed, the cwd should be the actual cwd, since there is no other configuration"
 
-    client = EmbeddedComfyClient(config)
+    client = Comfy(config)
     prompt = Prompt.validate(_TEST_WORKFLOW)
     outputs = await client.queue_prompt_api(prompt)
     path_as_imported = outputs.outputs["0"]["path"][0]
diff --git a/tests/library/test_embedded_client.py b/tests/library/test_embedded_client.py
index 6be3b52d1..3c20de91c 100644
--- a/tests/library/test_embedded_client.py
+++ b/tests/library/test_embedded_client.py
@@ -4,7 +4,7 @@
 import pytest
 import torch
 
 from comfy.cli_args_types import Configuration
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 from comfy.client.sdxl_with_refiner_workflow import sdxl_workflow_with_refiner
 
@@ -16,7 +16,7 @@ async def test_cuda_memory_usage():
     device = torch.device("cuda")
     starting_memory = torch.cuda.memory_allocated(device)
 
-    async with EmbeddedComfyClient() as client:
+    async with Comfy() as client:
         prompt = sdxl_workflow_with_refiner("test")
         outputs = await client.queue_prompt(prompt)
         assert outputs["13"]["images"][0]["abs_path"] is not None
@@ -29,7 +29,7 @@ async def test_cuda_memory_usage():
 
 @pytest.mark.asyncio
 async def test_embedded_comfy():
-    async with EmbeddedComfyClient() as client:
+    async with Comfy() as client:
         prompt = sdxl_workflow_with_refiner("test")
         outputs = await client.queue_prompt(prompt)
         assert outputs["13"]["images"][0]["abs_path"] is not None
@@ -37,14 +37,14 @@ async def test_embedded_comfy():
 @pytest.mark.asyncio
 async def test_configuration_options():
     config = Configuration()
-    async with EmbeddedComfyClient(configuration=config) as client:
+    async with Comfy(configuration=config) as client:
         prompt = sdxl_workflow_with_refiner("test")
         outputs = await client.queue_prompt(prompt)
         assert outputs["13"]["images"][0]["abs_path"] is not None
 
 @pytest.mark.asyncio
 async def test_multithreaded_comfy():
-    async with EmbeddedComfyClient(max_workers=2) as client:
+    async with Comfy(max_workers=2) as client:
         prompt = sdxl_workflow_with_refiner("test")
         outputs_iter = await asyncio.gather(*[client.queue_prompt(prompt) for _ in range(4)])
         assert all(outputs["13"]["images"][0]["abs_path"] is not None for outputs in outputs_iter)
\ No newline at end of file
diff --git a/tests/unit/test_panics.py b/tests/unit/test_panics.py
index 7d195b689..d7919c402 100644
--- a/tests/unit/test_panics.py
+++ b/tests/unit/test_panics.py
@@ -6,7 +6,7 @@
 import pytest
 import torch
 from comfy.cli_args_types import Configuration
-from comfy.client.embedded_comfy_client import EmbeddedComfyClient
+from comfy.client.embedded_comfy_client import Comfy
 from comfy.component_model.make_mutable import make_mutable
 from comfy.component_model.tensor_types import RGBImageBatch
 from comfy.distributed.executors import ContextVarExecutor
@@ -153,7 +153,7 @@ async def test_panic_on_exception_with_executor(executor_cls, executor_kwargs):
                                                 NODE_DISPLAY_NAME_MAPPINGS=TEST_NODE_DISPLAY_NAME_MAPPINGS)),
           patch('sys.exit') as mock_exit):
         try:
-            async with EmbeddedComfyClient(configuration=config, executor=executor) as client:
+            async with Comfy(configuration=config, executor=executor) as client:
                 # Queue our failing workflow
                 await client.queue_prompt(create_failing_workflow())
         except SystemExit:
@@ -188,7 +188,7 @@ async def test_no_panic_when_disabled_with_executor(executor_cls, executor_kwarg
                                                 NODE_DISPLAY_NAME_MAPPINGS=TEST_NODE_DISPLAY_NAME_MAPPINGS)),
           patch('sys.exit') as mock_exit):
         try:
-            async with EmbeddedComfyClient(configuration=config, executor=executor) as client:
+            async with Comfy(configuration=config, executor=executor) as client:
                 # Queue our failing workflow
                 await client.queue_prompt(create_failing_workflow())
         except SystemExit:
@@ -213,7 +213,7 @@ async def test_executor_cleanup(executor_cls, executor_kwargs):
 
     with context_add_custom_nodes(ExportedNodes(NODE_CLASS_MAPPINGS=TEST_NODE_CLASS_MAPPINGS,
                                                 NODE_DISPLAY_NAME_MAPPINGS=TEST_NODE_DISPLAY_NAME_MAPPINGS)):
-        async with EmbeddedComfyClient(executor=executor) as client:
+        async with Comfy(executor=executor) as client:
             # Create a simple workflow that doesn't raise
             workflow = create_failing_workflow()
             workflow["1"]["inputs"]["should_raise"] = False
@@ -235,7 +235,7 @@ async def test_parallel_execution(executor_cls, executor_kwargs):
 
     with context_add_custom_nodes(ExportedNodes(NODE_CLASS_MAPPINGS=TEST_NODE_CLASS_MAPPINGS,
                                                 NODE_DISPLAY_NAME_MAPPINGS=TEST_NODE_DISPLAY_NAME_MAPPINGS)):
-        async with EmbeddedComfyClient(executor=executor) as client:
+        async with Comfy(executor=executor) as client:
             # Create multiple non-failing workflows
             workflow = create_failing_workflow()
             workflow["1"]["inputs"]["should_raise"] = False

From c74cf3866f6fcb8d12e58bfe5bfe6c1c01fb1ed6 Mon Sep 17 00:00:00 2001
From: Benjamin Berman
Date: Fri, 16 May 2025 05:25:29 -0700
Subject: [PATCH 09/10] switch to hatchling

---
 pyproject.toml | 64 ++++++++++++++++++++++++++++----------------------
 1 file changed, 36 insertions(+), 28 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 9046fddac..c89f5bfb9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ readme = "README.md" # Optional: if you have a README
 authors = [
     { name = "Contributors_of_ComfyUI" },
 ]
-requires-python = ">=3.10,<3.14"
+requires-python = ">=3.10"
 license = { text = "Specify License Here" } # Optional: Add your license
 classifiers = [# Optional: Standard PyPI classifiers
     "Programming Language :: Python :: 3",
@@ -32,7 +32,7 @@ dependencies = [
     "peft>=0.10.0",
     "torchinfo",
     "safetensors>=0.4.2",
-    "bitsandbytes",
+    "bitsandbytes; platform_system != 'Darwin'",
     "aiohttp>=3.11.8",
     "yarl>=1.9.4",
     "accelerate>=0.25.0",
@@ -98,11 +98,12 @@ dependencies = [
     "av",
     "typer",
     "ijson",
+    "scikit-learn>=1.4.1"
 ]
 
 [build-system]
-requires = ["setuptools>=61.0", "wheel", "pip"]
-build-backend = "setuptools.build_meta"
+requires = ["hatchling"]
+build-backend = "hatchling.build"
 
 [tool.setuptools.packages.find]
 where = ["."]
@@ -111,21 +112,21 @@ namespaces = false
 
 [project.optional-dependencies]
 cpu = [
-    "torch>=2.3.0",
-    "torchvision>=0.15.0",
-    "torchaudio>=2.3.0",
+    "torch",
+    "torchvision",
+    "torchaudio",
 ]
 
 cuda = [
-    "torch>=2.3.0",
-    "torchvision>=0.15.0",
-    "torchaudio>=2.3.0",
+    "torch",
+    "torchvision",
+    "torchaudio",
 ]
 
 rocm = [
-    "torch>=2.3.0",
-    "torchvision>=0.18.0",
-    "torchaudio>=2.3.0",
+    "torch",
+    "torchvision",
+    "torchaudio",
 ]
 
 dev = [
@@ -146,11 +147,9 @@ dev = [
 
 triton = [
     "triton ; sys_platform == 'Linux'",
-    # Using direct URL references with markers for Windows + Python versions
     'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp312-cp312-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.12"',
     'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp311-cp311-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.11"',
     'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp310-cp310-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.10"',
-    # Note: No Python 3.13 wheel provided in requirements-triton.txt for v3.2.0 post9 link
 ]
 
 withtorch = ["comfyui[cuda]"] # Depends on the 'cuda' extra
@@ -168,7 +167,7 @@ Repository = "https://github.com/comfyanonymous/ComfyUI" # Example
 
 [tool.uv]
 conflicts = [
-    [ { extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" } ],
+    [{ extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" }],
 ]
 
 [[tool.uv.index]]
 name = "pytorch-cpu"
 url = "https://download.pytorch.org/whl/cpu"
 explicit = true
 
 [[tool.uv.index]]
-name = "pytorch-cu124"
-url = "https://download.pytorch.org/whl/cu124"
+name = "pytorch-cu126"
+url = "https://download.pytorch.org/whl/cu126"
 explicit = true
 
 [[tool.uv.index]]
-name = "pytorch-rocm" # Added ROCm index
-url = "https://download.pytorch.org/whl/rocm6.2" # Using ROCm 6.2 index from examples
+name = "pytorch-rocm"
+url = "https://download.pytorch.org/whl/rocm6.3"
 explicit = true
 
 [tool.uv.sources]
 torch = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu124", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, # ROCm only on Linux
+    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
 ]
 torchvision = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu124", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, # ROCm only on Linux
+    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
 ]
 torchaudio = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu124", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, # ROCm only on Linux
+    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
+]
+comfyui-frontend-package = [
+# { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" },
+    { workspace = true }
 ]
-comfyui-frontend-package = { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" }
 
 
 [tool.ruff]
@@ -213,4 +215,10 @@ lint.select = [
     "W", # pycodestyle Warnings
     "F", # Pyflakes
 ]
-exclude = ["*.ipynb"]
\ No newline at end of file
+exclude = ["*.ipynb"]
+
+[tool.hatch.metadata]
+allow-direct-references = true
+
+[tool.hatch.build.targets.wheel]
+packages = ["comfy/", "comfy_extras/"]
\ No newline at end of file

From 9e0522b7a8957ec8c4e1d90b350ab535e7db9cc5 Mon Sep 17 00:00:00 2001
From: doombubbles
Date: Thu, 1 May 2025 11:44:23 -0700
Subject: [PATCH 10/10] Update ideogram nodes to v3

---
 comfy_extras/nodes/nodes_ideogram.py | 37 +++++++++++-----------------
 1 file changed, 15 insertions(+), 22 deletions(-)

diff --git a/comfy_extras/nodes/nodes_ideogram.py b/comfy_extras/nodes/nodes_ideogram.py
index 8b96e9cd8..d61460e56 100644
--- a/comfy_extras/nodes/nodes_ideogram.py
+++ b/comfy_extras/nodes/nodes_ideogram.py
@@ -62,22 +62,20 @@ class IdeogramGenerate(CustomNode):
 
         headers = {"Api-Key": api_key, "Content-Type": "application/json"}
         payload = {
-            "image_request": {
-                "prompt": prompt,
-                "resolution": resolution,
-                "model": model,
-                "magic_prompt_option": magic_prompt_option,
-                "num_images": num_images,
-                "style_type": style_type,
-            }
+            "prompt": prompt,
+            "resolution": resolution,
+            "model": model,
+            "magic_prompt": magic_prompt_option,
+            "num_images": num_images,
+            "style_type": style_type,
         }
 
         if negative_prompt:
-            payload["image_request"]["negative_prompt"] = negative_prompt
+            payload["negative_prompt"] = negative_prompt
         if seed:
-            payload["image_request"]["seed"] = seed
+            payload["seed"] = seed
 
-        response = requests.post("https://api.ideogram.ai/generate", headers=headers, json=payload)
+        response = requests.post("https://api.ideogram.ai/v1/ideogram-v3/generate", headers=headers, json=payload)
         response.raise_for_status()
 
         images = []
@@ -133,21 +131,19 @@ class IdeogramEdit(CustomNode):
         mask_pil.save(mask_bytes, format="PNG")
 
         files = {
-            "image_file": ("image.png", image_bytes.getvalue()),
+            "image": ("image.png", image_bytes.getvalue()),
             "mask": ("mask.png", mask_bytes.getvalue()),
         }
         data = {
             "prompt": prompt,
-            "model": model,
-            "magic_prompt_option": magic_prompt_option,
+            "magic_prompt": magic_prompt_option,
             "num_images": num_images,
-            "style_type": style_type,
         }
 
         if seed:
             data["seed"] = seed
 
-        response = requests.post("https://api.ideogram.ai/edit", headers=headers, files=files, data=data)
+        response = requests.post("https://api.ideogram.ai/v1/ideogram-v3/edit", headers=headers, files=files, data=data)
         response.raise_for_status()
 
         for item in response.json()["data"]:
@@ -198,15 +194,14 @@ class IdeogramRemix(CustomNode):
         image_pil.save(image_bytes, format="PNG")
 
         files = {
-            "image_file": ("image.png", image_bytes.getvalue()),
+            "image": ("image.png", image_bytes.getvalue()),
         }
 
         data = {
             "prompt": prompt,
             "resolution": resolution,
-            "model": model,
             "image_weight": image_weight,
-            "magic_prompt_option": magic_prompt_option,
+            "magic_prompt": magic_prompt_option,
             "num_images": num_images,
             "style_type": style_type,
         }
@@ -218,9 +213,7 @@ class IdeogramRemix(CustomNode):
 
         # data = {"image_request": data}
 
-        response = requests.post("https://api.ideogram.ai/remix", headers=headers, files=files, data={
-            "image_request": json.dumps(data)
-        })
+        response = requests.post("https://api.ideogram.ai/v1/ideogram-v3/remix", headers=headers, files=files, data=data)
         response.raise_for_status()
 
         for item in response.json()["data"]:
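
The request shape introduced by the final patch can be exercised outside ComfyUI with a short standalone script. This is only a sketch mirroring the payload the updated IdeogramGenerate node builds above; the IDEOGRAM_API_KEY environment variable, the placeholder parameter values, and the "url" key read from the response are assumptions and not part of the patch.

    import os

    import requests

    # Sketch: post a minimal generate request against the v3 endpoint used in the patch above.
    api_key = os.environ["IDEOGRAM_API_KEY"]  # assumed environment variable holding the API key
    payload = {
        "prompt": "a lighthouse at dusk",  # placeholder prompt
        "resolution": "1024x1024",         # placeholder; use a resolution string the API accepts
        "model": "V_3",                    # placeholder model identifier
        "magic_prompt": "AUTO",
        "num_images": 1,
        "style_type": "GENERAL",
    }
    response = requests.post(
        "https://api.ideogram.ai/v1/ideogram-v3/generate",
        headers={"Api-Key": api_key, "Content-Type": "application/json"},
        json=payload,
    )
    response.raise_for_status()
    for item in response.json()["data"]:
        # Assumed response shape: a list of generated image entries with a URL field.
        print(item.get("url"))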