Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-12 07:10:52 +08:00
Plugins can add configuration
This commit is contained in:
parent: c941ee09fc
commit: fca0d8a050

README.md (136 lines changed)
@@ -255,7 +255,13 @@ setup(

All `.py` files located in the package specified by the entry point bearing your package's name will be scanned for node class mappings declared like this:

**some_nodes.py**:

```py
from comfy.nodes.package_typing import CustomNode


class Binary_Preprocessor(CustomNode):
    ...


NODE_CLASS_MAPPINGS = {
    "BinaryPreprocessor": Binary_Preprocessor
}
@@ -265,6 +271,136 @@ NODE_DISPLAY_NAME_MAPPINGS = {
```

These packages will be scanned recursively.

Extending `comfy.nodes.package_typing.CustomNode` provides type hints for authoring nodes.
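
As a sketch of what those type hints cover, here is the node above fleshed out with a hypothetical body, assuming the standard ComfyUI custom-node conventions (`INPUT_TYPES`, `RETURN_TYPES`, `FUNCTION`, `CATEGORY`); the exact protocol is defined in `comfy.nodes.package_typing`:

```py
from comfy.nodes.package_typing import CustomNode


class Binary_Preprocessor(CustomNode):
    @classmethod
    def INPUT_TYPES(cls):
        # IMAGE tensors are batched floats in [0, 1]; the threshold is given in 8-bit units.
        return {"required": {"image": ("IMAGE",),
                             "threshold": ("INT", {"default": 128, "min": 0, "max": 255})}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"
    CATEGORY = "image/preprocessors"

    def process(self, image, threshold):
        # Binarize: pixels at or above the threshold become 1.0, the rest 0.0.
        return ((image >= threshold / 255.0).float(),)
```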
## Adding Custom Configuration

Declare an entry point for configuration hooks in your **setup.py**, pointing to a function that takes and returns a
`configargparse.ArgParser` object:

**setup.py**
```python
setup(
    name="mypackage",
    ...
    entry_points={
        'comfyui.custom_nodes': [
            'mypackage = mypackage_custom_nodes',
        ],
        'comfyui.custom_config': [
            'mypackage = mypackage_custom_config:add_configuration',
        ]
    },
)
```

**mypackage_custom_config.py**:
```python
import configargparse


def add_configuration(parser: configargparse.ArgParser) -> configargparse.ArgParser:
    parser.add_argument("--openai-api-key",
                        required=False,
                        type=str,
                        help="Configures the OpenAI API Key for the OpenAI nodes",
                        env_var="OPENAI_API_KEY")
    return parser
```

You can now see your configuration option at the bottom of the `--help` output, along with hints for how to use it:
```shell
$ comfyui --help
usage: comfyui.exe [-h] [-c CONFIG_FILE] [--write-out-config-file CONFIG_OUTPUT_PATH] [-w CWD] [-H [IP]] [--port PORT]
                   [--enable-cors-header [ORIGIN]] [--max-upload-size MAX_UPLOAD_SIZE] [--extra-model-paths-config PATH [PATH ...]]
                   ...
                   [--openai-api-key OPENAI_API_KEY]

options:
  -h, --help            show this help message and exit
  -c CONFIG_FILE, --config CONFIG_FILE
                        config file path
  --write-out-config-file CONFIG_OUTPUT_PATH
                        takes the current command line args and writes them out to a config file at the given path, then exits
  -w CWD, --cwd CWD     Specify the working directory. If not set, this is the current working directory. models/, input/, output/ and other
                        directories will be located here by default. [env var: COMFYUI_CWD]
  -H [IP], --listen [IP]
                        Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to
                        0.0.0.0. (listens on all) [env var: COMFYUI_LISTEN]
  --port PORT           Set the listen port. [env var: COMFYUI_PORT]
  ...
  --distributed-queue-name DISTRIBUTED_QUEUE_NAME
                        This name will be used by the frontends and workers to exchange prompt requests and replies. Progress updates will be
                        prefixed by the queue name, followed by a '.', then the user ID [env var: COMFYUI_DISTRIBUTED_QUEUE_NAME]
  --external-address EXTERNAL_ADDRESS
                        Specifies a base URL for external addresses reported by the API, such as for image paths. [env var:
                        COMFYUI_EXTERNAL_ADDRESS]
  --openai-api-key OPENAI_API_KEY
                        Configures the OpenAI API Key for the OpenAI nodes [env var: OPENAI_API_KEY]
```

You can now start `comfyui` with:

```shell
comfyui --openai-api-key=abcdefg12345
```

or set the environment variable you specified:

```shell
export OPENAI_API_KEY=abcdefg12345
comfyui
```

or add it to your config file:

**config.yaml**:
```yaml
openai-api-key: abcdefg12345
```

```shell
comfyui --config config.yaml
```

Since `comfyui` looks for a `config.yaml` in the current working directory by default, you can omit the argument when the file is located there:

```shell
comfyui
```
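
If the same key is set in more than one place, `configargparse` applies its usual precedence: command-line arguments override environment variables, which override config-file values. For example, this invocation runs with the key `from-cli`:

```shell
OPENAI_API_KEY=from-env comfyui --openai-api-key=from-cli
```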

Your configuration entry point should **not** import your nodes. This gives your nodes the opportunity to use the configuration you added; if your configuration entry point imported your nodes, they could be initialized before any configuration exists.

Access your configuration from `cli_args`:

```python
from comfy.cli_args import args
from comfy.cli_args_types import Configuration
from comfy.nodes.package_typing import CustomNode
from typing import Optional


# Add type hints when accessing args
class CustomConfiguration(Configuration):
    def __init__(self):
        super().__init__()
        self.openai_api_key: Optional[str] = None


args: CustomConfiguration


class OpenAINode(CustomNode):
    ...

    def execute(self):
        openai_api_key = args.openai_api_key
```
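
A pattern that follows from this rule, sketched here with the hypothetical node above: read and validate the configured value at execution time, so the node module can be imported and registered before configuration has been parsed:

```python
from comfy.cli_args import args
from comfy.nodes.package_typing import CustomNode


class OpenAINode(CustomNode):
    ...

    def execute(self):
        # Look the key up lazily, at execution time, not at import time.
        if not args.openai_api_key:
            raise RuntimeError("Set --openai-api-key or OPENAI_API_KEY to use this node")
        ...
```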
# Troubleshooting
> I see a message like `RuntimeError: '"upsample_bilinear2d_channels_last" not implemented for 'Half''`

comfy/cli_args.py:

@@ -1,13 +1,23 @@
from __future__ import annotations

import logging
from importlib.metadata import entry_points
from types import ModuleType
from typing import Optional, Any, Callable

import configargparse
import configargparse as argparse
import enum
from . import options
from .cli_args_types import LatentPreviewMethod, Configuration, ConfigurationExtender
import sys


class EnumAction(argparse.Action):
    """
    Argparse action for handling Enums
    """

    def __init__(self, **kwargs):
        # Pop off the type value
        enum_type = kwargs.pop("type", None)
@@ -15,6 +25,7 @@ class EnumAction(argparse.Action):
        # Ensure an Enum subclass is provided
        if enum_type is None:
            raise ValueError("type must be assigned an Enum when using EnumAction")
        enum_type: Any
        if not issubclass(enum_type, enum.Enum):
            raise TypeError("type must be an Enum when using EnumAction")
@@ -33,120 +44,171 @@ class EnumAction(argparse.Action):
        setattr(namespace, self.dest, value)


def create_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(default_config_files=['config.yaml', 'config.json'],
                                     auto_env_var_prefix='COMFYUI_',
                                     args_for_setting_config_path=["-c", "--config"],
                                     add_env_var_help=True, add_config_file_help=True, add_help=True,
                                     args_for_writing_out_config_file=["--write-out-config-file"])

    parser.add_argument('-w', "--cwd", type=str, default=None,
                        help="Specify the working directory. If not set, this is the current working directory. models/, input/, output/ and other directories will be located here by default.")
    parser.add_argument('-H', "--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0",
                        help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
    parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
    parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*",
                        help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
    parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")
    parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+',
                        action='append', help="Load one or more extra_model_paths.yaml files.")
    parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
    parser.add_argument("--temp-directory", type=str, default=None,
                        help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
    parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
    parser.add_argument("--auto-launch", action="store_true",
                        help="Automatically launch ComfyUI in the default browser.")
    parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
    parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID",
                        help="Set the id of the cuda device this instance will use.")
    cm_group = parser.add_mutually_exclusive_group()
    cm_group.add_argument("--cuda-malloc", action="store_true",
                          help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
    cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")

    parser.add_argument("--dont-upcast-attention", action="store_true",
                        help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")

    fp_group = parser.add_mutually_exclusive_group()
    fp_group.add_argument("--force-fp32", action="store_true",
                          help="Force fp32 (If this makes your GPU work better please report it).")
    fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.")

    fpunet_group = parser.add_mutually_exclusive_group()
    fpunet_group.add_argument("--bf16-unet", action="store_true",
                              help="Run the UNET in bf16. This should only be used for testing stuff.")
    fpunet_group.add_argument("--fp16-unet", action="store_true", help="Store unet weights in fp16.")
    fpunet_group.add_argument("--fp8_e4m3fn-unet", action="store_true", help="Store unet weights in fp8_e4m3fn.")
    fpunet_group.add_argument("--fp8_e5m2-unet", action="store_true", help="Store unet weights in fp8_e5m2.")

    fpvae_group = parser.add_mutually_exclusive_group()
    fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.")
    fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.")
    fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")

    parser.add_argument("--cpu-vae", action="store_true", help="Run the VAE on the CPU.")

    fpte_group = parser.add_mutually_exclusive_group()
    fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true",
                            help="Store text encoder weights in fp8 (e4m3fn variant).")
    fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true",
                            help="Store text encoder weights in fp8 (e5m2 variant).")
    fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
    fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")

    parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1,
                        help="Use torch-directml.")

    parser.add_argument("--disable-ipex-optimize", action="store_true",
                        help="Disables ipex.optimize when loading models with Intel GPUs.")

    parser.add_argument("--preview-method", type=LatentPreviewMethod, default=LatentPreviewMethod.NoPreviews,
                        help="Default preview method for sampler nodes.", action=EnumAction)

    attn_group = parser.add_mutually_exclusive_group()
    attn_group.add_argument("--use-split-cross-attention", action="store_true",
                            help="Use the split cross attention optimization. Ignored when xformers is used.")
    attn_group.add_argument("--use-quad-cross-attention", action="store_true",
                            help="Use the sub-quadratic cross attention optimization. Ignored when xformers is used.")
    attn_group.add_argument("--use-pytorch-cross-attention", action="store_true",
                            help="Use the new pytorch 2.0 cross attention function.")

    parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.")

    vram_group = parser.add_mutually_exclusive_group()
    vram_group.add_argument("--gpu-only", action="store_true",
                            help="Store and run everything (text encoders/CLIP models, etc... on the GPU).")
    vram_group.add_argument("--highvram", action="store_true",
                            help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
    vram_group.add_argument("--normalvram", action="store_true",
                            help="Used to force normal vram use if lowvram gets automatically enabled.")
    vram_group.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
    vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
    vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")

    parser.add_argument("--disable-smart-memory", action="store_true",
                        help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
    parser.add_argument("--deterministic", action="store_true",
                        help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")

    parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
    parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
    parser.add_argument("--windows-standalone-build", default=hasattr(sys, 'frozen') and getattr(sys, 'frozen'),
                        action="store_true",
                        help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).")

    parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")

    parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.")
    parser.add_argument("--create-directories", action="store_true",
                        help="Creates the default models/, input/, output/ and temp/ directories, then exits.")

    parser.add_argument("--plausible-analytics-base-url", required=False,
                        help="Enables server-side analytics events sent to the provided URL.")
    parser.add_argument("--plausible-analytics-domain", required=False,
                        help="Specifies the domain name for analytics events.")
    parser.add_argument("--analytics-use-identity-provider", action="store_true",
                        help="Uses platform identifiers for unique visitor analytics.")
    parser.add_argument("--distributed-queue-connection-uri", type=str, default=None,
                        help="EXAMPLE: \"amqp://guest:guest@127.0.0.1\" - Servers and clients will connect to this AMQP URL to form a distributed queue and exchange prompt execution requests and progress updates.")
    parser.add_argument(
        '--distributed-queue-worker',
        required=False,
        action="store_true",
        help='Workers will pull requests off the AMQP URL.'
    )
    parser.add_argument(
        '--distributed-queue-frontend',
        required=False,
        action="store_true",
        help='Frontends will start the web UI and connect to the provided AMQP URL to submit prompts.'
    )
    parser.add_argument("--distributed-queue-name", type=str, default="comfyui",
                        help="This name will be used by the frontends and workers to exchange prompt requests and replies. Progress updates will be prefixed by the queue name, followed by a '.', then the user ID")
    parser.add_argument("--external-address", required=False,
                        help="Specifies a base URL for external addresses reported by the API, such as for image paths.")

    # now give plugins a chance to add configuration
    for entry_point in entry_points().select(group='comfyui.custom_config'):
        try:
            plugin_callable: ConfigurationExtender | ModuleType = entry_point.load()
            if isinstance(plugin_callable, ModuleType):
                plugin_callable = ...
            else:
                parser_result = plugin_callable(parser)
                if parser_result is not None:
                    parser = parser_result
        except Exception as exc:
            logging.error("Failed to load custom config plugin", exc_info=exc)

    return parser


def parse_args(parser: Optional[argparse.ArgumentParser] = None) -> Configuration:
    if parser is None:
        parser = create_parser()

    if options.args_parsing:
        args, _ = parser.parse_known_args()
    else:
        args, _ = parser.parse_known_args([])

    if args.windows_standalone_build:
        args.auto_launch = True

    if args.disable_auto_launch:
        args.auto_launch = False

    return Configuration(**vars(args))


args = parse_args()
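
A minimal sketch of how the refactored functions compose, assuming this module is importable as `comfy.cli_args`: plugin entry points extend the parser while `create_parser()` runs, and `parse_args()` folds the parsed namespace into a `Configuration`:

```python
from comfy.cli_args import create_parser, parse_args

parser = create_parser()     # plugins have already extended the parser at this point
config = parse_args(parser)  # Configuration populated from CLI args, env vars and config files
print(config.cwd)
```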

comfy/cli_args_types.py:

@@ -1,6 +1,9 @@
# Define a class for your command-line arguments
import enum
from typing import Optional, List, TypeAlias, Callable
import configargparse as argparse

ConfigurationExtender: TypeAlias = Callable[[argparse.ArgParser], Optional[argparse.ArgParser]]


class LatentPreviewMethod(enum.Enum):
@@ -77,6 +80,7 @@ class Configuration(dict):
        external_address (str): Specifies a base URL for external addresses reported by the API, such as for image paths.
        verbose (bool): Shows extra output for debugging purposes such as import errors of custom nodes.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.cwd: Optional[str] = None
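
Since the plugin loader in `create_parser()` falls back to the original parser when the callable returns `None`, both of these hypothetical extenders satisfy `ConfigurationExtender`:

```python
import configargparse


def extend_in_place(parser: configargparse.ArgParser) -> None:
    # Mutates the parser and returns None; the loader keeps the original object.
    parser.add_argument("--my-flag", action="store_true", help="Hypothetical flag.")


def extend_and_return(parser: configargparse.ArgParser) -> configargparse.ArgParser:
    # Returns the (possibly replaced) parser; the loader adopts the return value.
    parser.add_argument("--my-other-flag", action="store_true", help="Hypothetical flag.")
    return parser
```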