mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-12 07:10:52 +08:00
Default config
This commit is contained in:
parent
9ad4e539a9
commit
d979db58b9
89
config.yaml
Normal file
89
config.yaml
Normal file
@ -0,0 +1,89 @@
config:
  network:

    # Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)
    # Type: str
    listen: 127.0.0.1

    # Set the listen port.
    # Type: int
    port: 8188

    # Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.
    # Type: str
    enable_cors_header:

  files:

    # Extra paths to scan for model files.
    extra_model_paths_config:
      a111:
        base_path: path/to/stable-diffusion-webui/
        checkpoints: models/Stable-diffusion
        configs: models/Stable-diffusion
        vae: models/VAE
        loras: models/Lora
        upscale_models: |
          models/ESRGAN
          models/SwinIR
        embeddings: embeddings
        hypernetworks: models/hypernetworks
        controlnet: models/ControlNet

    # Set the ComfyUI output directory. Leave empty to use the default.
    # Type: str
    output_directory:

  behavior:

    # Automatically launch ComfyUI in the default browser.
    # Type: bool
    auto_launch: false

    # Don't print server output.
    # Type: bool
    dont_print_server: false

    # Quick test for CI.
    # Type: bool
    quick_test_for_ci: false

    # Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).
    # Type: bool
    windows_standalone_build: false

  pytorch:

    # Set the id of the cuda device this instance will use, or leave empty to autodetect.
    # Type: int
    cuda_device:

    # Disable upcasting of attention. Can boost speed but increase the chances of black images.
    # Type: bool
    dont_upcast_attention: false

    # Force fp32 (If this makes your GPU work better please report it).
    # Type: bool
    force_fp32: false

    # Use torch-directml.
    # Type: int
    directml:

    # Type of cross attention to use
    # Choices:
    # - (empty): Don't use cross-attention.
    # - split: By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.
    # - pytorch: Used to force normal vram use if lowvram gets automatically enabled.
    cross_attention:

    # Disable xformers.
    # Type: bool
    disable_xformers: false

    # Determines how VRAM is used.
    # Choices:
    # - (empty): Autodetect the optional VRAM settings based on hardware.
    # - highvram: By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.
    # - normalvram: Used to force normal vram use if lowvram gets automatically enabled.
    # - lowvram: Split the unet in parts to use less vram.
    # - novram: When lowvram isn't enough.
    # - cpu: To use the CPU for everything (slow).
    vram:
Loading…
Reference in New Issue
Block a user