Fix cross attention docs

space-nuko 2023-06-01 13:38:00 -05:00
parent 1c22b82e09
commit 340974dec3
2 changed files with 10 additions and 10 deletions


@@ -302,8 +302,8 @@ CONFIG_OPTIONS = [
     OptionInfo("directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1,
                help="Use torch-directml."),
     OptionInfoEnum("cross-attention", [
-        OptionInfoEnumChoice("split", option_name="use-split-cross-attention", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory."),
-        OptionInfoEnumChoice("pytorch", option_name="use-pytorch-cross-attention", help="Used to force normal vram use if lowvram gets automatically enabled."),
+        OptionInfoEnumChoice("split", option_name="use-split-cross-attention", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used."),
+        OptionInfoEnumChoice("pytorch", option_name="use-pytorch-cross-attention", help="Use the new pytorch 2.0 cross attention function."),
     ], help="Type of cross attention to use", empty_help="Don't use cross-attention."),
     OptionInfoFlag("disable-xformers",
                    help="Disable xformers."),


@@ -17,17 +17,17 @@ config:
   # Extra paths to scan for model files.
   extra_model_paths:
     a1111:
-      checkpoints: models/Stable-diffusion
-      vae: models/VAE
       hypernetworks: models/hypernetworks
       base_path: path/to/stable-diffusion-webui/
-      upscale_models: |
-        models/ESRGAN
-        models/SwinIR
-      embeddings: embeddings
       controlnet: models/ControlNet
       configs: models/Stable-diffusion
       loras: models/Lora
+      embeddings: embeddings
+      vae: models/VAE
+      checkpoints: models/Stable-diffusion
+      upscale_models: |
+        models/ESRGAN
+        models/SwinIR

   # Set the ComfyUI output directory. Leave empty to use the default.
   # Type: str
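
(A note on the reordered a1111 block above: each key maps a model type to a location under base_path, and upscale_models uses a YAML block scalar, the "|", to hold one path per line. The resolver below is a hypothetical sketch of how such a section could be expanded, assuming PyYAML; it is not the repository's actual loader.)

import os
import yaml  # PyYAML, assumed available

# Sketch only: expand the a1111 mapping into absolute search paths.
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

a1111 = cfg["config"]["extra_model_paths"]["a1111"]
base_path = a1111.pop("base_path")
for model_type, paths in a1111.items():
    # Block scalars hold several newline-separated paths; plain values hold one.
    for rel in str(paths).splitlines():
        if rel.strip():
            print(model_type, "->", os.path.join(base_path, rel.strip()))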
@@ -70,8 +70,8 @@ config:
   # Type of cross attention to use
   # Choices:
   #   - (empty): Don't use cross-attention.
-  #   - split: By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.
-  #   - pytorch: Used to force normal vram use if lowvram gets automatically enabled.
+  #   - split: Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.
+  #   - pytorch: Use the new pytorch 2.0 cross attention function.
   cross_attention:

   # Disable xformers.
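
(These corrected comments are what users see when editing the sample config; the value itself is set like "cross_attention: pytorch", or left empty to skip the optimization. Below is a small illustrative check of a value against the documented choices; the names are hypothetical and this is not code from the repository.)

import yaml

# Sketch only: validate a cross_attention value against the documented choices.
doc = """
config:
  cross_attention: pytorch
"""
valid = {None, "", "split", "pytorch"}  # an empty value means no optimization
value = yaml.safe_load(doc)["config"]["cross_attention"]
if value not in valid:
    raise ValueError(f"invalid cross_attention: {value!r}")
print("cross_attention =", value or "(empty)")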