diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 7c9c38695..271c43431 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -302,8 +302,8 @@ CONFIG_OPTIONS = [
     OptionInfo("directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml."),
     OptionInfoEnum("cross-attention", [
-        OptionInfoEnumChoice("split", option_name="use-split-cross-attention", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory."),
-        OptionInfoEnumChoice("pytorch", option_name="use-pytorch-cross-attention", help="Used to force normal vram use if lowvram gets automatically enabled."),
+        OptionInfoEnumChoice("split", option_name="use-split-cross-attention", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used."),
+        OptionInfoEnumChoice("pytorch", option_name="use-pytorch-cross-attention", help="Use the new pytorch 2.0 cross attention function."),
     ], help="Type of cross attention to use", empty_help="Don't use cross-attention."),
     OptionInfoFlag("disable-xformers", help="Disable xformers."),
diff --git a/config.yaml b/config.yaml
index 6ba382c3a..6c771d99f 100644
--- a/config.yaml
+++ b/config.yaml
@@ -17,17 +17,17 @@ config:
   # Extra paths to scan for model files.
   extra_model_paths:
     a1111:
-      checkpoints: models/Stable-diffusion
-      vae: models/VAE
       hypernetworks: models/hypernetworks
       base_path: path/to/stable-diffusion-webui/
-      upscale_models: |
-        models/ESRGAN
-        models/SwinIR
+      embeddings: embeddings
       controlnet: models/ControlNet
       configs: models/Stable-diffusion
       loras: models/Lora
-      embeddings: embeddings
+      vae: models/VAE
+      checkpoints: models/Stable-diffusion
+      upscale_models: |
+        models/ESRGAN
+        models/SwinIR
 
   # Set the ComfyUI output directory. Leave empty to use the default.
   # Type: str
@@ -70,8 +70,8 @@ config:
   # Type of cross attention to use
   # Choices:
   # - (empty): Don't use cross-attention.
-  # - split: By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.
-  # - pytorch: Used to force normal vram use if lowvram gets automatically enabled.
+  # - split: Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.
+  # - pytorch: Use the new pytorch 2.0 cross attention function.
   cross_attention:
 
   # Disable xformers.
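For reference, a minimal sketch of how the corrected option would be set in config.yaml. The choice values (`split`, `pytorch`, or empty) come from the second hunk above; `pytorch` is picked here only as an illustration, and the same choice is presumably reachable on the command line through the `cross-attention` enum option defined in cli_args.py:

    config:
      # Use the new pytorch 2.0 cross attention function.
      # Leave empty to not use cross-attention.
      cross_attention: pytorch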