From 4553891bbd993d0ee37377a6a30e13bd0e070143 Mon Sep 17 00:00:00 2001
From: Robin Huang
Date: Sun, 23 Feb 2025 16:13:39 -0800
Subject: [PATCH 1/2] Update installation documentation to include desktop +
 cli. (#6899)

* Update installation documentation.

* Add portable to description.

* Move cli further down.
---
 README.md | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 83d67cef4..b51f7a067 100644
--- a/README.md
+++ b/README.md
@@ -31,10 +31,24 @@
 
 ![ComfyUI Screenshot](https://github.com/user-attachments/assets/7ccaf2c1-9b72-41ae-9a89-5688c94b7abe)
 
-This ui will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. For some workflow examples and see what ComfyUI can do you can check out:
-### [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)
+ComfyUI lets you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. Available on Windows, Linux, and macOS.
+
+## Get Started
+
+#### [Desktop Application](https://www.comfy.org/download)
+- The easiest way to get started.
+- Available on Windows & macOS.
+
+#### [Windows Portable Package](#installing)
+- Get the latest commits and completely portable.
+- Available on Windows.
+
+#### [Manual Install](#manual-install-windows-linux)
+Supports all operating systems and GPU types (NVIDIA, AMD, Intel, Apple Silicon, Ascend).
+
+## Examples
+See what ComfyUI can do with the [example workflows](https://comfyanonymous.github.io/ComfyUI_examples/).
 
-### [Installing ComfyUI](#installing)
 
 ## Features
 - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
@@ -121,7 +135,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
 
 # Installing
 
-## Windows
+## Windows Portable
 
 There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
 
@@ -141,6 +155,15 @@ See the [Config file](extra_model_paths.yaml.example) to set the search paths fo
 
 To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
+
+## [comfy-cli](https://docs.comfy.org/comfy-cli/getting-started)
+
+You can install and start ComfyUI using comfy-cli:
+```bash
+pip install comfy-cli
+comfy install
+```
+
 ## Manual Install (Windows, Linux)
 
 python 3.13 is supported but using 3.12 is recommended because some custom nodes and their dependencies might not support it yet.

From 96d891cb94d90f220e066cebad349887137f07a6 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 24 Feb 2025 05:41:07 -0500
Subject: [PATCH 2/2] Speedup on some models by not upcasting bfloat16 to
 float32 on mac.
---
 comfy/ldm/modules/attention.py | 13 +++++++------
 comfy/model_management.py      |  2 +-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 24fb9d950..2758f9508 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -30,11 +30,12 @@ ops = comfy.ops.disable_weight_init
 
 FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()
 
-def get_attn_precision(attn_precision):
+def get_attn_precision(attn_precision, current_dtype):
     if args.dont_upcast_attention:
         return None
-    if FORCE_UPCAST_ATTENTION_DTYPE is not None:
-        return FORCE_UPCAST_ATTENTION_DTYPE
+
+    if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
+        return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
     return attn_precision
 
 def exists(val):
@@ -81,7 +82,7 @@ def Normalize(in_channels, dtype=None, device=None):
     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
 
 def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, q.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = q.shape
@@ -150,7 +151,7 @@ def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
 
 
 def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, query.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = query.shape
@@ -220,7 +221,7 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
     return hidden_states
 
 def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, q.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = q.shape
diff --git a/comfy/model_management.py b/comfy/model_management.py
index f4a63c6d3..1e6599be2 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -954,7 +954,7 @@ def force_upcast_attention_dtype():
         upcast = True
 
     if upcast:
-        return torch.float32
+        return {torch.float16: torch.float32}
     else:
         return None
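
Note on the second patch: `force_upcast_attention_dtype()` now returns a dtype-to-dtype mapping instead of a single dtype, so only the dtypes listed in the mapping get the upcast workaround and bfloat16 attention is left in bfloat16 on affected Macs. Below is a minimal, self-contained sketch of the new lookup, assuming `DONT_UPCAST_ATTENTION` as a stand-in for `args.dont_upcast_attention`; the other names follow the patch:

```python
import torch

# Stand-in for args.dont_upcast_attention (hypothetical flag for this sketch).
DONT_UPCAST_ATTENTION = False

# After the patch this is a mapping of dtypes that need upcasting to their
# target dtype, rather than a single dtype applied to every input.
FORCE_UPCAST_ATTENTION_DTYPE = {torch.float16: torch.float32}

def get_attn_precision(attn_precision, current_dtype):
    if DONT_UPCAST_ATTENTION:
        return None
    # Only dtypes listed in the mapping are upcast; others keep their precision.
    if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
        return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
    return attn_precision

# float16 still gets the float32 workaround; bfloat16 is now left as-is,
# which is where the speedup on mac comes from.
assert get_attn_precision(None, torch.float16) == torch.float32
assert get_attn_precision(None, torch.bfloat16) is None
```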