From ed4f92279ca3761c4ea53373d947b988ca902965 Mon Sep 17 00:00:00 2001
From: doctorpangloss <2229300+doctorpangloss@users.noreply.github.com>
Date: Thu, 11 Dec 2025 17:21:44 -0800
Subject: [PATCH] sageattention fixup

---
 README.md                     | 13 +++++++++++--
 pypi/sageattention_index.html | 15 +++++++++++++++
 pyproject.toml                | 26 +++-----------------------
 3 files changed, 29 insertions(+), 25 deletions(-)
 create mode 100644 pypi/sageattention_index.html

diff --git a/README.md b/README.md
index eb10c5c5a..af7f4701c 100644
--- a/README.md
+++ b/README.md
@@ -306,10 +306,19 @@ choco install -y vcredist2010 vcredist2013 vcredist140
 
 Then, visit [NVIDIA.com's CUDA Toolkit Download Page](https://developer.nvidia.com/cuda-12-6-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=Server2022&target_type=exe_network) and download and install the CUDA Toolkit. Verify it is correctly installed by running `nvcc --version`.
 
-You are now ready to install Sage Attention 2 and Triton:
+You are now ready to install Sage Attention 2 and, on Linux, Flash Attention.
+
+### Linux
 
 ```shell
-uv pip install --torch-backend=auto "comfyui[attention]@git+https://github.com/hiddenswitch/ComfyUI.git"
+uv pip install --no-build-isolation "sageattention@git+https://github.com/thu-ml/SageAttention.git"
+uv pip install --no-build-isolation flash_attn
+```
+
+### Windows
+
+```powershell
+uv pip install --find-links https://raw.githubusercontent.com/hiddenswitch/ComfyUI/main/pypi/sageattention_index.html sageattention
 ```
 
 To start ComfyUI with it:
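A quick import check confirms either install worked; this is a minimal sketch, assuming the packages expose their usual top-level modules (`sageattention` with the `sageattn` kernel, and `flash_attn` on Linux):

```shell
python -c "from sageattention import sageattn; print('sageattention OK')"
python -c "import flash_attn; print('flash_attn OK')"  # Linux only
```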
diff --git a/pypi/sageattention_index.html b/pypi/sageattention_index.html
new file mode 100644
index 000000000..28b468349
--- /dev/null
+++ b/pypi/sageattention_index.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Links for sageattention</title>
+</head>
+<body>
+<h1>Links for sageattention</h1>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu124torch2.5.1.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu124torch2.5.1.post3-cp39-abi3-win_amd64.whl</a><br/>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu126torch2.6.0.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu126torch2.6.0.post3-cp39-abi3-win_amd64.whl</a><br/>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu128torch2.7.1.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu128torch2.7.1.post3-cp39-abi3-win_amd64.whl</a><br/>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu128torch2.8.0.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu128torch2.8.0.post3-cp39-abi3-win_amd64.whl</a><br/>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu128torch2.9.0.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu128torch2.9.0.post3-cp39-abi3-win_amd64.whl</a><br/>
+<a href="https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0+cu130torch2.9.0.post3-cp39-abi3-win_amd64.whl">sageattention-2.2.0+cu130torch2.9.0.post3-cp39-abi3-win_amd64.whl</a><br/>
+</body>
+</html>
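pip and uv resolve a `--find-links` page by scraping its `<a>` anchors, and the local version segment in each filename (for example `+cu128torch2.7.1`) has to match the installed torch/CUDA build for the wheel to import. When resolution picks an incompatible wheel, the version can be pinned explicitly; the pin below is illustrative and assumes torch 2.7.1 built against CUDA 12.8 is already installed:

```shell
uv pip install --find-links https://raw.githubusercontent.com/hiddenswitch/ComfyUI/main/pypi/sageattention_index.html "sageattention==2.2.0+cu128torch2.7.1.post3"
```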
diff --git a/pyproject.toml b/pyproject.toml
index ba5df7564..59468c9d5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -186,10 +186,7 @@ mps = [
 #    "torchaudio",
 ]
 
-attention = [
-    "sageattention==2.2.0 ; sys_platform == 'Linux' or sys_platform == 'win32'",
-    "flash_attn ; sys_platform == 'Linux'",
-]
+
 
 comfyui-manager = [
     "GitPython",
@@ -202,7 +199,7 @@ comfyui-manager = [
     "chardet",
     "pip",
     # todo: bold move
-    "comfyui_manager==4.0.3b4",
+#    "comfyui_manager==4.0.3b4",
 ]
 
 [project.scripts]
@@ -218,11 +215,7 @@ Repository = "https://github.com/comfyanonymous/ComfyUI"
 conflicts = [
     [{ extra = "cpu" }, { extra = "cu126" }, { extra = "cu128" }, { extra = "rocm" }, { extra = "mps" }],
 ]
-no-build-isolation-package = [
-    "sageattention",
-    "sage-attention",
-    "flash_attn",
-]
+no-build-isolation-package = []
 prerelease = "allow"
 
 [[tool.uv.index]]
@@ -273,20 +266,7 @@ torchaudio = [
 #    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'Linux'" },
 #    { index = "pytorch-nightly-cpu", extra = "mps", marker = "sys_platform == 'darwin'" },
 ]
-sageattention = [
-    { git = "https://github.com/thu-ml/SageAttention.git", marker = "sys_platform == 'Linux'", extra = "attention" },
-    { url = "https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post1/sageattention-2.2.0+cu128torch2.7.1.post1-cp39-abi3-win_amd64.whl", marker = "sys_platform == 'win32'", extra = "attention" },
-]
 
-[[tool.uv.dependency-metadata]]
-name = "flash-attn"
-version = "2.6.3"
-requires-dist = ["torch", "einops"]
-
-[[tool.uv.dependency-metadata]]
-name = "sageattention"
-version = "2.2.0"
-requires-dist = ["torch"]
 
 [tool.ruff]
 lint.select = [