From 6d8deff056b5f4774acb2185d8ee1263f24e77e9 Mon Sep 17 00:00:00 2001 From: doctorpangloss <@hiddenswitch.com> Date: Mon, 19 May 2025 15:58:08 -0700 Subject: [PATCH] Switch to uv for packaging --- .github/workflows/test.yml | 38 +----- README.md | 160 ++++++++------------------ comfy/nodes/vanilla_node_importing.py | 21 ++-- pyproject.toml | 107 ++++++++++------- 4 files changed, 122 insertions(+), 204 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fa4674f95..9276ca52a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,38 +9,6 @@ name: Backend Tests on: [ push ] jobs: - build_and_execute_macos: - environment: "Testing" - if: false - name: Installation, Unit and Workflow Tests for macOS - runs-on: ${{ matrix.runner.labels }} - strategy: - fail-fast: false - matrix: - runner: - - labels: [ self-hosted, m1-8gb ] - steps: - - uses: actions/checkout@v4 - - run: | - python3 -m venv - source venv/bin/activate - python -m pip install setuptools pip --upgrade - pip install --no-build-isolation .[dev] - - name: Run unit tests - run: | - source venv/bin/activate - pytest -v tests/unit - - name: Run all other supported tests - run: | - source venv/bin/activate - pytest -v tests/inference - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - - name: Lint for errors - run: | - source venv/bin/activate - pylint -j 8 --rcfile=.pylintrc comfy/ - pylint -j 8 --rcfile=.pylintrc comfy_extras/ build_and_execute_linux: environment: "Testing" name: Installation, Unit and Workflow Tests for Linux @@ -51,9 +19,9 @@ jobs: matrix: runner: - labels: [self-hosted, Linux, X64, cpu] - container: "ubuntu:22.04" + container: "ubuntu" - labels: [self-hosted, Linux, X64, cuda-3090-24gb] - container: "nvcr.io/nvidia/pytorch:24.06-py3-igpu" + container: "nvcr.io/nvidia/pytorch:24.06-py3" steps: - run: | apt update || true @@ -72,7 +40,7 @@ jobs: run: | export UV_BREAK_SYSTEM_PACKAGES=true export UV_SYSTEM_PYTHON=true - UV_TORCH_BACKEND=auto 
uv pip install "comfyui[dev] @ git+https://github.com/hiddenswitch/comfyui" + UV_TORCH_BACKEND=auto uv sync --inexact - name: Run tests run: | export HSA_OVERRIDE_GFX_VERSION=11.0.0 diff --git a/README.md b/README.md index 48b8e78c6..076432443 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ A vanilla, up-to-date fork of [ComfyUI](https://github.com/comfyanonymous/comfyu ### New Features - To run, just type `comfyui` in your command line and press enter. -- [Installable](#installing) via `uv`: `uv pip install comfyui[cuda]@git+https://github.com/hiddenswitch/ComfyUI.git`. +- [Installable](#installing) via `uv`: `UV_TORCH_BACKEND=auto uv pip install comfyui@git+https://github.com/hiddenswitch/ComfyUI.git`. - [Large Language Models](#large-language-models) with multi-modal support included. - [Automatic model downloading](#model-downloading) to save you disk space and time. - [Distributed](#distributed-multi-process-and-multi-gpu-comfy) with support for multiple GPUs, multiple backends and frontends, including in containers, using RabbitMQ. @@ -131,46 +131,36 @@ When using Windows, open the **Windows Powershell** app. Then observe you are at 4. Create a virtual environment: ```shell - uv venv --seed --python 3.12 + uv venv ``` -5. Activate it on - **Windows (PowerShell):** - +5. Run the following command to install `comfyui` into your current environment. This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, Intel, AMD or CPU on Linux, CPU on macOS): ```powershell - Set-ExecutionPolicy Unrestricted -Scope Process - & .\.venv\Scripts\activate.ps1 + uv pip install --torch-backend=auto "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git" ``` - -6. Run the following command to install `comfyui` into your current environment. 
This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, Intel, AMD or CPU on Linux, CPU on macOS): - ```powershell - uv pip install setuptools wheel - UV_TORCH_BACKEND=auto uv pip install "comfyui[cuda]@git+https://github.com/hiddenswitch/ComfyUI.git" - ``` -7. To run the web server: +6. To run the web server: ```shell - comfyui + uv run comfyui ``` When you run workflows that use well-known models, this will download them automatically. To make it accessible over the network: ```shell - comfyui --listen + uv run comfyui --listen ``` **Running** -On Windows, you will need to open PowerShell and activate your virtual environment whenever you want to run `comfyui`. +On Windows, you should change into the directory where you ran `uv venv`, then run `comfyui`. For example, if you ran `uv venv` inside `~\Documents\ComfyUI_Workspace\` ```powershell cd ~\Documents\ComfyUI_Workspace\ -& .venv\Scripts\activate.ps1 -comfyui +uv run comfyui ``` Upgrades are delivered frequently and automatically. To force one immediately, run pip upgrade like so: ```shell -UV_TORCH_BACKEND=auto uv pip install --no-build-isolation --upgrade "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git" +uv pip install --torch-backend=auto --upgrade "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git" ``` ### macOS @@ -179,9 +169,9 @@ UV_TORCH_BACKEND=auto uv pip install --no-build-isolation --upgrade "comfyui@git ```shell /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" ``` - Then, install `python` and `uv`: + Then, install `uv`: ```shell - HOMEBREW_NO_AUTO_UPDATE=1 brew install python@3.12 uv + HOMEBREW_NO_AUTO_UPDATE=1 brew install uv ``` 3. Switch into a directory that you want to store your outputs, custom nodes and models in. This is your ComfyUI workspace. 
For example, if you want to store your workspace in a directory called `ComfyUI_Workspace` in your Documents folder: @@ -192,39 +182,31 @@ UV_TORCH_BACKEND=auto uv pip install --no-build-isolation --upgrade "comfyui@git 4. Create a virtual environment: ```shell - uv venv --seed --python 3.12 - ``` -5. Activate it on - **macOS** - - ```shell - source .venv/bin/activate + uv venv --python 3.12 ``` -6. Run the following command to install `comfyui` into your current environment. This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, Intel, AMD or CPU on Linux, CPU on macOS): +5. Run the following command to install `comfyui` into your current environment. ```shell - uv pip install setuptools wheel - UV_TORCH_BACKEND=auto uv pip install "comfyui[cpu]@git+https://github.com/hiddenswitch/ComfyUI.git" + uv pip install "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git" ``` -7. To run the web server: +6. To run the web server: ```shell - comfyui + uv run comfyui ``` When you run workflows that use well-known models, this will download them automatically. To make it accessible over the network: ```shell - comfyui --listen + uv run comfyui --listen ``` **Running** -On macOS, you will need to open the terminal and activate your virtual environment whenever you want to run `comfyui`. +On macOS, you will need to open the terminal and `cd` into the directory in which you ran `uv venv`. 
For example, if you ran `uv venv` in `~/Documents/ComfyUI_Workspace/`: ```shell cd ~/Documents/ComfyUI_Workspace/ -source .venv/bin/activate -comfyui +uv run comfyui ``` ## Model Downloading @@ -262,16 +244,16 @@ To save space, you will need to enable Developer Mode in the Windows Settings, t These packages have been adapted to be installable with `pip` and download models to the correct places: -- **ELLA T5 Text Conditioning for SD1.5**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ella.git` -- **IP Adapter**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ipadapter-plus` -- **ControlNet Auxiliary Preprocessors**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git`. -- **LayerDiffuse Alpha Channel Diffusion**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-layerdiffuse.git`. -- **BRIA Background Removal**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-bria-bg-removal.git` -- **Video Frame Interpolation**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation` -- **Video Helper Suite**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite` -- **AnimateDiff Evolved**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved` -- **Impact Pack**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-impact-pack` -- **TensorRT**: `pip install git+https://github.com/AppMAna/appmana-comfyui-nodes-tensorrt` +- **ELLA T5 Text Conditioning for SD1.5**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ella.git` +- **IP Adapter**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ipadapter-plus` +- **ControlNet Auxiliary Preprocessors**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git`. 
+- **LayerDiffuse Alpha Channel Diffusion**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-layerdiffuse.git`. +- **BRIA Background Removal**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-bria-bg-removal.git` +- **Video Frame Interpolation**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation` +- **Video Helper Suite**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite` +- **AnimateDiff Evolved**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved` +- **Impact Pack**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-impact-pack` +- **TensorRT**: `uv pip install git+https://github.com/AppMAna/appmana-comfyui-nodes-tensorrt` Custom nodes are generally supported by this fork. Use these for a bug-free experience. @@ -287,20 +269,25 @@ caddy reverse-proxy --from localhost:443 --to localhost:8188 --tls self_signed ##### Notes for AMD Users -Until a workaround is found, specify these variables: +Installation for `ROCm` should be explicit: +```shell +uv pip install "comfyui[rocm]@git+https://github.com/hiddenswitch/ComfyUI.git" +``` + +Then, until a workaround is found, specify these variables: RDNA 3 (RX 7600 and later) ```shell export HSA_OVERRIDE_GFX_VERSION=11.0.0 -comfyui +uv run comfyui ``` RDNA 2 (RX 6600 and others) ```shell export HSA_OVERRIDE_GFX_VERSION=10.3.0 -comfyui +uv run comfyui ``` You can enable experimental memory efficient attention on pytorch 2.5 in ComfyUI on RDNA3 and potentially other AMD GPUs using this command: @@ -330,57 +317,6 @@ For models compatible with Cambricon Extension for PyTorch (`torch_mlu`). Here's 2. Next, install the PyTorch (`torch_mlu`) extension following the instructions on the [Installation](https://www.cambricon.com/docs/sdk_1.15.0/cambricon_pytorch_1.17.0/user_guide_1.9/index.html) 3. 
Launch ComfyUI by running `python main.py` -## Manual Install (Windows, Linux, macOS) For Development - -1. Clone this repo: - ```shell - git clone https://github.com/hiddenswitch/ComfyUI.git - cd ComfyUI - ``` -2. Create a virtual environment: - 1. Create an environment: - ```shell - python -m virtualenv venv - ``` - 2. Activate it: - - **Windows (PowerShell):** - ```pwsh - Set-ExecutionPolicy Unrestricted -Scope Process - & .\venv\Scripts\activate.ps1 - ``` - - **Linux and macOS** - ```shell - source ./venv/bin/activate - ``` - -3. Then, run the following command to install `comfyui` into your current environment. This will correctly select the version of pytorch that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA AMD or CPU on Linux): - ```shell - pip install -e ".[dev]" - ``` -4. To run the web server: - ```shell - comfyui - ``` - To run tests: - ```shell - pytest -v tests/ - ``` - You can use `comfyui` as an API. Visit the [OpenAPI specification](comfy/api/openapi.yaml). This file can be used to generate typed clients for your preferred language. -5. To create the standalone binary: - ```shell - python -m PyInstaller --onefile --noupx -n ComfyUI --add-data="comfy/;comfy/" --paths $(pwd) --paths comfy/cmd main.py - ``` - -Because pip installs the package as editable with `pip install -e .`, any changes you make to the repository will affect the next launch of `comfy`. In IDEA based editors like PyCharm and IntelliJ, the Relodium plugin supports modifying your custom nodes or similar code while the server is running. - -## Linux Development Dependencies - -```shell -apt install -y git build-essential clang python3-dev python3-venv -``` - # Large Language Models ComfyUI LTS supports text and multi-modal LLM models from the `transformers` ecosystem. This means all the LLaMA family models, LLAVA-NEXT, Phi-3, etc. are supported out-of-the-box with no configuration necessary. 
@@ -420,10 +356,10 @@ First, install this package using the [Installation Instructions](#installing). Then, install the custom nodes packages that support video creation workflows: ```shell -pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation -pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite -pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved -pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git +uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation +uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite +uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved +uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git ``` Start creating an AnimateDiff workflow. When using these packages, the appropriate models will download automatically. @@ -438,11 +374,7 @@ Improve the performance of your Mochi model video generation using **Sage Attent [Use the default Mochi Workflow.](https://github.com/comfyanonymous/ComfyUI_examples/raw/refs/heads/master/mochi/mochi_text_to_video_example.webp) This does not require any custom nodes or any change to your workflow. -Install the dependencies for Windows or Linux using the `withtriton` component, or install the specific dependencies you need from [requirements-triton.txt](./requirements-triton.txt): - -```shell -pip install "comfyui[withtriton]@git+https://github.com/hiddenswitch/ComfyUI.git" -``` +**Installation** On Windows, you will need the CUDA Toolkit and Visual Studio 2022. 
If you do not already have this, use `chocolatey`: @@ -457,16 +389,16 @@ choco install -y vcredist2010 vcredist2013 vcredist140 Then, visit [NVIDIA.com's CUDA Toolkit Download Page](https://developer.nvidia.com/cuda-12-6-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=Server2022&target_type=exe_network) and download and install the CUDA Toolkit. Verify it is correctly installed by running `nvcc --version`. -You are now ready to install Sage Attention 2: +You are now ready to install Sage Attention 2 and Triton: ```shell -uv pip install --no-build-isolation --no-deps "git+https://github.com/thu-ml/SageAttention.git" +uv pip install --torch-backend=auto "comfyui[attention]@git+https://github.com/hiddenswitch/ComfyUI.git" ``` To start ComfyUI with it: ```shell -comfyui --use-sage-attention +uv run comfyui --use-sage-attention ``` ![with_sage_attention.webp](./docs/assets/with_sage_attention.webp) diff --git a/comfy/nodes/vanilla_node_importing.py b/comfy/nodes/vanilla_node_importing.py index 6a50e724e..3b13ccb0f 100644 --- a/comfy/nodes/vanilla_node_importing.py +++ b/comfy/nodes/vanilla_node_importing.py @@ -3,18 +3,18 @@ from __future__ import annotations import importlib import logging import os -import shutil import sys import time import types from contextlib import contextmanager -from typing import Dict, List, Iterable from os.path import join, basename, dirname, isdir, isfile, exists, abspath, split, splitext, realpath +from typing import Dict, Iterable from . 
import base_nodes from .package_typing import ExportedNodes from ..component_model.plugins import prompt_server_instance_routes +logger = logging.getLogger(__name__) class _PromptServerStub(): def __init__(self): @@ -30,7 +30,7 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str]) spec.loader.exec_module(module) return True except Exception as e: - print(f"Failed to execute startup-script: {script_path} / {e}", file=sys.stderr) + logger.error(f"Failed to execute startup-script: {script_path} / {e}") return False node_prestartup_times = [] @@ -52,14 +52,13 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str]) success = execute_script(script_path) node_prestartup_times.append((time.perf_counter() - time_before, module_path, success)) if len(node_prestartup_times) > 0: - print("\nPrestartup times for custom nodes:", file=sys.stderr) + logger.debug("\nPrestartup times for custom nodes:") for n in sorted(node_prestartup_times): if n[2]: import_message = "" else: import_message = " (PRESTARTUP FAILED)" - print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr) - print("\n", file=sys.stderr) + logger.debug("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) @contextmanager @@ -118,12 +117,10 @@ def _vanilla_load_custom_nodes_1(module_path, ignore=set()) -> ExportedNodes: exported_nodes.NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) return exported_nodes else: - print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.", file=sys.stderr) + logger.error(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") return exported_nodes except Exception as e: - import traceback - print(traceback.format_exc()) - print(f"Cannot import {module_path} module for custom nodes:", e, file=sys.stderr) + logger.error(f"Cannot import {module_path} module for custom nodes:", exc_info=e) return exported_nodes @@ -151,14 
+148,12 @@ def _vanilla_load_custom_nodes_2(node_paths: Iterable[str]) -> ExportedNodes: exported_nodes.update(possible_exported_nodes) if len(node_import_times) > 0: - print("\nImport times for custom nodes:", file=sys.stderr) for n in sorted(node_import_times): if n[2]: import_message = "" else: import_message = " (IMPORT FAILED)" - print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr) - print("\n", file=sys.stderr) + logger.debug(f"{n[0]:6.1f} seconds{import_message}: {n[1]}") return exported_nodes diff --git a/pyproject.toml b/pyproject.toml index c89f5bfb9..6c571e77d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,24 +2,25 @@ name = "comfyui" version = "0.3.29" description = "An installable version of ComfyUI" -readme = "README.md" # Optional: if you have a README +readme = "README.md" authors = [ { name = "Contributors_of_ComfyUI" }, ] requires-python = ">=3.10" -license = { text = "Specify License Here" } # Optional: Add your license -classifiers = [# Optional: Standard PyPI classifiers +license = { text = "GPL-3.0-or-later" } +classifiers = [ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", - # Add other relevant classifiers ] -# Core dependencies from requirements.txt (excluding torch/torchvision) dependencies = [ + "torch", + "torchvision", + "torchaudio", "comfyui-frontend-package", "comfyui-workflow-templates", "torchdiffeq>=0.2.3", @@ -110,25 +111,7 @@ where = ["."] include = ["comfy*"] namespaces = false -[project.optional-dependencies] -cpu = [ - "torch", - "torchvision", - "torchaudio", -] - -cuda = [ - "torch", - "torchvision", - "torchaudio", -] - -rocm = [ - "torch", - "torchvision", - "torchaudio", -] - +[dependency-groups] dev = [ "pytest", "pytest-asyncio", @@ -145,16 +128,40 @@ dev = [ "pylint", ] -triton = [ - 
"triton ; sys_platform == 'Linux'", - 'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp312-cp312-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.12"', - 'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp311-cp311-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.11"', - 'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp310-cp310-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.10"', +[project.optional-dependencies] +cpu = [ + "torch", + "torchvision", + "torchaudio", ] -withtorch = ["comfyui[cuda]"] # Depends on the 'cuda' extra -withtriton = ["comfyui[cuda, triton]"] # Depends on 'cuda' and 'triton' extras +cu126 = [ + "torch", + "torchvision", + "torchaudio", + "triton ; sys_platform == 'linux'", + "triton-windows ; sys_platform == 'win32'", +] +cu128 = [ + "torch", + "torchvision", + "torchaudio", + "triton ; sys_platform == 'linux'", + "triton-windows ; sys_platform == 'win32'", +] + +rocm = [ + "torch", + "torchvision", + "torchaudio", + "pytorch-triton-rocm" +] + +attention = [ + "sageattention", + "flash_attn ; sys_platform == 'linux'", +] [project.scripts] comfyui = "comfy.cmd.main:entrypoint" @@ -162,12 +169,17 @@ comfyui-worker = "comfy.entrypoints.worker:entrypoint" comfyui-workflow = "comfy.entrypoints.workflow:entrypoint" [project.urls] -Homepage = "https://github.com/comfyanonymous/ComfyUI" # Example -Repository = "https://github.com/comfyanonymous/ComfyUI" # Example +Homepage = "https://github.com/comfyanonymous/ComfyUI" +Repository = "https://github.com/comfyanonymous/ComfyUI" [tool.uv] conflicts = [ - [{ extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" }], + [{ extra = "cpu" }, { extra = "cu126" }, { extra = "cu128" }, { extra = "rocm" }], +] +no-build-isolation-package = [ + "sageattention", + "sage-attention", + 
"flash_attn", ] [[tool.uv.index]] @@ -180,6 +192,11 @@ name = "pytorch-cu126" url = "https://download.pytorch.org/whl/cu126" explicit = true +[[tool.uv.index]] +name = "pytorch-cu128" +url = "https://download.pytorch.org/whl/cu128" +explicit = true + [[tool.uv.index]] name = "pytorch-rocm" url = "https://download.pytorch.org/whl/rocm6.3" @@ -188,23 +205,29 @@ explicit = true [tool.uv.sources] torch = [ { index = "pytorch-cpu", extra = "cpu" }, - { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, - { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, + { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, ] torchvision = [ { index = "pytorch-cpu", extra = "cpu" }, - { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, - { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, + { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, ] torchaudio = [ { index = "pytorch-cpu", extra = "cpu" }, - { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, - { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, + { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, + { index = 
"pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" }, ] comfyui-frontend-package = [ -# { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" }, - { workspace = true } + { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" }, ] +"sageattention" = [ + { git = "https://github.com/thu-ml/SageAttention.git", extra = "attention", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, +] + [tool.ruff]