Switch to uv for packaging

doctorpangloss 2025-05-19 15:58:08 -07:00
parent a3ad9bdb1a
commit 6d8deff056
4 changed files with 122 additions and 204 deletions


@@ -9,38 +9,6 @@ name: Backend Tests
 on: [ push ]
 jobs:
-  build_and_execute_macos:
-    environment: "Testing"
-    if: false
-    name: Installation, Unit and Workflow Tests for macOS
-    runs-on: ${{ matrix.runner.labels }}
-    strategy:
-      fail-fast: false
-      matrix:
-        runner:
-          - labels: [ self-hosted, m1-8gb ]
-    steps:
-      - uses: actions/checkout@v4
-      - run: |
-          python3 -m venv venv
-          source venv/bin/activate
-          python -m pip install setuptools pip --upgrade
-          pip install --no-build-isolation .[dev]
-      - name: Run unit tests
-        run: |
-          source venv/bin/activate
-          pytest -v tests/unit
-      - name: Run all other supported tests
-        run: |
-          source venv/bin/activate
-          pytest -v tests/inference
-        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
-      - name: Lint for errors
-        run: |
-          source venv/bin/activate
-          pylint -j 8 --rcfile=.pylintrc comfy/
-          pylint -j 8 --rcfile=.pylintrc comfy_extras/
   build_and_execute_linux:
     environment: "Testing"
     name: Installation, Unit and Workflow Tests for Linux
@@ -51,9 +19,9 @@ jobs:
       matrix:
         runner:
           - labels: [self-hosted, Linux, X64, cpu]
-            container: "ubuntu:22.04"
+            container: "ubuntu"
           - labels: [self-hosted, Linux, X64, cuda-3090-24gb]
-            container: "nvcr.io/nvidia/pytorch:24.06-py3-igpu"
+            container: "nvcr.io/nvidia/pytorch:24.06-py3"
     steps:
       - run: |
           apt update || true
@@ -72,7 +40,7 @@ jobs:
         run: |
           export UV_BREAK_SYSTEM_PACKAGES=true
           export UV_SYSTEM_PYTHON=true
-          UV_TORCH_BACKEND=auto uv pip install "comfyui[dev] @ git+https://github.com/hiddenswitch/comfyui"
+          UV_TORCH_BACKEND=auto uv sync --inexact
       - name: Run tests
        run: |
           export HSA_OVERRIDE_GFX_VERSION=11.0.0
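The CI job now relies on `uv sync` instead of installing the package from git. One detail worth noting: `uv sync` normally makes the environment exactly match the project's locked requirements, which would remove the `torch` build preinstalled in the NVIDIA container, so `--inexact` is passed to leave unmanaged packages in place. A minimal sketch of the same step outside the workflow (the flags and environment variables are standard uv; the container image is taken from the matrix above):

```shell
# Run inside a container that ships its own torch, e.g. nvcr.io/nvidia/pytorch:24.06-py3
export UV_BREAK_SYSTEM_PACKAGES=true  # let uv write to the system site-packages
export UV_SYSTEM_PYTHON=true          # target the container's Python, not a venv
# --inexact installs what the project requires without removing
# extraneous packages such as the preinstalled torch build
UV_TORCH_BACKEND=auto uv sync --inexact
```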

README.md

@@ -6,7 +6,7 @@ A vanilla, up-to-date fork of [ComfyUI](https://github.com/comfyanonymous/comfyui)
 ### New Features
 - To run, just type `comfyui` in your command line and press enter.
-- [Installable](#installing) via `uv`: `uv pip install comfyui[cuda]@git+https://github.com/hiddenswitch/ComfyUI.git`.
+- [Installable](#installing) via `uv`: `UV_TORCH_BACKEND=auto uv pip install comfyui@git+https://github.com/hiddenswitch/ComfyUI.git`.
 - [Large Language Models](#large-language-models) with multi-modal support included.
 - [Automatic model downloading](#model-downloading) to save you disk space and time.
 - [Distributed](#distributed-multi-process-and-multi-gpu-comfy) with support for multiple GPUs, multiple backends and frontends, including in containers, using RabbitMQ.
@@ -131,46 +131,36 @@ When using Windows, open the **Windows Powershell** app.
 4. Create a virtual environment:
    ```shell
-   uv venv --seed --python 3.12
+   uv venv
    ```
-5. Activate it on
-   **Windows (PowerShell):**
+5. Run the following command to install `comfyui` into your current environment. This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows; NVIDIA, Intel, AMD or CPU on Linux; CPU on macOS):
    ```powershell
-   Set-ExecutionPolicy Unrestricted -Scope Process
-   & .\.venv\Scripts\activate.ps1
+   uv pip install --torch-backend=auto "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git"
    ```
-6. Run the following command to install `comfyui` into your current environment. This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, Intel, AMD or CPU on Linux, CPU on macOS):
-   ```powershell
-   uv pip install setuptools wheel
-   UV_TORCH_BACKEND=auto uv pip install "comfyui[cuda]@git+https://github.com/hiddenswitch/ComfyUI.git"
-   ```
-7. To run the web server:
+6. To run the web server:
    ```shell
-   comfyui
+   uv run comfyui
    ```
    When you run workflows that use well-known models, this will download them automatically.
    To make it accessible over the network:
    ```shell
-   comfyui --listen
+   uv run comfyui --listen
    ```
 **Running**
-On Windows, you will need to open PowerShell and activate your virtual environment whenever you want to run `comfyui`.
+On Windows, you should change into the directory where you ran `uv venv`, then run `comfyui`. For example, if you ran `uv venv` inside `~\Documents\ComfyUI_Workspace\`:
 ```powershell
 cd ~\Documents\ComfyUI_Workspace\
-& .venv\Scripts\activate.ps1
-comfyui
+uv run comfyui
 ```
 Upgrades are delivered frequently and automatically. To force one immediately, run pip upgrade like so:
 ```shell
-UV_TORCH_BACKEND=auto uv pip install --no-build-isolation --upgrade "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git"
+uv pip install --torch-backend=auto --upgrade "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git"
 ```
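To confirm that the auto-selected `torch` build matches your hardware after installing or upgrading, a quick check works (plain `torch` calls; nothing ComfyUI-specific is assumed):

```shell
uv run python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
```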
 ### macOS
@@ -179,9 +169,9 @@
 ```shell
 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
 ```
-Then, install `python` and `uv`:
+Then, install `uv`:
 ```shell
-HOMEBREW_NO_AUTO_UPDATE=1 brew install python@3.12 uv
+HOMEBREW_NO_AUTO_UPDATE=1 brew install uv
 ```
 3. Switch into a directory that you want to store your outputs, custom nodes and models in. This is your ComfyUI workspace. For example, if you want to store your workspace in a directory called `ComfyUI_Workspace` in your Documents folder:
@@ -192,39 +182,31 @@
 4. Create a virtual environment:
    ```shell
-   uv venv --seed --python 3.12
+   uv venv --python 3.12
    ```
-5. Activate it on
-   **macOS**
-   ```shell
-   source .venv/bin/activate
-   ```
-6. Run the following command to install `comfyui` into your current environment. This will correctly select the version of `torch` that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, Intel, AMD or CPU on Linux, CPU on macOS):
+5. Run the following command to install `comfyui` into your current environment.
    ```shell
-   uv pip install setuptools wheel
-   UV_TORCH_BACKEND=auto uv pip install "comfyui[cpu]@git+https://github.com/hiddenswitch/ComfyUI.git"
+   uv pip install "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git"
    ```
-7. To run the web server:
+6. To run the web server:
    ```shell
-   comfyui
+   uv run comfyui
    ```
    When you run workflows that use well-known models, this will download them automatically.
    To make it accessible over the network:
    ```shell
-   comfyui --listen
+   uv run comfyui --listen
    ```
 **Running**
-On macOS, you will need to open the terminal and activate your virtual environment whenever you want to run `comfyui`.
+On macOS, you will need to open the terminal and `cd` into the directory in which you ran `uv venv`. For example, if you ran `uv venv` in `~/Documents/ComfyUI_Workspace/`:
 ```shell
 cd ~/Documents/ComfyUI_Workspace/
-source .venv/bin/activate
-comfyui
+uv run comfyui
 ```
 ## Model Downloading
@@ -262,16 +244,16 @@ To save space, you will need to enable Developer Mode in the Windows Settings
 These packages have been adapted to be installable with `pip` and download models to the correct places:
-- **ELLA T5 Text Conditioning for SD1.5**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ella.git`
-- **IP Adapter**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ipadapter-plus`
-- **ControlNet Auxiliary Preprocessors**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git`.
-- **LayerDiffuse Alpha Channel Diffusion**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-layerdiffuse.git`.
-- **BRIA Background Removal**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-bria-bg-removal.git`
-- **Video Frame Interpolation**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation`
-- **Video Helper Suite**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite`
-- **AnimateDiff Evolved**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved`
-- **Impact Pack**: `pip install git+https://github.com/AppMana/appmana-comfyui-nodes-impact-pack`
-- **TensorRT**: `pip install git+https://github.com/AppMAna/appmana-comfyui-nodes-tensorrt`
+- **ELLA T5 Text Conditioning for SD1.5**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ella.git`
+- **IP Adapter**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-ipadapter-plus`
+- **ControlNet Auxiliary Preprocessors**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git`.
+- **LayerDiffuse Alpha Channel Diffusion**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-layerdiffuse.git`.
+- **BRIA Background Removal**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-bria-bg-removal.git`
+- **Video Frame Interpolation**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation`
+- **Video Helper Suite**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite`
+- **AnimateDiff Evolved**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved`
+- **Impact Pack**: `uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-impact-pack`
+- **TensorRT**: `uv pip install git+https://github.com/AppMAna/appmana-comfyui-nodes-tensorrt`
 Custom nodes are generally supported by this fork. Use these for a bug-free experience.
@@ -287,20 +269,25 @@ caddy reverse-proxy --from localhost:443 --to localhost:8188 --tls self_signed
 ##### Notes for AMD Users
-Until a workaround is found, specify these variables:
+Installation for `ROCm` should be explicit:
+```shell
+uv pip install "comfyui[rocm]@git+https://github.com/hiddenswitch/ComfyUI.git"
+```
+Then, until a workaround is found, specify these variables:
 RDNA 3 (RX 7600 and later)
 ```shell
 export HSA_OVERRIDE_GFX_VERSION=11.0.0
-comfyui
+uv run comfyui
 ```
 RDNA 2 (RX 6600 and others)
 ```shell
 export HSA_OVERRIDE_GFX_VERSION=10.3.0
-comfyui
+uv run comfyui
 ```
 You can enable experimental memory efficient attention on pytorch 2.5 in ComfyUI on RDNA3 and potentially other AMD GPUs using this command:
@@ -330,57 +317,6 @@ For models compatible with Cambricon Extension for PyTorch (`torch_mlu`). Here's
 2. Next, install the PyTorch (`torch_mlu`) extension following the instructions on the [Installation](https://www.cambricon.com/docs/sdk_1.15.0/cambricon_pytorch_1.17.0/user_guide_1.9/index.html)
 3. Launch ComfyUI by running `python main.py`
-## Manual Install (Windows, Linux, macOS) For Development
-1. Clone this repo:
-   ```shell
-   git clone https://github.com/hiddenswitch/ComfyUI.git
-   cd ComfyUI
-   ```
-2. Create a virtual environment:
-   1. Create an environment:
-      ```shell
-      python -m virtualenv venv
-      ```
-   2. Activate it:
-      **Windows (PowerShell):**
-      ```pwsh
-      Set-ExecutionPolicy Unrestricted -Scope Process
-      & .\venv\Scripts\activate.ps1
-      ```
-      **Linux and macOS**
-      ```shell
-      source ./venv/bin/activate
-      ```
-3. Then, run the following command to install `comfyui` into your current environment. This will correctly select the version of pytorch that matches the GPU on your machine (NVIDIA or CPU on Windows, NVIDIA, AMD or CPU on Linux):
-   ```shell
-   pip install -e ".[dev]"
-   ```
-4. To run the web server:
-   ```shell
-   comfyui
-   ```
-   To run tests:
-   ```shell
-   pytest -v tests/
-   ```
-   You can use `comfyui` as an API. Visit the [OpenAPI specification](comfy/api/openapi.yaml). This file can be used to generate typed clients for your preferred language.
-5. To create the standalone binary:
-   ```shell
-   python -m PyInstaller --onefile --noupx -n ComfyUI --add-data="comfy/;comfy/" --paths $(pwd) --paths comfy/cmd main.py
-   ```
-Because pip installs the package as editable with `pip install -e .`, any changes you make to the repository will affect the next launch of `comfy`. In IDEA based editors like PyCharm and IntelliJ, the Relodium plugin supports modifying your custom nodes or similar code while the server is running.
-## Linux Development Dependencies
-```shell
-apt install -y git build-essential clang python3-dev python3-venv
-```
 # Large Language Models
 ComfyUI LTS supports text and multi-modal LLM models from the `transformers` ecosystem. This means all the LLaMA family models, LLAVA-NEXT, Phi-3, etc. are supported out-of-the-box with no configuration necessary.
@@ -420,10 +356,10 @@ First, install this package using the [Installation Instructions](#installing).
 Then, install the custom nodes packages that support video creation workflows:
 ```shell
-pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation
-pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite
-pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved
-pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git
+uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-frame-interpolation
+uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-video-helper-suite
+uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-animatediff-evolved
+uv pip install git+https://github.com/AppMana/appmana-comfyui-nodes-controlnet-aux.git
 ```
 Start creating an AnimateDiff workflow. When using these packages, the appropriate models will download automatically.
@@ -438,11 +374,7 @@ Improve the performance of your Mochi model video generation using **Sage Attention**
 [Use the default Mochi Workflow.](https://github.com/comfyanonymous/ComfyUI_examples/raw/refs/heads/master/mochi/mochi_text_to_video_example.webp) This does not require any custom nodes or any change to your workflow.
-Install the dependencies for Windows or Linux using the `withtriton` component, or install the specific dependencies you need from [requirements-triton.txt](./requirements-triton.txt):
-```shell
-pip install "comfyui[withtriton]@git+https://github.com/hiddenswitch/ComfyUI.git"
-```
+**Installation**
 On Windows, you will need the CUDA Toolkit and Visual Studio 2022. If you do not already have this, use `chocolatey`:
@@ -457,16 +389,16 @@ choco install -y vcredist2010 vcredist2013 vcredist140
 Then, visit [NVIDIA.com's CUDA Toolkit Download Page](https://developer.nvidia.com/cuda-12-6-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=Server2022&target_type=exe_network) and download and install the CUDA Toolkit. Verify it is correctly installed by running `nvcc --version`.
-You are now ready to install Sage Attention 2:
+You are now ready to install Sage Attention 2 and Triton:
 ```shell
-uv pip install --no-build-isolation --no-deps "git+https://github.com/thu-ml/SageAttention.git"
+uv pip install --torch-backend=auto "comfyui[attention]@git+https://github.com/hiddenswitch/ComfyUI.git"
 ```
 To start ComfyUI with it:
 ```shell
-comfyui --use-sage-attention
+uv run comfyui --use-sage-attention
 ```
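If the `attention` extra built correctly, the module should import cleanly. A quick, hedged sanity check (module name as published by the SageAttention project):

```shell
uv run python -c "import sageattention; print('sageattention imported OK')"
```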
 ![with_sage_attention.webp](./docs/assets/with_sage_attention.webp)


@@ -3,18 +3,18 @@ from __future__ import annotations
 import importlib
 import logging
 import os
-import shutil
 import sys
 import time
 import types
 from contextlib import contextmanager
-from typing import Dict, List, Iterable
 from os.path import join, basename, dirname, isdir, isfile, exists, abspath, split, splitext, realpath
+from typing import Dict, Iterable
 from . import base_nodes
 from .package_typing import ExportedNodes
 from ..component_model.plugins import prompt_server_instance_routes
+logger = logging.getLogger(__name__)
 class _PromptServerStub():
     def __init__(self):
@@ -30,7 +30,7 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str])
             spec.loader.exec_module(module)
             return True
         except Exception as e:
-            print(f"Failed to execute startup-script: {script_path} / {e}", file=sys.stderr)
+            logger.error(f"Failed to execute startup-script: {script_path} / {e}")
             return False
     node_prestartup_times = []
@@ -52,14 +52,13 @@ def _vanilla_load_importing_execute_prestartup_script(node_paths: Iterable[str])
             success = execute_script(script_path)
             node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
     if len(node_prestartup_times) > 0:
-        print("\nPrestartup times for custom nodes:", file=sys.stderr)
+        logger.debug("\nPrestartup times for custom nodes:")
         for n in sorted(node_prestartup_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (PRESTARTUP FAILED)"
-            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr)
-        print("\n", file=sys.stderr)
+            logger.debug(f"{n[0]:6.1f} seconds{import_message}: {n[1]}")
 @contextmanager
@@ -118,12 +117,10 @@ def _vanilla_load_custom_nodes_1(module_path, ignore=set()) -> ExportedNodes:
             exported_nodes.NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
             return exported_nodes
         else:
-            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.", file=sys.stderr)
+            logger.error(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
             return exported_nodes
     except Exception as e:
-        import traceback
-        print(traceback.format_exc())
-        print(f"Cannot import {module_path} module for custom nodes:", e, file=sys.stderr)
+        logger.error(f"Cannot import {module_path} module for custom nodes:", exc_info=e)
         return exported_nodes
@@ -151,14 +148,12 @@ def _vanilla_load_custom_nodes_2(node_paths: Iterable[str]) -> ExportedNodes:
             exported_nodes.update(possible_exported_nodes)
     if len(node_import_times) > 0:
-        print("\nImport times for custom nodes:", file=sys.stderr)
         for n in sorted(node_import_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (IMPORT FAILED)"
-            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1], file=sys.stderr)
-        print("\n", file=sys.stderr)
+            logger.debug(f"{n[0]:6.1f} seconds{import_message}: {n[1]}")
     return exported_nodes
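Because the custom-node timing and error output now goes through module-level loggers at DEBUG/ERROR level rather than `print(..., file=sys.stderr)`, the timing lines are hidden under the default logging configuration. A minimal sketch of how an embedding application could surface them (standard library only):

```python
import logging

# Enable DEBUG-level records so the per-node prestartup and import
# timings logged via logger.debug(...) in this module become visible.
logging.basicConfig(level=logging.DEBUG)
```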

pyproject.toml

@@ -2,24 +2,25 @@
 name = "comfyui"
 version = "0.3.29"
 description = "An installable version of ComfyUI"
-readme = "README.md" # Optional: if you have a README
+readme = "README.md"
 authors = [
     { name = "Contributors_of_ComfyUI" },
 ]
 requires-python = ">=3.10"
-license = { text = "Specify License Here" } # Optional: Add your license
+license = { text = "GPL-3.0-or-later" }
-classifiers = [ # Optional: Standard PyPI classifiers
+classifiers = [
     "Programming Language :: Python :: 3",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
     "Operating System :: OS Independent",
-    # Add other relevant classifiers
 ]
-# Core dependencies from requirements.txt (excluding torch/torchvision)
 dependencies = [
+    "torch",
+    "torchvision",
+    "torchaudio",
     "comfyui-frontend-package",
     "comfyui-workflow-templates",
     "torchdiffeq>=0.2.3",
@@ -110,25 +111,7 @@ where = ["."]
 include = ["comfy*"]
 namespaces = false
-[project.optional-dependencies]
-cpu = [
-    "torch",
-    "torchvision",
-    "torchaudio",
-]
-cuda = [
-    "torch",
-    "torchvision",
-    "torchaudio",
-]
-rocm = [
-    "torch",
-    "torchvision",
-    "torchaudio",
-]
+[dependency-groups]
 dev = [
     "pytest",
     "pytest-asyncio",
@@ -145,16 +128,40 @@ dev = [
     "pylint",
 ]
-triton = [
-    "triton ; sys_platform == 'Linux'",
-    'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp312-cp312-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.12"',
-    'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp311-cp311-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.11"',
-    'triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post9/triton-3.2.0-cp310-cp310-win_amd64.whl ; sys_platform == "Windows" and python_version == "3.10"',
-]
-withtorch = ["comfyui[cuda]"] # Depends on the 'cuda' extra
-withtriton = ["comfyui[cuda, triton]"] # Depends on 'cuda' and 'triton' extras
+[project.optional-dependencies]
+cpu = [
+    "torch",
+    "torchvision",
+    "torchaudio",
+]
+cu126 = [
+    "torch",
+    "torchvision",
+    "torchaudio",
+    "triton ; sys_platform == 'linux'",
+    "triton-windows ; sys_platform == 'win32'",
+]
+cu128 = [
+    "torch",
+    "torchvision",
+    "torchaudio",
+    "triton ; sys_platform == 'linux'",
+    "triton-windows ; sys_platform == 'win32'",
+]
+rocm = [
+    "torch",
+    "torchvision",
+    "torchaudio",
+    "pytorch-triton-rocm",
+]
+attention = [
+    "sageattention",
+    "flash_attn ; sys_platform == 'linux'",
+]
 [project.scripts]
 comfyui = "comfy.cmd.main:entrypoint"
@@ -162,12 +169,17 @@ comfyui-worker = "comfy.entrypoints.worker:entrypoint"
 comfyui-workflow = "comfy.entrypoints.workflow:entrypoint"
 [project.urls]
-Homepage = "https://github.com/comfyanonymous/ComfyUI" # Example
-Repository = "https://github.com/comfyanonymous/ComfyUI" # Example
+Homepage = "https://github.com/comfyanonymous/ComfyUI"
+Repository = "https://github.com/comfyanonymous/ComfyUI"
 [tool.uv]
 conflicts = [
-    [{ extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" }],
+    [{ extra = "cpu" }, { extra = "cu126" }, { extra = "cu128" }, { extra = "rocm" }],
+]
+no-build-isolation-package = [
+    "sageattention",
+    "sage-attention",
+    "flash_attn",
 ]
 [[tool.uv.index]]
@@ -180,6 +192,11 @@ name = "pytorch-cu126"
 url = "https://download.pytorch.org/whl/cu126"
 explicit = true
+[[tool.uv.index]]
+name = "pytorch-cu128"
+url = "https://download.pytorch.org/whl/cu128"
+explicit = true
 [[tool.uv.index]]
 name = "pytorch-rocm"
 url = "https://download.pytorch.org/whl/rocm6.3"
@@ -188,23 +205,29 @@ explicit = true
 [tool.uv.sources]
 torch = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
+    { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
 ]
 torchvision = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
+    { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
 ]
 torchaudio = [
     { index = "pytorch-cpu", extra = "cpu" },
-    { index = "pytorch-cu126", extra = "cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
-    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
+    { index = "pytorch-cu126", extra = "cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+    { index = "pytorch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
 ]
 comfyui-frontend-package = [
-    # { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" },
-    { workspace = true }
+    { git = "https://github.com/appmana/appmana-comfyui-frontend", subdirectory = "comfyui_frontend_package" },
 ]
+"sageattention" = [
+    { git = "https://github.com/thu-ml/SageAttention.git", extra = "attention", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+]
 [tool.ruff]
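With the torch stacks split into explicit extras, selecting a backend becomes an ordinary extras install. A sketch of typical usage (extra names come from this `pyproject.toml`; the commands are standard uv):

```shell
# From the git URL, picking the CUDA 12.8 stack:
uv pip install "comfyui[cu128]@git+https://github.com/hiddenswitch/ComfyUI.git"

# Inside a checkout, syncing the locked environment with the ROCm stack:
uv sync --extra rocm
```

The `conflicts` table above declares the `cpu`, `cu126`, `cu128` and `rocm` extras mutually exclusive, so uv will refuse to resolve two of them into the same environment, and `no-build-isolation-package` lets `sageattention` and `flash_attn` build against the torch that is already installed.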