Create a setup.py and automatically select the correct pytorch binaries for the current platform and supported devices

- setup.py now works
 - Makes installation work a variety of ways, including making other packages dependent on this one for e.g. plugins
 - Fixes missing __init__.py issues
 - Fixes imports
 - Compatible with your existing scripts that rely on requirements.txt
 - Fixes error in comfy/ldm/models/diffusion/ddim.py
 - Fixes missing packages for other diffusers code in this repo
This commit is contained in:
Benjamin Berman 2023-03-25 15:51:31 -07:00
parent b90991d2c3
commit c69a80d94e
25 changed files with 323 additions and 71 deletions

158
.gitignore vendored
View File

@ -1,5 +1,3 @@
__pycache__/
*.py[cod]
output/ output/
input/ input/
!input/example.png !input/example.png
@ -8,3 +6,159 @@ temp/
custom_nodes/ custom_nodes/
!custom_nodes/example_node.py.example !custom_nodes/example_node.py.example
extra_model_paths.yaml extra_model_paths.yaml
# The GitHub python gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
.idea/

View File

@ -59,17 +59,13 @@ Put your VAE in: models/vae
At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10. At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10.
### AMD (Linux only) Then, run the following command to install comfyui into your current environment:
AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` ```
pip install -e .
```
This will correctly select the version of pytorch that matches the GPU on your machine (NVIDIA or CPU on Windows; NVIDIA, AMD, or CPU on Linux).
### NVIDIA
Nvidia users should install torch and xformers using this command:
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers```
#### Troubleshooting #### Troubleshooting
@ -79,15 +75,6 @@ If you get the "Torch not compiled with CUDA enabled" error, uninstall torch wit
And install it again with the command above. And install it again with the command above.
### Dependencies
Install the dependencies by opening your terminal inside the ComfyUI folder and:
```pip install -r requirements.txt```
After this you should have everything installed and can proceed to running ComfyUI.
### I already have another UI for Stable Diffusion installed do I really have to install all of these dependencies? ### I already have another UI for Stable Diffusion installed do I really have to install all of these dependencies?
You don't. If you have another UI installed and working with its own python venv you can use that venv to run ComfyUI. You can open up your favorite terminal and activate it: You don't. If you have another UI installed and working with its own python venv you can use that venv to run ComfyUI. You can open up your favorite terminal and activate it:
@ -102,7 +89,6 @@ With cmd.exe: ```"path_to_other_sd_gui\venv\Scripts\activate.bat"```
And then you can use that terminal to run Comfyui without installing any dependencies. Note that the venv folder might be called something else depending on the SD UI. And then you can use that terminal to run Comfyui without installing any dependencies. Note that the venv folder might be called something else depending on the SD UI.
# Running # Running
```python main.py``` ```python main.py```

0
comfy/__init__.py Normal file
View File

View File

@ -5,17 +5,17 @@ import torch
import torch as th import torch as th
import torch.nn as nn import torch.nn as nn
from ldm.modules.diffusionmodules.util import ( from comfy.ldm.modules.diffusionmodules.util import (
conv_nd, conv_nd,
linear, linear,
zero_module, zero_module,
timestep_embedding, timestep_embedding,
) )
from ldm.modules.attention import SpatialTransformer from comfy.ldm.modules.attention import SpatialTransformer
from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
from ldm.models.diffusion.ddpm import LatentDiffusion from comfy.ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config from comfy.ldm.util import log_txt_as_img, exists, instantiate_from_config
class ControlledUnetModel(UNetModel): class ControlledUnetModel(UNetModel):

View File

@ -689,7 +689,7 @@ class UniPC:
x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t) x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
else: else:
x_t_ = ( x_t_ = (
expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dimss) * x expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- expand_dims(sigma_t * h_phi_1, dims) * model_prev_0 - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
) )
if x_t is None: if x_t is None:

View File

@ -1,6 +1,6 @@
import torch import torch
from ldm.modules.midas.api import load_midas_transform from comfy.ldm.modules.midas.api import load_midas_transform
class AddMiDaS(object): class AddMiDaS(object):

View File

@ -3,11 +3,11 @@ import torch
import torch.nn.functional as F import torch.nn.functional as F
from contextlib import contextmanager from contextlib import contextmanager
from ldm.modules.diffusionmodules.model import Encoder, Decoder from comfy.ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from ldm.util import instantiate_from_config from comfy.ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma from comfy.ldm.modules.ema import LitEma
# class AutoencoderKL(pl.LightningModule): # class AutoencoderKL(pl.LightningModule):
class AutoencoderKL(torch.nn.Module): class AutoencoderKL(torch.nn.Module):

View File

@ -4,7 +4,7 @@ import torch
import numpy as np import numpy as np
from tqdm import tqdm from tqdm import tqdm
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor from comfy.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
class DDIMSampler(object): class DDIMSampler(object):

View File

@ -19,12 +19,12 @@ from tqdm import tqdm
from torchvision.utils import make_grid from torchvision.utils import make_grid
# from pytorch_lightning.utilities.distributed import rank_zero_only # from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from comfy.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma from comfy.ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from comfy.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from comfy.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler from comfy.ldm.models.diffusion.ddim import DDIMSampler
__conditioning_keys__ = {'concat': 'c_concat', __conditioning_keys__ = {'concat': 'c_concat',

View File

@ -5,8 +5,8 @@ import numpy as np
from tqdm import tqdm from tqdm import tqdm
from functools import partial from functools import partial
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like from comfy.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding from comfy.ldm.models.diffusion.sampling_util import norm_thresholding
class PLMSSampler(object): class PLMSSampler(object):

View File

@ -6,10 +6,10 @@ from torch import nn, einsum
from einops import rearrange, repeat from einops import rearrange, repeat
from typing import Optional, Any from typing import Optional, Any
from ldm.modules.diffusionmodules.util import checkpoint from comfy.ldm.modules.diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention from .sub_quadratic_attention import efficient_dot_product_attention
import model_management from comfy import model_management
if model_management.xformers_enabled(): if model_management.xformers_enabled():

View File

@ -6,8 +6,8 @@ import numpy as np
from einops import rearrange from einops import rearrange
from typing import Optional, Any from typing import Optional, Any
from ldm.modules.attention import MemoryEfficientCrossAttention from comfy.ldm.modules.attention import MemoryEfficientCrossAttention
import model_management from comfy import model_management
if model_management.xformers_enabled(): if model_management.xformers_enabled():
import xformers import xformers

View File

@ -6,7 +6,7 @@ import torch as th
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from ldm.modules.diffusionmodules.util import ( from comfy.ldm.modules.diffusionmodules.util import (
checkpoint, checkpoint,
conv_nd, conv_nd,
linear, linear,
@ -15,8 +15,8 @@ from ldm.modules.diffusionmodules.util import (
normalization, normalization,
timestep_embedding, timestep_embedding,
) )
from ldm.modules.attention import SpatialTransformer from comfy.ldm.modules.attention import SpatialTransformer
from ldm.util import exists from comfy.ldm.util import exists
# dummy replace # dummy replace

View File

@ -3,8 +3,8 @@ import torch.nn as nn
import numpy as np import numpy as np
from functools import partial from functools import partial
from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule from comfy.ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
from ldm.util import default from comfy.ldm.util import default
class AbstractLowScaleModel(nn.Module): class AbstractLowScaleModel(nn.Module):

View File

@ -15,7 +15,7 @@ import torch.nn as nn
import numpy as np import numpy as np
from einops import repeat from einops import repeat
from ldm.util import instantiate_from_config from comfy.ldm.util import instantiate_from_config
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):

View File

@ -5,7 +5,7 @@ from torch.utils.checkpoint import checkpoint
from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
import open_clip import open_clip
from ldm.util import default, count_params from comfy.ldm.util import default, count_params
class AbstractEncoder(nn.Module): class AbstractEncoder(nn.Module):

View File

@ -23,7 +23,7 @@ from scipy.interpolate import interp2d
from scipy.linalg import orth from scipy.linalg import orth
import albumentations import albumentations
import ldm.modules.image_degradation.utils_image as util import comfy.ldm.modules.image_degradation.utils_image as util
def modcrop_np(img, sf): def modcrop_np(img, sf):

View File

@ -12,7 +12,7 @@ from scipy.interpolate import interp2d
from scipy.linalg import orth from scipy.linalg import orth
import albumentations import albumentations
import ldm.modules.image_degradation.utils_image as util import comfy.ldm.modules.image_degradation.utils_image as util
""" """
# -------------------------------------------- # --------------------------------------------

View File

@ -5,10 +5,10 @@ import torch
import torch.nn as nn import torch.nn as nn
from torchvision.transforms import Compose from torchvision.transforms import Compose
from ldm.modules.midas.midas.dpt_depth import DPTDepthModel from comfy.ldm.modules.midas.midas.dpt_depth import DPTDepthModel
from ldm.modules.midas.midas.midas_net import MidasNet from comfy.ldm.modules.midas.midas.midas_net import MidasNet
from ldm.modules.midas.midas.midas_net_custom import MidasNet_small from comfy.ldm.modules.midas.midas.midas_net_custom import MidasNet_small
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet from comfy.ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
ISL_PATHS = { ISL_PATHS = {

View File

@ -24,7 +24,7 @@ except ImportError:
from torch import Tensor from torch import Tensor
from typing import List from typing import List
import model_management import comfy.model_management as model_management
def dynamic_slice( def dynamic_slice(
x: Tensor, x: Tensor,

View File

@ -3,7 +3,7 @@ from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc from .extra_samplers import uni_pc
import torch import torch
import contextlib import contextlib
import model_management from comfy import model_management
from .ldm.models.diffusion.ddim import DDIMSampler from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.modules.diffusionmodules.util import make_ddim_timesteps from .ldm.modules.diffusionmodules.util import make_ddim_timesteps

0
comfy_extras/__init__.py Normal file
View File

View File

@ -1,7 +1,7 @@
import os import os
from comfy_extras.chainner_models import model_loading from comfy_extras.chainner_models import model_loading
from comfy.sd import load_torch_file from comfy.sd import load_torch_file
import model_management from comfy import model_management
import torch import torch
import comfy.utils import comfy.utils
import folder_paths import folder_paths

View File

@ -1,11 +1,20 @@
torch torch==1.13.1
torchdiffeq torchvision==0.14.1
torchsde torchaudio==0.13.1
einops torchdiffeq==0.2.3
open-clip-torch torchsde==0.2.5
einops==0.6.0
open-clip-torch==2.16.0
transformers>=4.25.1 transformers>=4.25.1
safetensors safetensors==0.3.0
pytorch_lightning pytorch-lightning==2.0.0
aiohttp aiohttp==3.8.4
accelerate accelerate==0.18.0
pyyaml pyyaml==6.0
scikit-image==0.20.0
jsonmerge==1.9.0
clean-fid==0.1.35
clip==0.2.0
resize-right==0.0.2
opencv-python==4.7.0.72
albumentations==1.3.0

103
setup.py Normal file
View File

@ -0,0 +1,103 @@
#!/usr/bin/env python3
# this script does a little housekeeping for your platform
import os.path
import platform
import subprocess
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.network.session import PipSession
from pip._internal.req import InstallRequirement
from pip._vendor.packaging.requirements import Requirement
from setuptools import setup, find_packages
# Distribution name used on PyPI-style indexes and in "comes_from" markers below.
package_name = "comfyui"
# Single source of truth for the package version (also reused in dependencies()).
version = '0.0.1'
def _is_nvidia() -> bool:
system = platform.system().lower()
nvidia_smi_paths = []
if system == "windows":
nvidia_smi_paths.append(os.path.join(os.environ.get("SystemRoot", ""), "System32", "nvidia-smi.exe"))
elif system == "linux":
nvidia_smi_paths.extend(["/usr/bin/nvidia-smi", "/opt/nvidia/bin/nvidia-smi"])
for nvidia_smi_path in nvidia_smi_paths:
try:
output = subprocess.check_output([nvidia_smi_path, "-L"]).decode("utf-8")
if "GPU" in output:
return True
except:
pass
return False
def _is_amd() -> bool:
system = platform.system().lower()
rocminfo_paths = []
# todo: torch windows doesn't support amd
if system == "windows":
rocminfo_paths.append(os.path.join(os.environ.get("ProgramFiles", ""), "AMD", "ROCm", "bin", "rocminfo.exe"))
elif system == "linux":
rocminfo_paths.extend(["/opt/rocm/bin/rocminfo", "/usr/bin/rocminfo"])
for rocminfo_path in rocminfo_paths:
try:
output = subprocess.check_output([rocminfo_path]).decode("utf-8")
if "Device" in output:
return True
except:
pass
return False
# Extra package indexes hosting GPU-specific torch wheels.
_amd_torch_index = "https://download.pytorch.org/whl/rocm5.4.2"
_nvidia_torch_index = "https://download.pytorch.org/whl/cu117"
# Any dependency resolved from one of these indexes gets pinned to its direct
# URL in dependencies(), so pip installs the accelerator-specific wheel.
_alternative_indices = [_amd_torch_index, _nvidia_torch_index]
def dependencies() -> list[str]:
    """Build the install_requires list for the detected GPU platform.

    Reads requirements.txt, and when an NVIDIA or AMD GPU is detected, resolves
    each requirement against the matching pytorch wheel index as well as PyPI.
    Requirements whose best candidate lives on a pytorch index are rewritten as
    PEP 508 direct references (``name @ url``) so pip installs the
    accelerator-specific binary.  On CPU-only machines the requirements are
    returned unchanged.

    Returns:
        The (possibly rewritten) requirement strings.
    """
    # Use a context manager so the file handle is closed; skip blank lines and
    # comments, which are valid in requirements.txt but not parseable by
    # Requirement().
    with open(os.path.join(os.path.dirname(__file__), "requirements.txt")) as f:
        _dependencies = [line.strip() for line in f
                         if line.strip() and not line.lstrip().startswith("#")]
    session = PipSession()
    index_urls = ['https://pypi.org/simple']
    # prefer nvidia over AMD because AM5/iGPU systems will have a valid ROCm device
    if _is_nvidia():
        index_urls += [_nvidia_torch_index]
        _dependencies += ["xformers==0.0.16"]
    elif _is_amd():
        index_urls += [_amd_torch_index]
    if len(index_urls) == 1:
        # CPU-only: no alternative index was added, nothing to rewrite.
        return _dependencies
    finder = PackageFinder.create(LinkCollector(session, SearchScope([], index_urls, no_index=False)),
                                  SelectionPreferences(allow_yanked=False, prefer_binary=False))
    for i, package in enumerate(_dependencies[:]):
        requirement = InstallRequirement(Requirement(package), comes_from=f"{package_name}=={version}")
        candidate = finder.find_best_candidate(requirement.name, requirement.specifier)
        # find_best_candidate can legitimately return no candidate (e.g. the
        # specifier matches nothing on the queried indexes); skip instead of
        # crashing with AttributeError on `.link`.
        if candidate.best_candidate is None:
            continue
        if any(url in candidate.best_candidate.link.url for url in _alternative_indices):
            _dependencies[i] = f"{requirement.name} @ {candidate.best_candidate.link.url}"
    return _dependencies
setup(
    # "comfyui"
    name=package_name,
    description="",
    author="",
    version=version,
    # pytorch wheels at this time only support CPython 3.9/3.10.
    python_requires=">=3.9,<3.11",
    # Only ship the library packages; main.py/server scripts stay repo-local.
    packages=find_packages(include=['comfy', 'comfy_extras']),
    # install_requires is computed at build time for the detected GPU platform.
    install_requires=dependencies(),
)