Mirror of https://github.com/comfyanonymous/ComfyUI.git

Commit 0fb47ea4f5: Merge branch 'installable' into spellsource
@@ -2,6 +2,13 @@ name: "Windows Release cu118 dependencies 2"

 on:
   workflow_dispatch:
+    inputs:
+      xformers:
+        description: 'xformers version'
+        required: true
+        type: string
+        default: "xformers"
+
 #  push:
 #    branches:
 #      - master
@@ -17,7 +24,7 @@ jobs:

     - shell: bash
       run: |
-        python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
+        python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
         python -m pip install --no-cache-dir ./temp_wheel_dir/*
         echo installed basic
         ls -lah temp_wheel_dir
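Exposing the xformers pin as a workflow_dispatch input lets the release job be re-run with any version spec without editing the workflow file. A minimal sketch of dispatching it through the GitHub REST API; the repository slug and workflow file name here are assumptions for illustration, and a real token is required:

    # Hedged sketch: trigger the dependencies workflow with a pinned xformers
    # version via the GitHub REST API (repo slug and file name assumed).
    import requests

    resp = requests.post(
        "https://api.github.com/repos/OWNER/ComfyUI/actions/workflows/"
        "windows_release_dependencies_2.yml/dispatches",  # assumed file name
        headers={"Authorization": "Bearer <token>",
                 "Accept": "application/vnd.github+json"},
        json={"ref": "master", "inputs": {"xformers": "xformers==0.0.20"}},
    )
    resp.raise_for_status()  # GitHub returns 204 No Content on success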
.gitignore (vendored, 4 changes)
@@ -4,9 +4,9 @@
 !/input/example.png
 /[Mm]odels/
 /[Tt]emp/
-/[Cc]ustom_nodes/
+/[Cc]ustom_nodes/*
+![Cc]ustom_nodes/__init__.py
 !/custom_nodes/example_node.py.example
-!/custom_nodes/__init__.py
 /extra_model_paths.yaml
 /.vs
 .idea/
CODEOWNERS (new file, 1 line)
@@ -0,0 +1 @@
+* @comfyanonymous
@@ -47,6 +47,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
 | Ctrl + O | Load workflow |
 | Ctrl + A | Select all nodes |
 | Ctrl + M | Mute/unmute selected nodes |
+| Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) |
 | Delete/Backspace | Delete selected nodes |
 | Ctrl + Delete/Backspace | Delete the current graph |
 | Space | Move the canvas around when held and moving the cursor |
@@ -24,8 +24,8 @@ class ClipVisionModel():
         return self.model.load_state_dict(sd, strict=False)

     def encode_image(self, image):
-        img = torch.clip((255. * image[0]), 0, 255).round().int()
-        inputs = self.processor(images=[img], return_tensors="pt")
+        img = torch.clip((255. * image), 0, 255).round().int()
+        inputs = self.processor(images=img, return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
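Previously encode_image encoded only the first image of a ComfyUI batch (image[0], wrapped in a one-element list); now the whole (batch, height, width, channel) tensor goes to the processor, so image_embeds gains one row per input image. A shape sketch with toy tensors, not the real CLIP vision model:

    # Shape sketch of the encode_image change (toy tensors only):
    import torch

    image = torch.rand(3, 224, 224, 3)  # a ComfyUI batch of 3 images
    old_input = [torch.clip(255. * image[0], 0, 255).round().int()]  # 1 image
    new_input = torch.clip(255. * image, 0, 255).round().int()       # 3 images
    print(len(old_input), new_input.shape[0])  # 1 3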
@@ -36,13 +36,15 @@ def get_gpu_names():
     else:
         return set()

-def cuda_malloc_supported():
-    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
-                 "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
-                 "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
-                 "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
-                 "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M"}
+blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+             "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+             "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+             "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
+             "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
+             "GeForce GTX 1650", "GeForce GTX 1630"
+             }
+
+def cuda_malloc_supported():
     try:
         names = get_gpu_names()
     except:
@@ -5,6 +5,7 @@ import copy
 import datetime
 import heapq
 import threading
+import time
 import traceback
 import typing
 from dataclasses import dataclass
@@ -46,7 +47,7 @@ def get_good_outputs(t: QueueTuple):
 class HistoryEntry(typing.TypedDict):
     prompt: QueueTuple
     outputs: dict
-    timestamp: datetime.datetime
+    timestamp: int


 @dataclass
@@ -780,7 +781,7 @@ class PromptQueue:
         with self.mutex:
             queue_item = self.currently_running.pop(item_id)
             prompt = queue_item.queue_tuple
-            self.history[prompt[1]] = {"prompt": prompt, "outputs": {}, "timestamp": datetime.datetime.now()}
+            self.history[prompt[1]] = {"prompt": prompt, "outputs": {}, "timestamp": time.time()}
             for o in outputs:
                 self.history[prompt[1]]["outputs"][o] = outputs[o]
             self.server.queue_updated()
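Storing the history timestamp as a Unix epoch number instead of a datetime object keeps HistoryEntry JSON-serializable for the HTTP API; note that time.time() actually returns a float, so the timestamp: int annotation above is approximate. A small sketch of the round trip:

    # Sketch: the epoch timestamp serializes cleanly, unlike datetime.
    import json, time, datetime

    entry = {"prompt": (), "outputs": {}, "timestamp": time.time()}
    print(json.dumps(entry))  # works as-is
    print(datetime.datetime.fromtimestamp(entry["timestamp"]))  # recover a datetime
    # json.dumps({"t": datetime.datetime.now()}) would raise TypeError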
@@ -1,5 +1,7 @@
 import os
 import importlib.util
+
+from comfy.cmd import cuda_malloc
 from ..cmd import folder_paths
 import time
@@ -124,6 +126,18 @@ def load_extra_path_config(yaml_path):
             folder_paths.add_model_folder_path(x, full_path)


+def cuda_malloc_warning():
+    device = comfy.model_management.get_torch_device()
+    device_name = comfy.model_management.get_torch_device_name(device)
+    cuda_malloc_warning = False
+    if "cudaMallocAsync" in device_name:
+        for b in cuda_malloc.blacklist:
+            if b in device_name:
+                cuda_malloc_warning = True
+        if cuda_malloc_warning:
+            print("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
+
+
 def main():
     if args.temp_directory:
         temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
@@ -146,6 +160,7 @@ def main():

     server.add_routes()
     hijack_progress(server)
+    cuda_malloc_warning()

     threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()
@@ -726,11 +726,14 @@ class PromptServer():
         site = web.TCPSite(runner, address, port)
         await site.start()

-        if address == '':
+        address_to_print = 'localhost'
+        if address == '' or address == '0.0.0.0':
             address = '0.0.0.0'
+        else:
+            address_to_print = address
         if verbose:
             print("Starting server\n")
-            print("To see the GUI go to: http://{}:{}".format(address, port))
+            print("To see the GUI go to: http://{}:{}".format(address_to_print, port))
         if call_on_start is not None:
             call_on_start(address, port)
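With this change, binding to all interfaces prints a clickable http://localhost:... URL instead of the unroutable 0.0.0.0. A sketch of the new logic in isolation (port 8188 is ComfyUI's default):

    # Sketch of the new URL-printing logic, extracted for illustration:
    def url_for(address, port=8188):
        address_to_print = 'localhost'
        if address == '' or address == '0.0.0.0':
            address = '0.0.0.0'
        else:
            address_to_print = address
        return "http://{}:{}".format(address_to_print, port)

    print(url_for(''))             # http://localhost:8188
    print(url_for('192.168.1.5'))  # http://192.168.1.5:8188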
@@ -631,23 +631,78 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
                 elif solver_type == 'midpoint':
                     x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)

-            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
+            if eta:
+                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise

         old_denoised = denoised
         h_last = h
     return x

+@torch.no_grad()
+def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    """DPM-Solver++(3M) SDE."""
+
+    seed = extra_args.get("seed", None)
+    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+
+    denoised_1, denoised_2 = None, None
+    h_1, h_2 = None, None
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+        if sigmas[i + 1] == 0:
+            # Denoising step
+            x = denoised
+        else:
+            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
+            h = s - t
+            h_eta = h * (eta + 1)
+
+            x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
+
+            if h_2 is not None:
+                r0 = h_1 / h
+                r1 = h_2 / h
+                d1_0 = (denoised - denoised_1) / r0
+                d1_1 = (denoised_1 - denoised_2) / r1
+                d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
+                d2 = (d1_0 - d1_1) / (r0 + r1)
+                phi_2 = h_eta.neg().expm1() / h_eta + 1
+                phi_3 = phi_2 / h_eta - 0.5
+                x = x + phi_2 * d1 - phi_3 * d2
+            elif h_1 is not None:
+                r = h_1 / h
+                d = (denoised - denoised_1) / r
+                phi_2 = h_eta.neg().expm1() / h_eta + 1
+                x = x + phi_2 * d
+
+            if eta:
+                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise
+
+        denoised_1, denoised_2 = denoised, denoised_1
+        h_1, h_2 = h, h_1
+    return x
+
+@torch.no_grad()
+def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
+    return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler)
+
 @torch.no_grad()
 def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)


 @torch.no_grad()
 def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=r)
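Two things happen in this hunk: the new `if eta:` guard means eta=0 now yields a purely deterministic update with no fresh noise injected, and the added DPM-Solver++(3M) SDE sampler keeps up to two previous denoised estimates (denoised_1, denoised_2) for a third-order correction. A minimal smoke test, with the import path assumed from this fork's layout; note extra_args must be passed as a dict, because seed is read from it before the None check in the code above:

    # Minimal smoke test for the new sampler (import path assumed).
    import torch
    from comfy.k_diffusion.sampling import sample_dpmpp_3m_sde

    def toy_model(x, sigma, **kwargs):
        return torch.zeros_like(x)  # stand-in denoiser: "clean" image is all zeros

    sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0, 0.0])
    x = torch.randn(1, 4, 8, 8) * sigmas[0]
    # Pass extra_args={} explicitly: seed is read from it before the None check.
    out = sample_dpmpp_3m_sde(toy_model, x, sigmas, extra_args={}, eta=0.0, disable=True)
    print(out.shape)  # torch.Size([1, 4, 8, 8])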
@@ -105,6 +105,29 @@ class BaseModel(torch.nn.Module):

         return {**unet_state_dict, **vae_state_dict, **clip_state_dict}

+def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0):
+    adm_inputs = []
+    weights = []
+    noise_aug = []
+    for unclip_cond in unclip_conditioning:
+        for adm_cond in unclip_cond["clip_vision_output"].image_embeds:
+            weight = unclip_cond["strength"]
+            noise_augment = unclip_cond["noise_augmentation"]
+            noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
+            c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
+            adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
+            weights.append(weight)
+            noise_aug.append(noise_augment)
+            adm_inputs.append(adm_out)
+
+    if len(noise_aug) > 1:
+        adm_out = torch.stack(adm_inputs).sum(0)
+        noise_augment = noise_augment_merge
+        noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
+        c_adm, noise_level_emb = noise_augmentor(adm_out[:, :noise_augmentor.time_embed.dim], noise_level=torch.tensor([noise_level], device=device))
+        adm_out = torch.cat((c_adm, noise_level_emb), 1)
+
+    return adm_out
+
 class SD21UNCLIP(BaseModel):
     def __init__(self, model_config, noise_aug_config, model_type=ModelType.V_PREDICTION, device=None):
@@ -114,33 +137,11 @@ class SD21UNCLIP(BaseModel):
     def encode_adm(self, **kwargs):
         unclip_conditioning = kwargs.get("unclip_conditioning", None)
         device = kwargs["device"]
-        if unclip_conditioning is not None:
-            adm_inputs = []
-            weights = []
-            noise_aug = []
-            for unclip_cond in unclip_conditioning:
-                adm_cond = unclip_cond["clip_vision_output"].image_embeds
-                weight = unclip_cond["strength"]
-                noise_augment = unclip_cond["noise_augmentation"]
-                noise_level = round((self.noise_augmentor.max_noise_level - 1) * noise_augment)
-                c_adm, noise_level_emb = self.noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
-                adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
-                weights.append(weight)
-                noise_aug.append(noise_augment)
-                adm_inputs.append(adm_out)
-
-            if len(noise_aug) > 1:
-                adm_out = torch.stack(adm_inputs).sum(0)
-                #TODO: add a way to control this
-                noise_augment = 0.05
-                noise_level = round((self.noise_augmentor.max_noise_level - 1) * noise_augment)
-                c_adm, noise_level_emb = self.noise_augmentor(adm_out[:, :self.noise_augmentor.time_embed.dim], noise_level=torch.tensor([noise_level], device=device))
-                adm_out = torch.cat((c_adm, noise_level_emb), 1)
+        if unclip_conditioning is None:
+            return torch.zeros((1, self.adm_channels))
         else:
-            adm_out = torch.zeros((1, self.adm_channels))
-
-        return adm_out
+            return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05))

 class SDInpaint(BaseModel):
     def __init__(self, model_config, model_type=ModelType.EPS, device=None):
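The refactor moves the unCLIP ADM construction to module level so it can be shared, iterates image_embeds per image (matching the clip_vision.py batching change above), and turns the previously hard-coded 0.05 merge strength into the noise_augment_merge parameter, fed from kwargs as unclip_noise_augment_merge. A toy shape check with a stand-in augmentor; FakeAug mimics only the interface unclip_adm touches and is not the real noise-augmentation class:

    # Toy shape check of unclip_adm (assumes the function from the hunk above).
    import torch

    class FakeAug:
        max_noise_level = 1000
        class time_embed:
            dim = 4
        def __call__(self, emb, noise_level=None):
            emb = emb.reshape(1, -1)        # ensure a batch dimension
            return emb, torch.zeros(1, 2)   # (augmented emb, level embedding)

    class FakeVisionOut:
        image_embeds = torch.rand(2, 4)     # two images in one CLIP vision output

    cond = [{"clip_vision_output": FakeVisionOut(), "strength": 1.0, "noise_augmentation": 0.0}]
    out = unclip_adm(cond, "cpu", FakeAug(), noise_augment_merge=0.05)
    print(out.shape)  # torch.Size([1, 6]): both embeds merged into one ADM row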
@@ -113,6 +113,7 @@ def model_config_from_unet_config(unet_config):
         if model_config.matches(unet_config):
             return model_config(unet_config)

+    print("no match", unet_config)
     return None

 def model_config_from_unet(state_dict, unet_key_prefix, use_fp16):
@@ -757,7 +757,7 @@ class StyleModelApply:
     CATEGORY = "conditioning/style_model"

     def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
-        cond = style_model.get_cond(clip_vision_output)
+        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
         c = []
         for t in conditioning:
             n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
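Because the CLIP vision encoder now returns one token block per image in the batch, flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) reshapes (B, T, C) into (1, B*T, C), so every image's tokens are concatenated onto the text conditioning along the token axis. The shape effect on a toy tensor:

    # Shape sketch of the flatten + unsqueeze (toy tensor only):
    import torch

    cond = torch.rand(3, 257, 768)  # 3 images x 257 tokens x 768 channels
    cond2 = cond.flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
    print(cond2.shape)  # torch.Size([1, 771, 768])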
@@ -1436,6 +1436,44 @@ class ImageInvert:
         s = 1.0 - image
         return (s,)

+class ImageBatch:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "batch"
+
+    CATEGORY = "image"
+
+    def batch(self, image1, image2):
+        if image1.shape[1:] != image2.shape[1:]:
+            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
+        s = torch.cat((image1, image2), dim=0)
+        return (s,)
+
+class EmptyImage:
+    def __init__(self, device="cpu"):
+        self.device = device
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
+                              }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "generate"
+
+    CATEGORY = "image"
+
+    def generate(self, width, height, batch_size=1, color=0):
+        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
+        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
+        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
+        return (torch.cat((r, g, b), dim=-1), )
+
 class ImagePadForOutpaint:
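EmptyImage takes its color as a single 24-bit integer (the widget displays it as a color picker) and unpacks it into R, G, B planes with shifts and masks. A quick check of that arithmetic for orange:

    # Quick check of EmptyImage's color unpacking for orange (0xFF8000):
    color = 0xFF8000
    r = ((color >> 16) & 0xFF) / 0xFF  # 1.0
    g = ((color >> 8) & 0xFF) / 0xFF   # ~0.502
    b = (color & 0xFF) / 0xFF          # 0.0
    print(r, g, b)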
@@ -1521,7 +1559,9 @@ NODE_CLASS_MAPPINGS = {
     "ImageScale": ImageScale,
     "ImageScaleBy": ImageScaleBy,
     "ImageInvert": ImageInvert,
+    "ImageBatch": ImageBatch,
     "ImagePadForOutpaint": ImagePadForOutpaint,
+    "EmptyImage": EmptyImage,
     "ConditioningAverage ": ConditioningAverage ,
     "ConditioningCombine": ConditioningCombine,
     "ConditioningConcat": ConditioningConcat,
@@ -1615,6 +1655,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "ImageUpscaleWithModel": "Upscale Image (using Model)",
     "ImageInvert": "Invert Image",
     "ImagePadForOutpaint": "Pad Image for Outpainting",
+    "ImageBatch": "Batch Images",
     # _for_testing
     "VAEDecodeTiled": "VAE Decode (Tiled)",
     "VAEEncodeTiled": "VAE Encode (Tiled)",
@@ -347,6 +347,17 @@ def ddim_scheduler(model, steps):
     sigs += [0.0]
     return torch.FloatTensor(sigs)

+def sgm_scheduler(model, steps):
+    sigs = []
+    timesteps = torch.linspace(model.inner_model.inner_model.num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int)
+    for x in range(len(timesteps)):
+        ts = timesteps[x]
+        if ts > 999:
+            ts = 999
+        sigs.append(model.t_to_sigma(torch.tensor(ts)))
+    sigs += [0.0]
+    return torch.FloatTensor(sigs)
+
 def blank_inpaint_image_like(latent_image):
     blank_image = torch.ones_like(latent_image)
     # these are the values for "zero" in pixel space translated to latent space
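The sgm_uniform schedule, which appears to follow the convention of Stability's sgm (SDXL) codebase, picks steps + 1 evenly spaced timesteps from num_timesteps - 1 down to 0, drops the trailing zero, and converts the rest to sigmas. What the spacing looks like for 4 steps over a 1000-step model:

    # The timestep spacing behind "sgm_uniform" (1000 training steps, 4 sampling steps):
    import torch

    steps, num_timesteps = 4, 1000
    timesteps = torch.linspace(num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int)
    print(timesteps)  # tensor([999, 749, 499, 249], dtype=torch.int32)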
@@ -525,10 +536,10 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type):


 class KSampler:
-    SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
+    SCHEDULERS = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]
     SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
                 "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "ddim", "uni_pc", "uni_pc_bh2"]
+                "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddim", "uni_pc", "uni_pc_bh2"]

     def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, model_options={}):
         self.model = model
@@ -570,6 +581,8 @@ class KSampler:
             sigmas = simple_scheduler(self.model_wrap, steps)
         elif self.scheduler == "ddim_uniform":
             sigmas = ddim_scheduler(self.model_wrap, steps)
+        elif self.scheduler == "sgm_uniform":
+            sigmas = sgm_scheduler(self.model_wrap, steps)
         else:
             print("error invalid scheduler", self.scheduler)
comfy/sd.py (13 changes)
@@ -223,13 +223,16 @@ def model_lora_keys_unet(model, key_map={}):
     diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
     for k in diffusers_keys:
         if k.endswith(".weight"):
+            unet_key = "diffusion_model.{}".format(diffusers_keys[k])
             key_lora = k[:-len(".weight")].replace(".", "_")
-            key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k])
+            key_map["lora_unet_{}".format(key_lora)] = unet_key

-            diffusers_lora_key = "unet.{}".format(k[:-len(".weight")].replace(".to_", ".processor.to_"))
-            if diffusers_lora_key.endswith(".to_out.0"):
-                diffusers_lora_key = diffusers_lora_key[:-2]
-            key_map[diffusers_lora_key] = "diffusion_model.{}".format(diffusers_keys[k])
+            diffusers_lora_prefix = ["", "unet."]
+            for p in diffusers_lora_prefix:
+                diffusers_lora_key = "{}{}".format(p, k[:-len(".weight")].replace(".to_", ".processor.to_"))
+                if diffusers_lora_key.endswith(".to_out.0"):
+                    diffusers_lora_key = diffusers_lora_key[:-2]
+                key_map[diffusers_lora_key] = unet_key
     return key_map

 def set_attr(obj, attr, value):
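The rewritten loop registers each diffusers-style LoRA key twice, once bare and once with the unet. prefix, so LoRA files from exporters that omit the prefix still resolve to the same internal diffusion_model weight. A trace with a hypothetical key (the key string is an example, not taken from the hunk):

    # Illustrative trace of the key mapping for one hypothetical diffusers key:
    k = "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight"
    key_lora = k[:-len(".weight")].replace(".", "_")
    print("lora_unet_{}".format(key_lora))
    # lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q
    for p in ["", "unet."]:
        dk = "{}{}".format(p, k[:-len(".weight")].replace(".to_", ".processor.to_"))
        if dk.endswith(".to_out.0"):
            dk = dk[:-2]
        print(dk)
    # down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q
    # unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q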
@@ -3,6 +3,37 @@ import torch
 from comfy.nodes.common import MAX_RESOLUTION


+def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False):
+    if resize_source:
+        source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")
+
+    x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
+    y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))
+
+    left, top = (x // multiplier, y // multiplier)
+    right, bottom = (left + source.shape[3], top + source.shape[2],)
+
+    if mask is None:
+        mask = torch.ones_like(source)
+    else:
+        mask = mask.clone()
+        mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
+        mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
+
+    # calculate the bounds of the source that will be overlapping the destination
+    # this prevents the source trying to overwrite latent pixels that are out of bounds
+    # of the destination
+    visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
+
+    mask = mask[:, :, :visible_height, :visible_width]
+    inverse_mask = torch.ones_like(mask) - mask
+
+    source_portion = mask * source[:, :, :visible_height, :visible_width]
+    destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
+
+    destination[:, :, top:bottom, left:right] = source_portion + destination_portion
+    return destination
+
 class LatentCompositeMasked:
     @classmethod
     def INPUT_TYPES(s):
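Both the latent and the new image composite nodes now delegate to this single module-level composite() helper; x and y arrive in pixel units and are divided by multiplier (8 for latents, 1 for images). A toy run, assuming composite() from the hunk above is in scope:

    # Toy run of the shared helper (assumes composite() from the hunk above).
    import torch

    dest = torch.zeros(1, 4, 8, 8)  # latent-shaped destination
    src = torch.ones(1, 4, 4, 4)    # smaller source patch
    out = composite(dest, src, x=16, y=0, mask=None, multiplier=8)
    # x=16 pixels // multiplier 8 -> left=2: the patch covers rows 0..3, cols 2..5
    print(out[0, 0])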
@@ -12,6 +43,7 @@ class LatentCompositeMasked:
                 "source": ("LATENT",),
                 "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                 "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                "resize_source": ("BOOLEAN", {"default": False}),
             },
             "optional": {
                 "mask": ("MASK",),
@@ -22,40 +54,36 @@ class LatentCompositeMasked:

     CATEGORY = "latent"

-    def composite(self, destination, source, x, y, mask = None):
+    def composite(self, destination, source, x, y, resize_source, mask = None):
         output = destination.copy()
         destination = destination["samples"].clone()
         source = source["samples"]
+        output["samples"] = composite(destination, source, x, y, mask, 8, resize_source)
+        return (output,)

-        x = max(-source.shape[3] * 8, min(x, destination.shape[3] * 8))
-        y = max(-source.shape[2] * 8, min(y, destination.shape[2] * 8))
-
-        left, top = (x // 8, y // 8)
-        right, bottom = (left + source.shape[3], top + source.shape[2],)
-
-        if mask is None:
-            mask = torch.ones_like(source)
-        else:
-            mask = mask.clone()
-            mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
-            mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
-
-        # calculate the bounds of the source that will be overlapping the destination
-        # this prevents the source trying to overwrite latent pixels that are out of bounds
-        # of the destination
-        visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
-
-        mask = mask[:, :, :visible_height, :visible_width]
-        inverse_mask = torch.ones_like(mask) - mask
-
-        source_portion = mask * source[:, :, :visible_height, :visible_width]
-        destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
-
-        destination[:, :, top:bottom, left:right] = source_portion + destination_portion
-
-        output["samples"] = destination
-
+class ImageCompositeMasked:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "destination": ("IMAGE",),
+                "source": ("IMAGE",),
+                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+                "resize_source": ("BOOLEAN", {"default": False}),
+            },
+            "optional": {
+                "mask": ("MASK",),
+            }
+        }
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "composite"
+
+    CATEGORY = "image"
+
+    def composite(self, destination, source, x, y, resize_source, mask = None):
+        destination = destination.clone().movedim(-1, 1)
+        output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
         return (output,)

 class MaskToImage:
@@ -254,6 +282,7 @@ class FeatherMask:

 NODE_CLASS_MAPPINGS = {
     "LatentCompositeMasked": LatentCompositeMasked,
+    "ImageCompositeMasked": ImageCompositeMasked,
     "MaskToImage": MaskToImage,
     "ImageToMask": ImageToMask,
     "SolidMask": SolidMask,
@@ -59,8 +59,8 @@ class Blend:
     def g(self, x):
         return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))

-def gaussian_kernel(kernel_size: int, sigma: float):
-    x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij")
+def gaussian_kernel(kernel_size: int, sigma: float, device=None):
+    x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size, device=device), torch.linspace(-1, 1, kernel_size, device=device), indexing="ij")
     d = torch.sqrt(x * x + y * y)
     g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
     return g / g.sum()
@@ -101,7 +101,7 @@ class Blur:
         batch_size, height, width, channels = image.shape

         kernel_size = blur_radius * 2 + 1
-        kernel = gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1)
+        kernel = gaussian_kernel(kernel_size, sigma, device=image.device).repeat(channels, 1, 1).unsqueeze(1)

         image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C)
         padded_image = F.pad(image, (blur_radius,blur_radius,blur_radius,blur_radius), 'reflect')
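Before this fix the Gaussian kernel was always created on the CPU, so convolving a GPU-resident image against it would raise a device-mismatch error; building it on image.device keeps the whole blur on one device. A sketch assuming gaussian_kernel from the hunk above is in scope and a CUDA device is available:

    # Sketch of the device fix (assumes gaussian_kernel above and a CUDA device):
    import torch

    image = torch.rand(1, 64, 64, 3, device="cuda")
    kernel = gaussian_kernel(5, 1.0, device=image.device)
    assert kernel.device == image.device  # kernel is born on the GPU; no mismatch later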
custom_nodes/__init__.py (new empty file)
@@ -284,6 +284,11 @@ export class ComfyApp {
 				}
 			}

+			options.push({
+					content: "Bypass",
+					callback: (obj) => { if (this.mode === 4) this.mode = 0; else this.mode = 4; this.graph.change(); }
+				});
+
 			// prevent conflict of clipspace content
 			if(!ComfyApp.clipspace_return_node) {
 				options.push({