mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-13 15:02:37 +08:00

Merge branch 'comfyanonymous:master' into feature/preview-latent

This commit is contained in:
commit 479b3cd872
comfy/k_diffusion/sampling.py
@@ -631,23 +631,78 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
             elif solver_type == 'midpoint':
                 x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
 
-            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
+            if eta:
+                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
 
         old_denoised = denoised
         h_last = h
     return x
 
+
+@torch.no_grad()
+def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    """DPM-Solver++(3M) SDE."""
+
+    seed = extra_args.get("seed", None)
+    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+
+    denoised_1, denoised_2 = None, None
+    h_1, h_2 = None, None
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+        if sigmas[i + 1] == 0:
+            # Denoising step
+            x = denoised
+        else:
+            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
+            h = s - t
+            h_eta = h * (eta + 1)
+
+            x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
+
+            if h_2 is not None:
+                r0 = h_1 / h
+                r1 = h_2 / h
+                d1_0 = (denoised - denoised_1) / r0
+                d1_1 = (denoised_1 - denoised_2) / r1
+                d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
+                d2 = (d1_0 - d1_1) / (r0 + r1)
+                phi_2 = h_eta.neg().expm1() / h_eta + 1
+                phi_3 = phi_2 / h_eta - 0.5
+                x = x + phi_2 * d1 - phi_3 * d2
+            elif h_1 is not None:
+                r = h_1 / h
+                d = (denoised - denoised_1) / r
+                phi_2 = h_eta.neg().expm1() / h_eta + 1
+                x = x + phi_2 * d
+
+            if eta:
+                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise
+
+        denoised_1, denoised_2 = denoised, denoised_1
+        h_1, h_2 = h, h_1
+    return x
+
+
+@torch.no_grad()
+def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
+    return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler)
+
+
 @torch.no_grad()
 def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)
 
 
 @torch.no_grad()
 def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=r)
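The added sample_dpmpp_3m_sde keeps a two-step history (denoised_1/denoised_2 and h_1/h_2) and corrects each exponential-integrator step with first and second finite differences weighted by phi_2 and phi_3. Below is a minimal standalone sketch of how those coefficients fall out of the log-sigma schedule, and of why the new "if eta:" guard is harmless: at eta == 0 the injected-noise scale is exactly zero. The sigma values and eta here are made up for illustration, not taken from the commit.

import torch

# Hypothetical noise schedule; real schedules come from the model.
sigmas = torch.tensor([10.0, 5.0, 2.0, 1.0])
eta = 1.0

t = [-s.log() for s in sigmas]                       # t = -log(sigma), as in the sampler
h, h_1, h_2 = t[3] - t[2], t[2] - t[1], t[1] - t[0]  # current step size and the two previous ones
h_eta = h * (eta + 1)

r0, r1 = h_1 / h, h_2 / h                # step-size ratios weighting the denoised history
phi_2 = h_eta.neg().expm1() / h_eta + 1  # coefficient on the 1st finite difference d1
phi_3 = phi_2 / h_eta - 0.5              # coefficient on the 2nd finite difference d2
print(float(phi_2), float(phi_3))

# With eta == 0 the noise term's scale is sqrt(-expm1(0)) == 0, so skipping the
# noise_sampler call behind "if eta:" changes nothing but saves work.
print(float((-2 * h * 0.0).expm1().neg().sqrt()))  # 0.0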
comfy/model_detection.py
@@ -113,6 +113,7 @@ def model_config_from_unet_config(unet_config):
         if model_config.matches(unet_config):
             return model_config(unet_config)
 
+    print("no match", unet_config)
     return None
 
 def model_config_from_unet(state_dict, unet_key_prefix, use_fp16):
comfy/samplers.py
@@ -347,6 +347,17 @@ def ddim_scheduler(model, steps):
     sigs += [0.0]
     return torch.FloatTensor(sigs)
 
+def sgm_scheduler(model, steps):
+    sigs = []
+    timesteps = torch.linspace(model.inner_model.inner_model.num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int)
+    for x in range(len(timesteps)):
+        ts = timesteps[x]
+        if ts > 999:
+            ts = 999
+        sigs.append(model.t_to_sigma(torch.tensor(ts)))
+    sigs += [0.0]
+    return torch.FloatTensor(sigs)
+
 def blank_inpaint_image_like(latent_image):
     blank_image = torch.ones_like(latent_image)
     # these are the values for "zero" in pixel space translated to latent space
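The new sgm_uniform option spaces timesteps evenly from the top of the schedule, drops the terminal point, and maps each timestep through model.t_to_sigma. A standalone sketch of just that spacing; num_timesteps and steps are assumed values, the real count comes from model.inner_model.inner_model:

import torch

num_timesteps = 1000  # assumed; ComfyUI reads this off the wrapped model
steps = 10            # assumed step count

# Same spacing as sgm_scheduler: steps + 1 evenly spaced points from the
# highest timestep down to 0, with the final zero entry dropped.
timesteps = torch.linspace(num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int)
timesteps = timesteps.clamp(max=999)  # mirrors the "if ts > 999" clamp
print(timesteps.tolist())  # [999, 899, 799, 699, 599, 499, 399, 299, 199, 99]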
comfy/samplers.py
@@ -525,10 +536,10 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type):
 
 
 class KSampler:
-    SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
+    SCHEDULERS = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]
     SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
                 "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "ddim", "uni_pc", "uni_pc_bh2"]
+                "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddim", "uni_pc", "uni_pc_bh2"]
 
     def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, model_options={}):
         self.model = model
@@ -570,6 +581,8 @@ class KSampler:
             sigmas = simple_scheduler(self.model_wrap, steps)
         elif self.scheduler == "ddim_uniform":
             sigmas = ddim_scheduler(self.model_wrap, steps)
+        elif self.scheduler == "sgm_uniform":
+            sigmas = sgm_scheduler(self.model_wrap, steps)
         else:
             print("error invalid scheduler", self.scheduler)
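These list edits are what expose the new scheduler and samplers to callers that select by name. A hypothetical sanity check, assuming a ComfyUI checkout is importable:

import comfy.samplers

assert "sgm_uniform" in comfy.samplers.KSampler.SCHEDULERS
assert "dpmpp_3m_sde" in comfy.samplers.KSampler.SAMPLERS
assert "dpmpp_3m_sde_gpu" in comfy.samplers.KSampler.SAMPLERS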
cuda_malloc.py
@@ -36,13 +36,15 @@ def get_gpu_names():
     else:
         return set()
 
-def cuda_malloc_supported():
-    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
-                 "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
-                 "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
-                 "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
-                 "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M"}
+blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+             "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+             "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+             "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
+             "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
+             "GeForce GTX 1650", "GeForce GTX 1630"
+             }
+
+def cuda_malloc_supported():
     try:
         names = get_gpu_names()
     except:
main.py (+14)
@@ -72,6 +72,17 @@ from server import BinaryEventTypes
 from nodes import init_custom_nodes
 import comfy.model_management
 
+def cuda_malloc_warning():
+    device = comfy.model_management.get_torch_device()
+    device_name = comfy.model_management.get_torch_device_name(device)
+    cuda_malloc_warning = False
+    if "cudaMallocAsync" in device_name:
+        for b in cuda_malloc.blacklist:
+            if b in device_name:
+                cuda_malloc_warning = True
+        if cuda_malloc_warning:
+            print("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
+
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
     while True:
@@ -147,6 +158,9 @@ if __name__ == "__main__":
             load_extra_path_config(config_path)
 
     init_custom_nodes()
+
+    cuda_malloc_warning()
+
     server.add_routes()
     hijack_progress(server)
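The startup check is a plain substring scan: warn only when the async allocator is active and the reported device name contains a blacklisted model. A standalone sketch of the same logic; the device_name string is an assumed example, and the exact format returned by get_torch_device_name may differ:

blacklist = {"GeForce GTX 1650", "GeForce GTX 1630"}  # illustrative subset of cuda_malloc.blacklist

device_name = "NVIDIA GeForce GTX 1650 : cudaMallocAsync"  # assumed example value
warn = "cudaMallocAsync" in device_name and any(b in device_name for b in blacklist)
print(warn)  # True, so main.py would suggest --disable-cuda-malloc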