Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-11 06:40:48 +08:00
Merge with upstream. Remove TLS flags, because a third-party proxy will do this better.
Commit 330ecb10b2
@@ -103,6 +103,14 @@ On macOS, install Python 3.10, 3.11 or 3.12 using `brew`, which you can download
 comfyui --listen
 ```
+
+##### Running with TLS
+
+To serve with `https://` on Windows easily, use [Caddy](https://github.com/caddyserver/caddy/releases/download/v2.7.6/caddy_2.7.6_windows_amd64.zip). Extract `caddy.exe` to a directory, then run it:
+
+```shell
+caddy reverse-proxy --from localhost:443 --to localhost:8188 --tls self_signed
+```
 
 ### Known Models
 
 These models will be automatically downloaded when you queue prompts with workflows that use them:

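The one-line `reverse-proxy` command in the added README section is ephemeral. For a longer-lived setup, the same proxy can be sketched as a `Caddyfile` using Caddy's documented `reverse_proxy` and `tls internal` directives (ports here are assumptions matching the defaults above; adjust them to your `--listen` settings):

```
# Caddyfile (sketch): HTTPS on 443, forwarding to ComfyUI on 8188
localhost:443 {
	# terminate TLS with a locally trusted self-signed certificate
	tls internal
	# pass decrypted traffic through to the ComfyUI server
	reverse_proxy localhost:8188
}
```

Run `caddy run` from the directory containing the `Caddyfile`.
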
@@ -527,6 +527,9 @@ def sample_dpmpp_2s_ancestral(model, x, sigmas, extra_args=None, callback=None,
 @torch.no_grad()
 def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
     """DPM-Solver++ (stochastic)."""
+    if len(sigmas) <= 1:
+        return x
+
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     seed = extra_args.get("seed", None)
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler

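The `if len(sigmas) <= 1: return x` guard added throughout this commit makes a one-entry sigma schedule a no-op: with no step to take, the sampler hands the latent back instead of indexing past the end of the schedule inside its loop. A minimal standalone sketch of the contract (hypothetical tensors, not the repository's test code):

```python
import torch

x = torch.randn(1, 4, 64, 64)    # input latent
sigmas = torch.tensor([0.0])     # a one-entry schedule encodes zero sampling steps

# Same check as the patched samplers: nothing to do, return x as-is.
if len(sigmas) <= 1:
    out = x
else:
    out = None  # the real samplers would run their step loop here

assert out is x
```
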
@@ -595,6 +598,8 @@ def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=No
 @torch.no_grad()
 def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
     """DPM-Solver++(2M) SDE."""
+    if len(sigmas) <= 1:
+        return x
 
     if solver_type not in {'heun', 'midpoint'}:
         raise ValueError('solver_type must be \'heun\' or \'midpoint\'')

@@ -642,6 +647,9 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
 def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
     """DPM-Solver++(3M) SDE."""
 
+    if len(sigmas) <= 1:
+        return x
+
     seed = extra_args.get("seed", None)
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler

@@ -690,18 +698,27 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
 
 @torch.no_grad()
 def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    if len(sigmas) <= 1:
+        return x
+
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler)
 
 @torch.no_grad()
 def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
+    if len(sigmas) <= 1:
+        return x
+
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)
 
 @torch.no_grad()
 def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
+    if len(sigmas) <= 1:
+        return x
+
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=r)

@@ -296,8 +296,8 @@ def attention_split(q, k, v, heads, mask=None):
 BROKEN_XFORMERS = False
 try:
     x_vers = xformers.__version__
-    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
-    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
+    # XFormers bug confirmed on all versions from 0.0.21 to 0.0.26 (q with bs bigger than 65535 gives CUDA error)
+    BROKEN_XFORMERS = x_vers.startswith("0.0.2") and not x_vers.startswith("0.0.20")
 except:
     pass

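The replaced check widens the xFormers deny-list: instead of enumerating 0.0.21 through 0.0.23, it flags every 0.0.2x release except 0.0.20 (the comment pins the confirmed range at 0.0.21 to 0.0.26, and the prefix test also covers later 0.0.2x builds). A quick standalone sketch of what the new expression matches, no xformers import needed:

```python
def is_broken(x_vers: str) -> bool:
    # Same expression as the updated check: any "0.0.2"-prefixed
    # version that is not a "0.0.20" build counts as broken.
    return x_vers.startswith("0.0.2") and not x_vers.startswith("0.0.20")

for v in ("0.0.20", "0.0.21", "0.0.23", "0.0.26", "0.0.27"):
    print(v, is_broken(v))  # False, True, True, True, True
```
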
@@ -23,7 +23,7 @@ _session = Session()
 
 def get_filename_list_with_downloadable(folder_name: str, known_files: List[Any]) -> List[str]:
     existing = frozenset(folder_paths.get_filename_list(folder_name))
-    downloadable = frozenset() if args.disable_known_models else frozenset(str(f) for f in known_files)
+    downloadable = frozenset() if args.disable_known_models else frozenset(str(f) for f in known_files if not isinstance(f, HuggingFile) or f.show_in_ui)
     return sorted(list(existing | downloadable))

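With this change, `HuggingFile` entries flagged `show_in_ui=False` no longer appear in the downloadable file lists, while other entry types (e.g. `CivitFile`) are unaffected. A minimal sketch of the filter logic with a hypothetical stand-in class (the real `HuggingFile` lives in this repository, not here):

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class HuggingFile:  # hypothetical stand-in for the repository's class
    filename: str
    show_in_ui: bool = True

    def __str__(self):
        return self.filename

def downloadable_names(known_files, disable_known_models=False):
    # Mirrors the updated expression: drop HuggingFile entries that
    # opt out of the UI; keep everything else.
    if disable_known_models:
        return frozenset()
    return frozenset(str(f) for f in known_files
                     if not isinstance(f, HuggingFile) or f.show_in_ui)

files = [HuggingFile("v1-5-pruned-emaonly.safetensors"),
         HuggingFile("v1-5-pruned.ckpt", show_in_ui=False)]
print(sorted(downloadable_names(files)))  # ['v1-5-pruned-emaonly.safetensors']
```
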
@@ -150,7 +150,7 @@ KNOWN_CHECKPOINTS = [
     HuggingFile("stabilityai/sdxl-turbo", "sd_xl_turbo_1.0.safetensors", show_in_ui=False),
     HuggingFile("stabilityai/stable-cascade", "comfyui_checkpoints/stable_cascade_stage_b.safetensors"),
     HuggingFile("stabilityai/stable-cascade", "comfyui_checkpoints/stable_cascade_stage_c.safetensors"),
-    HuggingFile("stabilityai/stable-cascade", "comfyui_checkpoints/stage_a.safetensors"),
+    HuggingFile("stabilityai/stable-cascade", "comfyui_checkpoints/stage_a.safetensors", show_in_ui=False),
     HuggingFile("runwayml/stable-diffusion-v1-5", "v1-5-pruned-emaonly.safetensors"),
     HuggingFile("runwayml/stable-diffusion-v1-5", "v1-5-pruned-emaonly.ckpt", show_in_ui=False),
     HuggingFile("runwayml/stable-diffusion-v1-5", "v1-5-pruned.ckpt", show_in_ui=False),

@@ -161,6 +161,8 @@ KNOWN_CHECKPOINTS = [
     HuggingFile("jomcs/NeverEnding_Dream-Feb19-2023", "CarDos Anime/cardosAnime_v10.safetensors", show_in_ui=False),
     # from https://github.com/comfyanonymous/ComfyUI_examples/blob/master/area_composition/README.md
     HuggingFile("ckpt/anything-v3.0", "Anything-V3.0.ckpt", show_in_ui=False),
+    HuggingFile("stabilityai/cosxl", "cosxl.safetensors"),
+    HuggingFile("stabilityai/cosxl", "cosxl_edit.safetensors"),
     # latest, popular civitai models
     CivitFile(133005, 357609, filename="juggernautXL_v9Rundiffusionphoto2.safetensors"),
     CivitFile(112902, 351306, filename="dreamshaperXL_v21TurboDPMSDE.safetensors"),

@@ -182,7 +184,7 @@ KNOWN_UPSCALERS = [
 ]
 
 KNOWN_GLIGEN_MODELS = [
-    HuggingFile("comfyanonymous/GLIGEN_pruned_safetensors", "gligen_sd14_textbox_pruned.safetensors"),
+    HuggingFile("comfyanonymous/GLIGEN_pruned_safetensors", "gligen_sd14_textbox_pruned.safetensors", show_in_ui=False),
     HuggingFile("comfyanonymous/GLIGEN_pruned_safetensors", "gligen_sd14_textbox_pruned_fp16.safetensors"),
 ]

@@ -91,7 +91,7 @@ def get_torch_device():
         return torch.device("cpu")
     else:
         if is_intel_xpu():
-            return torch.device("xpu")
+            return torch.device("xpu", torch.xpu.current_device())
         else:
             return torch.device(torch.cuda.current_device())

@@ -312,7 +312,7 @@ class LoadedModel:
             raise e
 
         if is_intel_xpu() and not args.disable_ipex_optimize:
-            self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
+            self.real_model = ipex.optimize(self.real_model.eval(), graph_mode=True, concat_linear=True)
 
         self.weights_loaded = True
         return self.real_model

@@ -564,8 +564,6 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
-        if is_intel_xpu():
-            return torch.device("cpu")
         if should_use_fp16(prioritize_performance=False):
             return get_torch_device()
         else:

@@ -542,6 +542,9 @@ class KSAMPLER(Sampler):
 def ksampler(sampler_name, extra_options={}, inpaint_options={}):
     if sampler_name == "dpm_fast":
         def dpm_fast_function(model, noise, sigmas, extra_args, callback, disable):
+            if len(sigmas) <= 1:
+                return noise
+
             sigma_min = sigmas[-1]
             if sigma_min == 0:
                 sigma_min = sigmas[-2]

@@ -550,6 +553,9 @@ def ksampler(sampler_name, extra_options={}, inpaint_options={}):
         sampler_function = dpm_fast_function
     elif sampler_name == "dpm_adaptive":
         def dpm_adaptive_function(model, noise, sigmas, extra_args, callback, disable, **extra_options):
+            if len(sigmas) <= 1:
+                return noise
+
             sigma_min = sigmas[-1]
             if sigma_min == 0:
                 sigma_min = sigmas[-2]

@@ -229,7 +229,7 @@ function createIntWidget(node, inputName, inputData, app, isSeedInput) {
 			val,
 			function (v) {
 				const s = this.options.step / 10;
-				this.value = Math.round(v / s) * s;
+				this.value = Math.round((v - this.options.min) / s) * s + this.options.min;
 			},
 			config
 		),

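The widget change rounds relative to `options.min`, so a dragged value snaps onto the grid `min + k * s` instead of onto plain multiples of `s`; with a minimum that is not itself a multiple of the step, the old formula produced values off the legal grid. A small sketch of the two formulas (hypothetical numbers; written in Python for brevity, the actual widget code is JavaScript):

```python
def old_snap(v, s):
    return round(v / s) * s

def new_snap(v, s, vmin):
    return round((v - vmin) / s) * s + vmin

# min=1, step-derived s=10: dragging to 14 used to snap to 10,
# off the min-aligned grid; it now snaps to 11 = 1 + 1 * 10.
print(old_snap(14, 10))      # 10
print(new_snap(14, 10, 1))   # 11
```
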
@@ -109,8 +109,7 @@ class SDTurboScheduler:
     def get_sigmas(self, model, steps, denoise):
         start_step = 10 - int(10 * denoise)
         timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
-        model_management.load_models_gpu([model])
-        sigmas = model.model.model_sampling.sigma(timesteps)
+        sigmas = model.get_model_object("model_sampling").sigma(timesteps)
         sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
         return (sigmas, )

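For reference, the unchanged timestep arithmetic selects `steps` entries from the descending Turbo schedule 999, 899, ..., 99, starting at an offset controlled by `denoise`; the behavioral change in this hunk is only where the sigmas come from (`get_model_object("model_sampling")` rather than reaching into `model.model`, without forcing a GPU load first). A standalone sketch of the timestep selection, no model required:

```python
import torch

steps, denoise = 4, 1.0
start_step = 10 - int(10 * denoise)  # 0 at full denoise
timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
print(timesteps)  # tensor([999, 899, 799, 699])
```
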
comfy_extras/nodes_advanced_samplers.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+import comfy.samplers
+import comfy.utils
+import torch
+import numpy as np
+from tqdm.auto import trange, tqdm
+import math
+
+
+@torch.no_grad()
+def sample_lcm_upscale(model, x, sigmas, extra_args=None, callback=None, disable=None, total_upscale=2.0, upscale_method="bislerp", upscale_steps=None):
+    extra_args = {} if extra_args is None else extra_args
+
+    if upscale_steps is None:
+        upscale_steps = max(len(sigmas) // 2 + 1, 2)
+    else:
+        upscale_steps += 1
+        upscale_steps = min(upscale_steps, len(sigmas) + 1)
+
+    upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]
+
+    orig_shape = x.size()
+    s_in = x.new_ones([x.shape[0]])
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+        x = denoised
+        if i < len(upscales):
+            x = comfy.utils.common_upscale(x, round(orig_shape[-1] * upscales[i]), round(orig_shape[-2] * upscales[i]), upscale_method, "disabled")
+
+        if sigmas[i + 1] > 0:
+            x += sigmas[i + 1] * torch.randn_like(x)
+    return x
+
+
+class SamplerLCMUpscale:
+    upscale_methods = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"scale_ratio": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 20.0, "step": 0.01}),
+                     "scale_steps": ("INT", {"default": -1, "min": -1, "max": 1000, "step": 1}),
+                     "upscale_method": (s.upscale_methods,),
+                     }
+                }
+    RETURN_TYPES = ("SAMPLER",)
+    CATEGORY = "sampling/custom_sampling/samplers"
+
+    FUNCTION = "get_sampler"
+
+    def get_sampler(self, scale_ratio, scale_steps, upscale_method):
+        if scale_steps < 0:
+            scale_steps = None
+        sampler = comfy.samplers.KSAMPLER(sample_lcm_upscale, extra_options={"total_upscale": scale_ratio, "upscale_steps": scale_steps, "upscale_method": upscale_method})
+        return (sampler, )
+
+NODE_CLASS_MAPPINGS = {
+    "SamplerLCMUpscale": SamplerLCMUpscale,
+}
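The scale schedule in `sample_lcm_upscale` interpolates linearly from 1.0 up to `total_upscale` and drops the leading 1.0, so the latent grows once after each of the first `upscale_steps - 1` denoising steps until the target size is reached. A standalone sketch of the schedule values (hypothetical inputs):

```python
import numpy as np

total_upscale, upscale_steps = 2.0, 5
upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]
print(upscales)  # [1.25 1.5  1.75 2.  ]
```
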