Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-02-10 13:32:36 +08:00

Commit 805e043bce: Merge branch 'master' of https://github.com/BlenderNeko/ComfyUI
@@ -712,7 +712,7 @@ class UniPC:
     def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='time_uniform',
         method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
-        atol=0.0078, rtol=0.05, corrector=False,
+        atol=0.0078, rtol=0.05, corrector=False, callback=None
     ):
         t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
         t_T = self.noise_schedule.T if t_start is None else t_start
@@ -766,6 +766,8 @@ class UniPC:
                     if model_x is None:
                         model_x = self.model_fn(x, vec_t)
                     model_prev_list[-1] = model_x
+                    if callback is not None:
+                        callback(step_index, model_prev_list[-1], x)
         else:
             raise NotImplementedError()
         if denoise_to_zero:
@@ -877,7 +879,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex

     order = min(3, len(timesteps) - 1)
     uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant)
-    x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True)
+    x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback)
     if not to_zero:
         x /= ns.marginal_alpha(timesteps[-1])
     return x
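Together, these hunks give UniPC a per-step progress hook with the signature callback(step_index, denoised, x). A minimal sketch of a consumer, assuming only what the hunks above show (what you do with the denoised latent, e.g. decoding a preview, is hypothetical):

    # Sketch: a progress callback matching callback(step_index, denoised, x).
    # Any side effect works here: logging, a UI update, a preview decode.
    def make_progress_callback(total_steps):
        def callback(step, denoised, x):
            print(f"step {step + 1}/{total_steps}")
        return callback

    # x = uni_pc.sample(img, timesteps=timesteps, method="multistep", order=order,
    #                   callback=make_progress_callback(len(timesteps) - 1))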
@@ -56,7 +56,7 @@ def cleanup_additional_models(models):
     for m in models:
         m.cleanup()

-def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None):
+def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None):
     device = comfy.model_management.get_torch_device()

     if noise_mask is not None:
@@ -76,7 +76,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative

     sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)

-    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas)
+    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback)
     samples = samples.cpu()

     cleanup_additional_models(models)
@@ -462,7 +462,7 @@ class KSampler:
         self.sigmas = sigmas[-(steps + 1):]


-    def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None):
+    def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None):
         if sigmas is None:
             sigmas = self.sigmas
         sigma_min = self.sigma_min
@@ -527,9 +527,9 @@ class KSampler:

         with precision_scope(model_management.get_autocast_device(self.device)):
             if self.sampler == "uni_pc":
-                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask)
+                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback)
             elif self.sampler == "uni_pc_bh2":
-                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, variant='bh2')
+                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2')
             elif self.sampler == "ddim":
                 timesteps = []
                 for s in range(sigmas.shape[0]):
@@ -537,6 +537,11 @@ class KSampler:
                 noise_mask = None
                 if denoise_mask is not None:
                     noise_mask = 1.0 - denoise_mask
+
+                ddim_callback = None
+                if callback is not None:
+                    ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None)
+
                 sampler = DDIMSampler(self.model, device=self.device)
                 sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
                 z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise)
@@ -550,6 +555,7 @@ class KSampler:
                     eta=0.0,
                     x_T=z_enc,
                     x0=latent_image,
+                    img_callback=ddim_callback,
                     denoise_function=sampling_function,
                     extra_args=extra_args,
                     mask=noise_mask,
@@ -563,13 +569,17 @@ class KSampler:

                 noise = noise * sigmas[0]

+                k_callback = None
+                if callback is not None:
+                    k_callback = lambda x: callback(x["i"], x["denoised"], x["x"])
+
                 if latent_image is not None:
                     noise += latent_image
                 if self.sampler == "dpm_fast":
-                    samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args)
+                    samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args, callback=k_callback)
                 elif self.sampler == "dpm_adaptive":
-                    samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args)
+                    samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback)
                 else:
-                    samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args)
+                    samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback)

         return samples.to(torch.float32)
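Each backend reports progress with a different signature: DDIM calls img_callback(pred_x0, i), while the k-diffusion samplers call their callback with a dict carrying "i", "denoised", and "x". The lambdas above normalize both into the single callback(step, denoised, x) the caller passed in. The same pattern in isolation, as a minimal sketch:

    # Sketch of the adapter lambdas used above: one user-facing callback,
    # one shim per backend convention, all funneling into (step, denoised, x).
    user_callback = lambda step, denoised, x: print("step", step)

    ddim_callback = lambda pred_x0, i: user_callback(i, pred_x0, None)
    k_callback = lambda d: user_callback(d["i"], d["denoised"], d["x"])

    ddim_callback(pred_x0=None, i=3)                    # DDIM-style call
    k_callback({"i": 3, "denoised": None, "x": None})   # k-diffusion-style call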
@@ -4,7 +4,10 @@
 from __future__ import annotations

 from collections import OrderedDict
-from typing import Literal
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal

 import torch
 import torch.nn as nn
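typing.Literal only landed in Python 3.8, so the try/except above falls back to the typing_extensions backport on older interpreters without touching any annotations elsewhere in the file.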
execution.py (55 lines changed)
@@ -149,40 +149,44 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item

     is_changed_old = ''
     is_changed = ''
+    to_delete = False
     if hasattr(class_def, 'IS_CHANGED'):
         if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]:
             is_changed_old = old_prompt[unique_id]['is_changed']
         if 'is_changed' not in prompt[unique_id]:
             input_data_all = get_input_data(inputs, class_def, unique_id, outputs)
             if input_data_all is not None:
-                is_changed = class_def.IS_CHANGED(**input_data_all)
-                prompt[unique_id]['is_changed'] = is_changed
+                try:
+                    is_changed = class_def.IS_CHANGED(**input_data_all)
+                    prompt[unique_id]['is_changed'] = is_changed
+                except:
+                    to_delete = True
         else:
             is_changed = prompt[unique_id]['is_changed']

     if unique_id not in outputs:
         return True

-    to_delete = False
-    if is_changed != is_changed_old:
-        to_delete = True
-    elif unique_id not in old_prompt:
-        to_delete = True
-    elif inputs == old_prompt[unique_id]['inputs']:
-        for x in inputs:
-            input_data = inputs[x]
-
-            if isinstance(input_data, list):
-                input_unique_id = input_data[0]
-                output_index = input_data[1]
-                if input_unique_id in outputs:
-                    to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id)
-                else:
-                    to_delete = True
-                if to_delete:
-                    break
-    else:
-        to_delete = True
+    if not to_delete:
+        if is_changed != is_changed_old:
+            to_delete = True
+        elif unique_id not in old_prompt:
+            to_delete = True
+        elif inputs == old_prompt[unique_id]['inputs']:
+            for x in inputs:
+                input_data = inputs[x]
+
+                if isinstance(input_data, list):
+                    input_unique_id = input_data[0]
+                    output_index = input_data[1]
+                    if input_unique_id in outputs:
+                        to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id)
+                    else:
+                        to_delete = True
+                    if to_delete:
+                        break
+        else:
+            to_delete = True

     if to_delete:
         d = outputs.pop(unique_id)
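The executor now computes IS_CHANGED once per execution, caches the result in prompt[unique_id]['is_changed'], and treats an exception from IS_CHANGED as "node changed" (to_delete = True), so a failing check invalidates the cached output instead of crashing the prompt. A hedged sketch of a node relying on this contract; the node and its path input are hypothetical:

    import os

    class LoadTextFile:
        # Hypothetical custom node: returning the file's mtime makes the
        # executor re-run the node whenever the file changes. A missing file
        # makes os.path.getmtime raise, which the executor above now treats
        # as "changed" rather than aborting the whole prompt.
        @classmethod
        def IS_CHANGED(cls, path):
            return os.path.getmtime(path)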
@@ -204,6 +208,15 @@ class PromptExecutor:
         self.server.client_id = None

         with torch.inference_mode():
+            #delete cached outputs if nodes don't exist for them
+            to_delete = []
+            for o in self.outputs:
+                if o not in prompt:
+                    to_delete += [o]
+            for o in to_delete:
+                d = self.outputs.pop(o)
+                del d
+
             for x in prompt:
                 recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x)

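The cleanup runs in two passes (collect the stale ids, then pop them) because removing entries from a dict while iterating over it raises RuntimeError in Python 3. The same pattern in isolation:

    # Two-pass deletion, as above: snapshot the stale keys first, then pop.
    outputs = {"1": "a", "2": "b", "3": "c"}
    prompt = {"1": {}, "3": {}}

    stale = [o for o in outputs if o not in prompt]
    for o in stale:
        outputs.pop(o)

    assert set(outputs) == {"1", "3"}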
@@ -47,7 +47,7 @@
     " !git pull\n",
     "\n",
     "!echo -= Install dependencies =-\n",
-    "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118"
+    "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117"
    ]
   },
   {
@@ -9953,11 +9953,11 @@ LGraphNode.prototype.executeAction = function(action)
                     }
                     break;
                 case "slider":
-                    var range = w.options.max - w.options.min;
+                    var old_value = w.value;
                     var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1);
                     if(w.options.read_only) break;
                     w.value = w.options.min + (w.options.max - w.options.min) * nvalue;
-                    if (w.callback) {
+                    if (old_value != w.value) {
                         setTimeout(function() {
                             inner_value_change(w, w.value);
                         }, 20);
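The slider now captures old_value before applying the drag position and only schedules inner_value_change when the value actually moved, rather than firing whenever a callback happened to be attached; the unused range variable is dropped in the process.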
@@ -10044,7 +10044,7 @@ LGraphNode.prototype.executeAction = function(action)
                 if (event.click_time < 200 && delta == 0) {
                     this.prompt("Value",w.value,function(v) {
                         // check if v is a valid equation or a number
-                        if (/^[0-9+\-*/()\s]+$/.test(v)) {
+                        if (/^[0-9+\-*/()\s]+|\d+\.\d+$/.test(v)) {
                             try {//solve the equation if possible
                                 v = eval(v);
                             } catch (e) { }
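The widened pattern lets decimal input such as 1.5 through to eval. Note that in an alternation the ^ anchors only the first branch and the $ only the second, so this is a looser gate than a fully anchored expression; the eval is still wrapped in try/catch.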
@@ -136,9 +136,11 @@ function addMultilineWidget(node, name, opts, app) {
             left: `${t.a * margin + t.e}px`,
             top: `${t.d * (y + widgetHeight - margin - 3) + t.f}px`,
             width: `${(widgetWidth - margin * 2 - 3) * t.a}px`,
+            background: (!node.color)?'':node.color,
             height: `${(this.parent.inputHeight - margin * 2 - 4) * t.d}px`,
             position: "absolute",
-            zIndex: 1,
+            color: (!node.color)?'':'white',
+            zIndex: app.graph._nodes.indexOf(node),
             fontSize: `${t.d * 10.0}px`,
         });
         this.inputEl.hidden = !visible;
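The multiline textarea now inherits the owning node's color as its background (switching to white text when a color is set) and takes its zIndex from the node's position in the graph, so overlapping nodes layer their text widgets in the same order as the nodes themselves.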