Merge upstream

doctorpangloss 2024-04-24 22:41:43 -07:00
commit f965fb2bc0
21 changed files with 388 additions and 58 deletions


@@ -209,7 +209,7 @@ def recursive_execute(server: ExecutorToClientProgress,
for node_id, node_outputs in outputs.items():
output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs]
logging.error("!!! Exception during processing !!!")
logging.error(f"!!! Exception during processing!!! {ex}")
logging.error(traceback.format_exc())
error_details = {


@@ -357,7 +357,14 @@ def unet_config_from_diffusers_unet(state_dict, dtype=None):
'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1],
'use_temporal_attention': False, 'use_temporal_resblock': False, 'disable_self_attentions': [True, False, False]}
- supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B, SD09_XS, SDXL_diffusers_ip2p]
+ SD_XS = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
+ 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [1, 1, 1],
+ 'transformer_depth': [0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -2, 'use_linear_in_transformer': False,
+ 'context_dim': 768, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 1, 1, 1, 1],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
+ supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B, SD09_XS, SD_XS, SDXL_diffusers_ip2p]
for unet_config in supported_models:
matches = True
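
For orientation, the loop that begins above does an exact field-by-field comparison against each candidate config; a minimal sketch of the idea (simplified — the real function first builds a match dict of fields probed from the state_dict):

def find_matching_config(match, candidates):
    # return the first candidate whose every listed field agrees with the probe
    for candidate in candidates:
        if all(candidate.get(k) == v for k, v in match.items()):
            return candidate
    return None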


@@ -629,7 +629,8 @@ def supports_dtype(device, dtype): #TODO
def device_supports_non_blocking(device):
if is_device_mps(device):
return False #pytorch bug? mps doesn't support non blocking
- return True
+ return False
+ # return True #TODO: figure out why this causes issues
def cast_to_device(tensor, device, dtype, copy=False):
with model_management_lock:
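
A sketch of how a flag like this is typically consumed when moving tensors (copy_to is a hypothetical helper, not code from this file):

def copy_to(tensor, device):
    # non_blocking=True only pays off for pinned-CPU -> CUDA transfers;
    # returning False above forces synchronous copies while the issue is debugged
    return tensor.to(device, non_blocking=device_supports_non_blocking(device))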


@@ -18,6 +18,26 @@ def apply_weight_decompose(dora_scale, weight):
return weight * (dora_scale / weight_norm)
def set_model_options_patch_replace(model_options, patch, name, block_name, number, transformer_index=None):
to = model_options["transformer_options"].copy()
if "patches_replace" not in to:
to["patches_replace"] = {}
else:
to["patches_replace"] = to["patches_replace"].copy()
if name not in to["patches_replace"]:
to["patches_replace"][name] = {}
else:
to["patches_replace"][name] = to["patches_replace"][name].copy()
if transformer_index is not None:
block = (block_name, number, transformer_index)
else:
block = (block_name, number)
to["patches_replace"][name][block] = patch
model_options["transformer_options"] = to
return model_options
class ModelPatcher:
def __init__(self, model, load_device, offload_device, size=0, current_device=None, weight_inplace_update=False):
@@ -109,16 +129,7 @@ class ModelPatcher:
to["patches"][name] = to["patches"].get(name, []) + [patch]
def set_model_patch_replace(self, patch, name, block_name, number, transformer_index=None):
to = self.model_options["transformer_options"]
if "patches_replace" not in to:
to["patches_replace"] = {}
if name not in to["patches_replace"]:
to["patches_replace"][name] = {}
if transformer_index is not None:
block = (block_name, number, transformer_index)
else:
block = (block_name, number)
to["patches_replace"][name][block] = patch
self.model_options = set_model_options_patch_replace(self.model_options, patch, name, block_name, number, transformer_index=transformer_index)
def set_model_attn1_patch(self, patch):
self.set_model_patch(patch, "attn1_patch")
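
Why the new helper copies each nesting level before writing, in miniature (an illustration, not repository code): cloned patchers share the same model_options dicts, so the old in-place version could leak patches between clones.

a = {"transformer_options": {"patches_replace": {"attn1": {}}}}
b = {"transformer_options": a["transformer_options"]}  # roughly what a clone shares
a["transformer_options"]["patches_replace"]["attn1"][("middle", 0)] = "patch"
assert ("middle", 0) in b["transformer_options"]["patches_replace"]["attn1"]  # leaked!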


@@ -571,8 +571,8 @@ class LoraLoader:
return {"required": { "model": ("MODEL",),
"clip": ("CLIP", ),
"lora_name": (get_filename_list_with_downloadable("loras", KNOWN_LORAS),),
"strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL", "CLIP")
FUNCTION = "load_lora"
@@ -605,7 +605,7 @@ class LoraLoaderModelOnly(LoraLoader):
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "load_lora_model_only"
@@ -955,7 +955,7 @@ class GLIGENTextBoxApply:
return {"required": {"conditioning_to": ("CONDITIONING", ),
"clip": ("CLIP", ),
"gligen_textbox_model": ("GLIGEN", ),
"text": ("STRING", {"multiline": True}),
"text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),


@@ -210,16 +210,26 @@ class VAE:
self.first_stage_model = StageC_coder()
self.downscale_ratio = 32
self.latent_channels = 16
- else:
+ elif "decoder.conv_in.weight" in sd:
#default SD1.x/SD2.x VAE parameters
ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
- if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE
+ if 'encoder.down.2.downsample.conv.weight' not in sd and 'decoder.up.3.upsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE
ddconfig['ch_mult'] = [1, 2, 4]
self.downscale_ratio = 4
self.upscale_ratio = 4
- self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1]
+ if 'quant_conv.weight' in sd:
+ self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
+ else:
+ self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"},
+ encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': ddconfig},
+ decoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Decoder", 'params': ddconfig})
+ else:
+ logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
+ self.first_stage_model = None
+ return
else:
self.first_stage_model = AutoencoderKL(**(config['params']))
self.first_stage_model = self.first_stage_model.eval()
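
The detection logic above reduces to probing for tell-tale weight names; as a sketch (illustrative only):

def sniff_vae(sd):
    if "decoder.conv_in.weight" not in sd:
        return None                    # no VAE weights in this checkpoint
    if "quant_conv.weight" in sd:
        return "AutoencoderKL"         # classic SD1.x/SD2.x layout
    return "AutoencodingEngine"        # newer layout without quant_conv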


@@ -47,7 +47,8 @@ class BASE:
return self.unet_config["in_channels"] > 4
def __init__(self, unet_config):
- self.unet_config = unet_config
+ self.unet_config = unet_config.copy()
+ self.sampling_settings = self.sampling_settings.copy()
self.latent_format = self.latent_format()
for x in self.unet_extra_config:
self.unet_config[x] = self.unet_extra_config[x]
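
The aliasing bug those two .copy() calls fix, in miniature (illustration): without the copy, every instance mutates the caller's dict (and, for sampling_settings, a shared class-level dict) when unet_extra_config is applied above.

class Cfg:
    def __init__(self, unet_config):
        self.unet_config = unet_config          # old behavior: aliases caller's dict

shared = {"in_channels": 4}
a, b = Cfg(shared), Cfg(shared)
a.unet_config["in_channels"] = 9
assert b.unet_config["in_channels"] == 9        # b changed too; .copy() prevents this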


@@ -20,6 +20,10 @@ const colorPalettes = {
"MODEL": "#B39DDB", // light lavender-purple
"STYLE_MODEL": "#C2FFAE", // light green-yellow
"VAE": "#FF6E6E", // bright red
"NOISE": "#B0B0B0", // gray
"GUIDER": "#66FFFF", // cyan
"SAMPLER": "#ECB4B4", // very soft red
"SIGMAS": "#CDFFCD", // soft lime green
"TAESD": "#DCC274", // cheesecake
},
"litegraph_base": {


@@ -263,8 +263,18 @@ export function mergeIfValid(output, config2, forceUpdate, recreateWidget, confi
return { customConfig };
}
let useConversionSubmenusSetting;
app.registerExtension({
name: "Comfy.WidgetInputs",
init() {
useConversionSubmenusSetting = app.ui.settings.addSetting({
id: "Comfy.NodeInputConversionSubmenus",
name: "Node widget/input conversion sub-menus",
tooltip: "In the node context menu, place the entries that convert between input/widget in sub-menus.",
type: "boolean",
defaultValue: true,
});
},
async beforeRegisterNodeDef(nodeType, nodeData, app) {
// Add menu options to convert to/from widgets
const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions;
@@ -299,12 +309,31 @@ app.registerExtension({
}
}
}
//Convert.. main menu
if (toInput.length) {
- options.push(...toInput, null);
+ if (useConversionSubmenusSetting.value) {
+ options.push({
+ content: "Convert Widget to Input",
+ submenu: {
+ options: toInput,
+ },
+ });
+ } else {
+ options.push(...toInput, null);
+ }
}
if (toWidget.length) {
- options.push(...toWidget, null);
+ if (useConversionSubmenusSetting.value) {
+ options.push({
+ content: "Convert Input to Widget",
+ submenu: {
+ options: toWidget,
+ },
+ });
+ } else {
+ options.push(...toWidget, null);
+ }
}
}


@@ -90,12 +90,15 @@ function dragElement(dragEl, settings) {
}).observe(dragEl);
function ensureInBounds() {
if (dragEl.classList.contains("comfy-menu-manual-pos")) {
try {
newPosX = Math.min(document.body.clientWidth - dragEl.clientWidth, Math.max(0, dragEl.offsetLeft));
newPosY = Math.min(document.body.clientHeight - dragEl.clientHeight, Math.max(0, dragEl.offsetTop));
positionElement();
}
catch(exception){
// ignore positioning errors rather than break the menu
}
}
function positionElement() {


@@ -307,7 +307,9 @@ export const ComfyWidgets = {
return { widget: node.addWidget(widgetType, inputName, val,
function (v) {
if (config.round) {
- this.value = Math.round(v/config.round)*config.round;
+ this.value = Math.round((v + Number.EPSILON)/config.round)*config.round;
+ if (this.value > config.max) this.value = config.max;
+ if (this.value < config.min) this.value = config.min;
} else {
this.value = v;
}
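
The Number.EPSILON nudge addresses values that land just below the rounding threshold in binary floating point. The same effect reproduced in Python (math.floor(x + 0.5) mimics JS Math.round, which rounds halves up):

import math, sys

v, step = 1.005, 0.01
js_round = lambda x: math.floor(x + 0.5)
print(js_round(v / step) * step)                              # 1.0, since v/step == 100.49999...
print(js_round((v + sys.float_info.epsilon) / step) * step)   # 1.01, as the widget intends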


@@ -0,0 +1,45 @@
#from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
import numpy as np
import torch
def loglinear_interp(t_steps, num_steps):
"""
Performs log-linear interpolation of a given array of decreasing numbers.
"""
xs = np.linspace(0, 1, len(t_steps))
ys = np.log(t_steps[::-1])
new_xs = np.linspace(0, 1, num_steps)
new_ys = np.interp(new_xs, xs, ys)
interped_ys = np.exp(new_ys)[::-1].copy()
return interped_ys
NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
"SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
"SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}
class AlignYourStepsScheduler:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"model_type": (["SD1", "SDXL", "SVD"], ),
"steps": ("INT", {"default": 10, "min": 10, "max": 10000}),
}
}
RETURN_TYPES = ("SIGMAS",)
CATEGORY = "sampling/custom_sampling/schedulers"
FUNCTION = "get_sigmas"
def get_sigmas(self, model_type, steps):
sigmas = NOISE_LEVELS[model_type][:]
if (steps + 1) != len(sigmas):
sigmas = loglinear_interp(sigmas, steps + 1)
sigmas[-1] = 0
return (torch.FloatTensor(sigmas), )
NODE_CLASS_MAPPINGS = {
"AlignYourStepsScheduler": AlignYourStepsScheduler,
}
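
Example usage of the node above: each table natively encodes 10 steps (11 sigmas), so that count is returned as-is and any other count goes through loglinear_interp.

sched = AlignYourStepsScheduler()
(sigmas,) = sched.get_sigmas("SD1", 10)    # the SD1 table unchanged, last sigma forced to 0
(sigmas20,) = sched.get_sigmas("SD1", 20)  # 21 log-linearly interpolated sigmas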


@@ -1,4 +1,3 @@
#From https://github.com/kornia/kornia
import math
import torch


@@ -41,8 +41,8 @@ class KarrasScheduler:
def INPUT_TYPES(s):
return {"required":
{"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"rho": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
}
}
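
For reference, these widgets feed the standard Karras et al. (2022) schedule; a standalone sketch of the formula (not this file's implementation):

import torch

def karras_sigmas(steps, sigma_min, sigma_max, rho=7.0):
    ramp = torch.linspace(0, 1, steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate in sigma^(1/rho) space, then raise back: spacing is dense near sigma_min
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
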
@@ -60,8 +60,8 @@ class ExponentialScheduler:
def INPUT_TYPES(s):
return {"required":
{"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
}
}
RETURN_TYPES = ("SIGMAS",)
@@ -78,8 +78,8 @@ class PolyexponentialScheduler:
def INPUT_TYPES(s):
return {"required":
{"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"rho": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
}
}
@@ -119,8 +119,8 @@ class VPScheduler:
def INPUT_TYPES(s):
return {"required":
{"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"beta_d": ("FLOAT", {"default": 19.9, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), #TODO: fix default values
"beta_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
"beta_d": ("FLOAT", {"default": 19.9, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), #TODO: fix default values
"beta_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}),
"eps_s": ("FLOAT", {"default": 0.001, "min": 0.0, "max": 1.0, "step":0.0001, "round": False}),
}
}
@@ -562,6 +562,52 @@ class SamplerCustomAdvanced:
out_denoised = out
return (out, out_denoised)
class AddNoise:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"model": ("MODEL",),
"noise": ("NOISE", ),
"sigmas": ("SIGMAS", ),
"latent_image": ("LATENT", ),
}
}
RETURN_TYPES = ("LATENT",)
FUNCTION = "add_noise"
CATEGORY = "_for_testing/custom_sampling/noise"
def add_noise(self, model, noise, sigmas, latent_image):
if len(sigmas) == 0:
return (latent_image,)
latent = latent_image
latent_image = latent["samples"]
noisy = noise.generate_noise(latent)
model_sampling = model.get_model_object("model_sampling")
process_latent_out = model.get_model_object("process_latent_out")
process_latent_in = model.get_model_object("process_latent_in")
if len(sigmas) > 1:
scale = torch.abs(sigmas[0] - sigmas[-1])
else:
scale = sigmas[0]
if torch.count_nonzero(latent_image) > 0: #Don't shift the empty latent image.
latent_image = process_latent_in(latent_image)
noisy = model_sampling.noise_scaling(scale, noisy, latent_image)
noisy = process_latent_out(noisy)
noisy = torch.nan_to_num(noisy, nan=0.0, posinf=0.0, neginf=0.0)
out = latent.copy()
out["samples"] = noisy
return (out,)
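
Note the scale chosen above is the sigma span of the whole schedule, not a single sigma; for example:

sigmas = torch.tensor([14.6146, 0.0292])
scale = torch.abs(sigmas[0] - sigmas[-1])   # 14.5854, the range actually denoised
# a one-element sigmas tensor falls back to sigmas[0] directly
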
NODE_CLASS_MAPPINGS = {
"SamplerCustom": SamplerCustom,
"BasicScheduler": BasicScheduler,
@@ -585,5 +631,6 @@ NODE_CLASS_MAPPINGS = {
"BasicGuider": BasicGuider,
"RandomNoise": RandomNoise,
"DisableNoise": DisableNoise,
"AddNoise": AddNoise,
"SamplerCustomAdvanced": SamplerCustomAdvanced,
}


@@ -0,0 +1,60 @@
from . import nodes_model_merging
class ModelMergeSD1(nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["time_embed."] = argument
arg_dict["label_emb."] = argument
for i in range(12):
arg_dict["input_blocks.{}.".format(i)] = argument
for i in range(3):
arg_dict["middle_block.{}.".format(i)] = argument
for i in range(12):
arg_dict["output_blocks.{}.".format(i)] = argument
arg_dict["out."] = argument
return {"required": arg_dict}
class ModelMergeSDXL(nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["time_embed."] = argument
arg_dict["label_emb."] = argument
for i in range(9):
arg_dict["input_blocks.{}".format(i)] = argument
for i in range(3):
arg_dict["middle_block.{}".format(i)] = argument
for i in range(9):
arg_dict["output_blocks.{}".format(i)] = argument
arg_dict["out."] = argument
return {"required": arg_dict}
NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
"ModelMergeSDXL": ModelMergeSDXL,
}
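
A sketch of how a per-prefix ratio such as "input_blocks.3." is typically applied during the merge (illustrative; the real application lives in nodes_model_merging, and the ratio semantics here are an assumption based on the 1.0 defaults leaving model1 unchanged):

def merge_weight(w1, w2, ratio):
    # ratio 1.0 keeps model1's weight, 0.0 takes model2's
    return w1 * ratio + w2 * (1.0 - ratio)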


@@ -0,0 +1,56 @@
#Modified/simplified version of the node from: https://github.com/pamparamm/sd-perturbed-attention
#If you want the version with more options, see the repo above.
#This simplified version is more basic but less likely to break with ComfyUI updates.
import comfy.model_patcher
import comfy.samplers
class PerturbedAttentionGuidance:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
}
}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
CATEGORY = "_for_testing"
def patch(self, model, scale):
unet_block = "middle"
unet_block_id = 0
m = model.clone()
def perturbed_attention(q, k, v, extra_options, mask=None):
return v
def post_cfg_function(args):
model = args["model"]
cond_pred = args["cond_denoised"]
cond = args["cond"]
cfg_result = args["denoised"]
sigma = args["sigma"]
model_options = args["model_options"].copy()
x = args["input"]
if scale == 0:
return cfg_result
# Replace Self-attention with PAG
model_options = comfy.model_patcher.set_model_options_patch_replace(model_options, perturbed_attention, "attn1", unet_block, unet_block_id)
(pag,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)
return cfg_result + (cond_pred - pag) * scale
m.set_model_sampler_post_cfg_function(post_cfg_function)
return (m,)
NODE_CLASS_MAPPINGS = {
"PerturbedAttentionGuidance": PerturbedAttentionGuidance,
}
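
The post-CFG function above is the usual PAG combination: a second conditional pass runs with self-attention replaced by the identity map (perturbed_attention returns v untouched), and the result is folded in as

    \hat{x} = \hat{x}_{\text{cfg}} + s\,(\hat{x}_{\text{cond}} - \hat{x}_{\text{PAG}})

with s the scale widget, so scale == 0 reduces to plain CFG, which is exactly the early return.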


@@ -1,10 +1,18 @@
import torch
from comfy import sample
from comfy import samplers
from comfy import sampler_helpers
from comfy import node_helpers
def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
pos = noise_pred_pos - noise_pred_nocond
neg = noise_pred_neg - noise_pred_nocond
perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos
perp_neg = perp * neg_scale
cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
return cfg_result
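
The projection inside perp_neg strips the negative direction of its component parallel to the positive one. Writing p = noise_pred_pos - noise_pred_nocond and n = noise_pred_neg - noise_pred_nocond, the function computes

    n_\perp = n - \frac{\langle n, p \rangle}{\lVert p \rVert^{2}}\, p, \qquad \text{cfg\_result} = \epsilon_{\text{nocond}} + c\,(p - s\, n_\perp)

with c = cond_scale and s = neg_scale, so only the part of the negative prompt orthogonal to the positive prompt steers the sample away.
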
- #TODO: This node should be removed and replaced with one that uses the new Guider/SamplerCustomAdvanced.
+ #TODO: This node should be removed, it has been replaced with PerpNegGuider
class PerpNeg:
@classmethod
def INPUT_TYPES(s):
@@ -33,12 +41,7 @@ class PerpNeg:
(noise_pred_nocond,) = samplers.calc_cond_batch(model, [nocond_processed], x, sigma, model_options)
- pos = noise_pred_pos - noise_pred_nocond
- neg = noise_pred_neg - noise_pred_nocond
- perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos
- perp_neg = perp * neg_scale
- cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
- cfg_result = x - cfg_result
+ cfg_result = x - perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale)
return cfg_result
m.set_model_sampler_cfg_function(cfg_function)
@@ -46,10 +49,52 @@ class PerpNeg:
return (m, )
class Guider_PerpNeg(samplers.CFGGuider):
def set_conds(self, positive, negative, empty_negative_prompt):
empty_negative_prompt = node_helpers.conditioning_set_values(empty_negative_prompt, {"prompt_type": "negative"})
self.inner_set_conds({"positive": positive, "empty_negative_prompt": empty_negative_prompt, "negative": negative})
def set_cfg(self, cfg, neg_scale):
self.cfg = cfg
self.neg_scale = neg_scale
def predict_noise(self, x, timestep, model_options={}, seed=None):
positive_cond = self.conds.get("positive", None)
negative_cond = self.conds.get("negative", None)
empty_cond = self.conds.get("empty_negative_prompt", None)
out = samplers.calc_cond_batch(self.inner_model, [negative_cond, positive_cond, empty_cond], x, timestep, model_options)
return perp_neg(x, out[1], out[0], out[2], self.neg_scale, self.cfg)
class PerpNegGuider:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"model": ("MODEL",),
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"empty_conditioning": ("CONDITIONING", ),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
}
}
RETURN_TYPES = ("GUIDER",)
FUNCTION = "get_guider"
CATEGORY = "_for_testing"
def get_guider(self, model, positive, negative, empty_conditioning, cfg, neg_scale):
guider = Guider_PerpNeg(model)
guider.set_conds(positive, negative, empty_conditioning)
guider.set_cfg(cfg, neg_scale)
return (guider,)
NODE_CLASS_MAPPINGS = {
"PerpNeg": PerpNeg,
"PerpNegGuider": PerpNegGuider,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"PerpNeg": "Perp-Neg",
"PerpNeg": "Perp-Neg (DEPRECATED by PerpNegGuider)",
}


@@ -141,7 +141,7 @@ class PhotoMakerEncode:
return {"required": { "photomaker": ("PHOTOMAKER",),
"image": ("IMAGE",),
"clip": ("CLIP", ),
"text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}),
"text": ("STRING", {"multiline": True, "dynamicPrompts": True, "default": "photograph of photomaker"}),
}}
RETURN_TYPES = ("CONDITIONING",)


@@ -5,6 +5,7 @@ from PIL import Image
import math
from comfy import utils
+ from comfy import model_management
class Blend:
@@ -102,6 +103,7 @@ class Blur:
if blur_radius == 0:
return (image,)
+ image = image.to(model_management.get_torch_device())
batch_size, height, width, channels = image.shape
kernel_size = blur_radius * 2 + 1
@@ -112,7 +114,7 @@ class Blur:
blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
blurred = blurred.permute(0, 2, 3, 1)
- return (blurred,)
+ return (blurred.to(model_management.intermediate_device()),)
class Quantize:
def __init__(self):
@@ -225,6 +227,7 @@ class Sharpen:
return (image,)
batch_size, height, width, channels = image.shape
+ image = image.to(model_management.get_torch_device())
kernel_size = sharpen_radius * 2 + 1
kernel = gaussian_kernel(kernel_size, sigma, device=image.device) * -(alpha*10)
@@ -239,7 +242,7 @@ class Sharpen:
result = torch.clamp(sharpened, 0, 1)
- return (result,)
+ return (result.to(model_management.intermediate_device()),)
class ImageScaleToTotalPixels:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
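
Both hunks above follow the same device round-trip; as a sketch (heavy_op stands in for the blur/sharpen convolution and is hypothetical):

from comfy import model_management

def run_on_gpu(image, heavy_op):
    image = image.to(model_management.get_torch_device())              # compute on the GPU
    return heavy_op(image).to(model_management.intermediate_device())  # hand back for downstream nodes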


@@ -1,9 +1,9 @@
- import torch
- from comfy import model_management
- from comfy import utils
from comfy.model_downloader import get_filename_list_with_downloadable, KNOWN_UPSCALERS, get_or_download
from ..chainner_models import model_loading
+ from comfy import model_management
+ import torch
+ from comfy import utils
from comfy.cmd import folder_paths
class UpscaleModelLoader:
@@ -40,9 +40,14 @@ class ImageUpscaleWithModel:
def upscale(self, upscale_model, image):
device = model_management.get_torch_device()
+ memory_required = model_management.module_size(upscale_model)
+ memory_required += (512 * 512 * 3) * image.element_size() * max(upscale_model.scale, 1.0) * 256.0 # The 256.0 is an estimate of how much some of these models take, TODO: make it more accurate
+ memory_required += image.nelement() * image.element_size()
+ model_management.free_memory(memory_required, device)
upscale_model.to(device)
in_img = image.movedim(-1, -3).to(device)
- free_memory = model_management.get_free_memory(device)
tile = 512
overlap = 32
@@ -50,11 +55,9 @@ class ImageUpscaleWithModel:
oom = True
while oom:
try:
- steps = in_img.shape[0] * utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile,
- tile_y=tile, overlap=overlap)
+ steps = in_img.shape[0] * utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
pbar = utils.ProgressBar(steps)
- s = utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap,
- upscale_amount=upscale_model.scale, pbar=pbar)
+ s = utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
oom = False
except model_management.OOM_EXCEPTION as e:
tile //= 2
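
For scale, the reservation above for a 4x model on float32 input works out to roughly:

tile_bytes = (512 * 512 * 3) * 4       # one 512x512 RGB float32 tile
estimate = tile_bytes * 4.0 * 256.0    # upscale_model.scale = 4 and the empirical 256x factor
# about 3.2 GB requested via free_memory, plus the input image itself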


@@ -204,13 +204,17 @@ export class EzWidget {
convertToWidget() {
if (!this.isConvertedToInput)
throw new Error(`Widget ${this.widget.name} cannot be converted as it is already a widget.`);
- this.node.menu[`Convert ${this.widget.name} to widget`].call();
+ var menu = this.node.menu["Convert Input to Widget"].item.submenu.options;
+ var index = menu.findIndex(a => a.content == `Convert ${this.widget.name} to widget`);
+ menu[index].callback.call();
}
convertToInput() {
if (this.isConvertedToInput)
throw new Error(`Widget ${this.widget.name} cannot be converted as it is already an input.`);
- this.node.menu[`Convert ${this.widget.name} to input`].call();
+ var menu = this.node.menu["Convert Widget to Input"].item.submenu.options;
+ var index = menu.findIndex(a => a.content == `Convert ${this.widget.name} to input`);
+ menu[index].callback.call();
}
}