some_workflows

This commit is contained in:
Emser 2023-10-15 16:59:35 +01:00
parent 3f26d9e916
commit dc44e3ed41
9 changed files with 8034 additions and 0 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 MiB

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

1436
.a_Workflows/SDXL/depth.json Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,290 @@
import torch
import comfy.model_management
import comfy.sample
import latent_preview
def prepare_mask(mask, shape):
    """Fit *mask* to a latent of *shape* (batch, channels, height, width).

    The mask is bilinearly rescaled to the latent's spatial size, broadcast
    across all channels, and tiled along the batch axis whenever fewer mask
    entries than latent batch items were supplied.
    """
    batch, channels = shape[0], shape[1]
    spatial = (shape[2], shape[3])
    # Collapse any leading dims into the batch axis, single channel.
    flat = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
    resized = torch.nn.functional.interpolate(flat, size=spatial, mode="bilinear")
    broadcast = resized.expand((-1, channels, -1, -1))
    if broadcast.shape[0] < batch:
        # Tile to at least `batch` entries, then trim the excess.
        repeats = (batch - 1) // broadcast.shape[0] + 1
        broadcast = broadcast.repeat(repeats, 1, 1, 1)[:batch]
    return broadcast
def remap_range(value, minIn, MaxIn, minOut, maxOut):
    """Linearly remap *value* from [minIn, MaxIn] onto [minOut, maxOut].

    *value* is clamped into the input interval first, so for
    minOut <= maxOut the result always lies within [minOut, maxOut].
    The output interval may be reversed (minOut > maxOut), as used by
    the detail_level mapping in this file.

    Raises ZeroDivisionError when MaxIn == minIn.
    """
    # Clamp into the source interval before interpolating
    # (replaces the original semicolon-terminated if-chains).
    value = max(minIn, min(value, MaxIn))
    return ((value - minIn) / (MaxIn - minIn)) * (maxOut - minOut) + minOut
class KSamplerSDXLAdvanced:
    """Advanced two-pass SDXL sampler node for ComfyUI.

    Runs `base_steps` steps with the base model, then continues for
    `refiner_steps` steps with the refiner model.  Noise is generated
    explicitly (CPU or GPU RNG) and injected scaled by the scheduler's
    sigma difference between steps; `detail_level` slerps between noise
    computed for adjacent step counts to raise or lower fine detail.
    When enabled, a Reinhard tonemap CFG patch rescales the effective
    CFG toward `rescale_tonemap_to`.

    NOTE(review): this file uses comfy.samplers and comfy.utils without
    importing them at the top -- presumably made available transitively
    by importing comfy.sample; confirm or add explicit imports.
    """
    @classmethod
    def INPUT_TYPES(s):
        """Declare the node's input sockets and widgets for the ComfyUI UI."""
        ui_widgets = {"required":
            {
                "model_model": ("MODEL",),
                "model_refiner": ("MODEL",),
                "CONDITIONING_model_pos": ("CONDITIONING", ),
                "CONDITIONING_model_neg": ("CONDITIONING", ),
                "CONDITIONING_refiner_pos": ("CONDITIONING", ),
                "CONDITIONING_refiner_neg": ("CONDITIONING", ),
                "latent_image": ("LATENT", ),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "cfg_scale": ("FLOAT", {"default": 7.5, "min": 0.0, "max": 100.0}),
                # "cfg_rescale_multiplier": ("FLOAT", {"default": 1, "min": -1.0, "max": 2.0, "step": 0.1}),
                "sampler": (comfy.samplers.KSampler.SAMPLERS, {"default": "dpmpp_2m"}),
                "scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"default": "karras"}),
                "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                "base_steps": ("INT", {"default": 12, "min": 0, "max": 10000}),
                "refiner_steps": ("INT", {"default": 4, "min": 0, "max": 10000}),
                "detail_level": ("FLOAT", {"default": 1, "min": 0.0, "max": 2.0, "step": 0.1}),
                "detail_from": (["penultimate_step","base_sample"], {"default": "penultimate_step"}),
                "noise_source": (["CPU","GPU"], {"default": "CPU"}),
                "auto_rescale_tonemap": (["enable","disable"], {"default": "enable"}),
                "rescale_tonemap_to": ("FLOAT", {"default": 7.5, "min": 0, "max": 30.0, "step": 0.5}),
                # "refiner_extra_noise": (["enable","disable"], {"default": "disable"}),
                # "base_noise": ("FLOAT", {"default": 1, "min": 0.0, "max": 10.0, "step": 0.01}),
                # "noise_shift_end_refiner": ("INT", {"default": -1, "min": -10000, "max": 0})
            },
            "optional":
            {
                # Optional VAE pair: decode with SD15VAE then re-encode with
                # SDXLVAE between the base and refiner passes.
                "SD15VAE": ("VAE", ),
                "SDXLVAE": ("VAE", ),
            }
        }
        return ui_widgets
    RETURN_TYPES = ("LATENT",)  # single latent output socket
    FUNCTION = "sample_sdxl"    # method ComfyUI invokes to execute the node
    CATEGORY = "sampling"
    def patch_tonemap(self, model, multiplier):
        """Return a clone of *model* whose sampler CFG function applies
        Reinhard tonemapping to the (cond - uncond) noise prediction,
        with the tonemap ceiling scaled by *multiplier*."""
        def sampler_tonemap_reinhard(args):
            cond = args["cond"]
            uncond = args["uncond"]
            cond_scale = args["cond_scale"]
            noise_pred = (cond - uncond)
            # Per-pixel magnitude of the guidance vector; epsilon avoids /0.
            noise_pred_vector_magnitude = (torch.linalg.vector_norm(noise_pred, dim=(1)) + 0.0000000001)[:,None]
            noise_pred /= noise_pred_vector_magnitude
            mean = torch.mean(noise_pred_vector_magnitude, dim=(1,2,3), keepdim=True)
            std = torch.std(noise_pred_vector_magnitude, dim=(1,2,3), keepdim=True)
            # Soft ceiling: 3 standard deviations above the mean, scaled.
            top = (std * 3 + mean) * multiplier
            #reinhard
            noise_pred_vector_magnitude *= (1.0 / top)
            new_magnitude = noise_pred_vector_magnitude / (noise_pred_vector_magnitude + 1.0)
            new_magnitude *= top
            return uncond + noise_pred * new_magnitude * cond_scale
        m = model.clone()
        m.set_model_sampler_cfg_function(sampler_tonemap_reinhard)
        return m
    # def patch_model(self, model, multiplier):
    #     def rescale_cfg(args):
    #         cond = args["cond"]
    #         uncond = args["uncond"]
    #         cond_scale = args["cond_scale"]
    #         x_cfg = uncond + cond_scale * (cond - uncond)
    #         ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True)
    #         ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True)
    #         x_rescaled = x_cfg * (ro_pos / ro_cfg)
    #         x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
    #         return x_final
    #     m = model.clone()
    #     m.set_model_sampler_cfg_function(rescale_cfg)
    #     return m
    def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
        """Run comfy.sample.sample over *latent*, wiring up noise, an
        optional noise mask, a live latent previewer and a progress bar.
        Returns a copy of *latent* with "samples" replaced by the result."""
        device = comfy.model_management.get_torch_device()
        latent_image = latent["samples"]
        if disable_noise:
            # Caller supplies an already-noised latent; feed zeros as noise.
            noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
        else:
            batch_inds = latent["batch_index"] if "batch_index" in latent else None
            noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
        noise_mask = None
        if "noise_mask" in latent:
            noise_mask = latent["noise_mask"]
        preview_format = "JPEG"
        if preview_format not in ["JPEG", "PNG"]:  # defensive; always False as written
            preview_format = "JPEG"
        previewer = latent_preview.get_previewer(device, model.model.latent_format)
        pbar = comfy.utils.ProgressBar(steps)
        def callback(step, x0, x, total_steps):
            # Per-step hook: decode a preview image and advance the bar.
            preview_bytes = None
            if previewer:
                preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
            pbar.update_absolute(step + 1, total_steps, preview_bytes)
        samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                      denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                      force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
        out = latent.copy()
        out["samples"] = samples
        return out
    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        """Translate the "enable"/"disable" string flags into booleans and
        delegate to common_ksampler for the [start_at_step, end_at_step]
        slice of the *steps*-step schedule."""
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return self.common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
    def calc_sigma(self, model, sampler_name, scheduler, steps, start_at_step, end_at_step):
        """Return the sigma (noise level) difference between two scheduler
        steps, divided by the latent format's scale factor, as a numpy value.
        Loads the model onto the compute device to build the sigma schedule."""
        device = comfy.model_management.get_torch_device()
        end_at_step = min(steps, end_at_step)  # clamp to schedule length
        start_at_step = min(start_at_step, end_at_step)
        real_model = None
        comfy.model_management.load_model_gpu(model)
        real_model = model.model
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
        sigmas = sampler.sigmas
        sigma = sigmas[start_at_step] - sigmas[end_at_step]
        sigma /= model.model.latent_format.scale_factor
        sigma_output = sigma.cpu().numpy()
        print("Calculated sigma:",sigma_output)
        return sigma_output
    def create_noisy_latents(self, source, seed, width, height, batch_size):
        """Generate a seeded Gaussian latent of shape
        (batch_size, 4, height // 8, width // 8).  *source* selects the RNG
        device ("CPU" or GPU) -- this changes the random sequence -- but the
        result is always moved back to the CPU."""
        torch.manual_seed(seed)
        if source == "CPU":
            device = "cpu"
        else:
            device = comfy.model_management.get_torch_device()
        noise = torch.randn((batch_size, 4, height // 8, width // 8), dtype=torch.float32, device=device).cpu()
        return {"samples":noise}
    def inject_noise(self, latents, strength, noise=None, mask=None):
        """Add *noise* scaled by *strength* to *latents*, optionally blended
        through *mask*.  Returns a (shallow) copy; when noise is missing or
        the shapes mismatch the input is returned unchanged with a warning."""
        s = latents.copy()
        if noise is None:
            return s
        if latents["samples"].shape != noise["samples"].shape:
            print("warning, shapes in InjectNoise not the same, ignoring")
            return s
        noised = s["samples"].clone() + noise["samples"].clone() * strength
        if mask is not None:
            mask = prepare_mask(mask, noised.shape)
            noised = mask * noised + (1-mask) * latents["samples"]
        s["samples"] = noised
        return s
    # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475
    def slerp(self, val, low, high):
        """Spherical linear interpolation between *low* and *high* at *val*,
        computed per batch item on the flattened tensors."""
        dims = low.shape
        #flatten to batches
        low = low.reshape(dims[0], -1)
        high = high.reshape(dims[0], -1)
        low_norm = low/torch.norm(low, dim=1, keepdim=True)
        high_norm = high/torch.norm(high, dim=1, keepdim=True)
        # in case we divide by zero
        low_norm[low_norm != low_norm] = 0.0
        high_norm[high_norm != high_norm] = 0.0
        omega = torch.acos((low_norm*high_norm).sum(1))
        so = torch.sin(omega)
        res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
        return res.reshape(dims)
    def slerp_latents(self, latents1, factor, latents2=None, mask=None):
        """Slerp between two latent dicts; factor 0 -> latents1, 1 -> latents2.

        NOTE(review): the early-exit paths return a 1-tuple while the normal
        path returns the dict itself; callers in this file always take the
        normal path, but the inconsistency is worth fixing upstream.
        """
        s = latents1.copy()
        if latents2 is None:
            return (s,)
        if latents1["samples"].shape != latents2["samples"].shape:
            print("warning, shapes in LatentSlerp not the same, ignoring")
            return (s,)
        slerped = self.slerp(factor, latents1["samples"].clone(), latents2["samples"].clone())
        if mask is not None:
            mask = prepare_mask(mask, slerped.shape)
            slerped = mask * slerped + (1-mask) * latents1["samples"]
        s["samples"] = slerped
        return s
    def compute_and_generate_noise(self,samples,seed,width,height,batch_size,model,sampler,scheduler,total_steps,start_at,end_at,source):
        """Generate fresh seeded noise and inject it into *samples*, scaled
        by the sigma difference between steps *start_at* and *end_at* of a
        *total_steps*-step schedule."""
        noisy_latent = self.create_noisy_latents(source,seed,width,height,batch_size)
        sigma_balls = self.calc_sigma(model,sampler,scheduler,total_steps,start_at,end_at)
        samples_output = self.inject_noise(samples,sigma_balls,noisy_latent)
        return samples_output
    def sample_sdxl(self, model_model, model_refiner, CONDITIONING_model_pos, CONDITIONING_model_neg, CONDITIONING_refiner_pos, CONDITIONING_refiner_neg, latent_image, seed, cfg_scale, sampler, scheduler, start_at_step, base_steps, refiner_steps,detail_level,detail_from,noise_source,auto_rescale_tonemap,rescale_tonemap_to,SD15VAE=None, SDXLVAE=None):
        """Node entry point: base-model pass followed by a refiner pass.

        Returns a 1-tuple containing the refined latent dict.
        """
        # if cfg_rescale_multiplier != 1:
        #     model_model = self.patch_model(model_model,cfg_rescale_multiplier)
        #     model_refiner = self.patch_model(model_refiner,cfg_rescale_multiplier)
        if auto_rescale_tonemap == "enable" and cfg_scale!=rescale_tonemap_to:
            # Patch both models so effective CFG behaves like rescale_tonemap_to.
            scale_model = 1/cfg_scale*rescale_tonemap_to
            model_model = self.patch_tonemap(model_model,scale_model)
            if sampler == "uni_pc" or sampler == "uni_pc_bh2":
                # presumably because the refiner pass is later switched off
                # uni_pc (see below) -- TODO confirm the 7.5 target.
                scale_model = 1/cfg_scale*7.5
            model_refiner = self.patch_tonemap(model_refiner,scale_model)
        # Grab (channels, height, width) of the first latent in the batch.
        for lat in latent_image['samples']:
            d, y, x = lat.size()
            break
        batch_size = len(latent_image['samples'])
        width = x*8   # latent -> pixel size (factor of 8)
        height = y*8
        # Step windows: base runs [start_at_step, base_steps), refiner
        # continues [base_steps, base_steps + refiner_steps).
        base_start_at = start_at_step
        base_end_at = base_steps
        base_total_steps = base_steps + refiner_steps
        refiner_start_at = base_steps
        refiner_end_at = base_steps + refiner_steps
        refiner_total_steps = base_steps + refiner_steps
        if sampler == "uni_pc" or sampler == "uni_pc_bh2":
            # uni_pc path shifts the sigma window back one step --
            # presumably compensating for its schedule; TODO confirm.
            noisy_base = self.compute_and_generate_noise(latent_image,seed,width,height,batch_size,model_model,sampler,scheduler,base_end_at-1,base_start_at,base_end_at-1,noise_source)
        else:
            noisy_base = self.compute_and_generate_noise(latent_image,seed,width,height,batch_size,model_model,sampler,scheduler,base_end_at,base_start_at,base_end_at,noise_source)
        sample_model = self.sample(model_model,"disable",seed,base_total_steps,cfg_scale,sampler,scheduler,CONDITIONING_model_pos,CONDITIONING_model_neg,noisy_base,base_start_at,base_end_at,"disable")
        if SD15VAE is not None and SDXLVAE is not None:
            # Optional VAE round-trip between the two passes.
            sample_model["samples"] = SD15VAE.decode(sample_model["samples"])
            sample_model["samples"] = SDXLVAE.encode(sample_model["samples"])
        if sampler == "uni_pc" or sampler == "uni_pc_bh2":
            # Refiner pass never uses uni_pc; fall back to dpmpp_2m/karras.
            sampler = "dpmpp_2m"
            scheduler = "karras"
        if detail_level < 0.9999 or detail_level > 1:
            # Detail control: slerp between noise computed for an adjacent
            # step count (or the raw base sample) and the nominal step count.
            if detail_from == "penultimate_step":
                if detail_level > 1:
                    noisy_latent_1 = self.compute_and_generate_noise(sample_model,seed,width,height,batch_size,model_refiner,sampler,scheduler,refiner_total_steps+1,refiner_start_at,refiner_end_at+1,noise_source)
                else:
                    noisy_latent_1 = self.compute_and_generate_noise(sample_model,seed,width,height,batch_size,model_refiner,sampler,scheduler,refiner_total_steps-1,refiner_start_at,refiner_end_at-1,noise_source)
            else:
                noisy_latent_1 = sample_model
            noisy_latent_2 = self.compute_and_generate_noise(sample_model,seed,width,height,batch_size,model_refiner,sampler,scheduler,refiner_total_steps, refiner_start_at,refiner_end_at,noise_source)
            if detail_level > 1:
                # Map detail_level in (1, 2] onto slerp factor [1, 0).
                noisy_latent_3 = self.slerp_latents(noisy_latent_1,remap_range(detail_level,1,2,1,0),noisy_latent_2)
            else:
                noisy_latent_3 = self.slerp_latents(noisy_latent_1,detail_level,noisy_latent_2)
        else:
            # detail_level effectively 1: plain noise injection, no slerp.
            noisy_latent_3 = self.compute_and_generate_noise(sample_model,seed,width,height,batch_size,model_refiner,sampler,scheduler,refiner_total_steps, refiner_start_at,refiner_end_at,noise_source)
        sample_refiner = self.sample(model_refiner,"disable",seed,refiner_total_steps,cfg_scale,sampler,scheduler,CONDITIONING_refiner_pos,CONDITIONING_refiner_neg,noisy_latent_3,refiner_start_at,refiner_end_at,"disable")
        return (sample_refiner,)
# Registry consumed by ComfyUI at load time: maps the node's type name to
# its implementing class.
NODE_CLASS_MAPPINGS = {
    "KSamplerSDXLAdvanced": KSamplerSDXLAdvanced
}

View File

@@ -0,0 +1,844 @@
{
"last_node_id": 34,
"last_link_id": 80,
"nodes": [
{
"id": 12,
"type": "VAEDecode",
"pos": [
2060,
240
],
"size": {
"0": 180,
"1": 60
},
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 2
},
{
"name": "vae",
"type": "VAE",
"link": 3
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
1
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 3,
"type": "VAELoader",
"pos": [
1640,
360
],
"size": {
"0": 360,
"1": 60
},
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
3
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"sdxl_vae_fixed.safetensors"
]
},
{
"id": 8,
"type": "CheckpointLoaderSimple",
"pos": [
80,
400
],
"size": {
"0": 360,
"1": 100
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
13
],
"shape": 3,
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
8,
9
],
"shape": 3,
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"SDXL\\sd_xl_refiner_1.0.safetensors"
]
},
{
"id": 14,
"type": "PrimitiveNode",
"pos": [
460,
260
],
"size": {
"0": 340,
"1": 220
},
"flags": {},
"order": 2,
"mode": 0,
"outputs": [
{
"name": "STRING",
"type": "STRING",
"links": [
4,
5
],
"slot_index": 0,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"title": "Positive prompt",
"properties": {},
"widgets_values": [
"amateur photo of a 22yo girl eye, detailed face, long hair, close-up on the eye, detailed eye, at night"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 15,
"type": "PrimitiveNode",
"pos": [
460,
520
],
"size": {
"0": 340,
"1": 220
},
"flags": {},
"order": 3,
"mode": 0,
"outputs": [
{
"name": "STRING",
"type": "STRING",
"links": [
6,
7
],
"slot_index": 0,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"title": "Negative prompt",
"properties": {},
"widgets_values": [
"3d render, grayscale, too dark, doll, not realistic, bad quality, low quality, drawing, sketch, simple, backlit, backlight, underexposure, underexposed, repetitive, tiled, cloned, child, copied, cartoon, (low quality:1.3), blurry, ugly, tiling, poorly drawn"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 16,
"type": "EmptyLatentImage",
"pos": [
900,
660
],
"size": {
"0": 220,
"1": 120
},
"flags": {},
"order": 4,
"mode": 0,
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
18
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
1024,
1024,
1
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 2,
"type": "CheckpointLoaderSimple",
"pos": [
80,
260
],
"size": {
"0": 360,
"1": 100
},
"flags": {},
"order": 5,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
12
],
"shape": 3,
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
10,
11
],
"shape": 3,
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"SDXL\\sd_xl_base_1.0.safetensors"
]
},
{
"id": 9,
"type": "CLIPTextEncode",
"pos": [
900,
360
],
"size": {
"0": 220,
"1": 60
},
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 11
},
{
"name": "text",
"type": "STRING",
"link": 7,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
15
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"3d render, grayscale, too dark, doll, not realistic, bad quality, low quality, drawing, sketch, simple, backlit, backlight, underexposure, underexposed, repetitive, tiled, cloned, child, copied, cartoon, (low quality:1.3), blurry, ugly, tiling, poorly drawn"
]
},
{
"id": 10,
"type": "CLIPTextEncode",
"pos": [
900,
460
],
"size": {
"0": 220,
"1": 60
},
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 9
},
{
"name": "text",
"type": "STRING",
"link": 5,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
16
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"amateur photo of a 22yo girl eye, detailed face, long hair, close-up on the eye, detailed eye, at night"
]
},
{
"id": 11,
"type": "CLIPTextEncode",
"pos": [
900,
560
],
"size": {
"0": 220,
"1": 60
},
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 8
},
{
"name": "text",
"type": "STRING",
"link": 6,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
17
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"3d render, grayscale, too dark, doll, not realistic, bad quality, low quality, drawing, sketch, simple, backlit, backlight, underexposure, underexposed, repetitive, tiled, cloned, child, copied, cartoon, (low quality:1.3), blurry, ugly, tiling, poorly drawn"
]
},
{
"id": 4,
"type": "CLIPTextEncode",
"pos": [
900,
260
],
"size": {
"0": 220,
"1": 60
},
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 10
},
{
"name": "text",
"type": "STRING",
"link": 4,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
14
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"amateur photo of a 22yo girl eye, detailed face, long hair, close-up on the eye, detailed eye, at night"
]
},
{
"id": 13,
"type": "SaveImage",
"pos": [
2260,
240
],
"size": {
"0": 220,
"1": 270
},
"flags": {
"collapsed": true
},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 1
}
],
"properties": {},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 1,
"type": "KSamplerSDXLAdvanced",
"pos": [
1220,
240
],
"size": {
"0": 393,
"1": 506
},
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "model_model",
"type": "MODEL",
"link": 12
},
{
"name": "model_refiner",
"type": "MODEL",
"link": 13
},
{
"name": "CONDITIONING_model_pos",
"type": "CONDITIONING",
"link": 14
},
{
"name": "CONDITIONING_model_neg",
"type": "CONDITIONING",
"link": 15
},
{
"name": "CONDITIONING_refiner_pos",
"type": "CONDITIONING",
"link": 16
},
{
"name": "CONDITIONING_refiner_neg",
"type": "CONDITIONING",
"link": 17
},
{
"name": "latent_image",
"type": "LATENT",
"link": 18
},
{
"name": "SD15VAE",
"type": "VAE",
"link": null
},
{
"name": "SDXLVAE",
"type": "VAE",
"link": null
},
{
"name": "seed",
"type": "INT",
"link": 19,
"widget": {
"name": "seed",
"config": [
"INT",
{
"default": 0,
"min": 0,
"max": 18446744073709552000
}
]
}
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
2
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSamplerSDXLAdvanced"
},
"widgets_values": [
702632666099873,
"randomize",
7.5,
"dpmpp_2m",
"karras",
0,
15,
5,
0.9999999999999999,
"penultimate_step",
"GPU",
"enable",
7.5
]
},
{
"id": 17,
"type": "PrimitiveNode",
"pos": [
900,
820
],
"size": [
259.717481376711,
82
],
"flags": {},
"order": 6,
"mode": 0,
"outputs": [
{
"name": "INT",
"type": "INT",
"links": [
19
],
"slot_index": 0,
"widget": {
"name": "seed",
"config": [
"INT",
{
"default": 0,
"min": 0,
"max": 18446744073709552000
}
]
}
}
],
"title": "SEED",
"properties": {},
"widgets_values": [
702632666099873,
"randomize"
],
"color": "#432",
"bgcolor": "#653"
}
],
"links": [
[
1,
12,
0,
13,
0,
"IMAGE"
],
[
2,
1,
0,
12,
0,
"LATENT"
],
[
3,
3,
0,
12,
1,
"VAE"
],
[
4,
14,
0,
4,
1,
"STRING"
],
[
5,
14,
0,
10,
1,
"STRING"
],
[
6,
15,
0,
11,
1,
"STRING"
],
[
7,
15,
0,
9,
1,
"STRING"
],
[
8,
8,
1,
11,
0,
"CLIP"
],
[
9,
8,
1,
10,
0,
"CLIP"
],
[
10,
2,
1,
4,
0,
"CLIP"
],
[
11,
2,
1,
9,
0,
"CLIP"
],
[
12,
2,
0,
1,
0,
"MODEL"
],
[
13,
8,
0,
1,
1,
"MODEL"
],
[
14,
4,
0,
1,
2,
"CONDITIONING"
],
[
15,
9,
0,
1,
3,
"CONDITIONING"
],
[
16,
10,
0,
1,
4,
"CONDITIONING"
],
[
17,
11,
0,
1,
5,
"CONDITIONING"
],
[
18,
16,
0,
1,
6,
"LATENT"
],
[
19,
17,
0,
1,
9,
"INT"
]
],
"groups": [],
"config": {},
"extra": {},
"version": 0.4
}

936
.a_Workflows/skeleton.json Normal file
View File

@@ -0,0 +1,936 @@
{
"last_node_id": 22,
"last_link_id": 38,
"nodes": [
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
394,
179
],
"size": {
"0": 210,
"1": 54
},
"flags": {
"collapsed": true
},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 5
},
{
"name": "text",
"type": "STRING",
"link": 22,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
25
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 13,
"type": "CLIPTextEncode",
"pos": [
421,
-255
],
"size": {
"0": 210,
"1": 54
},
"flags": {
"collapsed": true
},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 13
},
{
"name": "text",
"type": "STRING",
"link": 21,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
},
"slot_index": 1
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
33
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 12,
"type": "CLIPTextEncode",
"pos": [
426,
-297
],
"size": {
"0": 219,
"1": 54
},
"flags": {
"collapsed": true
},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 11
},
{
"name": "text",
"type": "STRING",
"link": 18,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
},
"slot_index": 1
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
32
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"beautiful scenery of wildflowers and nature glass bottle landscape, purple galaxy bottle,"
]
},
{
"id": 18,
"type": "VAEDecode",
"pos": [
1065,
-334
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 36
},
{
"name": "vae",
"type": "VAE",
"link": 37
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
30
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 17,
"type": "KSampler",
"pos": [
721,
134
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 27
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 24
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 25
},
{
"name": "latent_image",
"type": "LATENT",
"link": 38
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
28,
35
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
1125294088641761,
"randomize",
20,
6,
"dpmpp_2s_ancestral",
"normal",
1
]
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1088,
190
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 28
},
{
"name": "vae",
"type": "VAE",
"link": 8
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
9
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
393,
134
],
"size": {
"0": 210,
"1": 54
},
"flags": {
"collapsed": true
},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 3
},
{
"name": "text",
"type": "STRING",
"link": 19,
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
24
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"beautiful scenery of wildflowers and nature glass bottle landscape, purple galaxy bottle,"
]
},
{
"id": 15,
"type": "PrimitiveNode",
"pos": [
-66,
-154
],
"size": {
"0": 402,
"1": 188
},
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "STRING",
"type": "STRING",
"links": [
18,
19
],
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
},
"slot_index": 0
}
],
"title": "Positive Prompt",
"properties": {},
"widgets_values": [
"Hyper-maximalist closeup portrait photograph of an emser2 person, wearing a crown, sitting on a throne, amongst an elaborate and massive pile of treasures and golden statues in a dark mysterious tomb interior, coins, gold, diamonds, jewellery, crowns and sceptres, dust and cobwebs, rotten, horror, ornate, insane details, mist, vapour, smoke, dramatic lighting, luminous mist, eerie, ethereal, cinematic, by Dariusz Zawadzki, by Anton Semenow, by H.R. Geiger, by Bastien Lecouffe-Deharm, ((distinctive colour)), (ultra-realistic)"
],
"color": "#232",
"bgcolor": "#353",
"shape": 4
},
{
"id": 16,
"type": "PrimitiveNode",
"pos": [
-64,
85
],
"size": {
"0": 398,
"1": 140
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "STRING",
"type": "STRING",
"links": [
21,
22
],
"widget": {
"name": "text",
"config": [
"STRING",
{
"multiline": true
}
]
},
"slot_index": 0
}
],
"title": "Negative Prompt",
"properties": {},
"widgets_values": [
"duplicate figures, tacky, ugly, duplication, duplicates, deformed, mutation, twisted body, fused body, additional body, missing body, disfigured, imperfect anatomy, imperfect proportions, gross proportions, extra fingers, mutated hands, poorly drawn hands, extra limbs, malformed limbs, missing arms, additional arms, missing legs, additional legs, mutated feet, additional feet, fused toes, missing toes, additional toes, mutated hands, additional hands, fused fingers, missing fingers, additional fingers, long neck, duplicate heads, small head, oversized head, extra head, makeup, closed eyes, rolling eyes, weird eyes, extra eyes, additional eyebrows, coloured sclera, smudged face, blurred face, long face, poorly drawn face, cloned face, extra face, strange mouth, additional mouth, additional nose, misplaced ears, additional ears, cut-off, out-of-frame, grainy, blurred, blurry, writing, calligraphy, signature, text, numbers, coordinates, watermark, cartoon, painting, unrealistic, render, bad art, hat, b&w, frame watermark"
],
"color": "#332922",
"bgcolor": "#593930",
"shape": 4
},
{
"id": 9,
"type": "SaveImage",
"pos": [
1351,
168
],
"size": {
"0": 426.084228515625,
"1": 437.66387939453125
},
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 9
}
],
"properties": {},
"widgets_values": [
"base_output"
],
"shape": 1
},
{
"id": 20,
"type": "KSampler",
"pos": [
681,
-397
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 34
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 32
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 33
},
{
"name": "latent_image",
"type": "LATENT",
"link": 35
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
36
],
"shape": 3,
"slot_index": 0
}
],
"title": "KSampler for refiner (like img2img)",
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
279603388900375,
"randomize",
15,
8,
"dpmpp_2m",
"normal",
0.25
]
},
{
"id": 11,
"type": "CheckpointLoaderSimple",
"pos": [
-32,
-385
],
"size": {
"0": 336,
"1": 98
},
"flags": {
"collapsed": false
},
"order": 2,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
34
],
"shape": 3,
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
11,
13
],
"shape": 3,
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [
37
],
"shape": 3,
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"sd_xl_refiner_0.9.safetensors"
]
},
{
"id": 4,
"type": "CheckpointLoaderSimple",
"pos": [
-52.80264120483398,
358
],
"size": {
"0": 397,
"1": 98
},
"flags": {},
"order": 3,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
27
],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
3,
5
],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [
8
],
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"sd_xl_base_0.9.safetensors"
]
},
{
"id": 19,
"type": "SaveImage",
"pos": [
1366,
-406
],
"size": {
"0": 462.1468811035156,
"1": 441.9457702636719
},
"flags": {},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 30
}
],
"properties": {},
"widgets_values": [
"refiner_output"
],
"shape": 1
},
{
"id": 21,
"type": "EmptyLatentImage",
"pos": [
390,
242
],
"size": {
"0": 295.7807922363281,
"1": 106
},
"flags": {},
"order": 4,
"mode": 0,
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
38
],
"shape": 3,
"slot_index": 0
}
],
"title": "Image Size\n",
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
1080,
1080,
1
]
}
],
"links": [
[
3,
4,
1,
6,
0,
"CLIP"
],
[
5,
4,
1,
7,
0,
"CLIP"
],
[
8,
4,
2,
8,
1,
"VAE"
],
[
9,
8,
0,
9,
0,
"IMAGE"
],
[
11,
11,
1,
12,
0,
"CLIP"
],
[
13,
11,
1,
13,
0,
"CLIP"
],
[
18,
15,
0,
12,
1,
"STRING"
],
[
19,
15,
0,
6,
1,
"STRING"
],
[
21,
16,
0,
13,
1,
"STRING"
],
[
22,
16,
0,
7,
1,
"STRING"
],
[
24,
6,
0,
17,
1,
"CONDITIONING"
],
[
25,
7,
0,
17,
2,
"CONDITIONING"
],
[
27,
4,
0,
17,
0,
"MODEL"
],
[
28,
17,
0,
8,
0,
"LATENT"
],
[
30,
18,
0,
19,
0,
"IMAGE"
],
[
32,
12,
0,
20,
1,
"CONDITIONING"
],
[
33,
13,
0,
20,
2,
"CONDITIONING"
],
[
34,
11,
0,
20,
0,
"MODEL"
],
[
35,
17,
0,
20,
3,
"LATENT"
],
[
36,
20,
0,
18,
0,
"LATENT"
],
[
37,
11,
2,
18,
1,
"VAE"
],
[
38,
21,
0,
17,
3,
"LATENT"
]
],
"groups": [
{
"title": "REFINER HERE",
"bounding": [
-55,
-455,
381,
183
],
"color": "#3f789e"
},
{
"title": "BASE HERE",
"bounding": [
-77,
278,
444,
203
],
"color": "#8A8"
},
{
"title": "RESULT WITHOUT REFINER",
"bounding": [
1337,
84,
461,
543
],
"color": "#b58b2a"
},
{
"title": "RESULT WITH REFINER",
"bounding": [
1331,
-498,
552,
560
],
"color": "#a1309b"
},
{
"title": "SDXL 0.9",
"bounding": [
422,
-89,
180,
80
],
"color": "#8AA"
}
],
"config": {},
"extra": {},
"version": 0.4
}