mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-10 13:32:36 +08:00

commit 29bc094880

Merge remote-tracking branch 'upstream/master'

# Conflicts:
#	comfy/samplers.py
@@ -1,65 +0,0 @@
-import pygit2
-from datetime import datetime
-import sys
-
-def pull(repo, remote_name='origin', branch='master'):
-    for remote in repo.remotes:
-        if remote.name == remote_name:
-            remote.fetch()
-            remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
-            merge_result, _ = repo.merge_analysis(remote_master_id)
-            # Up to date, do nothing
-            if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
-                return
-            # We can just fastforward
-            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
-                repo.checkout_tree(repo.get(remote_master_id))
-                try:
-                    master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
-                    master_ref.set_target(remote_master_id)
-                except KeyError:
-                    repo.create_branch(branch, repo.get(remote_master_id))
-                repo.head.set_target(remote_master_id)
-            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
-                repo.merge(remote_master_id)
-
-                if repo.index.conflicts is not None:
-                    for conflict in repo.index.conflicts:
-                        print('Conflicts found in:', conflict[0].path)
-                    raise AssertionError('Conflicts, ahhhhh!!')
-
-                user = repo.default_signature
-                tree = repo.index.write_tree()
-                commit = repo.create_commit('HEAD',
-                                            user,
-                                            user,
-                                            'Merge!',
-                                            tree,
-                                            [repo.head.target, remote_master_id])
-                # We need to do this or git CLI will think we are still merging.
-                repo.state_cleanup()
-            else:
-                raise AssertionError('Unknown merge analysis result')
-
-
-repo = pygit2.Repository(str(sys.argv[1]))
-ident = pygit2.Signature('comfyui', 'comfy@ui')
-try:
-    print("stashing current changes")
-    repo.stash(ident)
-except KeyError:
-    print("nothing to stash")
-backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
-print("creating backup branch: {}".format(backup_branch_name))
-repo.branches.local.create(backup_branch_name, repo.head.peel())
-
-print("checking out master branch")
-branch = repo.lookup_branch('master')
-ref = repo.lookup_reference(branch.name)
-repo.checkout(ref)
-
-print("pulling latest changes")
-pull(repo)
-
-print("Done!")
-
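Aside on the pygit2 call the removed updater hinges on: Repository.merge_analysis() returns a bitmask describing how the local branch relates to the fetched remote head, and the script branches on it in exactly the way this condensed sketch shows (pygit2 assumed installed; classify is an illustrative helper, not part of the commit):

import pygit2

def classify(repo, remote_head):
    # merge_analysis() returns (analysis_bitmask, merge_preference)
    analysis, _preference = repo.merge_analysis(remote_head)
    if analysis & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
        return "up-to-date"    # nothing to do
    if analysis & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
        return "fast-forward"  # move the branch ref; no merge commit needed
    if analysis & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
        return "merge"         # true merge; index conflicts are possible
    return "unknown"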
@@ -1,2 +0,0 @@
-..\python_embeded\python.exe .\update.py ..\ComfyUI\
-pause
@@ -1,3 +1,3 @@
 ..\python_embeded\python.exe .\update.py ..\ComfyUI\
-..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2
+..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2
 pause
@@ -1,27 +0,0 @@
-HOW TO RUN:
-
-if you have a NVIDIA gpu:
-
-run_nvidia_gpu.bat
-
-
-
-To run it in slow CPU mode:
-
-run_cpu.bat
-
-
-
-IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints
-
-You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt
-
-
-
-RECOMMENDED WAY TO UPDATE:
-To update the ComfyUI code: update\update_comfyui.bat
-
-
-To update ComfyUI with the python dependencies:
-update\update_comfyui_and_python_dependencies.bat
@@ -1,2 +0,0 @@
-.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
-pause
@@ -17,7 +17,7 @@ jobs:

     - shell: bash
       run: |
-        python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers==0.0.19.dev516 --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
+        python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
        python -m pip install --no-cache-dir ./temp_wheel_dir/*
        echo installed basic
        ls -lah temp_wheel_dir
@@ -46,6 +46,8 @@ jobs:
        mkdir update
        cp -r ComfyUI/.ci/update_windows/* ./update/
        cp -r ComfyUI/.ci/windows_base_files/* ./
+        cp -r ComfyUI/.ci/nightly/update_windows/* ./update/
+        cp -r ComfyUI/.ci/nightly/windows_base_files/* ./

        cd ..

@@ -7,6 +7,8 @@ A powerful and modular stable diffusion GUI and backend.
 This ui will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. For some workflow examples and see what ComfyUI can do you can check out:
 ### [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)

+### [Installing ComfyUI](#installing)
+
 ## Features
 - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
 - Fully supports SD1.x and SD2.x
@@ -10,6 +10,7 @@ parser.add_argument("--output-directory", type=str, default=None, help="Set the
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
 parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
 parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).")
+parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")

 attn_group = parser.add_mutually_exclusive_group()
 attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.")
@@ -20,15 +20,30 @@ total_vram_available_mb = -1
 accelerate_enabled = False
 xpu_available = False

+directml_enabled = False
+if args.directml is not None:
+    import torch_directml
+    directml_enabled = True
+    device_index = args.directml
+    if device_index < 0:
+        directml_device = torch_directml.device()
+    else:
+        directml_device = torch_directml.device(device_index)
+    print("Using directml with device:", torch_directml.device_name(device_index))
+    # torch_directml.disable_tiled_resources(True)
+
 try:
     import torch
-    try:
-        import intel_extension_for_pytorch as ipex
-        if torch.xpu.is_available():
-            xpu_available = True
-            total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
-    except:
-        total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
+    if directml_enabled:
+        total_vram = 4097 #TODO
+    else:
+        try:
+            import intel_extension_for_pytorch as ipex
+            if torch.xpu.is_available():
+                xpu_available = True
+                total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
+        except:
+            total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
     total_ram = psutil.virtual_memory().total / (1024 * 1024)
     if not args.normalvram and not args.cpu:
         if total_vram <= 4096:
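For clarity on the three behaviors of the new --directml flag, a minimal standalone argparse sketch (an illustration, not part of the commit): omitting the flag leaves args.directml as None (DirectML off), a bare --directml yields the const value -1 (default adapter via torch_directml.device()), and --directml N selects adapter N.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1)

# nargs="?" with const=-1 produces the three cases model_management checks:
assert parser.parse_args([]).directml is None             # flag absent -> no DirectML
assert parser.parse_args(["--directml"]).directml == -1   # bare flag -> default device
assert parser.parse_args(["--directml", "1"]).directml == 1  # explicit adapter index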
@@ -217,6 +232,10 @@ def unload_if_low_vram(model):

 def get_torch_device():
     global xpu_available
+    global directml_enabled
+    if directml_enabled:
+        global directml_device
+        return directml_device
     if vram_state == VRAMState.MPS:
         return torch.device("mps")
     if vram_state == VRAMState.CPU:
@@ -234,8 +253,14 @@ def get_autocast_device(dev):


 def xformers_enabled():
+    global xpu_available
+    global directml_enabled
     if vram_state == VRAMState.CPU:
         return False
+    if xpu_available:
+        return False
+    if directml_enabled:
+        return False
     return XFORMERS_IS_AVAILABLE

@@ -251,6 +276,7 @@ def pytorch_attention_enabled():

 def get_free_memory(dev=None, torch_free_too=False):
     global xpu_available
+    global directml_enabled
     if dev is None:
         dev = get_torch_device()

@@ -258,7 +284,10 @@ def get_free_memory(dev=None, torch_free_too=False):
         mem_free_total = psutil.virtual_memory().available
         mem_free_torch = mem_free_total
     else:
-        if xpu_available:
+        if directml_enabled:
+            mem_free_total = 1024 * 1024 * 1024 #TODO
+            mem_free_torch = mem_free_total
+        elif xpu_available:
             mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev)
             mem_free_torch = mem_free_total
         else:
@@ -293,9 +322,14 @@ def mps_mode():

 def should_use_fp16():
     global xpu_available
+    global directml_enabled
+
     if FORCE_FP32:
         return False

+    if directml_enabled:
+        return False
+
     if cpu_mode() or mps_mode() or xpu_available:
         return False #TODO ?

@@ -10,7 +10,6 @@ from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
 #The main sampling function shared by all the samplers
 #Returns predicted noise
 def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}):
-
     def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
         area = (x_in.shape[2], x_in.shape[3], 0, 0)
         strength = 1.0
@@ -24,8 +23,38 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             adm_cond = cond[1]['adm_encoded']

         input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
-        mult = torch.ones_like(input_x) * strength
-        rr = 8
-        if area[2] != 0:
-            for t in range(rr):
+        if 'mask' in cond[1]:
+            # Scale the mask to the size of the input
+            # The mask should have been resized as we began the sampling process
+            mask_strength = 1.0
+            if "mask_strength" in cond[1]:
+                mask_strength = cond[1]["mask_strength"]
+            mask = cond[1]['mask']
+            assert(mask.shape[1] == x_in.shape[2])
+            assert(mask.shape[2] == x_in.shape[3])
+            mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength
+            mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1)
+        else:
+            mask = torch.ones_like(input_x)
+        mult = mask * strength
+
+        if 'mask' not in cond[1]:
+            rr = 8
+            if area[2] != 0:
+                for t in range(rr):
+                    mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1))
+            if (area[0] + area[2]) < x_in.shape[2]:
+                for t in range(rr):
+                    mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1))
+            if area[3] != 0:
+                for t in range(rr):
+                    mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1))
+            if (area[1] + area[3]) < x_in.shape[3]:
+                for t in range(rr):
+                    mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1))
+
+        conditioning = {}
+        conditioning['c_crossattn'] = cond[0]
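The rr = 8 feathering retained above (now skipped for masked conds) blends an area cond linearly over the 8 latent rows or columns nearest each interior edge; the per-step multiplier (1.0/rr) * (t + 1) ramps from 0.125 up to 1.0, as this standalone illustration shows:

rr = 8
ramp = [(1.0 / rr) * (t + 1) for t in range(rr)]
print(ramp)  # [0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]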
@@ -198,12 +227,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
         if control is not None:
             c['control'] = control.get_control(input_x, timestep_, c['c_crossattn'], len(cond_or_uncond))

-        # if attention is not None:
-        #     generated_attention = c['c_crossattn'][0]
-        #     mixed_attention = attention_weight * torch.cat(attention) + (1 - attention_weight) * generated_attention
-        #     c['c_crossattn'] = [mixed_attention]
-
-
         if patches is not None:
             if "patches" in transformer_options:
                 cur_patches = transformer_options["patches"].copy()
@@ -215,7 +238,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             else:
                 transformer_options["patches"] = patches

-        # transformer_options['return_attention'] = True
         c['transformer_options'] = transformer_options

         if transformer_options.get("return_attention", False):
@@ -325,6 +347,71 @@ def blank_inpaint_image_like(latent_image):
     blank_image[:,3] *= 0.1380
     return blank_image

+def get_mask_aabb(masks):
+    if masks.numel() == 0:
+        return torch.zeros((0, 4), device=masks.device, dtype=torch.int)
+
+    b = masks.shape[0]
+
+    bounding_boxes = torch.zeros((b, 4), device=masks.device, dtype=torch.int)
+    is_empty = torch.zeros((b), device=masks.device, dtype=torch.bool)
+    for i in range(b):
+        mask = masks[i]
+        if mask.numel() == 0:
+            continue
+        if torch.max(mask != 0) == False:
+            is_empty[i] = True
+            continue
+        y, x = torch.where(mask)
+        bounding_boxes[i, 0] = torch.min(x)
+        bounding_boxes[i, 1] = torch.min(y)
+        bounding_boxes[i, 2] = torch.max(x)
+        bounding_boxes[i, 3] = torch.max(y)
+
+    return bounding_boxes, is_empty
+
+def resolve_cond_masks(conditions, h, w, device):
+    # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes.
+    # While we're doing this, we can also resolve the mask device and scaling for performance reasons
+    for i in range(len(conditions)):
+        c = conditions[i]
+        if 'mask' in c[1]:
+            mask = c[1]['mask']
+            mask = mask.to(device=device)
+            modified = c[1].copy()
+            if len(mask.shape) == 2:
+                mask = mask.unsqueeze(0)
+            if mask.shape[2] != h or mask.shape[3] != w:
+                mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(h, w), mode='bilinear', align_corners=False).squeeze(1)
+
+            if modified.get("set_area_to_bounds", False):
+                bounds = torch.max(torch.abs(mask),dim=0).values.unsqueeze(0)
+                boxes, is_empty = get_mask_aabb(bounds)
+                if is_empty[0]:
+                    # Use the minimum possible size for efficiency reasons. (Since the mask is all-0, this becomes a noop anyway)
+                    modified['area'] = (8, 8, 0, 0)
+                else:
+                    box = boxes[0]
+                    H, W, Y, X = (box[3] - box[1] + 1, box[2] - box[0] + 1, box[1], box[0])
+                    # Make sure the height and width are divisible by 8
+                    if X % 8 != 0:
+                        newx = X // 8 * 8
+                        W = W + (X - newx)
+                        X = newx
+                    if Y % 8 != 0:
+                        newy = Y // 8 * 8
+                        H = H + (Y - newy)
+                        Y = newy
+                    if H % 8 != 0:
+                        H = H + (8 - (H % 8))
+                    if W % 8 != 0:
+                        W = W + (8 - (W % 8))
+                    area = (int(H), int(W), int(Y), int(X))
+                    modified['area'] = area
+
+            modified['mask'] = mask
+            conditions[i] = [c[0], modified]
+
 def create_cond_with_same_area_if_none(conds, c):
     if 'area' not in c[1]:
         return
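A quick sanity check of the new get_mask_aabb helper (torch assumed; toy data, not from the commit): a batch containing one rectangular mask should come back as (min_x, min_y, max_x, max_y) with is_empty false.

import torch

masks = torch.zeros((1, 16, 16))
masks[0, 4:8, 2:6] = 1.0  # nonzero block: rows 4-7, cols 2-5
boxes, is_empty = get_mask_aabb(masks)
print(boxes[0].tolist(), bool(is_empty[0]))  # [2, 4, 5, 7] False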
@@ -509,6 +596,10 @@ class KSampler:

         positive = positive[:]
         negative = negative[:]
+
+        resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
+        resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
+
         #make sure each cond area has an opposite one with the same area
         for c in positive:
             create_cond_with_same_area_if_none(negative, c)
nodes.py
@@ -65,6 +65,36 @@ class ConditioningCombine:
     def combine(self, conditioning_1, conditioning_2):
         return (conditioning_1 + conditioning_2, )

+class ConditioningAverage :
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
+                             "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "addWeighted"
+
+    CATEGORY = "conditioning"
+
+    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
+        out = []
+
+        if len(conditioning_from) > 1:
+            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")
+
+        cond_from = conditioning_from[0][0]
+
+        for i in range(len(conditioning_to)):
+            t1 = conditioning_to[i][0]
+            t0 = cond_from[:,:t1.shape[1]]
+            if t0.shape[1] < t1.shape[1]:
+                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)
+
+            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
+            n = [tw, conditioning_to[i][1].copy()]
+            out.append(n)
+        return (out, )
+
 class ConditioningSetArea:
     def __init__(self, event_dispatcher):
         self.event_dispatcher = event_dispatcher
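The heart of ConditioningAverage.addWeighted is a plain linear interpolation between the two cond tensors, so strength 1.0 returns conditioning_to unchanged and 0.0 returns conditioning_from; a toy sketch with made-up shapes (torch assumed, not from the commit):

import torch

conditioning_to_strength = 0.75
t1 = torch.ones(1, 77, 768)   # stands in for a conditioning_to tensor
t0 = torch.zeros(1, 77, 768)  # stands in for the conditioning_from tensor
tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, 1.0 - conditioning_to_strength)
print(tw.mean().item())  # 0.75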
@@ -88,11 +118,41 @@ class ConditioningSetArea:
             n = [t[0], t[1].copy()]
             n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
             n[1]['strength'] = strength
+            n[1]['set_area_to_bounds'] = False
             n[1]['min_sigma'] = min_sigma
             n[1]['max_sigma'] = max_sigma
             c.append(n)
         return (c, )

+class ConditioningSetMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "mask": ("MASK", ),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                             "set_cond_area": (["default", "mask bounds"],),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "conditioning"
+
+    def append(self, conditioning, mask, set_cond_area, strength):
+        c = []
+        set_area_to_bounds = False
+        if set_cond_area != "default":
+            set_area_to_bounds = True
+        if len(mask.shape) < 3:
+            mask = mask.unsqueeze(0)
+        for t in conditioning:
+            n = [t[0], t[1].copy()]
+            _, h, w = mask.shape
+            n[1]['mask'] = mask
+            n[1]['set_area_to_bounds'] = set_area_to_bounds
+            n[1]['mask_strength'] = strength
+            c.append(n)
+        return (c, )
+
 class VAEDecode:
     def __init__(self, device="cpu", event_dispatcher=None):
         self.device = device
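One detail of ConditioningSetMask.append worth spelling out: a single [H, W] mask is promoted to a batch of one before being attached, so downstream code can always assume a [B, H, W] layout; an illustrative snippet (torch assumed):

import torch

mask = torch.rand(64, 64)
if len(mask.shape) < 3:  # same normalization as in append() above
    mask = mask.unsqueeze(0)
print(mask.shape)  # torch.Size([1, 64, 64])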
@@ -1217,8 +1277,10 @@ NODE_CLASS_MAPPINGS = {
     "ImageScale": ImageScale,
     "ImageInvert": ImageInvert,
     "ImagePadForOutpaint": ImagePadForOutpaint,
+    "ConditioningAverage ": ConditioningAverage ,
     "ConditioningCombine": ConditioningCombine,
     "ConditioningSetArea": ConditioningSetArea,
+    "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,
     "SetLatentNoiseMask": SetLatentNoiseMask,
     "LatentComposite": LatentComposite,
@@ -1267,7 +1329,9 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "CLIPTextEncode": "CLIP Text Encode (Prompt)",
     "CLIPSetLastLayer": "CLIP Set Last Layer",
     "ConditioningCombine": "Conditioning (Combine)",
+    "ConditioningAverage ": "Conditioning (Average)",
     "ConditioningSetArea": "Conditioning (Set Area)",
+    "ConditioningSetMask": "Conditioning (Set Mask)",
     "ControlNetApply": "Apply ControlNet",
     # Latent
     "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
@@ -6,6 +6,7 @@ app.registerExtension({
    name: "Comfy.SlotDefaults",
    suggestionsNumber: null,
    init() {
+       LiteGraph.search_filter_enabled = true;
        LiteGraph.middle_click_slot_add_default_node = true;
        this.suggestionsNumber = app.ui.settings.addSetting({
            id: "Comfy.NodeSuggestions.number",
@@ -43,6 +44,14 @@ app.registerExtension({
                }
                if (this.slot_types_default_out[type].includes(nodeId)) continue;
                this.slot_types_default_out[type].push(nodeId);
+
+               // Input types have to be stored as lower case
+               // Store each node that can handle this input type
+               const lowerType = type.toLocaleLowerCase();
+               if (!(lowerType in LiteGraph.registered_slot_in_types)) {
+                   LiteGraph.registered_slot_in_types[lowerType] = { nodes: [] };
+               }
+               LiteGraph.registered_slot_in_types[lowerType].nodes.push(nodeType.comfyClass);
            }

            var outputs = nodeData["output"];
@@ -53,6 +62,16 @@ app.registerExtension({
                }

                this.slot_types_default_in[type].push(nodeId);
+
+               // Store each node that can handle this output type
+               if (!(type in LiteGraph.registered_slot_out_types)) {
+                   LiteGraph.registered_slot_out_types[type] = { nodes: [] };
+               }
+               LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass);
+
+               if(!LiteGraph.slot_types_out.includes(type)) {
+                   LiteGraph.slot_types_out.push(type);
+               }
            }
            var maxNum = this.suggestionsNumber.value;
            this.setDefaults(maxNum);
@@ -3628,6 +3628,18 @@
        return size;
    };

+   LGraphNode.prototype.inResizeCorner = function(canvasX, canvasY) {
+       var rows = this.outputs ? this.outputs.length : 1;
+       var outputs_offset = (this.constructor.slot_start_y || 0) + rows * LiteGraph.NODE_SLOT_HEIGHT;
+       return isInsideRectangle(canvasX,
+           canvasY,
+           this.pos[0] + this.size[0] - 15,
+           this.pos[1] + Math.max(this.size[1] - 15, outputs_offset),
+           20,
+           20
+       );
+   }
+
    /**
     * returns all the info available about a property of this node.
    *
@@ -5877,14 +5889,7 @@ LGraphNode.prototype.executeAction = function(action)
            if ( !this.connecting_node && !node.flags.collapsed && !this.live_mode ) {
                //Search for corner for resize
                if ( !skip_action &&
-                   node.resizable !== false &&
-                   isInsideRectangle( e.canvasX,
-                       e.canvasY,
-                       node.pos[0] + node.size[0] - 5,
-                       node.pos[1] + node.size[1] - 5,
-                       10,
-                       10
-                   )
+                   node.resizable !== false && node.inResizeCorner(e.canvasX, e.canvasY)
                ) {
                    this.graph.beforeChange();
                    this.resizing_node = node;
@@ -6424,16 +6429,7 @@ LGraphNode.prototype.executeAction = function(action)

        //Search for corner
        if (this.canvas) {
-           if (
-               isInsideRectangle(
-                   e.canvasX,
-                   e.canvasY,
-                   node.pos[0] + node.size[0] - 5,
-                   node.pos[1] + node.size[1] - 5,
-                   5,
-                   5
-               )
-           ) {
+           if (node.inResizeCorner(e.canvasX, e.canvasY)) {
                this.canvas.style.cursor = "se-resize";
            } else {
                this.canvas.style.cursor = "crosshair";
@@ -120,7 +120,7 @@ body {
 .comfy-menu > button,
 .comfy-menu-btns button,
 .comfy-menu .comfy-list button,
-.comfy-modal button{
+.comfy-modal button {
    color: var(--input-text);
    background-color: var(--comfy-input-bg);
    border-radius: 8px;
@@ -129,6 +129,15 @@ body {
    margin-top: 2px;
 }

+.comfy-menu > button:hover,
+.comfy-menu-btns button:hover,
+.comfy-menu .comfy-list button:hover,
+.comfy-modal button:hover,
+.comfy-settings-btn:hover {
+   filter: brightness(1.2);
+   cursor: pointer;
+}
+
 .comfy-menu span.drag-handle {
    width: 10px;
    height: 20px;
@@ -284,4 +293,7 @@ button.comfy-queue-btn {
    top: 0;
    right: 2px;
 }
+
+.litecontextmenu {
+   z-index: 9999 !important;
+}