Mirror of https://github.com/comfyanonymous/ComfyUI.git

Merge branch 'comfyanonymous:master' into feature/toggle_migration
commit 75e5d3d604
@@ -38,6 +38,7 @@ parser.add_argument("--port", type=int, default=8188, help="Set the listen port.
 parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
 parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
 parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
+parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
 parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
 parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
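Note: together with the folder_paths and main.py hunks further down, this adds a --temp-directory option next to the existing --output-directory one, so the temp folder no longer has to live inside the ComfyUI directory. For example, launching with --temp-directory /some/scratch/dir (path illustrative) makes ComfyUI use /some/scratch/dir/temp.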
@@ -189,12 +189,13 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
                 continue

             to_run += [(p, COND)]
-        for x in uncond:
-            p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
-            if p is None:
-                continue
-
-            to_run += [(p, UNCOND)]
+        if uncond is not None:
+            for x in uncond:
+                p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
+                if p is None:
+                    continue
+
+                to_run += [(p, UNCOND)]

         while len(to_run) > 0:
             first = to_run[0]
@@ -282,6 +283,9 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con


     max_total_area = model_management.maximum_batch_area()
+    if math.isclose(cond_scale, 1.0):
+        uncond = None
+
     cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
     if "sampler_cfg_function" in model_options:
         args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep}
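Note on the two sampling_function hunks above: the unconditional batch is now optional. The outer code sets uncond to None when cond_scale is effectively 1.0, and the batching loop only iterates uncond when it is not None. A minimal sketch of the reasoning, not the ComfyUI code (names are illustrative):

    import math

    def cfg_combine(cond_pred, uncond_pred, cond_scale):
        # Standard classifier-free guidance mix: uncond + (cond - uncond) * scale.
        # With cond_scale == 1.0 this collapses to cond_pred, which is why the
        # unconditional pass can be skipped when math.isclose(cond_scale, 1.0).
        if uncond_pred is None or math.isclose(cond_scale, 1.0):
            return cond_pred
        return uncond_pred + (cond_pred - uncond_pred) * cond_scale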
comfy/sd.py (29 lines changed)
@@ -72,6 +72,7 @@ def load_lora(lora, to_load):

         regular_lora = "{}.lora_up.weight".format(x)
         diffusers_lora = "{}_lora.up.weight".format(x)
+        transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
         A_name = None

         if regular_lora in lora.keys():
@@ -82,6 +83,10 @@ def load_lora(lora, to_load):
             A_name = diffusers_lora
             B_name = "{}_lora.down.weight".format(x)
             mid_name = None
+        elif transformers_lora in lora.keys():
+            A_name = transformers_lora
+            B_name ="{}.lora_linear_layer.down.weight".format(x)
+            mid_name = None

         if A_name is not None:
             mid = None
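Note: these two load_lora hunks add a third per-key naming scheme next to the regular and diffusers ones, the lora_linear_layer names used by transformers-style text encoder LoRA files. For a given prefix the loader now probes all three A-weight ("up") patterns; the prefix below is illustrative:

    x = "lora_te1_text_model_encoder_layers_0_self_attn_q_proj"
    regular_lora = "{}.lora_up.weight".format(x)
    diffusers_lora = "{}_lora.up.weight".format(x)
    transformers_lora = "{}.lora_linear_layer.up.weight".format(x)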
@@ -181,20 +186,29 @@ def model_lora_keys_clip(model, key_map={}):
                 key_map[lora_key] = k
                 lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
                 key_map[lora_key] = k
+                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
+                key_map[lora_key] = k

             k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
             if k in sdk:
                 lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                 key_map[lora_key] = k
                 clip_l_present = True
+                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
+                key_map[lora_key] = k

             k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
             if k in sdk:
                 if clip_l_present:
                     lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
+                    key_map[lora_key] = k
+                    lora_key = "text_encoder_2.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
+                    key_map[lora_key] = k
                 else:
                     lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                     key_map[lora_key] = k
+                    lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
+                    key_map[lora_key] = k

     return key_map

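Note: model_lora_keys_clip now also emits diffusers-style keys (text_encoder. and text_encoder_2. prefixes) for every text encoder weight it maps, alongside the existing lora_te* names, and the SDXL clip_g branch picks up the key_map assignment for its lora_te2 key as part of the same change.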
@@ -209,13 +223,16 @@ def model_lora_keys_unet(model, key_map={}):
     diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
     for k in diffusers_keys:
         if k.endswith(".weight"):
+            unet_key = "diffusion_model.{}".format(diffusers_keys[k])
             key_lora = k[:-len(".weight")].replace(".", "_")
-            key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k])
+            key_map["lora_unet_{}".format(key_lora)] = unet_key

-            diffusers_lora_key = "unet.{}".format(k[:-len(".weight")].replace(".to_", ".processor.to_"))
-            if diffusers_lora_key.endswith(".to_out.0"):
-                diffusers_lora_key = diffusers_lora_key[:-2]
-            key_map[diffusers_lora_key] = "diffusion_model.{}".format(diffusers_keys[k])
+            diffusers_lora_prefix = ["", "unet."]
+            for p in diffusers_lora_prefix:
+                diffusers_lora_key = "{}{}".format(p, k[:-len(".weight")].replace(".to_", ".processor.to_"))
+                if diffusers_lora_key.endswith(".to_out.0"):
+                    diffusers_lora_key = diffusers_lora_key[:-2]
+                key_map[diffusers_lora_key] = unet_key
     return key_map

 def set_attr(obj, attr, value):
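Note: model_lora_keys_unet now builds the internal target name once (unet_key) and registers the diffusers attention-processor style lookup both bare and with a unet. prefix. Illustration as comments; the diffusers key name below is assumed, not taken from the hunk:

    # For k = "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight"
    # the new loop adds both of these keys, pointing at the same unet_key:
    #   "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q"
    #   "unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q"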
@@ -2,6 +2,35 @@ import torch

 from nodes import MAX_RESOLUTION

+def composite(destination, source, x, y, mask = None, multiplier = 8):
+    x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
+    y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))
+
+    left, top = (x // multiplier, y // multiplier)
+    right, bottom = (left + source.shape[3], top + source.shape[2],)
+
+
+    if mask is None:
+        mask = torch.ones_like(source)
+    else:
+        mask = mask.clone()
+        mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
+        mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
+
+    # calculate the bounds of the source that will be overlapping the destination
+    # this prevents the source trying to overwrite latent pixels that are out of bounds
+    # of the destination
+    visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
+
+    mask = mask[:, :, :visible_height, :visible_width]
+    inverse_mask = torch.ones_like(mask) - mask
+
+    source_portion = mask * source[:, :, :visible_height, :visible_width]
+    destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
+
+    destination[:, :, top:bottom, left:right] = source_portion + destination_portion
+    return destination
+
 class LatentCompositeMasked:
     @classmethod
     def INPUT_TYPES(s):
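Note: the new module-level composite() is shared by the latent and image nodes below. It clamps x and y to the destination extent, converts them to destination cells via multiplier (8 for latents, where one latent cell covers 8 image pixels; 1 for images), resizes and broadcasts the mask, restricts everything to the visible overlap, and blends source over destination in place. A minimal usage sketch, assuming the composite() above is in scope and with illustrative shapes:

    import torch

    destination = torch.zeros(1, 4, 64, 64)  # e.g. the latent of a 512x512 image
    source = torch.ones(1, 4, 32, 32)        # e.g. the latent of a 256x256 image
    out = composite(destination, source, x=128, y=64, mask=None, multiplier=8)
    # left = 128 // 8 = 16 and top = 64 // 8 = 8, so out[:, :, 8:40, 16:48] now holds
    # the source; the destination tensor itself is modified and returned.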
@@ -25,36 +54,31 @@ class LatentCompositeMasked:
         output = destination.copy()
         destination = destination["samples"].clone()
         source = source["samples"]
+        output["samples"] = composite(destination, source, x, y, mask, 8)
+        return (output,)

-        x = max(-source.shape[3] * 8, min(x, destination.shape[3] * 8))
-        y = max(-source.shape[2] * 8, min(y, destination.shape[2] * 8))
-
-        left, top = (x // 8, y // 8)
-        right, bottom = (left + source.shape[3], top + source.shape[2],)
-
-
-        if mask is None:
-            mask = torch.ones_like(source)
-        else:
-            mask = mask.clone()
-            mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
-            mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
-
-        # calculate the bounds of the source that will be overlapping the destination
-        # this prevents the source trying to overwrite latent pixels that are out of bounds
-        # of the destination
-        visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
-
-        mask = mask[:, :, :visible_height, :visible_width]
-        inverse_mask = torch.ones_like(mask) - mask
-
-        source_portion = mask * source[:, :, :visible_height, :visible_width]
-        destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
-
-        destination[:, :, top:bottom, left:right] = source_portion + destination_portion
-
-        output["samples"] = destination
-
+class ImageCompositeMasked:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "destination": ("IMAGE",),
+                "source": ("IMAGE",),
+                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+            },
+            "optional": {
+                "mask": ("MASK",),
+            }
+        }
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "composite"
+
+    CATEGORY = "image"
+
+    def composite(self, destination, source, x, y, mask = None):
+        destination = destination.clone().movedim(-1, 1)
+        output = composite(destination, source.movedim(-1, 1), x, y, mask, 1).movedim(1, -1)
         return (output,)

 class MaskToImage:
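Note: LatentCompositeMasked keeps its interface but now just calls composite() with multiplier 8, while the new ImageCompositeMasked node runs the same blend directly on IMAGE tensors. ComfyUI images are batched channels-last, so the node moves channels to dim 1 for composite() and back afterwards; a small sketch of that round trip with illustrative shapes:

    import torch

    img = torch.rand(1, 512, 512, 3)  # IMAGE tensors are [batch, height, width, channels]
    nchw = img.movedim(-1, 1)         # (1, 3, 512, 512), the layout composite() indexes
    back = nchw.movedim(1, -1)        # (1, 512, 512, 3) again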
@@ -253,6 +277,7 @@ class FeatherMask:

 NODE_CLASS_MAPPINGS = {
     "LatentCompositeMasked": LatentCompositeMasked,
+    "ImageCompositeMasked": ImageCompositeMasked,
     "MaskToImage": MaskToImage,
     "ImageToMask": ImageToMask,
     "SolidMask": SolidMask,
@@ -43,6 +43,10 @@ def set_output_directory(output_dir):
     global output_directory
     output_directory = output_dir

+def set_temp_directory(temp_dir):
+    global temp_directory
+    temp_directory = temp_dir
+
 def get_output_directory():
     global output_directory
     return output_directory
@@ -111,6 +115,8 @@ def add_model_folder_path(folder_name, full_folder_path):
     global folder_names_and_paths
     if folder_name in folder_names_and_paths:
         folder_names_and_paths[folder_name][0].append(full_folder_path)
+    else:
+        folder_names_and_paths[folder_name] = ([full_folder_path], set())

 def get_folder_paths(folder_name):
     return folder_names_and_paths[folder_name][0][:]
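Note: folder_paths gains a temp_directory setter mirroring set_output_directory, and add_model_folder_path can now register a folder name that was not predeclared instead of silently ignoring it. An illustrative call, with a hypothetical name and path:

    import folder_paths

    folder_paths.add_model_folder_path("custom_upscalers", "/data/models/custom_upscalers")
    folder_paths.get_folder_paths("custom_upscalers")  # -> ["/data/models/custom_upscalers"]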
main.py (6 lines changed)
@@ -100,7 +100,7 @@ def hijack_progress(server):


 def cleanup_temp():
-    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+    temp_dir = folder_paths.get_temp_directory()
     if os.path.exists(temp_dir):
         shutil.rmtree(temp_dir, ignore_errors=True)

@@ -127,6 +127,10 @@ def load_extra_path_config(yaml_path):


 if __name__ == "__main__":
+    if args.temp_directory:
+        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
+        print(f"Setting temp directory to: {temp_dir}")
+        folder_paths.set_temp_directory(temp_dir)
     cleanup_temp()

     loop = asyncio.new_event_loop()
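Note: cleanup_temp() now asks folder_paths for the temp directory instead of hard-coding a temp folder next to main.py, and a user-supplied --temp-directory is resolved to an absolute path, given a trailing temp subfolder, printed, and registered before the startup cleanup runs, so the wipe targets the configured location.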
@@ -9766,6 +9766,7 @@ LGraphNode.prototype.executeAction = function(action)

             switch (w.type) {
                 case "button":
+                    ctx.fillStyle = background_color;
                     if (w.clicked) {
                         ctx.fillStyle = "#AAA";
                         w.clicked = false;
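Note: in the litegraph widget drawing code, the button case now resets ctx.fillStyle to background_color before checking w.clicked, so the "#AAA" highlight is only used for the frame in which the click is registered and the button otherwise draws with its normal background.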