Visualize region for conditioning area & support extra model paths

commit 75ea299e74
parent 7e5645e956
comfy/sd.py (12 changed lines)

@@ -244,7 +244,7 @@ def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
 
 
 class CLIP:
-    def __init__(self, config={}, embedding_directory=None, no_init=False):
+    def __init__(self, config={}, embedding_directories=None, no_init=False):
         if no_init:
             return
         self.target_clip = config["target"]
@@ -261,7 +261,7 @@ class CLIP:
             tokenizer = sd1_clip.SD1Tokenizer
 
         self.cond_stage_model = clip(**(params))
-        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
+        self.tokenizer = tokenizer(embedding_directories=embedding_directories)
         self.patcher = ModelPatcher(self.cond_stage_model)
 
     def clone(self):
@@ -323,18 +323,18 @@ class VAE:
         samples = samples.cpu()
         return samples
 
-def load_clip(ckpt_path, embedding_directory=None):
+def load_clip(ckpt_path, embedding_directories=None):
     clip_data = load_torch_file(ckpt_path)
     config = {}
     if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
         config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
     else:
         config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
-    clip = CLIP(config=config, embedding_directory=embedding_directory)
+    clip = CLIP(config=config, embedding_directories=embedding_directories)
     clip.load_from_state_dict(clip_data)
     return clip
 
-def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
+def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directories=None):
    config = OmegaConf.load(config_path)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
@@ -355,7 +355,7 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e
         load_state_dict_to = [w]
 
     if output_clip:
-        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
+        clip = CLIP(config=clip_config, embedding_directories=embedding_directories)
         w.cond_stage_model = clip.cond_stage_model
         load_state_dict_to = [w]
 
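Every loader in comfy/sd.py now takes a list of embedding directories instead of a single directory. A minimal sketch of the new calling convention, assuming placeholder file names (the real paths come from the loader nodes further down, not from this hunk):

    import comfy.sd

    # Hypothetical directory list for illustration; CheckpointLoader builds it
    # from shared.get_model_paths("embeddings") instead of hard-coding it.
    embedding_dirs = ["models/embeddings", "/extra/embeddings"]

    model, clip, vae = comfy.sd.load_checkpoint(
        "models/configs/v1-inference.yaml",    # placeholder config name
        "models/checkpoints/example.ckpt",     # placeholder checkpoint name
        output_vae=True,
        output_clip=True,
        embedding_directories=embedding_dirs,  # renamed from embedding_directory
    )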
comfy/sd1_clip.py

@@ -2,6 +2,7 @@ import os
 
 from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig
 import torch
+import shared
 
 class ClipTokenWeightEncoder:
     def encode_token_weights(self, token_weight_pairs):
@@ -167,8 +168,8 @@ def unescape_important(text):
     text = text.replace("\0\2", "(")
     return text
 
-def load_embed(embedding_name, embedding_directory):
-    embed_path = os.path.join(embedding_directory, embedding_name)
+def load_embed(embedding_name, embedding_directories):
+    embed_path = shared.find_model_file("embeddings", embedding_name)
     if not os.path.isfile(embed_path):
         extensions = ['.safetensors', '.pt', '.bin']
         valid_file = None
@@ -195,7 +196,7 @@ def load_embed(embedding_name, embedding_directory):
     return next(iter(values))
 
 class SD1Tokenizer:
-    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None):
+    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directories=None):
         if tokenizer_path is None:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
         self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
@@ -208,7 +209,7 @@ class SD1Tokenizer:
         self.pad_with_end = pad_with_end
         vocab = self.tokenizer.get_vocab()
         self.inv_vocab = {v: k for k, v in vocab.items()}
-        self.embedding_directory = embedding_directory
+        self.embedding_directories = embedding_directories
         self.max_word_length = 8
 
     def tokenize_with_weights(self, text):
@@ -221,9 +222,9 @@ class SD1Tokenizer:
             for word in to_tokenize:
                 temp_tokens = []
                 embedding_identifier = "embedding:"
-                if word.startswith(embedding_identifier) and self.embedding_directory is not None:
+                if word.startswith(embedding_identifier) and self.embedding_directories is not None:
                     embedding_name = word[len(embedding_identifier):].strip('\n')
-                    embed = load_embed(embedding_name, self.embedding_directory)
+                    embed = load_embed(embedding_name, self.embedding_directories)
                     if embed is not None:
                         if len(embed.shape) == 1:
                             temp_tokens += [(embed, t[1])]
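load_embed now delegates to shared.find_model_file, so an `embedding:name` token in a prompt is resolved against every configured embeddings directory rather than a single folder. A rough sketch of that lookup in isolation, with a hypothetical file name:

    import shared

    # Hypothetical: "models/embeddings/foo.pt" or any directory listed under
    # paths.embeddings in config.yml would satisfy this lookup.
    try:
        path = shared.find_model_file("embeddings", "foo.pt")
        print("would load embedding from", path)
    except FileNotFoundError:
        # find_model_file raises when the name is missing from every configured directory
        print("embedding not found in any configured directory")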
comfy/sd2_clip.py

@@ -30,5 +30,5 @@ class SD2ClipModel(sd1_clip.SD1ClipModel):
         self.layer_idx = layer_idx
 
 class SD2Tokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, tokenizer_path=None, embedding_directory=None):
-        super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory)
+    def __init__(self, tokenizer_path=None, embedding_directories=None):
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_directories=embedding_directories)
config.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
+config:
+  alignToGrid: True
+  gridSize: 20
+  paths:
+    configs: []
+    checkpoints: []
+    vae: []
+    clip: []
+    embeddings: []
+    loras: []
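Each list under `paths` can hold additional directories to search besides the bundled `models/<kind>` folder. A small sketch of how shared.py consumes such a config, with a made-up extra checkpoint directory (the path is an assumption for illustration only):

    import yaml

    # Hypothetical user edit: one extra directory added under paths.checkpoints.
    example = """
    config:
      alignToGrid: True
      gridSize: 20
      paths:
        configs: []
        checkpoints: ["/mnt/big_disk/stable-diffusion/checkpoints"]
        vae: []
        clip: []
        embeddings: []
        loras: []
    """

    config = yaml.safe_load(example)["config"]
    # shared.get_model_paths("checkpoints") returns [<repo>/models/checkpoints] + this list,
    # so both locations get scanned when building the CheckpointLoader dropdown.
    print(config["paths"]["checkpoints"])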
main.py (7 changed lines)

@@ -27,6 +27,7 @@ if '--dont-upcast-attention' in sys.argv:
 import torch
 
 import nodes
+import shared
 
 def get_input_data(inputs, class_def, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
@@ -230,6 +231,9 @@ def validate_inputs(prompt, item):
             if type_input == "STRING":
                 val = str(val)
                 inputs[x] = val
+            if type_input == "REGION":
+                val = {"x": val["x"], "y": val["y"], "width": val["width"], "height": val["height"]}
+                inputs[x] = val
 
             if len(info) > 1:
                 if "min" in info[1] and val < info[1]["min"]:
@@ -378,6 +382,9 @@ class PromptServer(BaseHTTPRequestHandler):
                 info['category'] = obj_class.CATEGORY
                 out[x] = info
             self.wfile.write(json.dumps(out).encode('utf-8'))
+        elif self.path == "/config":
+            self._set_headers(ct='application/json')
+            self.wfile.write(json.dumps(shared.config).encode('utf-8'))
         elif self.path[1:] in os.listdir(self.server.server_dir):
             if self.path[1:].endswith('.css'):
                 self._set_headers(ct='text/css')
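The new /config route simply serves the parsed config.yml back to the browser as JSON, and the REGION branch in validate_inputs keeps only the four expected keys, so anything extra sent by the front end is dropped before the value reaches a node. A quick illustration of that filtering step on its own, with a made-up input:

    # Mirrors the new REGION handling: only x/y/width/height survive validation.
    val = {"x": 0, "y": 64, "width": 512, "height": 512, "unexpected": "ignored"}
    val = {"x": val["x"], "y": val["y"], "width": val["width"], "height": val["height"]}
    print(val)  # {'x': 0, 'y': 64, 'width': 512, 'height': 512}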
nodes.py (68 changed lines)

@@ -16,26 +16,7 @@ sys.path.insert(0, os.path.join(sys.path[0], "comfy"))
 import comfy.samplers
 import comfy.sd
 import model_management
+import shared
-supported_ckpt_extensions = ['.ckpt']
-supported_pt_extensions = ['.ckpt', '.pt', '.bin']
-try:
-    import safetensors.torch
-    supported_ckpt_extensions += ['.safetensors']
-    supported_pt_extensions += ['.safetensors']
-except:
-    print("Could not import safetensors, safetensors support disabled.")
-
-def recursive_search(directory):
-    result = []
-    for root, subdir, file in os.walk(directory, followlinks=True):
-        for filepath in file:
-            #we os.path,join directory with a blank string to generate a path separator at the end.
-            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
-    return result
-
-def filter_files_extensions(files, extensions):
-    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
 
 class CLIPTextEncode:
     @classmethod
@@ -65,10 +46,8 @@ class ConditioningSetArea:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {"conditioning": ("CONDITIONING", ),
-                             "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
-                             "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
-                             "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
-                             "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
+                             "latent": ("LATENT", ),
+                             "region": ("REGION", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
     RETURN_TYPES = ("CONDITIONING",)
@@ -76,7 +55,11 @@ class ConditioningSetArea:
 
     CATEGORY = "conditioning"
 
-    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
+    def append(self, conditioning, latent, region, strength, min_sigma=0.0, max_sigma=99.0):
+        width = region["width"]
+        height = region["height"]
+        x = region["x"]
+        y = region["y"]
         c = copy.deepcopy(conditioning)
         for t in c:
             t[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
@@ -120,33 +103,28 @@ class VAEEncode:
         return (vae.encode(pixels), )
 
 class CheckpointLoader:
-    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
-    config_dir = os.path.join(models_dir, "configs")
-    ckpt_dir = os.path.join(models_dir, "checkpoints")
-    embedding_directory = os.path.join(models_dir, "embeddings")
+    embedding_directories = shared.get_model_paths("embeddings")
 
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "config_name": ("COMBO", { "choices": filter_files_extensions(recursive_search(s.config_dir), '.yaml') }),
-                              "ckpt_name": ("COMBO", { "choices": filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions) })}}
+        return {"required": { "config_name": ("COMBO", { "choices": shared.get_model_files("configs") }),
+                              "ckpt_name": ("COMBO", { "choices": shared.get_model_files("checkpoints") })}}
     RETURN_TYPES = ("MODEL", "CLIP", "VAE")
     FUNCTION = "load_checkpoint"
 
     CATEGORY = "loaders"
 
     def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
-        config_path = os.path.join(self.config_dir, config_name)
-        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
-        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)
+        config_path = shared.find_model_file("configs", config_name)
+        ckpt_path = shared.find_model_file("checkpoints", ckpt_name)
+        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directories=self.embedding_directories)
 
 class LoraLoader:
-    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
-    lora_dir = os.path.join(models_dir, "loras")
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "model": ("MODEL",),
                               "clip": ("CLIP", ),
-                              "lora_name": ("COMBO", { "choices": filter_files_extensions(recursive_search(s.lora_dir), supported_pt_extensions) }),
+                              "lora_name": ("COMBO", { "choices": shared.get_model_files("loras") }),
                               "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                               "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                               }}
@@ -156,16 +134,14 @@ class LoraLoader:
     CATEGORY = "loaders"
 
     def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
-        lora_path = os.path.join(self.lora_dir, lora_name)
+        lora_path = shared.find_model_file("loras", lora_name)
         model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
         return (model_lora, clip_lora)
 
 class VAELoader:
-    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
-    vae_dir = os.path.join(models_dir, "vae")
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "vae_name": ("COMBO", { "choices": filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions) })}}
+        return {"required": { "vae_name": ("COMBO", { "choices": shared.get_model_files("vae") })}}
     RETURN_TYPES = ("VAE",)
     FUNCTION = "load_vae"
 
@@ -173,16 +149,14 @@ class VAELoader:
 
     #TODO: scale factor?
     def load_vae(self, vae_name):
-        vae_path = os.path.join(self.vae_dir, vae_name)
+        vae_path = shared.find_model_file("vae", vae_name)
         vae = comfy.sd.VAE(ckpt_path=vae_path)
         return (vae,)
 
 class CLIPLoader:
-    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
-    clip_dir = os.path.join(models_dir, "clip")
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "clip_name": ("COMBO", { "choices": filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions) }),
+        return {"required": { "clip_name": ("COMBO", { "choices": shared.get_model_files("clip") }),
                               "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                               }}
     RETURN_TYPES = ("CLIP",)
@@ -191,8 +165,8 @@ class CLIPLoader:
     CATEGORY = "loaders"
 
     def load_clip(self, clip_name, stop_at_clip_layer):
-        clip_path = os.path.join(self.clip_dir, clip_name)
-        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
+        clip_path = shared.find_model_file("clip", clip_name)
+        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directories=CheckpointLoader.embedding_directories)
         clip.clip_layer(stop_at_clip_layer)
         return (clip,)
 
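ConditioningSetArea now takes a LATENT input plus a single REGION value instead of four integer widgets, but it still writes the same `(h // 8, w // 8, y // 8, x // 8)` tuple into the conditioning. A rough sketch of the equivalent arithmetic, with a made-up region:

    # Hypothetical region dict as produced by the REGION widget and validated in main.py.
    region = {"x": 128, "y": 0, "width": 512, "height": 256}
    strength = 1.0

    # Same computation as ConditioningSetArea.append: latent-space units are 1/8 of pixels.
    area = (region["height"] // 8, region["width"] // 8, region["y"] // 8, region["x"] // 8)
    print(area)                                # (32, 64, 0, 16)
    print({"strength": strength, "area": area})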
requirements.txt

@@ -7,6 +7,6 @@ open-clip-torch
 transformers
 safetensors
 pytorch_lightning
+pyyaml
 accelerate
 
shared.py (new file, 62 lines)

@@ -0,0 +1,62 @@
+import os.path
+import yaml
+
+supported_ckpt_extensions = ['.ckpt']
+supported_pt_extensions = ['.ckpt', '.pt', '.bin']
+try:
+    import safetensors.torch
+    supported_ckpt_extensions += ['.safetensors']
+    supported_pt_extensions += ['.safetensors']
+except:
+    print("Could not import safetensors, safetensors support disabled.")
+
+
+model_kinds = {
+    "configs": [".yml"],
+    "checkpoints": supported_ckpt_extensions,
+    "vae": supported_pt_extensions,
+    "clip": supported_pt_extensions,
+    "embeddings": supported_pt_extensions,
+    "loras": supported_pt_extensions,
+}
+
+
+def recursive_search(directory):
+    result = []
+    for root, subdir, file in os.walk(directory, followlinks=True):
+        for filepath in file:
+            #we os.path,join directory with a blank string to generate a path separator at the end.
+            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
+    return result
+
+def filter_files_extensions(files, extensions):
+    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
+
+def get_files(directories, extensions):
+    files = []
+    for dir in directories:
+        files.extend(recursive_search(dir))
+    return filter_files_extensions(files, extensions)
+
+def get_model_paths(kind):
+    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
+    model_dir = os.path.join(models_dir, kind)
+    return [model_dir] + config["paths"][kind]
+
+def get_model_files(kind):
+    exts = model_kinds[kind]
+    paths = get_model_paths(kind)
+    return get_files(paths, exts)
+
+def find_model_file(kind, basename):
+    # TODO: find by model hash instead of filename
+    for path in get_model_paths(kind):
+        file = os.path.join(path, basename)
+        if os.path.isfile(file):
+            return file
+    raise FileNotFoundError("Model not found: " + basename)
+
+
+config = {}
+with open("config.yml", "r") as f:
+    config = yaml.safe_load(f)["config"]
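shared.py centralizes the model-path logic the loader nodes previously duplicated: get_model_paths merges the bundled models/<kind> folder with the extra directories from config.yml, get_model_files builds the dropdown choices, and find_model_file resolves a selected name back to a full path. A small usage sketch (the printed output naturally depends on what is installed locally, and the checkpoint name in the comment is hypothetical):

    import shared

    # Directories searched for checkpoints: models/checkpoints plus config.yml extras.
    print(shared.get_model_paths("checkpoints"))

    # Filenames offered in the CheckpointLoader dropdown (.ckpt/.safetensors only).
    print(shared.get_model_files("checkpoints"))

    # Resolve a dropdown selection back to a full path; raises FileNotFoundError
    # when the name is not present in any configured directory.
    # print(shared.find_model_file("checkpoints", "example.safetensors"))  # hypothetical name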
webshit/index.html

@@ -54,6 +54,7 @@
 
 <canvas id='mycanvas' width='1000' height='1000' style='width: 100%; height: 100%;'></canvas>
 <script type="text/javascript" src="widgets.js"></script>
+<script type="text/javascript" src="nodes.js"></script>
 <script>
 var graph = new LGraph();
 
@@ -109,6 +110,22 @@ function afterLoadGraph()
 }
 
 
+function onConfig(json) {
+    if (json['alignToGrid']) {
+        graph.config.align_to_grid = json['alignToGrid'];
+        canvas.align_to_grid = json['alignToGrid'];
+    }
+    if (json['gridSize']) {
+        LiteGraph.CANVAS_GRID_SIZE = json['gridSize'];
+    }
+}
+
+
+fetch("config", {cache: "no-store"})
+    .then(response => response.json())
+    .then(json => onConfig(json));
+
+
 function onObjectInfo(json) {
     for (let key in json) {
         function MyNode()
@@ -128,10 +145,7 @@ function onObjectInfo(json) {
 
             let type = inp[x][0];
             let widgetClass = COMFY_WIDGETS[type];
-            if (Array.isArray(type)) {
-                w = this.addWidget("combo", x, type[0], function(v){}, { values: type } );
-                this._widgets += [w]
-            } else if (widgetClass) {
+            if (widgetClass) {
                new widgetClass(opts).addWidget(this, x);
            } else {
                this.addInput(x, type);
@@ -153,6 +167,13 @@ function onObjectInfo(json) {
         MyNode.class_type_comfy = json[key]['name'];
         MyNode.__json_data = json[key]
 
+        let props = COMFY_NODES[json[key]['name']];
+        if (props) {
+            for (const prop in props) {
+                MyNode.prototype[prop] = props[prop];
+            }
+        }
+
         LiteGraph.registerNodeType(key, MyNode);
         MyNode.category = json[key]['category'];
     };
litegraph (bundled LiteGraph library)

@@ -6744,6 +6744,7 @@ LGraphNode.prototype.executeAction = function(action)
         this.node_dragged.pos[0] = Math.round(this.node_dragged.pos[0]);
         this.node_dragged.pos[1] = Math.round(this.node_dragged.pos[1]);
         if (this.graph.config.align_to_grid || this.align_to_grid ) {
+            console.warn("AAA")
             this.node_dragged.alignToGrid();
         }
         if( this.onNodeMoved )
@@ -9916,6 +9917,9 @@ LGraphNode.prototype.executeAction = function(action)
                     var range = w.options.max - w.options.min;
                     var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1);
                     w.value = w.options.min + (w.options.max - w.options.min) * nvalue;
+                    if (w.options.step) {
+                        w.value = Math.round( w.value / w.options.step ) * w.options.step;
+                    }
                     if (w.callback) {
                         setTimeout(function() {
                             inner_value_change(w, w.value);
@@ -9927,7 +9931,12 @@ LGraphNode.prototype.executeAction = function(action)
                 case "combo":
                     var old_value = w.value;
                     if (event.type == LiteGraph.pointerevents_method+"move" && w.type == "number") {
-                        w.value += event.deltaX * 0.1 * (w.options.step || 1);
+                        if (w.options.step) {
+                            w.value += Math.ceil( Math.abs( event.deltaX * 0.01 / w.options.step ) ) * w.options.step * Math.sign(event.deltaX);
+                        }
+                        else {
+                            w.value += event.deltaX * 0.1;
+                        }
                         if ( w.options.min != null && w.value < w.options.min ) {
                             w.value = w.options.min;
                         }
@@ -9946,7 +9955,13 @@ LGraphNode.prototype.executeAction = function(action)
 
                         var delta = x < 40 ? -1 : x > widget_width - 40 ? 1 : 0;
                         if (w.type == "number") {
-                            w.value += delta * 0.1 * (w.options.step || 1);
+                            console.log(w);
+                            if (w.options.step) {
+                                w.value += w.options.step * Math.sign(delta);
+                            }
+                            else {
+                                w.value += delta * 0.1;
+                            }
                             if ( w.options.min != null && w.value < w.options.min ) {
                                 w.value = w.options.min;
                             }
webshit/nodes.js (new file, 10 lines)

@@ -0,0 +1,10 @@
+var COMFY_NODES = [];
+
+COMFY_NODES["EmptyLatentImage"] = {
+    onExecute: function() {
+        for (var idx of this.outputs[0].links) {
+            let link = this.graph.links[idx];
+            link.data = this.widgets.map(function(w) { return w.value; });
+        }
+    }
+};
webshit/widgets.js

@@ -21,7 +21,7 @@ class IntWidget extends BaseWidget {
             let s = this.options.step / 10;
             this.value = Math.round( v / s ) * s;
         };
-        let w = node.addWidget("number", x, this.default_val, onSet, { min: this.min_val, max: this.max_val, step: 10.0 * this.step_val});
+        let w = node.addWidget("number", x, this.default_val, onSet, { min: this.min_val, max: this.max_val, step: this.step_val});
         node._widgets += [w]
         if (x == "seed" || x == "noise_seed") {
             let w1 = node.addWidget("toggle", "Random seed after every gen", true, function(v){}, { on: "enabled", off: "disabled" } );
@@ -46,7 +46,7 @@ class FloatWidget extends BaseWidget {
         // if (min_val == 0.0 && max_val == 1.0) {
         //     w = this.slider = this.addWidget("slider", x, default_val, function(v){}, { min: min_val, max: max_val} );
         // } else {
-        let w = node.addWidget("number", x, this.default_val, function(v){}, { min: this.min_val, max: this.max_val, step: 10.0 * this.step_val} );
+        let w = node.addWidget("number", x, this.default_val, function(v){}, { min: this.min_val, max: this.max_val, step: this.step_val} );
         // }
         node._widgets += [w];
     }
@@ -160,3 +160,97 @@ class ComboWidget extends BaseWidget {
     }
 }
 COMFY_WIDGETS["COMBO"] = ComboWidget;
+
+
+class RegionWidget extends BaseWidget {
+    constructor(opts) {
+        super();
+    }
+
+    addWidget(node, x) {
+        var w = {
+            type: "region",
+            name: x,
+            region: { x: 0, y: 0, width: 2048, height: 2048 },
+            get value() {
+                return this.region;
+            },
+            set value(x) {
+                this.region = x;
+            },
+            callback: function(v){console.log("CB!", v);},
+            options: {},
+            draw: function(ctx, node, widget_width, y, H){
+                ctx.save();
+
+                var size = this.size[1] * 0.5;
+                var margin = 0.25;
+                var h = this.size[1] * 0.8;
+                // ctx.font = this.properties.font || (size * 0.8).toFixed(0) + "px Arial";
+                var w = ctx.measureText(this.title).width;
+                var x = margin * this.size[0] * 0.25;
+
+                var latentParams = node.getInputData(1, true);
+                if (latentParams) {
+                    var latentWidth = latentParams[0];
+                    var latentHeight = latentParams[1];
+
+                    var widgetWidth = node.widgets[1]
+                    var widgetHeight = node.widgets[2]
+                    var widgetX = node.widgets[3]
+                    var widgetY = node.widgets[4]
+                    widgetWidth.options.max = latentWidth;
+                    widgetWidth.value = Math.min(widgetWidth.value, latentWidth);
+                    this.region.width = widgetWidth.value;
+
+                    widgetHeight.options.max = latentHeight;
+                    widgetHeight.value = Math.min(widgetHeight.value, latentHeight);
+                    this.region.height = widgetHeight.value;
+
+                    widgetX.options.max = latentWidth;
+                    widgetX.value = Math.min(widgetX.value, latentWidth - widgetWidth.value);
+                    this.region.x = widgetX.value;
+
+                    widgetY.options.max = latentHeight;
+                    widgetY.value = Math.min(widgetY.value, latentHeight - widgetHeight.value);
+                    this.region.y = widgetY.value;
+
+                    ctx.fillStyle = "#FFF";
+                    ctx.strokeRect(x, y, this.size[0], this.size[1]);
+
+                    ctx.fillStyle = "#AAF";
+                    ctx.fillRect(
+                        x + (this.region.x / latentWidth) * this.size[0],
+                        y + (this.region.y / latentHeight) * this.size[1],
+                        (this.region.width / latentWidth) * this.size[0],
+                        (this.region.height / latentHeight) * this.size[1]
+                    );
+                }
+
+                // ctx.textAlign = "left";
+                // ctx.fillStyle = "#AAA";
+                // ctx.fillText(this.title, size * 1.2 + x, h * 0.85);
+                // ctx.textAlign = "left";
+
+                ctx.restore();
+            },
+            mouse: function(event, pos, node) {
+                return true;
+            },
+            computeSize() {
+                return this.size;
+            }
+        };
+        w.size = [150, 150];
+        w.parent = node;
+        w = node.addCustomWidget(w);
+        node.addWidget("number", "width", 512, (v) => w.region.width = v, { property: "region_width", min: 64, max: 2048, step: 64 });
+        node.addWidget("number", "height", 512, (v) => w.region.height = v, { property: "region_height", min: 64, max: 2048, step: 64 });
+        node.addWidget("number", "x", 0, (v) => w.region.x = v, { property: "region_x", min: 0, max: 2048, step: 64 });
+        node.addWidget("number", "y", 0, (v) => w.region.y = v, { property: "region_y", min: 0, max: 2048, step: 64 });
+        // console.log(node.getInputData("Width"))
+        node._widgets += [w]
+        console.log(node);
+    }
+}
+COMFY_WIDGETS["REGION"] = RegionWidget;
workflows/area_composition.json (new file, 1 line)

File diff suppressed because one or more lines are too long