fix nodes

doctorpangloss 2023-12-06 16:11:27 -08:00
parent 01312a55a4
commit 3fd5de9784
9 changed files with 33 additions and 25 deletions

View File

@@ -127,9 +127,13 @@ def hijack_progress(server):

 def cleanup_temp():
-    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
-    if os.path.exists(temp_dir):
-        shutil.rmtree(temp_dir, ignore_errors=True)
+    try:
+        temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+        if os.path.exists(temp_dir):
+            shutil.rmtree(temp_dir, ignore_errors=True)
+    except NameError:
+        # __file__ was not defined
+        pass


 def load_extra_path_config(yaml_path):
@@ -178,9 +182,12 @@ def main():
     server = server_module.PromptServer(loop)
     q = execution.PromptQueue(server)

-    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
-    if os.path.isfile(extra_model_paths_config_path):
-        load_extra_path_config(extra_model_paths_config_path)
+    try:
+        extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
+        if os.path.isfile(extra_model_paths_config_path):
+            load_extra_path_config(extra_model_paths_config_path)
+    except NameError:
+        pass

     if args.extra_model_paths_config:
         for config_path in itertools.chain(*args.extra_model_paths_config):
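Both hunks above wrap the same lookup: __file__ is not defined when a module's source is run through exec()/compile() or certain frozen or embedded interpreters, so referencing it raises NameError at runtime. A minimal sketch of the pattern, with a hypothetical helper name and a working-directory fallback chosen for illustration:

    import os

    def _script_dir() -> str:
        # hypothetical helper: __file__ is undefined when this source runs
        # via exec()/compile() or some frozen/embedded interpreters
        try:
            return os.path.dirname(os.path.realpath(__file__))
        except NameError:
            return os.getcwd()  # assumption: fall back to the working directory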

View File

@@ -5,6 +5,7 @@ import os
 import pkgutil
 import time
 import types
+import typing

 from . import base_nodes
 from comfy_extras import nodes as comfy_extras_nodes
@@ -12,7 +13,7 @@ from comfy_extras import nodes as comfy_extras_nodes
 try:
     import custom_nodes
 except:
-    custom_nodes = None
+    custom_nodes: typing.Optional[types.ModuleType] = None

 from .package_typing import ExportedNodes
 from functools import reduce
 from pkg_resources import resource_filename, iter_entry_points
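The annotation gives type checkers a precise type for custom_nodes on both branches: a module when the import succeeds, None when it fails. (Note the bare except in the diff also swallows errors raised while importing custom_nodes, not just its absence.) A minimal sketch of the pattern, assuming only the standard library:

    import types
    import typing

    try:
        import custom_nodes
    except ImportError:
        # tells type checkers the name may hold a module or None
        custom_nodes: typing.Optional[types.ModuleType] = None

    if custom_nodes is not None:
        print(custom_nodes.__name__)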
@@ -36,18 +37,14 @@ def _import_nodes_in_module(exported_nodes: ExportedNodes, module: types.ModuleT
         if not os.path.isdir(abs_web_directory):
             raise ImportError(path=abs_web_directory)
         exported_nodes.EXTENSION_WEB_DIRS[module.__name__] = abs_web_directory
+    return node_class_mappings and len(node_class_mappings) > 0 or web_directory


 def _import_and_enumerate_nodes_in_module(module: types.ModuleType, print_import_times=False) -> ExportedNodes:
     exported_nodes = ExportedNodes()
     timings = []
-    if hasattr(module, 'NODE_CLASS_MAPPINGS'):
-        node_class_mappings = getattr(module, 'NODE_CLASS_MAPPINGS', None)
-        node_display_names = getattr(module, 'NODE_DISPLAY_NAME_MAPPINGS', None)
-        if node_class_mappings:
-            exported_nodes.NODE_CLASS_MAPPINGS.update(node_class_mappings)
-        if node_display_names:
-            exported_nodes.NODE_DISPLAY_NAME_MAPPINGS.update(node_display_names)
+    if _import_nodes_in_module(exported_nodes, module):
+        pass
     else:
         # Iterate through all the submodules
         for _, name, is_pkg in pkgutil.iter_modules(module.__path__):
@@ -68,7 +65,7 @@ def _import_and_enumerate_nodes_in_module(module: types.ModuleType, print_import
             success = False
         timings.append((time.perf_counter() - time_before, full_name, success))

-    if print_import_times and len(timings) > 0:
+    if print_import_times and len(timings) > 0 or any(not success for (_, _, success) in timings):
         for (duration, module_name, success) in sorted(timings):
             print(f"{duration:6.1f} seconds{'' if success else ' (IMPORT FAILED)'}, {module_name}")
     return exported_nodes
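Because and binds tighter than or, the new condition parses as (print_import_times and len(timings) > 0) or any(...), so a failed import is always reported even when timing output is disabled. A runnable sketch of that reporting path, with made-up module names as sample data:

    timings = [(0.2, "comfy_extras.nodes_images", True),
               (1.4, "custom_nodes.broken_pack", False)]  # hypothetical data
    print_import_times = False

    if (print_import_times and len(timings) > 0) or any(not success for (_, _, success) in timings):
        for duration, module_name, success in sorted(timings):
            print(f"{duration:6.1f} seconds{'' if success else ' (IMPORT FAILED)'}, {module_name}")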

View File

@@ -435,6 +435,7 @@ class SDTokenizer:
         text = escape_important(text)
         parsed_weights = token_weights(text, 1.0)
+        vocab = self.tokenizer.get_vocab()

         #tokenize words
         tokens = []
@@ -459,7 +460,12 @@ class SDTokenizer:
                     else:
                         continue
                 #parse word
-                tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]])
+                exact_word = f"{word}</w>"
+                if exact_word in vocab:
+                    tokenizer_result = [vocab[exact_word]]
+                else:
+                    tokenizer_result = self.tokenizer(word)["input_ids"][self.tokens_start:-1]
+                tokens.append([(t, weight) for t in tokenizer_result])

         #reshape token array to CLIP input size
         batched_tokens = []
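CLIP's BPE vocabulary marks a complete word with a trailing </w>, so f"{word}</w>" hits the vocabulary exactly when the word is a single token; in that case the id is taken straight from the vocab instead of round-tripping through the tokenizer. A standalone sketch, assuming the Hugging Face transformers CLIPTokenizer and an example model name:

    from transformers import CLIPTokenizer

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")  # example model
    vocab = tokenizer.get_vocab()

    word = "cat"
    exact_word = f"{word}</w>"
    if exact_word in vocab:
        ids = [vocab[exact_word]]  # single-token word: use its id directly
    else:
        ids = tokenizer(word)["input_ids"][1:-1]  # strip BOS/EOS, keep BPE pieces
    print(ids)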

View File

@@ -1,7 +1,7 @@
 import comfy.samplers
 import comfy.sample
 from comfy.k_diffusion import sampling as k_diffusion_sampling
-import latent_preview
+from comfy.cmd import latent_preview
 import torch
 import comfy.utils
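latent_preview now lives in the comfy.cmd package rather than at the repository root. Code that must run against both layouts could hedge the import; a sketch, assuming nothing beyond the two module locations shown in the diff:

    try:
        from comfy.cmd import latent_preview  # this fork's package layout
    except ImportError:
        import latent_preview  # flat layout at the repository root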

View File

@@ -1,5 +1,5 @@
-import nodes
-import folder_paths
+from comfy.nodes.common import MAX_RESOLUTION
+from comfy.cmd import folder_paths
 from comfy.cli_args import args
 from PIL import Image
@@ -9,7 +9,6 @@ import numpy as np
 import json
 import os

-MAX_RESOLUTION = nodes.MAX_RESOLUTION

 class ImageCrop:
     @classmethod
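Instead of aliasing nodes.MAX_RESOLUTION at module level, the constant is imported from comfy.nodes.common, so this module no longer depends on the whole top-level nodes module at import time. A minimal sketch of the layout, with an illustrative value (the real constant is defined upstream):

    # comfy/nodes/common.py (sketch)
    MAX_RESOLUTION = 16384  # illustrative value; the real one lives upstream

    # any consumer module (sketch)
    from comfy.nodes.common import MAX_RESOLUTION

    def clamp_size(px: int) -> int:
        # example: keep a requested dimension within node limits
        return max(16, min(px, MAX_RESOLUTION))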

View File

@@ -1,4 +1,3 @@
-import folder_paths
 import comfy.sd
 import comfy.model_sampling
 import torch

View File

@@ -1,8 +1,8 @@
-import nodes
+from comfy.nodes.common import MAX_RESOLUTION
 import torch
 import comfy.utils
 import comfy.sd
-import folder_paths
+from comfy.cmd import folder_paths


 class ImageOnlyCheckpointLoader:
@@ -27,8 +27,8 @@ class SVD_img2vid_Conditioning:
         return {"required": { "clip_vision": ("CLIP_VISION",),
                               "init_image": ("IMAGE",),
                               "vae": ("VAE",),
-                              "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
-                              "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
+                              "width": ("INT", {"default": 1024, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
+                              "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                               "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}),
                               "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}),
                               "fps": ("INT", {"default": 6, "min": 1, "max": 1024}),