mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-27 14:50:20 +08:00

Update to latest ComfyUI

parent: 5823497d55
commit: 17b14110ab
@@ -20,9 +20,15 @@ from ..cmd.folder_paths import add_model_folder_path  # pylint: disable=import-e
 
 REQUEST_TIMEOUT = 10  # seconds
 
 
 def check_frontend_version():
     return None
 
 
+def frontend_install_warning_message() -> str:
+    return ""
+
+
 class Asset(TypedDict):
     url: str
 
@@ -141,8 +147,6 @@ class FrontendManager:
 
 comfyui-workflow-templates is not installed.
 
-{frontend_install_warning_message()}
-
 ********** ERROR ***********
 """.strip()
 )
@@ -131,7 +131,7 @@ def _create_parser() -> EnhancedConfigArgParser:
     parser.add_argument("--deterministic", action="store_true",
                         help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
 
-    parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. Pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops")
+    parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. Pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops", default=set())
 
     parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
     parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI. Raises an error if nodes cannot be imported,")
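Note on the hunk above: `default=set()` matters because `nargs="*"` leaves the attribute as `None` when the flag is never passed, and any later membership test then raises `TypeError`. A minimal sketch of the failure mode (a plain string stands in for the real `PerformanceFeature` enum value):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--fast", nargs="*", default=set())

args = parser.parse_args([])  # --fast not passed at all
# With default=set() this is simply False; with the implicit default of
# None it would raise: TypeError: argument of type 'NoneType' is not iterable
print("fp16_accumulation" in args.fast)
```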
@@ -71,7 +71,8 @@ async def __execute_prompt(
         args.update(configuration)
 
     with tracer.start_as_current_span("Initialize Prompt Executor", context=span_context):
-        prompt_executor = PromptExecutor(progress_handler, lru_size=configuration.cache_lru if configuration is not None else 0)
+        # todo: deal with new caching features
+        prompt_executor = PromptExecutor(progress_handler)
         prompt_executor.raise_exceptions = True
         _prompt_executor.executor = prompt_executor
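The `lru_size` keyword is dropped here because the upstream `PromptExecutor` no longer accepts it (hence the todo about the new caching features). A hypothetical compatibility shim, not part of this commit, could keep one call site working across both signatures; the import path is assumed:

```python
import inspect

from comfy.cmd.execution import PromptExecutor  # import path assumed


def make_prompt_executor(progress_handler, cache_lru=0):
    # Pass lru_size only if the installed PromptExecutor still accepts it.
    if "lru_size" in inspect.signature(PromptExecutor).parameters:
        return PromptExecutor(progress_handler, lru_size=cache_lru)
    return PromptExecutor(progress_handler)
```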
@@ -1,5 +1,6 @@
 import json
 import logging
+from typing import Optional
 
 import torch
 
@@ -116,7 +117,8 @@ def convert_to_transformers(sd, prefix):
     return sd
 
 
-def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
+def load_clipvision_from_sd(sd, prefix="", convert_keys=False) -> Optional[ClipVisionModel]:
+    json_config: dict = {}
     if convert_keys:
         sd = convert_to_transformers(sd, prefix)
     if "vision_model.encoder.layers.47.layer_norm1.weight" in sd:
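With the `Optional[ClipVisionModel]` annotation, callers are expected to handle a `None` result when the state dict is not recognized. A hedged usage sketch; the checkpoint path is illustrative and the import locations are assumed:

```python
from comfy.clip_vision import load_clipvision_from_sd  # module path assumed
from comfy.utils import load_torch_file

sd = load_torch_file("models/clip_vision/example.safetensors")  # illustrative path
model = load_clipvision_from_sd(sd)
if model is None:
    raise RuntimeError("state dict was not recognized as a CLIP vision model")
```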
@@ -745,7 +745,7 @@ def validate_inputs(prompt, item, validated: typing.Dict[str, ValidateInputsTupl
             r = get_nodes().NODE_CLASS_MAPPINGS[o_class_type].RETURN_TYPES
             received_type = r[val[1]]
             received_types[x] = received_type
-            any_enum = received_type == [] and (isinstance(type_input, list) or isinstance(type_input, tuple))
+            any_enum = received_type == [] and (isinstance(input_type, list) or isinstance(input_type, tuple))
 
             if 'input_types' not in validate_function_inputs and not validate_node_input(received_type, input_type) and not any_enum:
                 details = f"{x}, {received_type} != {input_type}"
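The fix replaces a stale `type_input` reference with `input_type`, the variable the surrounding checks actually use. The `any_enum` condition treats an output declared as an empty list as a wildcard combo; a minimal illustration of the assumed semantics:

```python
received_type = []       # RETURN_TYPES entry acting as an "anything" combo
input_type = ["a", "b"]  # combo input specification
any_enum = received_type == [] and isinstance(input_type, (list, tuple))
assert any_enum  # the type-mismatch error below is skipped in this case
```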
@@ -857,8 +857,8 @@ def validate_inputs(prompt, item, validated: typing.Dict[str, ValidateInputsTupl
             if "\\" in val:
                 # try to normalize paths for comparison purposes
                 val = canonicalize_path(val)
-            if all(isinstance(item, (str, PathLike)) for item in type_input):
-                type_input = [canonicalize_path(item) for item in type_input]
+            if all(isinstance(item, (str, PathLike)) for item in combo_options):
+                combo_options = [canonicalize_path(item) for item in combo_options]
             if val not in combo_options:
                 input_config = info
                 list_info = ""
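The rename makes the normalization act on `combo_options`, the list that `val` is checked against two lines later. A rough sketch of the intent; the stand-in below is illustrative, since the repository's `canonicalize_path` lives elsewhere:

```python
from pathlib import PurePath


def normalize(p: str) -> str:
    # Illustrative stand-in for canonicalize_path: unify path separators.
    return PurePath(p.replace("\\", "/")).as_posix()


combo_options = [normalize(o) for o in ["subdir/model.safetensors"]]
assert normalize("subdir\\model.safetensors") in combo_options
```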
@@ -36,6 +36,10 @@ _session = Session()
 _hf_fs = HfFileSystem()
 
 
+def get_filename_list(folder_name: str) -> list[str]:
+    return get_filename_list_with_downloadable(folder_name)
+
+
 def get_filename_list_with_downloadable(folder_name: str, known_files: Optional[List[Downloadable] | KnownDownloadables] = None) -> List[str]:
     if known_files is None:
         known_files = _get_known_models_for_folder_name(folder_name)
@@ -45,6 +49,13 @@ def get_filename_list_with_downloadable(folder_name: str, known_files: Optional[
     return list(map(canonicalize_path, sorted(list(existing | downloadable))))
 
 
+def get_full_path_or_raise(folder_name: str, filename: str) -> str:
+    res = get_or_download(folder_name, filename)
+    if res is None:
+        raise FileNotFoundError(f"{folder_name} does not contain {filename}")
+    return res
+
+
 def get_or_download(folder_name: str, filename: str, known_files: Optional[List[Downloadable] | KnownDownloadables] = None) -> Optional[str]:
     if known_files is None:
         known_files = _get_known_models_for_folder_name(folder_name)
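These two additions give `model_downloader` the same surface as `folder_paths` (`get_filename_list`, `get_full_path_or_raise`), so node code can switch modules without changing call shapes, as the HiDream nodes further down do. A hedged usage sketch; the filename is illustrative:

```python
from comfy import model_downloader

try:
    path = model_downloader.get_full_path_or_raise("text_encoders", "t5xxl.safetensors")
except FileNotFoundError as exc:
    print(exc)  # text_encoders does not contain t5xxl.safetensors
```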
@@ -102,6 +102,7 @@ def string_to_torch_dtype(string):
 
 
 def image_alpha_fix(destination, source):
+    import torch
     if destination.shape[-1] < source.shape[-1]:
         source = source[..., :destination.shape[-1]]
     elif destination.shape[-1] > source.shape[-1]:
@@ -1714,8 +1714,9 @@ class LoadImage:
             mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
             mask = 1. - torch.from_numpy(mask)
         elif i.mode == 'P' and 'transparency' in i.info:
             mask = np.array(i.convert('RGBA').getchannel('A')).astype(np.float32) / 255.0
-            mask = 1. - torch.from_numpy(mask)else:
+            mask = 1. - torch.from_numpy(mask)
+        else:
             mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
         output_images.append(image)
         output_masks.append(mask.unsqueeze(0))
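The hunk above repairs a line that had `else:` fused onto the end of the mask assignment, a syntax error in Python. The mask convention itself inverts the alpha channel, so fully transparent pixels end up with mask value 1. A small self-contained illustration:

```python
import numpy as np
import torch

alpha = np.array([[0, 128, 255]], dtype=np.uint8)  # transparent .. opaque
mask = 1. - torch.from_numpy(alpha.astype(np.float32) / 255.0)
print(mask)  # tensor([[1.0000, 0.4980, 0.0000]])
```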
@@ -1,6 +1,5 @@
 import uuid
 
-from . import conds
 from . import model_management
 from . import patcher_extension
 from . import utils
@@ -113,9 +112,9 @@ def cleanup_additional_models(models):
 
 
 def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None):
-    executor = comfy.patcher_extension.WrapperExecutor.new_executor(
+    executor = patcher_extension.WrapperExecutor.new_executor(
         _prepare_sampling,
-        comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True)
+        patcher_extension.get_all_wrappers(patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True)
     )
     return executor.execute(model, noise_shape, conds, model_options=model_options)
 
@@ -16,6 +16,7 @@ class PT5XlModel(sd1_clip.SDClipModel):
 class PT5XlTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, embedding_directory=None, **kwargs):
         tokenizer_path = resources.files("comfy.text_encoders.t5_pile_tokenizer") / "tokenizer.model"
+        tokenizer_data = kwargs.pop("tokenizer_data", {})
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1, tokenizer_data=tokenizer_data)
 
 
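The same `kwargs.pop` line is added to the Hydit tokenizer in the next hunk. The pattern extracts the one keyword the parent `__init__` needs, defaulting to `{}` when the caller did not supply it; a minimal demonstration:

```python
def demo(**kwargs):
    # Same shape as the tokenizer __init__ above: pull tokenizer_data out
    # of kwargs (defaulting to {}) before deciding what to forward.
    tokenizer_data = kwargs.pop("tokenizer_data", {})
    return tokenizer_data, kwargs


assert demo() == ({}, {})
assert demo(tokenizer_data={"spm": "..."}, other=1) == ({"spm": "..."}, {"other": 1})
```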
@@ -21,6 +21,7 @@ class HyditBertModel(sd1_clip.SDClipModel):
 class HyditBertTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, **kwargs):
         tokenizer_path = get_package_as_path(f"{__package__}.hydit_clip_tokenizer")
+        tokenizer_data = kwargs.pop("tokenizer_data", {})
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=1024, embedding_key='chinese_roberta', tokenizer_class=BertTokenizer, pad_to_max_length=False, max_length=512, min_length=77, tokenizer_data=tokenizer_data)
 
 
@@ -1,16 +1,18 @@
-import folder_paths
-import comfy.sd
 import comfy.model_management
+import comfy.sd
+from comfy.cmd import folder_paths
+from comfy import model_downloader
 
 
 class QuadrupleCLIPLoader:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
-                              "clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
-                              "clip_name3": (folder_paths.get_filename_list("text_encoders"), ),
-                              "clip_name4": (folder_paths.get_filename_list("text_encoders"), )
+        return {"required": {"clip_name1": (model_downloader.get_filename_list("text_encoders"),),
+                             "clip_name2": (model_downloader.get_filename_list("text_encoders"),),
+                             "clip_name3": (model_downloader.get_filename_list("text_encoders"),),
+                             "clip_name4": (model_downloader.get_filename_list("text_encoders"),)
                              }}
 
     RETURN_TYPES = ("CLIP",)
     FUNCTION = "load_clip"
 
@@ -19,35 +21,37 @@ class QuadrupleCLIPLoader:
     DESCRIPTION = "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct"
 
     def load_clip(self, clip_name1, clip_name2, clip_name3, clip_name4):
-        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
-        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
-        clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
-        clip_path4 = folder_paths.get_full_path_or_raise("text_encoders", clip_name4)
+        clip_path1 = model_downloader.get_full_path_or_raise("text_encoders", clip_name1)
+        clip_path2 = model_downloader.get_full_path_or_raise("text_encoders", clip_name2)
+        clip_path3 = model_downloader.get_full_path_or_raise("text_encoders", clip_name3)
+        clip_path4 = model_downloader.get_full_path_or_raise("text_encoders", clip_name4)
         clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4], embedding_directory=folder_paths.get_folder_paths("embeddings"))
         return (clip,)
 
 
 class CLIPTextEncodeHiDream:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {
-            "clip": ("CLIP", ),
+            "clip": ("CLIP",),
             "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
             "clip_g": ("STRING", {"multiline": True, "dynamicPrompts": True}),
             "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
             "llama": ("STRING", {"multiline": True, "dynamicPrompts": True})
         }}
 
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "encode"
 
     CATEGORY = "advanced/conditioning"
 
     def encode(self, clip, clip_l, clip_g, t5xxl, llama):
 
         tokens = clip.tokenize(clip_g)
         tokens["l"] = clip.tokenize(clip_l)["l"]
         tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
         tokens["llama"] = clip.tokenize(llama)["llama"]
-        return (clip.encode_from_tokens_scheduled(tokens), )
+        return (clip.encode_from_tokens_scheduled(tokens),)
 
 
 NODE_CLASS_MAPPINGS = {
     "QuadrupleCLIPLoader": QuadrupleCLIPLoader,
@@ -447,6 +447,8 @@ class VoxelToMesh:
             mesh_function = voxel_to_mesh
         elif algorithm == "surface net":
            mesh_function = voxel_to_mesh_surfnet
+        else:
+            mesh_function = None
 
         for x in voxel.data:
             v, f = mesh_function(x, threshold=threshold, device=None)
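Note that the new `else` branch only makes the fallthrough explicit: with an unknown `algorithm`, `mesh_function` is `None` and the call inside the loop still fails, just with a `TypeError` instead of an unbound-name error. A stricter variant would fail fast with a clear message; a sketch, not the repository's code, with the dispatch key for the first branch assumed:

```python
def pick_mesh_function(algorithm):
    functions = {
        "basic": voxel_to_mesh,               # key name assumed
        "surface net": voxel_to_mesh_surfnet,
    }
    try:
        return functions[algorithm]
    except KeyError:
        raise ValueError(f"unknown voxel meshing algorithm: {algorithm!r}")
```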