Mirror of https://github.com/comfyanonymous/ComfyUI.git
commit 98b5183ed8
Merge branch 'master' into dr-support-pip-cm
comfy/sd.py
@@ -18,6 +18,7 @@ import comfy.ldm.hunyuan3d.vae
 import comfy.ldm.ace.vae.music_dcae_pipeline
 import yaml
 import math
+import os

 import comfy.utils

@@ -977,6 +978,12 @@ def load_gligen(ckpt_path):
         model = model.half()
     return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())

+def model_detection_error_hint(path, state_dict):
+    filename = os.path.basename(path)
+    if 'lora' in filename.lower():
+        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node.."
+    return ""
+
 def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
     logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
     model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
@@ -1005,7 +1012,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
     sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
     out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata)
     if out is None:
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, sd)))
     return out

 def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None):
@@ -1177,7 +1184,7 @@ def load_diffusion_model(unet_path, model_options={}):
     model = load_diffusion_model_state_dict(sd, model_options=model_options)
     if model is None:
         logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
     return model

 def load_unet(unet_path, dtype=None):
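The hint helper added above decides purely from the filename, so its effect on the new error messages can be reproduced outside ComfyUI. A minimal standalone sketch (the checkpoint path is a made-up example; the stub simply mirrors the helper from this diff):

import os

def model_detection_error_hint(path, state_dict):
    # Same logic as the helper introduced in comfy/sd.py: only the filename is inspected.
    filename = os.path.basename(path)
    if 'lora' in filename.lower():
        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node.."
    return ""

# A LoRA file mistakenly loaded as a checkpoint now fails with a hint appended:
path = "models/checkpoints/my_style_lora_v2.safetensors"  # hypothetical path
print("ERROR: Could not detect model type of: {}\n{}".format(path, model_detection_error_hint(path, {})))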
comfy_extras/nodes_pixart.py
@@ -1,24 +1,24 @@
 from nodes import MAX_RESOLUTION

 class CLIPTextEncodePixArtAlpha:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {
             "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
             "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
             # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
             "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
             }}

     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "encode"
     CATEGORY = "advanced/conditioning"
     DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma."

     def encode(self, clip, width, height, text):
         tokens = clip.tokenize(text)
         return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)

 NODE_CLASS_MAPPINGS = {
     "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
 }
execution.py
@@ -123,6 +123,8 @@ class CacheSet:
         }
         return result

+SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")
+
 def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}
@@ -1045,6 +1047,11 @@ class PromptQueue:
             if status is not None:
                 status_dict = copy.deepcopy(status._asdict())

+            # Remove sensitive data from extra_data before storing in history
+            for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
+                if sensitive_val in prompt[3]:
+                    prompt[3].pop(sensitive_val)
+
             self.history[prompt[1]] = {
                 "prompt": prompt,
                 "outputs": {},
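In the PromptQueue change above, prompt[3] is the extra_data dict that accompanied the job, and the keys are popped just before the entry is written to self.history, so auth tokens submitted with a prompt are not echoed back from the history endpoint. A standalone sketch of the same filtering (the dict contents are invented placeholders):

SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")

extra_data = {"auth_token_comfy_org": "secret-token", "client_id": "abc123"}  # placeholder values

for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
    if sensitive_val in extra_data:
        extra_data.pop(sensitive_val)

print(extra_data)  # {'client_id': 'abc123'} -- only non-sensitive keys survive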
requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.35
+comfyui-workflow-templates==0.1.36
 comfyui-embedded-docs==0.2.4
 comfyui_manager
 torch
script_examples/websockets_api_example.py
@@ -10,11 +10,11 @@ import urllib.parse
 server_address = "127.0.0.1:8188"
 client_id = str(uuid.uuid4())

-def queue_prompt(prompt):
-    p = {"prompt": prompt, "client_id": client_id}
+def queue_prompt(prompt, prompt_id):
+    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
     data = json.dumps(p).encode('utf-8')
     req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
-    return json.loads(urllib.request.urlopen(req).read())
+    urllib.request.urlopen(req).read()

 def get_image(filename, subfolder, folder_type):
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
@@ -27,7 +27,8 @@ def get_history(prompt_id):
         return json.loads(response.read())

 def get_images(ws, prompt):
-    prompt_id = queue_prompt(prompt)['prompt_id']
+    prompt_id = str(uuid.uuid4())
+    queue_prompt(prompt, prompt_id)
     output_images = {}
     while True:
         out = ws.recv()
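With this change the example generates the prompt_id on the client and sends it along with the workflow, instead of reading the id back from the /prompt response; the matching server.py hunk below accepts the client-supplied id. A minimal sketch of the new queueing flow (assumes a ComfyUI server on the default address; the empty dict stands in for a real workflow):

import json
import uuid
import urllib.request

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

def queue_prompt(prompt, prompt_id):
    # The client now picks its own prompt_id and sends it with the request.
    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    urllib.request.urlopen(req).read()

prompt_id = str(uuid.uuid4())   # generated up front, later reused to look up history
queue_prompt({}, prompt_id)     # {} is a placeholder for a real workflow dict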
server.py
@@ -684,7 +684,7 @@ class PromptServer():

         if "prompt" in json_data:
             prompt = json_data["prompt"]
-            prompt_id = str(uuid.uuid4())
+            prompt_id = str(json_data.get("prompt_id", uuid.uuid4()))
             valid = await execution.validate_prompt(prompt_id, prompt)
             extra_data = {}
             if "extra_data" in json_data:
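The json_data.get fallback keeps the /prompt endpoint backwards compatible: clients that do not send a prompt_id still get a server-generated UUID. A small illustration of that fallback logic (the payloads are made up):

import uuid

def resolve_prompt_id(json_data):
    # Mirrors the server.py change: prefer the client's prompt_id, otherwise mint one.
    return str(json_data.get("prompt_id", uuid.uuid4()))

print(resolve_prompt_id({"prompt": {}, "prompt_id": "d5b9e7a0-0000-0000-0000-000000000000"}))  # kept as-is
print(resolve_prompt_id({"prompt": {}}))  # falls back to a fresh uuid4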