From b40143984c1bed9cd1bc73a373a80a90c625eb0f Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 12 Jul 2025 00:49:26 -0700
Subject: [PATCH 1/6] Add model detection error hint for lora. (#8880)

---
 comfy/sd.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/comfy/sd.py b/comfy/sd.py
index 5b95cf75a..8081b167c 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -18,6 +18,7 @@ import comfy.ldm.hunyuan3d.vae
 import comfy.ldm.ace.vae.music_dcae_pipeline
 import yaml
 import math
+import os
 
 import comfy.utils
 
@@ -977,6 +978,12 @@ def load_gligen(ckpt_path):
         model = model.half()
     return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())
 
+def model_detection_error_hint(path, state_dict):
+    filename = os.path.basename(path)
+    if 'lora' in filename.lower():
+        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node."
+    return ""
+
 def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
     logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
     model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
@@ -1005,7 +1012,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
     sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
     out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata)
     if out is None:
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, sd)))
     return out
 
 def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None):
@@ -1177,7 +1184,7 @@ def load_diffusion_model(unet_path, model_options={}):
     model = load_diffusion_model_state_dict(sd, model_options=model_options)
     if model is None:
         logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
     return model
 
 def load_unet(unet_path, dtype=None):

From 480375f3495e9e1437faf47eb2a11222c9acf3f0 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Sun, 13 Jul 2025 01:46:27 -0700
Subject: [PATCH 2/6] Remove auth tokens from history storage (#8889)

Remove auth_token_comfy_org and api_key_comfy_org from extra_data before
storing prompt history to prevent sensitive authentication tokens from
being persisted in the history endpoint response.
---
 execution.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/execution.py b/execution.py
index 90cefc023..bd638afba 100644
--- a/execution.py
+++ b/execution.py
@@ -1045,6 +1045,12 @@ class PromptQueue:
             if status is not None:
                 status_dict = copy.deepcopy(status._asdict())
 
+            # Remove auth tokens from extra_data before storing in history
+            if "auth_token_comfy_org" in prompt[3]:
+                del prompt[3]["auth_token_comfy_org"]
+            if "api_key_comfy_org" in prompt[3]:
+                del prompt[3]["api_key_comfy_org"]
+
             self.history[prompt[1]] = {
                 "prompt": prompt,
                 "outputs": {},

From 4831e9c2c47b97f85fd771521f247a017d1f43e1 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sun, 13 Jul 2025 01:59:17 -0700
Subject: [PATCH 3/6] Refactor previous pr. (#8893)

---
 execution.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/execution.py b/execution.py
index bd638afba..c3a62f1cb 100644
--- a/execution.py
+++ b/execution.py
@@ -123,6 +123,8 @@ class CacheSet:
         }
         return result
 
+SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")
+
 def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}
@@ -1045,11 +1047,10 @@ class PromptQueue:
             if status is not None:
                 status_dict = copy.deepcopy(status._asdict())
 
-            # Remove auth tokens from extra_data before storing in history
-            if "auth_token_comfy_org" in prompt[3]:
-                del prompt[3]["auth_token_comfy_org"]
-            if "api_key_comfy_org" in prompt[3]:
-                del prompt[3]["api_key_comfy_org"]
+            # Remove sensitive data from extra_data before storing in history
+            for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
+                if sensitive_val in prompt[3]:
+                    prompt[3].pop(sensitive_val)
 
             self.history[prompt[1]] = {
                 "prompt": prompt,

From 9ca581c9416d799db0d7c55ac957a5fa486798c8 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 14 Jul 2025 10:10:20 -0700
Subject: [PATCH 4/6] Remove windows line endings. (#8902)

---
 comfy_extras/nodes_pixart.py | 48 ++++++++++++++++++------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/comfy_extras/nodes_pixart.py b/comfy_extras/nodes_pixart.py
index c7209c468..8d9276afe 100644
--- a/comfy_extras/nodes_pixart.py
+++ b/comfy_extras/nodes_pixart.py
@@ -1,24 +1,24 @@
-from nodes import MAX_RESOLUTION
-
-class CLIPTextEncodePixArtAlpha:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
-            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
-            }}
-
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
-    CATEGORY = "advanced/conditioning"
-    DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma."
-
-    def encode(self, clip, width, height, text):
-        tokens = clip.tokenize(text)
-        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)
-
-NODE_CLASS_MAPPINGS = {
-    "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
-}
+from nodes import MAX_RESOLUTION
+
+class CLIPTextEncodePixArtAlpha:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+            # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
+            }}
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "encode"
+    CATEGORY = "advanced/conditioning"
+    DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma."
+
+    def encode(self, clip, width, height, text):
+        tokens = clip.tokenize(text)
+        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)
+
+NODE_CLASS_MAPPINGS = {
+    "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
+}

From 861c3bbb3d2330dc7dff7567ffcf07946ace23b8 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 15 Jul 2025 01:27:57 +0800
Subject: [PATCH 5/6] Update template to 0.1.36 (#8904)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 19a40ca0e..7705918a8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.35
+comfyui-workflow-templates==0.1.36
 comfyui-embedded-docs==0.2.4
 torch
 torchsde

From 260a5ca5d9997bfa1cec5a4922cb066187e6daf0 Mon Sep 17 00:00:00 2001
From: FeepingCreature <540727+FeepingCreature@users.noreply.github.com>
Date: Mon, 14 Jul 2025 20:48:31 +0200
Subject: [PATCH 6/6] Allow the prompt request to specify the prompt ID. (#8189)

This makes it easier to write asynchronous clients that submit requests,
because they can store the task immediately. Duplicate prompt IDs are
rejected by the job queue.
---
 script_examples/websockets_api_example.py | 11 ++++++-----
 server.py                                 |  2 +-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/script_examples/websockets_api_example.py b/script_examples/websockets_api_example.py
index d696d2bba..58f26cfb6 100644
--- a/script_examples/websockets_api_example.py
+++ b/script_examples/websockets_api_example.py
@@ -10,11 +10,11 @@ import urllib.parse
 server_address = "127.0.0.1:8188"
 client_id = str(uuid.uuid4())
 
-def queue_prompt(prompt):
-    p = {"prompt": prompt, "client_id": client_id}
+def queue_prompt(prompt, prompt_id):
+    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
     data = json.dumps(p).encode('utf-8')
-    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
-    return json.loads(urllib.request.urlopen(req).read())
+    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
+    urllib.request.urlopen(req).read()
 
 def get_image(filename, subfolder, folder_type):
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
@@ -27,7 +27,8 @@ def get_history(prompt_id):
         return json.loads(response.read())
 
 def get_images(ws, prompt):
-    prompt_id = queue_prompt(prompt)['prompt_id']
+    prompt_id = str(uuid.uuid4())
+    queue_prompt(prompt, prompt_id)
     output_images = {}
     while True:
         out = ws.recv()
diff --git a/server.py b/server.py
index e8bad9f4e..71a58f0fa 100644
--- a/server.py
+++ b/server.py
@@ -678,7 +678,7 @@ class PromptServer():
 
         if "prompt" in json_data:
             prompt = json_data["prompt"]
-            prompt_id = str(uuid.uuid4())
+            prompt_id = str(json_data.get("prompt_id", uuid.uuid4()))
             valid = await execution.validate_prompt(prompt_id, prompt)
             extra_data = {}
             if "extra_data" in json_data:
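
The change in PATCH 6/6 is easiest to see from the client side. Below is a minimal sketch (not part of the series) of the asynchronous pattern the commit message describes: the client generates its own prompt ID, can record it before the POST even returns, and later looks the job up through the /history endpoint. It assumes a ComfyUI server at 127.0.0.1:8188 and a workflow dict already loaded as `prompt`; `queue_prompt` and `get_history` mirror the updated script_examples/websockets_api_example.py, while `submit_and_wait` is a hypothetical wrapper that polls instead of listening on the websocket.

```python
import json
import time
import urllib.request
import uuid

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

def queue_prompt(prompt, prompt_id):
    # The client now chooses prompt_id; duplicates are rejected by the job queue.
    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    urllib.request.urlopen(req).read()

def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())

def submit_and_wait(prompt, poll_seconds=1.0):
    prompt_id = str(uuid.uuid4())
    queue_prompt(prompt, prompt_id)  # the task can be stored under prompt_id immediately
    while True:
        history = get_history(prompt_id)
        if prompt_id in history:  # /history/{id} stays empty until the job has finished
            return history[prompt_id]["outputs"]
        time.sleep(poll_seconds)
```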
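The hint added in PATCH 1/6 can also be exercised on its own. This sketch simply copies the new `model_detection_error_hint` helper out of the patched comfy/sd.py and calls it with a made-up file path, to show the message a user would see when a Lora file is mistakenly loaded through a checkpoint or diffusion-model loader.

```python
import os

# Copied from the patched comfy/sd.py; the state_dict argument is unused by the lora check.
def model_detection_error_hint(path, state_dict):
    filename = os.path.basename(path)
    if 'lora' in filename.lower():
        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node."
    return ""

# Hypothetical path; any filename containing "lora" triggers the hint.
ckpt_path = "models/checkpoints/detail_tweaker_lora.safetensors"
print("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, {})))
```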
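Finally, the history scrubbing introduced in PATCH 2/6 and refactored in PATCH 3/6 amounts to dropping a fixed set of keys from the request's extra_data before the prompt is written to history. A standalone sketch of the same logic, using a made-up extra_data dict:

```python
SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")

# Hypothetical extra_data as it might arrive with a /prompt request.
extra_data = {
    "auth_token_comfy_org": "secret-token",
    "api_key_comfy_org": "secret-key",
    "client_id": "1234",
}

# Same logic as the refactored PromptQueue.task_done(): pop the sensitive keys
# so they never appear in the /history response.
for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
    if sensitive_val in extra_data:
        extra_data.pop(sensitive_val)

print(extra_data)  # {'client_id': '1234'}
```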