From c515bdf3710e77233e5b4baa24e038de8e86f56e Mon Sep 17 00:00:00 2001
From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com>
Date: Wed, 8 Jan 2025 06:03:56 +0900
Subject: [PATCH 1/2] fixed: robust loading `comfy.settings.json` (#6383)

https://github.com/comfyanonymous/ComfyUI/issues/6371
---
 app/app_settings.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/app/app_settings.py b/app/app_settings.py
index efe87adbd..a545df92e 100644
--- a/app/app_settings.py
+++ b/app/app_settings.py
@@ -1,6 +1,7 @@
 import os
 import json
 from aiohttp import web
+import logging
 
 
 class AppSettings():
@@ -11,8 +12,12 @@ class AppSettings():
         file = self.user_manager.get_request_user_filepath(
             request, "comfy.settings.json")
         if os.path.isfile(file):
-            with open(file) as f:
-                return json.load(f)
+            try:
+                with open(file) as f:
+                    return json.load(f)
+            except:
+                logging.error(f"The user settings file is corrupted: {file}")
+                return {}
         else:
             return {}


From d0f3752e332ad9b2d8ee6f9c4317868aa685a62e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 7 Jan 2025 17:32:29 -0500
Subject: [PATCH 2/2] Properly calculate inner dim for t5 model.

This is required to support some different types of t5 models.
---
 comfy/text_encoders/t5.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comfy/text_encoders/t5.py b/comfy/text_encoders/t5.py
index 38d8d5234..7405528e2 100644
--- a/comfy/text_encoders/t5.py
+++ b/comfy/text_encoders/t5.py
@@ -227,8 +227,9 @@ class T5(torch.nn.Module):
         super().__init__()
         self.num_layers = config_dict["num_layers"]
         model_dim = config_dict["d_model"]
+        inner_dim = config_dict["d_kv"] * config_dict["num_heads"]
 
-        self.encoder = T5Stack(self.num_layers, model_dim, model_dim, config_dict["d_ff"], config_dict["dense_act_fn"], config_dict["is_gated_act"], config_dict["num_heads"], config_dict["model_type"] != "umt5", dtype, device, operations)
+        self.encoder = T5Stack(self.num_layers, model_dim, inner_dim, config_dict["d_ff"], config_dict["dense_act_fn"], config_dict["is_gated_act"], config_dict["num_heads"], config_dict["model_type"] != "umt5", dtype, device, operations)
         self.dtype = dtype
         self.shared = operations.Embedding(config_dict["vocab_size"], model_dim, device=device, dtype=dtype)
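
Note on PATCH 1/2: the commit wraps the settings read in try/except so that a corrupted
comfy.settings.json is logged and treated as empty instead of raising. Below is a minimal
standalone sketch of that fallback behavior; it is not part of the patch or of ComfyUI's API,
the helper name load_settings_file is illustrative, and it catches Exception rather than the
bare except used in the patch.

    # Standalone sketch of the "return {} on corrupted settings" behavior (illustrative).
    import json
    import logging
    import os

    def load_settings_file(path):
        """Return the parsed settings dict, or {} if the file is missing or unreadable."""
        if not os.path.isfile(path):
            return {}
        try:
            with open(path) as f:
                return json.load(f)
        except Exception:
            # The patch uses a bare `except:`; Exception is the narrower equivalent here.
            logging.error(f"The user settings file is corrupted: {path}")
            return {}

    if __name__ == "__main__":
        with open("comfy.settings.json", "w") as f:
            f.write("{ not valid json")                       # simulate a corrupted file
        print(load_settings_file("comfy.settings.json"))      # prints {} instead of raising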
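
Note on PATCH 2/2: classic T5 configs satisfy d_model == num_heads * d_kv, so passing
model_dim as the attention inner dimension happened to work; variants where the two differ
need the explicit product that the commit introduces. A small sketch of the arithmetic,
using one real-looking config (T5-XXL-style) and one hypothetical variant invented for
illustration:

    # Sketch of the inner-dim calculation from PATCH 2/2 (configs are illustrative).
    t5_xxl_like = {"d_model": 4096, "num_heads": 64, "d_kv": 64}    # d_kv * num_heads == d_model
    variant     = {"d_model": 4096, "num_heads": 64, "d_kv": 128}   # hypothetical: they differ

    for name, cfg in (("t5_xxl_like", t5_xxl_like), ("variant", variant)):
        inner_dim = cfg["d_kv"] * cfg["num_heads"]
        print(name, "model_dim:", cfg["d_model"], "inner_dim:", inner_dim)

    # Before the patch the encoder was built with model_dim in both positions,
    # which is only correct when inner_dim == model_dim, as in the first config.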