From cd0b48de5aa70b2c9da095f13758cc18de483cc3 Mon Sep 17 00:00:00 2001
From: "Dr.Lt.Data"
Date: Wed, 28 Jan 2026 01:56:59 +0900
Subject: [PATCH] update DB

---
 custom-node-list.json             | 24 ++++++++++++------------
 node_db/new/custom-node-list.json | 12 ++++++++++++
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/custom-node-list.json b/custom-node-list.json
index 9e76f906..be49d0d4 100644
--- a/custom-node-list.json
+++ b/custom-node-list.json
@@ -41945,6 +41945,18 @@
             "install_type": "git-clone",
             "description": "Enforces Qwen's training resolution buckets for precise Latent space generation."
         },
+        {
+            "author": "frankluise5220",
+            "title": "LoraHelper Chat Nodes",
+            "reference": "https://github.com/frankluise5220/ComfyUI-Lorahelper",
+            "nodename_pattern": "UniversalGGUFLoader|UniversalAIChat|LH_History_Monitor",
+            "files": [
+                "https://github.com/frankluise5220/ComfyUI-Lorahelper"
+            ],
+            "install_type": "git-clone",
+            "description": "LLM-based chat nodes for image description, tagging and prompt enhancement using GGUF models",
+            "tags": ["llm", "vision", "prompt generation", "lora", "Qwen3"]
+        },
@@ -42333,18 +42345,6 @@
             ],
             "install_type": "unzip",
             "description": "Various image processing nodes."
-        },
-        {
-            "author": "frankluise5220",
-            "title": "LoraHelper Chat Nodes",
-            "reference": "https://github.com/frankluise5220/ComfyUI-Lorahelper",
-            "nodename_pattern": "UniversalGGUFLoader|UniversalAIChat|LH_History_Monitor",
-            "files": [
-                "https://github.com/frankluise5220/ComfyUI-Lorahelper"
-            ],
-            "install_type": "git-clone",
-            "description": "LLM-based chat nodes for image description, tagging and prompt enhancement using GGUF models",
-            "tags": ["llm", "vision", "prompt generation", "lora", "Qwen3"]
         },
         {
             "author": "aimingfail",
diff --git a/node_db/new/custom-node-list.json b/node_db/new/custom-node-list.json
index b888d46b..6fc6b283 100644
--- a/node_db/new/custom-node-list.json
+++ b/node_db/new/custom-node-list.json
@@ -22,6 +22,18 @@
             "install_type": "git-clone",
             "description": "AMD Unified Memory TeaCache for LTX2 (LTXAV). Lean single-file caching for 48-layer dual-stream transformer. Caches both video and audio residuals. No CPU offload - designed for AMD APUs with unified memory. ~35% speedup on cached denoising steps."
         },
+        {
+            "author": "frankluise5220",
+            "title": "LoraHelper Chat Nodes",
+            "reference": "https://github.com/frankluise5220/ComfyUI-Lorahelper",
+            "nodename_pattern": "UniversalGGUFLoader|UniversalAIChat|LH_History_Monitor",
+            "files": [
+                "https://github.com/frankluise5220/ComfyUI-Lorahelper"
+            ],
+            "install_type": "git-clone",
+            "description": "LLM-based chat nodes for image description, tagging and prompt enhancement using GGUF models",
+            "tags": ["llm", "vision", "prompt generation", "lora", "Qwen3"]
+        },
         {
             "author": "gmorks",
             "title": "ComfyUI-WatchPoint",