From e2f9b3860d2443ffed53d8d47d6dda838df1ff44 Mon Sep 17 00:00:00 2001
From: alisson-anjos
Date: Sun, 24 Mar 2024 20:03:19 +0000
Subject: [PATCH] Add LLaVA-Describer custom nodes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 custom-node-list.json | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/custom-node-list.json b/custom-node-list.json
index 36ddb2e7..aefdad74 100644
--- a/custom-node-list.json
+++ b/custom-node-list.json
@@ -6846,6 +6846,16 @@
             ],
             "install_type": "unzip",
             "description": "This is a node to convert an image into a CMYK Halftone dot image."
+        },
+        {
+            "author": "alisson-anjos",
+            "title": "ComfyUI-LLaVA-Describer",
+            "reference": "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer",
+            "files": [
+                "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"
+            ],
+            "install_type": "git-clone",
+            "description": "This is an extension for ComfyUI that extracts descriptions from your images using the multimodal model LLaVA. The LLaVA model (Large Language and Vision Assistant), although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them, showing behavior similar to multimodal models such as GPT-4 even when presented with unseen images and instructions."
         }
     ]
 }
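
Note on the entry's semantics: the "install_type": "git-clone" field means a registry consumer installs this node pack by cloning each URL listed under "files". The sketch below is not ComfyUI-Manager's actual installer code; the top-level "custom_nodes" key matches this file, but the target directory ComfyUI/custom_nodes and the helper name install_git_clone_entries are assumptions for illustration only.

import json
import subprocess
from pathlib import Path

# Assumed install location for custom node packs.
CUSTOM_NODES_DIR = Path("ComfyUI/custom_nodes")

def install_git_clone_entries(node_list_path="custom-node-list.json"):
    """Clone every 'git-clone' registry entry that is not yet present locally."""
    with open(node_list_path, "r", encoding="utf-8") as f:
        registry = json.load(f)

    for entry in registry["custom_nodes"]:
        if entry.get("install_type") != "git-clone":
            continue
        for repo_url in entry.get("files", []):
            # Use the last URL segment (e.g. ComfyUI-LLaVA-Describer) as the folder name.
            target = CUSTOM_NODES_DIR / repo_url.rstrip("/").split("/")[-1]
            if target.exists():
                continue  # skip packs that are already cloned
            subprocess.run(["git", "clone", repo_url, str(target)], check=True)

if __name__ == "__main__":
    install_git_clone_entries()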