From 3736dc90e120c5bdec3f4dc64c9a3161797c2970 Mon Sep 17 00:00:00 2001
From: pythongosssss <125205205+pythongosssss@users.noreply.github.com>
Date: Sun, 9 Apr 2023 11:29:55 +0100
Subject: [PATCH 1/3] Add arrow to menu items with submenu

---
 web/style.css | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/web/style.css b/web/style.css
index afea3a8b8..0a4ebe664 100644
--- a/web/style.css
+++ b/web/style.css
@@ -257,3 +257,15 @@ button.comfy-queue-btn {
 	color: #ddd;
 	border-radius: 12px 0 0 12px;
 }
+
+.litegraph .litemenu-entry.has_submenu {
+	position: relative;
+	padding-right: 20px;
+  }
+  
+  .litemenu-entry.has_submenu::after {
+	content: ">";
+	position: absolute;
+	top: 0;
+	right: 2px;
+  }
\ No newline at end of file

From 0f5352d96cf73d30581758e9c41cad6979424858 Mon Sep 17 00:00:00 2001
From: pythongosssss <125205205+pythongosssss@users.noreply.github.com>
Date: Sun, 9 Apr 2023 11:30:31 +0100
Subject: [PATCH 2/3] style

---
 web/style.css | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/web/style.css b/web/style.css
index 0a4ebe664..d00a2fbe2 100644
--- a/web/style.css
+++ b/web/style.css
@@ -262,10 +262,11 @@ button.comfy-queue-btn {
 	position: relative;
 	padding-right: 20px;
   }
-  
+
   .litemenu-entry.has_submenu::after {
 	content: ">";
 	position: absolute;
 	top: 0;
 	right: 2px;
-  }
\ No newline at end of file
+  }
+
\ No newline at end of file

From 92eca60ec94d21b271b14eb7c832add963b09173 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 9 Apr 2023 15:47:35 -0400
Subject: [PATCH 3/3] Fix for new transformers version.

---
 comfy/clip_vision.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index cb29df432..efb2d5384 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -1,6 +1,7 @@
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch
 
 class ClipVisionModel():
     def __init__(self, json_config):
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)
 
     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
 
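
Note on [PATCH 3/3]: the change likely addresses newer transformers releases, whose CLIPImageProcessor rescales incoming pixel values by 1/255, so passing ComfyUI's 0-1 float image tensors directly would be scaled twice. The patch therefore converts the image to integer pixel values in [0, 255] before calling the processor. A minimal standalone sketch of that conversion follows; the tensor shape and random data are made-up illustration, not part of the patch.

    import torch

    # Hypothetical ComfyUI-style image batch: floats in [0, 1], shape (batch, height, width, channels).
    image = torch.rand(1, 512, 512, 3)

    # Same conversion as the patch: scale to [0, 255], clamp, and quantize to integers,
    # so the processor's own 1/255 rescaling lands back in the expected [0, 1] range.
    img = torch.clip(255. * image[0], 0, 255).round().int()

    print(img.dtype, int(img.min()), int(img.max()))  # torch.int32, values within 0..255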