diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index cb29df432..efb2d5384 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -1,6 +1,7 @@
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch
 
 class ClipVisionModel():
     def __init__(self, json_config):
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)
 
     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
 
diff --git a/web/style.css b/web/style.css
index afea3a8b8..d00a2fbe2 100644
--- a/web/style.css
+++ b/web/style.css
@@ -257,3 +257,16 @@ button.comfy-queue-btn {
 	color: #ddd;
 	border-radius: 12px 0 0 12px;
 }
+
+.litegraph .litemenu-entry.has_submenu {
+	position: relative;
+	padding-right: 20px;
+}
+
+.litemenu-entry.has_submenu::after {
+	content: ">";
+	position: absolute;
+	top: 0;
+	right: 2px;
+}
+
\ No newline at end of file
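
Note on the `encode_image` change: ComfyUI passes images around as float tensors in `[0, 1]` with shape `[batch, height, width, channels]`, while `CLIPImageProcessor` applies its own `1/255` rescale and therefore expects pixel values in `[0, 255]`; feeding it the raw `[0, 1]` tensor would effectively rescale twice and hand CLIP near-black inputs. A minimal standalone sketch of the conversion follows (the `torch.rand` input is a stand-in for a real ComfyUI image batch, and the `[B, H, W, C]` float convention is assumed from the rest of the codebase):

```python
import torch
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()        # default CLIP preprocessing (resize, crop, 1/255 rescale, normalize)
image = torch.rand(1, 224, 224, 3)      # stand-in for a ComfyUI image batch: floats in [0, 1], channels last

# Same conversion the patch performs: scale to [0, 255], clamp, round, cast to int.
img = torch.clip(255. * image[0], 0, 255).round().int()

inputs = processor(images=[img], return_tensors="pt")
print(inputs["pixel_values"].shape)     # torch.Size([1, 3, 224, 224])
```

Using `.round().int()` rather than a bare int cast avoids the downward truncation bias a plain cast would introduce, and the `torch.clip` guards against out-of-range values before the cast.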