Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-23 21:00:16 +08:00)

Commit c2fa74f625: Merge branch 'master' of github.com:comfyanonymous/ComfyUI
@@ -17,5 +17,5 @@ def open_image(file_path: str) -> Image.Image:
     if ext == ".exr":
         yield _open_exr(file_path)
     else:
-        with node_helpers.open_image(file_path) as image:
+        with node_helpers.pillow(Image.open, file_path) as image:
             yield image
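Note on the hunk above: node_helpers.pillow() is a plain function rather than a context manager, but the `with` statement still works because the PIL.Image.Image returned by Image.open() supports the context-manager protocol. A minimal caller sketch, not part of the commit (read_rgb and the flat import of node_helpers are illustrative assumptions):

    # Illustrative sketch only; assumes node_helpers from this commit is importable.
    from PIL import Image
    import node_helpers

    def read_rgb(file_path):
        # pillow() returns whatever fn(arg) returns; a PIL Image is itself a
        # context manager, so `with` still closes the underlying file handle.
        with node_helpers.pillow(Image.open, file_path) as image:
            return image.convert("RGB")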
@@ -162,7 +162,7 @@ class BaseModel(torch.nn.Module):
 
         c_concat = kwargs.get("noise_concat", None)
         if c_concat is not None:
-            out['c_concat'] = conds.CONDNoiseShape(data)
+            out['c_concat'] = conds.CONDNoiseShape(c_concat)
 
         return out
 
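The change above fixes a wrong variable reference: the value pulled from kwargs (c_concat) is what gets wrapped, not an unrelated `data` name. A minimal sketch of the corrected pattern, where extra_conds_sketch and the conds parameter are illustrative stand-ins rather than ComfyUI's actual method:

    # Illustrative stand-in for the corrected extra_conds logic.
    def extra_conds_sketch(conds, **kwargs):
        out = {}
        c_concat = kwargs.get("noise_concat", None)
        if c_concat is not None:
            # Before this commit the code wrapped a name that was not the kwarg
            # value; wrapping c_concat itself is the fix.
            out['c_concat'] = conds.CONDNoiseShape(c_concat)
        return out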
@@ -1,4 +1,4 @@
-from PIL import Image, ImageFile, UnidentifiedImageError
+from PIL import ImageFile, UnidentifiedImageError
 
 def conditioning_set_values(conditioning, values={}):
     c = []
@@ -10,16 +10,15 @@ def conditioning_set_values(conditioning, values={}):
 
     return c
 
-def open_image(path):
+def pillow(fn, arg):
     prev_value = None
-
     try:
-        img = Image.open(path)
-    except (UnidentifiedImageError, ValueError): #PIL issues #4472 and #2445
+        x = fn(arg)
+    except (OSError, UnidentifiedImageError, ValueError): #PIL issues #4472 and #2445, also fixes ComfyUI issue #3416
         prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
         ImageFile.LOAD_TRUNCATED_IMAGES = True
-        img = Image.open(path)
+        x = fn(arg)
     finally:
         if prev_value is not None:
             ImageFile.LOAD_TRUNCATED_IMAGES = prev_value
-    return img
+    return x
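The generalized helper centralizes PIL's truncated-file workaround: it calls fn(arg) once and, if PIL raises OSError, UnidentifiedImageError, or ValueError, retries with ImageFile.LOAD_TRUNCATED_IMAGES enabled, then restores the previous value. A short usage sketch based on the two call sites in this diff (the file name is illustrative):

    from PIL import Image, ImageOps
    import node_helpers

    # Open an image, tolerating truncated/damaged files on a second attempt.
    img = node_helpers.pillow(Image.open, "example.png")
    # Apply the EXIF rotation through the same guard.
    img = node_helpers.pillow(ImageOps.exif_transpose, img)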
@@ -1450,6 +1450,9 @@ class LoadImage:
 
     def load_image(self, image: str):
         image_path = folder_paths.get_annotated_filepath(image)
+
+        img = node_helpers.pillow(Image.open, image_path)
+
         output_images = []
         output_masks = []
 
@@ -1461,16 +1464,8 @@ class LoadImage:
             return load_exr(image_path, srgb=False)
         with open_image(image_path) as img:
             for i in ImageSequence.Iterator(img):
-                prev_value = None
-                try:
-                    i = ImageOps.exif_transpose(i)
-                except OSError:
-                    prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
-                    ImageFile.LOAD_TRUNCATED_IMAGES = True
-                    i = ImageOps.exif_transpose(i)
-                finally:
-                    if prev_value is not None:
-                        ImageFile.LOAD_TRUNCATED_IMAGES = prev_value
+                i = node_helpers.pillow(ImageOps.exif_transpose, i)
                 if i.mode == 'I':
                     i = i.point(lambda i: i * (1 / 255))
                 image = i.convert("RGB")
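With the helper in place, the per-frame loop no longer needs its own try/finally around LOAD_TRUNCATED_IMAGES. A condensed sketch of the resulting pattern, standalone for illustration (the path and the frames list are assumptions, not code from the commit):

    from PIL import Image, ImageOps, ImageSequence
    import node_helpers

    img = node_helpers.pillow(Image.open, "animation.gif")  # illustrative path
    frames = []
    for i in ImageSequence.Iterator(img):
        i = node_helpers.pillow(ImageOps.exif_transpose, i)
        if i.mode == 'I':
            i = i.point(lambda p: p * (1 / 255))
        frames.append(i.convert("RGB"))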
@@ -1525,8 +1520,8 @@ class LoadImageMask:
     FUNCTION = "load_image"
     def load_image(self, image, channel):
         image_path = folder_paths.get_annotated_filepath(image)
-        i = Image.open(image_path)
-        i = ImageOps.exif_transpose(i)
+        i = node_helpers.pillow(Image.open, image_path)
+        i = node_helpers.pillow(ImageOps.exif_transpose, i)
         if i.getbands() != ("R", "G", "B", "A"):
             if i.mode == 'I':
                 i = i.point(lambda i: i * (1 / 255))
@@ -485,7 +485,11 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         clip = CLIP(clip_target, embedding_directory=embedding_directory)
         m, u = clip.load_sd(clip_sd, full_model=True)
         if len(m) > 0:
-            logging.warning("clip missing: {}".format(m))
+            m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))
+            if len(m_filter) > 0:
+                logging.warning("clip missing: {}".format(m))
+            else:
+                logging.debug("clip missing: {}".format(m))
 
         if len(u) > 0:
             logging.debug("clip unexpected {}:".format(u))
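The filter above drops missing keys that are expected to be absent (CLIP logit_scale and text_projection weights), so only genuinely missing keys still trigger a warning; the expected ones are logged at debug level instead. A standalone illustration with made-up key names:

    import logging

    m = ["clip_l.logit_scale", "clip_l.transformer.text_projection.weight"]  # made-up example keys
    m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))
    if len(m_filter) > 0:
        logging.warning("clip missing: {}".format(m))
    else:
        # Every missing key was one of the expected ones, so log at debug only.
        logging.debug("clip missing: {}".format(m))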