Update silver_custom.py
Change testing on hand tracking image to mask
parent 8e9bcc378c
commit 72d262d11e
@@ -6,43 +6,56 @@ class ExpandImageMask:
     def INPUT_TYPES(s):
         return {
             "required": {
-                "images": ("IMAGE",)
+                "images": ("IMAGE", )
             }
         }
 
     CATEGORY = "mask"
 
-    RETURN_TYPES = ("IMAGE",)
+    RETURN_TYPES = ("IMAGE", "MASK", )
     FUNCTION = "image_to_mask_image"
 
     def image_to_mask_image(self, images):
         mask_images = []
         for image in images:
             i = 255. * image.cpu().numpy()
-            # opencv_image = PIL.Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
-            # cv2.imwrite('opencv_image.png', i)
+
+            # Convert to grayscale
             image_gray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
-            image_gray = cv2.blur(image_gray, (10, 10))
-            # cv2.imwrite('image_gray1.png', image_gray)
-            image_gray = cv2.blur(image_gray, (20, 20))
-            # cv2.imwrite('image_gray2.png', image_gray)
 
-            # Convert the image to the expected data type
+            # Apply blurring to grayscale image
+            image_gray = cv2.blur(image_gray, (10, 10))
+            image_gray = cv2.blur(image_gray, (20, 20))
+
+            # Convert image to the expected data type
             image_gray = cv2.convertScaleAbs(image_gray)
 
-            # Apply the threshold using the modified image
+            # Apply threshold to grayscale image
            (thresh, im_bw) = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
 
+            # Apply blurring to binary mask image
             ksize = (50, 50)
             im_bw = cv2.blur(im_bw, ksize)
-            im_bw = cv2.threshold(im_bw, thresh, 255, cv2.THRESH_BINARY)[1]
-            im_bw = cv2.bitwise_not(im_bw)
-            # cv2.imwrite('im_bw.png', im_bw)
 
-            # Convert the binary mask image to a PyTorch tensor
+            # Threshold binary mask image again
+            im_bw = cv2.threshold(im_bw, thresh, 255, cv2.THRESH_BINARY)[1]
+
+            # Invert binary mask image
+            # im_bw = cv2.bitwise_not(im_bw)
+
+            # Convert binary mask image to PyTorch tensor
             img = torch.from_numpy(im_bw).unsqueeze(0).float()
+
+            # Append mask image tensor to list
             mask_images.append(img)
-        return tuple(mask_images)
+
+        # Stack list of mask image tensors into a single tensor
+        mask_images_tensor = torch.cat(mask_images)
+
+        # Return tuple of mask images and single mask image
+        single_mask_image = mask_images_tensor[0, :, :]
+        return mask_images_tensor, single_mask_image
+
 
 NODE_CLASS_MAPPINGS = {
     "ExpandImageMask": ExpandImageMask
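The hunk begins at line 6, so the top of silver_custom.py is not shown. The skeleton below is only a guess at that surrounding context, inferred from what the visible code references (cv2, torch, the ExpandImageMask class named in the hunk header, and the NODE_CLASS_MAPPINGS dict whose closing brace falls outside the diff); the classmethod decorator on INPUT_TYPES is assumed from the usual ComfyUI node convention, not taken from this commit.

import cv2
import torch


class ExpandImageMask:
    # Assumed: ComfyUI nodes normally declare INPUT_TYPES as a classmethod,
    # with the decorator sitting just above where the hunk starts.
    @classmethod
    def INPUT_TYPES(s):
        ...

    # CATEGORY, RETURN_TYPES, FUNCTION and image_to_mask_image as in the diff above.


NODE_CLASS_MAPPINGS = {
    "ExpandImageMask": ExpandImageMask
}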
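The masking steps kept by this commit can be exercised outside ComfyUI. The sketch below replays the same cv2 calls on a synthetic array; demo_image and the bright square drawn onto it are invented purely so the Otsu threshold has two regions to separate, and are not part of the node.

import cv2
import numpy as np
import torch

# Synthetic stand-in for what the node sees after `i = 255. * image.cpu().numpy()`:
# an H x W x 3 float array with values in 0..255.
demo_image = np.zeros((256, 256, 3), dtype=np.float32)
demo_image[64:192, 64:192] = 255.0  # bright block standing in for a hand region

# Grayscale, two box blurs, then convert to uint8 (Otsu needs 8-bit input).
image_gray = cv2.cvtColor(demo_image, cv2.COLOR_BGR2GRAY)
image_gray = cv2.blur(image_gray, (10, 10))
image_gray = cv2.blur(image_gray, (20, 20))
image_gray = cv2.convertScaleAbs(image_gray)

# Otsu picks the threshold; blurring the binary mask and re-thresholding it at
# the same value smooths the boundary and shifts it outward or inward depending
# on where that value falls within the blurred edge.
thresh, im_bw = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
im_bw = cv2.blur(im_bw, (50, 50))
im_bw = cv2.threshold(im_bw, thresh, 255, cv2.THRESH_BINARY)[1]

# Same tensor conversion as the node: a [1, H, W] float mask holding 0 or 255.
mask = torch.from_numpy(im_bw).unsqueeze(0).float()
print(mask.shape, mask.unique())

Note that the mask values stay 0 or 255 after .float(); the code as written does not rescale them to 0..1.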
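On the shape side of the change: the function used to return tuple(mask_images), one [1, H, W] tensor per input image, and now returns the concatenated batch plus the first mask on its own, matching the new ("IMAGE", "MASK") return types. Below is a small sketch with dummy tensors in place of the cv2 output; the 4 x 512 x 512 sizes are made up for illustration.

import torch

# Stand-ins for the per-image masks built inside the loop, each shaped like
# torch.from_numpy(im_bw).unsqueeze(0).float(), i.e. [1, H, W].
mask_images = [torch.zeros(1, 512, 512) for _ in range(4)]

# torch.cat along dim 0 stacks them into an [N, H, W] batch ...
mask_images_tensor = torch.cat(mask_images)
print(mask_images_tensor.shape)    # torch.Size([4, 512, 512])

# ... and the second output is the first mask alone, [H, W].
single_mask_image = mask_images_tensor[0, :, :]
print(single_mask_image.shape)     # torch.Size([512, 512])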