Add new processors and change names

Hacker 17082006 2023-02-20 22:03:17 +07:00
parent fd21cbb13e
commit fb373b34e2


@@ -2,6 +2,7 @@ from . import canny, hed, midas, mlsd, openpose, uniformer
 from .util import HWC3
 import torch
 import numpy as np
+import cv2
 
 def img_np_to_tensor(img_np):
     return torch.from_numpy(img_np.astype(np.float32) / 255.0)[None,]
@@ -11,8 +12,16 @@ def img_tensor_to_np(img_tensor):
     return img_tensor.squeeze(0).numpy().astype(np.uint8)
 
 #Thanks ChatGPT
+def common_annotator_call(annotator_callback, tensor_image, *args):
+    call_result = annotator_callback(img_tensor_to_np(tensor_image), *args)
+    if type(call_result) is tuple:
+        for i in range(len(call_result)):
+            call_result[i] = HWC3(call_result[i])
+    else:
+        call_result = HWC3(call_result)
+    return call_result
 
-class CannyPreprocessor:
+class CannyEdgePreproces:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE", ) ,
@@ -26,27 +35,59 @@ class CannyPreprocessor:
     CATEGORY = "preprocessor"
 
     def detect_edge(self, image, low_threshold, high_threshold, l2gradient):
-        apply_canny = canny.CannyDetector()
-        image = apply_canny(img_tensor_to_np(image), low_threshold, high_threshold, l2gradient == "enable")
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_canny2image.py
+        np_detected_map = common_annotator_call(canny.CannyDetector(), image, low_threshold, high_threshold, l2gradient == "enable")
+        return (img_np_to_tensor(np_detected_map),)
 
-class HEDPreprocessor:
+class HEDPreproces:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE",) }}
     RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "detect_edge"
+    FUNCTION = "detect_boundary"
 
     CATEGORY = "preprocessor"
 
-    def detect_edge(self, image):
-        apply_hed = hed.HEDdetector()
-        image = apply_hed(img_tensor_to_np(image))
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+    def detect_boundary(self, image):
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_hed2image.py
+        np_detected_map = common_annotator_call(hed.HEDdetector(), image)
+        return (img_np_to_tensor(np_detected_map),)
 
-class MIDASPreprocessor:
+class ScribblePreprocess:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE",) }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "transform_scribble"
+
+    CATEGORY = "preprocessor"
+
+    def transform_scribble(self, image):
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_scribble2image.py
+        np_img = img_tensor_to_np(image)
+        np_detected_map = np.zeros_like(np_img, dtype=np.uint8)
+        np_detected_map[np.min(np_img, axis=2) < 127] = 255
+        return (img_np_to_tensor(np_detected_map),)
+
+class FakeScribblePreprocess:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE",) }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "transform_scribble"
+
+    CATEGORY = "preprocessor"
+
+    def transform_scribble(self, image):
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_fake_scribble2image.py
+        np_detected_map = common_annotator_call(hed.HEDdetector(), image)
+        np_detected_map = hed.nms(np_detected_map, 127, 3.0)
+        np_detected_map = cv2.GaussianBlur(np_detected_map, (0, 0), 3.0)
+        np_detected_map[np_detected_map > 4] = 255
+        np_detected_map[np_detected_map < 255] = 0
+        return (img_np_to_tensor(np_detected_map),)
+
+class MIDASDepthPreprocess:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE", ) ,
@@ -59,12 +100,28 @@ class MIDASPreprocessor:
     CATEGORY = "preprocessor"
 
     def estimate_depth(self, image, a, bg_threshold):
-        model_midas = midas.MidasDetector()
-        image, _ = model_midas(img_tensor_to_np(image), a, bg_threshold)
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_depth2image.py
+        depth_map_np, normal_map_np = common_annotator_call(midas.MidasDetector(), image, a, bg_threshold)
+        return (img_np_to_tensor(depth_map_np),)
 
-class MLSDPreprocessor:
+class MIDASNormalPreprocess:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE", ) ,
+                              "a": ("FLOAT", {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.1}),
+                              "bg_threshold": ("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.1})
+                              }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "estimate_normal"
+
+    CATEGORY = "preprocessor"
+
+    def estimate_normal(self, image, a, bg_threshold):
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_depth2image.py
+        depth_map_np, normal_map_np = common_annotator_call(midas.MidasDetector(), image, a, bg_threshold)
+        return (img_np_to_tensor(normal_map_np),)
+
+class MLSDPreprocess:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE",) ,
@@ -78,12 +135,11 @@ class MLSDPreprocessor:
     CATEGORY = "preprocessor"
 
     def detect_edge(self, image, score_threshold, dist_threshold):
-        model_mlsd = mlsd.MLSDdetector()
-        image = model_mlsd(img_tensor_to_np(image), score_threshold, dist_threshold)
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_hough2image.py
+        np_detected_map = common_annotator_call(mlsd.MLSDdetector(), image, score_threshold, dist_threshold)
+        return (img_np_to_tensor(np_detected_map),)
 
-class OpenPosePreprocessor:
+class OpenposePreprocess:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE", ),
@@ -95,12 +151,11 @@ class OpenPosePreprocessor:
     CATEGORY = "preprocessor"
 
     def estimate_pose(self, image, detect_hand):
-        model_openpose = openpose.OpenposeDetector()
-        image, _ = model_openpose(img_tensor_to_np(image), detect_hand == "enable")
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_pose2image.py
+        np_detected_map = common_annotator_call(openpose.OpenposeDetector(), image, detect_hand == "enable")
+        return (img_np_to_tensor(np_detected_map),)
 
-class UniformerPreprocessor:
+class UniformerPreprocess:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "image": ("IMAGE", )
@@ -111,15 +166,18 @@ class UniformerPreprocessor:
     CATEGORY = "preprocessor"
 
     def semantic_segmentate(self, image):
-        model_uniformer = uniformer.UniformerDetector()
-        image = model_uniformer(img_np_to_tensor(image))
-        image = img_np_to_tensor(HWC3(image))
-        return (image,)
+        #Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_seg2image.py
+        np_detected_map = common_annotator_call(uniformer.UniformerDetector(), image)
+        return (img_np_to_tensor(np_detected_map),)
 
 NODE_CLASS_MAPPINGS = {
-    "CannyPreprocessor": CannyPreprocessor,
-    "HEDPreprocessor": HEDPreprocessor,
-    "DepthPreprocessor": MIDASPreprocessor,
-    "MLSDPreprocessor": MLSDPreprocessor,
-    "OpenPosePreprocessor": OpenPosePreprocessor,
+    "CannyEdgePreproces": CannyEdgePreproces,
+    "M-LSDPreprocess": MLSDPreprocess,
+    "HEDPreproces": HEDPreproces,
+    "ScribblePreprocess": ScribblePreprocess,
+    "FakeScribblePreprocess": FakeScribblePreprocess,
+    "OpenposePreprocess": OpenposePreprocess,
+    "MiDaS-DepthPreprocess": MIDASDepthPreprocess,
+    "MiDaS-NormalPreprocess": MIDASNormalPreprocess,
+    "SemSegPreprocess": UniformerPreprocess
 }
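
For context, every node touched by this commit follows the same pattern: a ComfyUI IMAGE tensor (float32 in [0, 1], shaped [B, H, W, C]) is converted to a uint8 NumPy image, run through a ControlNet annotator via common_annotator_call, forced to a 3-channel HWC map, and converted back to a tensor. The sketch below reproduces that round trip using plain OpenCV in place of the repository's canny.CannyDetector; hwc3 and demo_canny_call are illustrative stand-ins (not part of this commit), and the 0..1 float convention for IMAGE tensors is assumed.

import cv2
import numpy as np
import torch

def hwc3(img):
    # Minimal stand-in for .util.HWC3: promote a grayscale map to 3 channels.
    if img.ndim == 2:
        img = np.stack([img] * 3, axis=2)
    return img

def demo_canny_call(image_tensor, low_threshold=100, high_threshold=200):
    # image_tensor: ComfyUI-style IMAGE, float32 in [0, 1], shape [1, H, W, C].
    np_img = (image_tensor.squeeze(0).numpy() * 255.0).astype(np.uint8)
    detected = cv2.Canny(np_img, low_threshold, high_threshold)  # plain OpenCV instead of canny.CannyDetector()
    detected = hwc3(detected)
    # Back to the tensor layout the nodes return.
    return torch.from_numpy(detected.astype(np.float32) / 255.0)[None,]

# Example: a random 512x512 image run through the sketch.
dummy = torch.rand(1, 512, 512, 3)
edge_map = demo_canny_call(dummy)
print(edge_map.shape)  # torch.Size([1, 512, 512, 3])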