mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-21 03:40:16 +08:00
Add UniformerPreprocessor and the torchvision requirement
This commit is contained in:
parent
96a0804a11
commit
f2b476fbac
36
nodes.py
36
nodes.py
@ -9,7 +9,6 @@ import copy
|
|||||||
from PIL import Image
|
from PIL import Image
|
||||||
from PIL.PngImagePlugin import PngInfo
|
from PIL.PngImagePlugin import PngInfo
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import torchvision.transforms as transforms
|
|
||||||
|
|
||||||
sys.path.insert(0, os.path.join(sys.path[0], "comfy"))
|
sys.path.insert(0, os.path.join(sys.path[0], "comfy"))
|
||||||
|
|
||||||
@ -232,8 +231,8 @@ class CannyPreprocessor:
|
|||||||
|
|
||||||
def detect_edge(self, image, low_threshold, high_threshold, l2gradient):
    """Run Canny edge detection on *image* and return it as a 1-tuple tensor.

    l2gradient is the string "enable"/"disable" coming from the node UI;
    only the exact value "enable" turns on the L2 gradient norm.
    """
    detector = canny.CannyDetector()
    use_l2 = l2gradient == "enable"
    # ToTensor converts the detector's numpy output back into a torch tensor.
    edge_map = detector(image.numpy(), low_threshold, high_threshold, use_l2)
    image = transforms.ToTensor()(edge_map)
    return (image,)
|
|
||||||
class HEDPreprocessor:
|
class HEDPreprocessor:
|
||||||
@ -247,8 +246,7 @@ class HEDPreprocessor:
|
|||||||
|
|
||||||
def detect_edge(self, image):
    """Run HED (holistically-nested edge detection) on *image*.

    Returns a 1-tuple holding the detector output converted to a tensor.
    """
    detector = hed.HEDdetector()
    # Detector works on numpy arrays; ToTensor brings the result back to torch.
    edges = detector(image.numpy())
    image = transforms.ToTensor()(edges)
    return (image,)
|
|
||||||
class MIDASPreprocessor:
|
class MIDASPreprocessor:
|
||||||
@ -265,8 +263,8 @@ class MIDASPreprocessor:
|
|||||||
|
|
||||||
def estimate_depth(self, image, a, bg_threshold):
    """Estimate a depth map for *image* with the MiDaS detector.

    *a* and *bg_threshold* are forwarded untouched to MidasDetector.
    Returns the depth map as a tensor wrapped in a 1-tuple.
    """
    depth_model = midas.MidasDetector()
    # Model consumes numpy; ToTensor converts its output back to torch.
    depth = depth_model(image.numpy(), a, bg_threshold)
    image = transforms.ToTensor()(depth)
    return (image,)
||||||
|
|
||||||
class MLSDPreprocessor:
|
class MLSDPreprocessor:
|
||||||
@ -284,8 +282,8 @@ class MLSDPreprocessor:
|
|||||||
|
|
||||||
def detect_edge(self, image, score_threshold, dist_threshold):
    """Detect straight line segments in *image* with the MLSD detector.

    Thresholds are passed straight through to MLSDdetector.
    Returns the line map as a tensor wrapped in a 1-tuple.
    """
    line_model = mlsd.MLSDdetector()
    # Detector consumes numpy; ToTensor converts its output back to torch.
    lines = line_model(image.numpy(), score_threshold, dist_threshold)
    image = transforms.ToTensor()(lines)
    return (image,)
|
|
||||||
class OpenPosePreprocessor:
|
class OpenPosePreprocessor:
|
||||||
@ -301,10 +299,26 @@ class OpenPosePreprocessor:
|
|||||||
|
|
||||||
def estimate_pose(self, image, detect_hand):
    """Render an OpenPose skeleton map for *image*.

    detect_hand is the string "enable"/"disable" from the node UI; only the
    exact value "enable" turns hand keypoint detection on.
    """
    pose_model = openpose.OpenposeDetector()
    with_hands = detect_hand == "enable"
    # Model consumes numpy; ToTensor converts its output back to torch.
    pose_map = pose_model(image.numpy(), with_hands)
    image = transforms.ToTensor()(pose_map)
    return (image,)
|
|
||||||
|
class UniformerPreprocessor:
    """ComfyUI preprocessor node: semantic segmentation via UniformerDetector."""

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "semantic_segmentate"
    CATEGORY = "preprocessor"

    @classmethod
    def INPUT_TYPES(s):
        # Single required image input, same shape as the other preprocessor nodes.
        return {"required": {"image": ("IMAGE", )
                              }}

    def semantic_segmentate(self, image):
        """Return the segmentation map for *image* as a tensor in a 1-tuple."""
        # NOTE(review): sibling preprocessors in this commit convert their
        # output with transforms.ToTensor(), which changes layout/scaling;
        # this node uses torch.from_numpy instead — confirm that difference
        # is intentional.
        detector = uniformer.UniformerDetector()
        segmented = detector(image.numpy())
        image = torch.from_numpy(segmented)
        return (image,)
|
|
||||||
|
|
||||||
class ControlNetLoader:
|
class ControlNetLoader:
|
||||||
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
|
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
|
||||||
controlnet_dir = os.path.join(models_dir, "controlnet")
|
controlnet_dir = os.path.join(models_dir, "controlnet")
|
||||||
|
|||||||
@ -1,7 +1,6 @@
|
|||||||
torch
|
torch
|
||||||
torchdiffeq
|
torchdiffeq
|
||||||
torchsde
|
torchsde
|
||||||
torchvision
|
|
||||||
omegaconf
|
omegaconf
|
||||||
einops
|
einops
|
||||||
open-clip-torch
|
open-clip-torch
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user