From f45c0af88aaefc6e8022d9f60ebf99f76fa4686e Mon Sep 17 00:00:00 2001
From: Silversith
Date: Sun, 23 Apr 2023 19:23:12 +0200
Subject: [PATCH] Extra node + move face restore

Move face restoration to comfy_extras and add a node that converts
hand-tracking images to mask images.
---
 .../facerestore/__init__.py                   |   4 +-
 .../facerestore/facelib/__init__.py           |   0
 .../facerestore/facelib/detection/__init__.py |   4 +-
 .../facelib/detection/align_trans.py          |   0
 .../facelib/detection/matlab_cp2tform.py      |   0
 .../detection/retinaface/retinaface.py        |   6 +-
 .../detection/retinaface/retinaface_net.py    |   0
 .../detection/retinaface/retinaface_utils.py  |   0
 .../facelib/detection/yolov5face/__init__.py  |   0
 .../detection/yolov5face/face_detector.py     |   6 +-
 .../detection/yolov5face/utils}/__init__.py   |   0
 .../detection/yolov5face/utils/autoanchor.py  |   0
 .../detection/yolov5face/utils/datasets.py    |   0
 .../yolov5face/utils/extract_ckpt.py          |   0
 .../detection/yolov5face/utils/general.py     |   0
 .../detection/yolov5face/utils/torch_utils.py |   0
 .../facerestore/facelib/parsing/__init__.py   |   2 +-
 .../facerestore/facelib/parsing/bisenet.py    |   0
 .../facerestore/facelib/parsing/parsenet.py   |   0
 .../facerestore/facelib/parsing/resnet.py     |   0
 .../facerestore/facelib/utils/__init__.py     |   0
 .../facelib/utils/face_restoration_helper.py  |   6 +-
 .../facerestore/facelib/utils/face_utils.py   |   4 +-
 .../facerestore/facelib/utils/misc.py         |   0
 comfy_extras/nodes_mask.py                    |   1 -
 comfy_extras/silver_custom.py                 |  49 ++-
 .../detection/yolov5face/models/common.py     | 299 ------------------
 .../yolov5face/models/experimental.py         |  45 ---
 .../detection/yolov5face/models/yolo.py       | 235 --------------
 .../detection/yolov5face/models/yolov5l.yaml  |  47 ---
 .../detection/yolov5face/models/yolov5n.yaml  |  45 ---
 .../detection/yolov5face/utils/__init__.py    |   0
 folder_paths.py                               |   1 +
 im_bw.png                                     | Bin 0 -> 2970 bytes
 image_gray1.png                               | Bin 0 -> 5730 bytes
 image_gray2.png                               | Bin 0 -> 5417 bytes
 nodes.py                                      |  22 +-
 opencv_image.png                              | Bin 0 -> 4701 bytes
 38 files changed, 75 insertions(+), 701 deletions(-)
 rename {custom_nodes => comfy_extras}/facerestore/__init__.py (98%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/__init__.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/__init__.py (97%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/align_trans.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/matlab_cp2tform.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/retinaface/retinaface.py (98%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/retinaface/retinaface_net.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/retinaface/retinaface_utils.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/__init__.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/face_detector.py (96%)
 rename {custom_nodes/facerestore/facelib/detection/yolov5face/models => comfy_extras/facerestore/facelib/detection/yolov5face/utils}/__init__.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/utils/autoanchor.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/utils/datasets.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/utils/extract_ckpt.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/utils/general.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/detection/yolov5face/utils/torch_utils.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/parsing/__init__.py (93%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/parsing/bisenet.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/parsing/parsenet.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/parsing/resnet.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/utils/__init__.py (100%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/utils/face_restoration_helper.py (99%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/utils/face_utils.py (98%)
 rename {custom_nodes => comfy_extras}/facerestore/facelib/utils/misc.py (100%)
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/models/common.py
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/models/experimental.py
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/models/yolo.py
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5l.yaml
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5n.yaml
 delete mode 100644 custom_nodes/facerestore/facelib/detection/yolov5face/utils/__init__.py
 create mode 100644 im_bw.png
 create mode 100644 image_gray1.png
 create mode 100644 image_gray2.png
 create mode 100644 opencv_image.png

diff --git a/custom_nodes/facerestore/__init__.py b/comfy_extras/facerestore/__init__.py
similarity index 98%
rename from custom_nodes/facerestore/__init__.py
rename to comfy_extras/facerestore/__init__.py
index 17e3e0649..392cb29f7 100644
--- a/custom_nodes/facerestore/__init__.py
+++ b/comfy_extras/facerestore/__init__.py
@@ -7,8 +7,8 @@ import comfy.utils
 import numpy as np
 import cv2
 import math
-from custom_nodes.facerestore.facelib.utils.face_restoration_helper import FaceRestoreHelper
-from custom_nodes.facerestore.facelib.detection.retinaface import retinaface
+from comfy_extras.facerestore.facelib.utils.face_restoration_helper import FaceRestoreHelper
+from comfy_extras.facerestore.facelib.detection.retinaface import retinaface
 from torchvision.transforms.functional import normalize
 
 def img2tensor(imgs, bgr2rgb=True, float32=True):
diff --git a/custom_nodes/facerestore/facelib/__init__.py b/comfy_extras/facerestore/facelib/__init__.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/__init__.py
rename to comfy_extras/facerestore/facelib/__init__.py
diff --git a/custom_nodes/facerestore/facelib/detection/__init__.py b/comfy_extras/facerestore/facelib/detection/__init__.py
similarity index 97%
rename from custom_nodes/facerestore/facelib/detection/__init__.py
rename to comfy_extras/facerestore/facelib/detection/__init__.py
index e1bbc10bc..54c728cd1 100644
--- a/custom_nodes/facerestore/facelib/detection/__init__.py
+++ b/comfy_extras/facerestore/facelib/detection/__init__.py
@@ -3,8 +3,8 @@ import torch
 from torch import nn
 from copy import deepcopy
 
-from custom_nodes.facerestore.facelib.utils import load_file_from_url
-from custom_nodes.facerestore.facelib.detection.yolov5face.models.common import Conv
+from comfy_extras.facerestore.facelib.utils import load_file_from_url
+from comfy_extras.facerestore.facelib.detection.yolov5face.models.common import Conv
 
 from .retinaface.retinaface import RetinaFace
 from .yolov5face.face_detector import YoloDetector
diff --git a/custom_nodes/facerestore/facelib/detection/align_trans.py b/comfy_extras/facerestore/facelib/detection/align_trans.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/align_trans.py
rename to comfy_extras/facerestore/facelib/detection/align_trans.py
diff --git a/custom_nodes/facerestore/facelib/detection/matlab_cp2tform.py b/comfy_extras/facerestore/facelib/detection/matlab_cp2tform.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/matlab_cp2tform.py
rename to comfy_extras/facerestore/facelib/detection/matlab_cp2tform.py
diff --git a/custom_nodes/facerestore/facelib/detection/retinaface/retinaface.py b/comfy_extras/facerestore/facelib/detection/retinaface/retinaface.py
similarity index 98%
rename from custom_nodes/facerestore/facelib/detection/retinaface/retinaface.py
rename to comfy_extras/facerestore/facelib/detection/retinaface/retinaface.py
index df7a91f2a..2efd82ae7 100644
--- a/custom_nodes/facerestore/facelib/detection/retinaface/retinaface.py
+++ b/comfy_extras/facerestore/facelib/detection/retinaface/retinaface.py
@@ -6,9 +6,9 @@ import torch.nn.functional as F
 from PIL import Image
 from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter
 
-from custom_nodes.facerestore.facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
-from custom_nodes.facerestore.facelib.detection.retinaface.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
-from custom_nodes.facerestore.facelib.detection.retinaface.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
+from comfy_extras.facerestore.facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
+from comfy_extras.facerestore.facelib.detection.retinaface.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
+from comfy_extras.facerestore.facelib.detection.retinaface.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
                                                                                      py_cpu_nms)
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
diff --git a/custom_nodes/facerestore/facelib/detection/retinaface/retinaface_net.py b/comfy_extras/facerestore/facelib/detection/retinaface/retinaface_net.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/retinaface/retinaface_net.py
rename to comfy_extras/facerestore/facelib/detection/retinaface/retinaface_net.py
diff --git a/custom_nodes/facerestore/facelib/detection/retinaface/retinaface_utils.py b/comfy_extras/facerestore/facelib/detection/retinaface/retinaface_utils.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/retinaface/retinaface_utils.py
rename to comfy_extras/facerestore/facelib/detection/retinaface/retinaface_utils.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/__init__.py b/comfy_extras/facerestore/facelib/detection/yolov5face/__init__.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/__init__.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/__init__.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/face_detector.py b/comfy_extras/facerestore/facelib/detection/yolov5face/face_detector.py
similarity index 96%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/face_detector.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/face_detector.py
index 337668328..f785ead03 100644
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/face_detector.py
+++ b/comfy_extras/facerestore/facelib/detection/yolov5face/face_detector.py
@@ -7,9 +7,9 @@ import numpy as np
 import torch
 from torch import nn
 
-from custom_nodes.facerestore.facelib.detection.yolov5face.models.yolo import Model
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.datasets import letterbox
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.general import (
+from comfy_extras.facerestore.facelib.detection.yolov5face.models.yolo import Model
+from comfy_extras.facerestore.facelib.detection.yolov5face.utils.datasets import letterbox
+from comfy_extras.facerestore.facelib.detection.yolov5face.utils.general import (
     check_img_size,
     non_max_suppression_face,
     scale_coords,
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/__init__.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/__init__.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/models/__init__.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/__init__.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/autoanchor.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/autoanchor.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/utils/autoanchor.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/autoanchor.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/datasets.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/datasets.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/utils/datasets.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/datasets.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/extract_ckpt.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/extract_ckpt.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/utils/extract_ckpt.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/extract_ckpt.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/general.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/general.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/utils/general.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/general.py
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/torch_utils.py b/comfy_extras/facerestore/facelib/detection/yolov5face/utils/torch_utils.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/detection/yolov5face/utils/torch_utils.py
rename to comfy_extras/facerestore/facelib/detection/yolov5face/utils/torch_utils.py
diff --git a/custom_nodes/facerestore/facelib/parsing/__init__.py b/comfy_extras/facerestore/facelib/parsing/__init__.py
similarity index 93%
rename from custom_nodes/facerestore/facelib/parsing/__init__.py
rename to comfy_extras/facerestore/facelib/parsing/__init__.py
index 6e57df110..e2e63cc40 100644
--- a/custom_nodes/facerestore/facelib/parsing/__init__.py
+++ b/comfy_extras/facerestore/facelib/parsing/__init__.py
@@ -1,6 +1,6 @@
 import torch
 
-from custom_nodes.facerestore.facelib.utils import load_file_from_url
+from comfy_extras.facerestore.facelib.utils import load_file_from_url
 
 from .bisenet import BiSeNet
 from .parsenet import ParseNet
diff --git a/custom_nodes/facerestore/facelib/parsing/bisenet.py b/comfy_extras/facerestore/facelib/parsing/bisenet.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/parsing/bisenet.py
rename to comfy_extras/facerestore/facelib/parsing/bisenet.py
diff --git a/custom_nodes/facerestore/facelib/parsing/parsenet.py b/comfy_extras/facerestore/facelib/parsing/parsenet.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/parsing/parsenet.py
rename to comfy_extras/facerestore/facelib/parsing/parsenet.py
diff --git a/custom_nodes/facerestore/facelib/parsing/resnet.py b/comfy_extras/facerestore/facelib/parsing/resnet.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/parsing/resnet.py
rename to comfy_extras/facerestore/facelib/parsing/resnet.py
diff --git a/custom_nodes/facerestore/facelib/utils/__init__.py b/comfy_extras/facerestore/facelib/utils/__init__.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/utils/__init__.py
rename to comfy_extras/facerestore/facelib/utils/__init__.py
diff --git a/custom_nodes/facerestore/facelib/utils/face_restoration_helper.py b/comfy_extras/facerestore/facelib/utils/face_restoration_helper.py
similarity index 99%
rename from custom_nodes/facerestore/facelib/utils/face_restoration_helper.py
rename to comfy_extras/facerestore/facelib/utils/face_restoration_helper.py
index 38a50c120..e257528d2 100644
--- a/custom_nodes/facerestore/facelib/utils/face_restoration_helper.py
+++ b/comfy_extras/facerestore/facelib/utils/face_restoration_helper.py
@@ -4,9 +4,9 @@ import os
 import torch
 from torchvision.transforms.functional import normalize
 
-from custom_nodes.facerestore.facelib.detection import init_detection_model
-from custom_nodes.facerestore.facelib.parsing import init_parsing_model
-from custom_nodes.facerestore.facelib.utils.misc import img2tensor, imwrite
+from comfy_extras.facerestore.facelib.detection import init_detection_model
+from comfy_extras.facerestore.facelib.parsing import init_parsing_model
+from comfy_extras.facerestore.facelib.utils.misc import img2tensor, imwrite
 
 
 def get_largest_face(det_faces, h, w):
diff --git a/custom_nodes/facerestore/facelib/utils/face_utils.py b/comfy_extras/facerestore/facelib/utils/face_utils.py
similarity index 98%
rename from custom_nodes/facerestore/facelib/utils/face_utils.py
rename to comfy_extras/facerestore/facelib/utils/face_utils.py
index 7e9520762..a84a666ed 100644
--- a/custom_nodes/facerestore/facelib/utils/face_utils.py
+++ b/comfy_extras/facerestore/facelib/utils/face_utils.py
@@ -211,8 +211,8 @@ def paste_face_back(img, face, inverse_affine):
 if __name__ == '__main__':
     import os
 
-    from custom_nodes.facerestore.facelib.detection import init_detection_model
-    from custom_nodes.facerestore.facelib.utils.face_restoration_helper import get_largest_face
+    from comfy_extras.facerestore.facelib.detection import init_detection_model
+    from comfy_extras.facerestore.facelib.utils.face_restoration_helper import get_largest_face
 
     img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
     img_name = os.splitext(os.path.basename(img_path))[0]
diff --git a/custom_nodes/facerestore/facelib/utils/misc.py b/comfy_extras/facerestore/facelib/utils/misc.py
similarity index 100%
rename from custom_nodes/facerestore/facelib/utils/misc.py
rename to comfy_extras/facerestore/facelib/utils/misc.py
diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index 131cd6a9f..848fb550a 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -1,5 +1,4 @@
 import torch
-
 from nodes import MAX_RESOLUTION
 
 class LatentCompositeMasked:
diff --git a/comfy_extras/silver_custom.py b/comfy_extras/silver_custom.py
index dd82c19cf..370b07138 100644
--- a/comfy_extras/silver_custom.py
+++ b/comfy_extras/silver_custom.py
@@ -1,18 +1,49 @@
-class Note:
-    def __init__(self):
-        pass
+import cv2
+import torch
 
+class ExpandImageMask:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"text": ("STRING", {"multiline": True})}}
+        return {
+            "required": {
+                "images": ("IMAGE",)
+            }
+        }
 
-    RETURN_TYPES = ()
-    FUNCTION = "Note"
+    CATEGORY = "mask"
 
-    OUTPUT_NODE = False
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "image_to_mask_image"
 
-    CATEGORY = "silver_custom"
+    def image_to_mask_image(self, images):
+        mask_images = []
+        for image in images:
+            i = 255. * image.cpu().numpy()
+            # opencv_image = PIL.Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+            # cv2.imwrite('opencv_image.png', i)
+
+            image_gray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
+            image_gray = cv2.blur(image_gray, (10, 10))
+            # cv2.imwrite('image_gray1.png', image_gray)
+            image_gray = cv2.blur(image_gray, (20, 20))
+            # cv2.imwrite('image_gray2.png', image_gray)
+
+            # Convert the image to the expected data type
+            image_gray = cv2.convertScaleAbs(image_gray)
+
+            # Apply the threshold using the modified image
+            (thresh, im_bw) = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
+            ksize = (50, 50)
+            im_bw = cv2.blur(im_bw, ksize)
+            im_bw = cv2.threshold(im_bw, thresh, 255, cv2.THRESH_BINARY)[1]
+            im_bw = cv2.bitwise_not(im_bw)
+            # cv2.imwrite('im_bw.png', im_bw)
+
+            # Convert the binary mask image to a PyTorch tensor
+            img = torch.from_numpy(im_bw).unsqueeze(0).float()
+            mask_images.append(img)
+        return tuple(mask_images)
 
 NODE_CLASS_MAPPINGS = {
-    "Note": Note
+    "ExpandImageMask": ExpandImageMask
 }
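For reference, the node's masking pipeline can be exercised outside ComfyUI with a short standalone script. This is a sketch, not part of the patch: 'input.png' is a hypothetical test file, and the tensor below mimics the 0-1 float32 RGB IMAGE tensors ComfyUI hands to a node. Two caveats visible in the node body above: ComfyUI images are RGB, so its COLOR_BGR2GRAY merely swaps the red and blue weights during the grayscale conversion, and `return tuple(mask_images)` only matches the single declared IMAGE output when the incoming batch holds one image.

# Standalone sketch of the ExpandImageMask pipeline ('input.png' is hypothetical).
import cv2
import numpy as np
import torch

# Mimic a ComfyUI IMAGE: float32 in [0, 1], shape (H, W, C), RGB channel order.
rgb = cv2.cvtColor(cv2.imread("input.png"), cv2.COLOR_BGR2RGB)
image = torch.from_numpy(rgb.astype(np.float32) / 255.0)

i = 255. * image.numpy()                    # back to 0-255 float32
gray = cv2.cvtColor(i, cv2.COLOR_RGB2GRAY)  # RGB order here, unlike the node's BGR2GRAY
gray = cv2.blur(gray, (10, 10))             # two box blurs wash out fine detail
gray = cv2.blur(gray, (20, 20))
gray = cv2.convertScaleAbs(gray)            # to uint8, as Otsu thresholding requires
thresh, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
bw = cv2.blur(bw, (50, 50))                 # blurring the binary mask bleeds it outward...
bw = cv2.threshold(bw, thresh, 255, cv2.THRESH_BINARY)[1]  # ...re-thresholding keeps the growth
bw = cv2.bitwise_not(bw)                    # invert: bright input regions end up black
cv2.imwrite("mask.png", bw)

The blur-and-rethreshold pass after the Otsu step is what grows the mask outward, which appears to be the origin of the node's name.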
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/common.py b/custom_nodes/facerestore/facelib/detection/yolov5face/models/common.py
deleted file mode 100644
index 0df9b5b4d..000000000
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/models/common.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# This file contains modules common to various models
-
-import math
-
-import numpy as np
-import torch
-from torch import nn
-
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.datasets import letterbox
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.general import (
-    make_divisible,
-    non_max_suppression,
-    scale_coords,
-    xyxy2xywh,
-)
-
-
-def autopad(k, p=None):  # kernel, padding
-    # Pad to 'same'
-    if p is None:
-        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
-    return p
-
-
-def channel_shuffle(x, groups):
-    batchsize, num_channels, height, width = x.data.size()
-    channels_per_group = torch.div(num_channels, groups, rounding_mode="trunc")
-
-    # reshape
-    x = x.view(batchsize, groups, channels_per_group, height, width)
-    x = torch.transpose(x, 1, 2).contiguous()
-
-    # flatten
-    return x.view(batchsize, -1, height, width)
-
-
-def DWConv(c1, c2, k=1, s=1, act=True):
-    # Depthwise convolution
-    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
-
-
-class Conv(nn.Module):
-    # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-        super().__init__()
-        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
-
-    def forward(self, x):
-        return self.act(self.bn(self.conv(x)))
-
-    def fuseforward(self, x):
-        return self.act(self.conv(x))
-
-
-class StemBlock(nn.Module):
-    def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
-        super().__init__()
-        self.stem_1 = Conv(c1, c2, k, s, p, g, act)
-        self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
-        self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
-        self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
-        self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)
-
-    def forward(self, x):
-        stem_1_out = self.stem_1(x)
-        stem_2a_out = self.stem_2a(stem_1_out)
-        stem_2b_out = self.stem_2b(stem_2a_out)
-        stem_2p_out = self.stem_2p(stem_1_out)
-        return self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1))
-
-
-class Bottleneck(nn.Module):
-    # Standard bottleneck
-    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
-        super().__init__()
-        c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1, c_, 1, 1)
-        self.cv2 = Conv(c_, c2, 3, 1, g=g)
-        self.add = shortcut and c1 == c2
-
-    def forward(self, x):
-        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class BottleneckCSP(nn.Module):
-    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
-    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
-        super().__init__()
-        c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1, c_, 1, 1)
-        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
-        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
-        self.cv4 = Conv(2 * c_, c2, 1, 1)
-        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
-        self.act = nn.LeakyReLU(0.1, inplace=True)
-        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
-
-    def forward(self, x):
-        y1 = self.cv3(self.m(self.cv1(x)))
-        y2 = self.cv2(x)
-        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
-
-
-class C3(nn.Module):
-    # CSP Bottleneck with 3 convolutions
-    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
-        super().__init__()
-        c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1, c_, 1, 1)
-        self.cv2 = Conv(c1, c_, 1, 1)
-        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
-        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
-
-    def forward(self, x):
-        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
-
-
-class ShuffleV2Block(nn.Module):
-    def __init__(self, inp, oup, stride):
-        super().__init__()
-
-        if not 1 <= stride <= 3:
-            raise ValueError("illegal stride value")
-        self.stride = stride
-
-        branch_features = oup // 2
-
-        if self.stride > 1:
-            self.branch1 = nn.Sequential(
-                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
-                nn.BatchNorm2d(inp),
-                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
-                nn.BatchNorm2d(branch_features),
-                nn.SiLU(),
-            )
-        else:
-            self.branch1 = nn.Sequential()
-
-        self.branch2 = nn.Sequential(
-            nn.Conv2d(
-                inp if (self.stride > 1) else branch_features,
-                branch_features,
-                kernel_size=1,
-                stride=1,
-                padding=0,
-                bias=False,
-            ),
-            nn.BatchNorm2d(branch_features),
-            nn.SiLU(),
-            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
-            nn.BatchNorm2d(branch_features),
-            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
-            nn.BatchNorm2d(branch_features),
-            nn.SiLU(),
-        )
-
-    @staticmethod
-    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
-        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
-
-    def forward(self, x):
-        if self.stride == 1:
-            x1, x2 = x.chunk(2, dim=1)
-            out = torch.cat((x1, self.branch2(x2)), dim=1)
-        else:
-            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
-        out = channel_shuffle(out, 2)
-        return out
-
-
-class SPP(nn.Module):
-    # Spatial pyramid pooling layer used in YOLOv3-SPP
-    def __init__(self, c1, c2, k=(5, 9, 13)):
-        super().__init__()
-        c_ = c1 // 2  # hidden channels
-        self.cv1 = Conv(c1, c_, 1, 1)
-        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
-        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
-
-    def forward(self, x):
-        x = self.cv1(x)
-        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
-
-
-class Focus(nn.Module):
-    # Focus wh information into c-space
-    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-        super().__init__()
-        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
-
-    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
-        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
-
-
-class Concat(nn.Module):
-    # Concatenate a list of tensors along dimension
-    def __init__(self, dimension=1):
-        super().__init__()
-        self.d = dimension
-
-    def forward(self, x):
-        return torch.cat(x, self.d)
-
-
-class NMS(nn.Module):
-    # Non-Maximum Suppression (NMS) module
-    conf = 0.25  # confidence threshold
-    iou = 0.45  # IoU threshold
-    classes = None  # (optional list) filter by class
-
-    def forward(self, x):
-        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
-
-
-class AutoShape(nn.Module):
-    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
-    img_size = 640  # inference size (pixels)
-    conf = 0.25  # NMS confidence threshold
-    iou = 0.45  # NMS IoU threshold
-    classes = None  # (optional list) filter by class
-
-    def __init__(self, model):
-        super().__init__()
-        self.model = model.eval()
-
-    def autoshape(self):
-        print("autoShape already enabled, skipping... ")  # model already converted to model.autoshape()
-        return self
-
-    def forward(self, imgs, size=640, augment=False, profile=False):
-        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
-        #   OpenCV:   = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
-        #   PIL:      = Image.open('image.jpg')  # HWC x(720,1280,3)
-        #   numpy:    = np.zeros((720,1280,3))  # HWC
-        #   torch:    = torch.zeros(16,3,720,1280)  # BCHW
-        #   multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
-
-        p = next(self.model.parameters())  # for device and type
-        if isinstance(imgs, torch.Tensor):  # torch
-            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
-
-        # Pre-process
-        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
-        shape0, shape1 = [], []  # image and inference shapes
-        for i, im in enumerate(imgs):
-            im = np.array(im)  # to numpy
-            if im.shape[0] < 5:  # image in CHW
-                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
-            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
-            s = im.shape[:2]  # HWC
-            shape0.append(s)  # image shape
-            g = size / max(s)  # gain
-            shape1.append([y * g for y in s])
-            imgs[i] = im  # update
-        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
-        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
-        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
-        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
-        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0  # uint8 to fp16/32
-
-        # Inference
-        with torch.no_grad():
-            y = self.model(x, augment, profile)[0]  # forward
-        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
-
-        # Post-process
-        for i in range(n):
-            scale_coords(shape1, y[i][:, :4], shape0[i])
-
-        return Detections(imgs, y, self.names)
-
-
-class Detections:
-    # detections class for YOLOv5 inference results
-    def __init__(self, imgs, pred, names=None):
-        super().__init__()
-        d = pred[0].device  # device
-        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1.0, 1.0], device=d) for im in imgs]  # normalizations
-        self.imgs = imgs  # list of images as numpy arrays
-        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
-        self.names = names  # class names
-        self.xyxy = pred  # xyxy pixels
-        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
-        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
-        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
-        self.n = len(self.pred)
-
-    def __len__(self):
-        return self.n
-
-    def tolist(self):
-        # return a list of Detections objects, i.e. 'for result in results.tolist():'
-        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
-        for d in x:
-            for k in ["imgs", "pred", "xyxy", "xyxyn", "xywh", "xywhn"]:
-                setattr(d, k, getattr(d, k)[0])  # pop out of list
-        return x
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/experimental.py b/custom_nodes/facerestore/facelib/detection/yolov5face/models/experimental.py
deleted file mode 100644
index 9d230ce48..000000000
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/models/experimental.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# # This file contains experimental modules
-
-import numpy as np
-import torch
-from torch import nn
-
-from custom_nodes.facerestore.facelib.detection.yolov5face.models.common import Conv
-
-
-class CrossConv(nn.Module):
-    # Cross Convolution Downsample
-    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
-        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
-        super().__init__()
-        c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1, c_, (1, k), (1, s))
-        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
-        self.add = shortcut and c1 == c2
-
-    def forward(self, x):
-        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class MixConv2d(nn.Module):
-    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
-    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
-        super().__init__()
-        groups = len(k)
-        if equal_ch:  # equal c_ per group
-            i = torch.linspace(0, groups - 1e-6, c2).floor()  # c2 indices
-            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
-        else:  # equal weight.numel() per group
-            b = [c2] + [0] * groups
-            a = np.eye(groups + 1, groups, k=-1)
-            a -= np.roll(a, 1, axis=1)
-            a *= np.array(k) ** 2
-            a[0] = 1
-            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
-
-        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = nn.LeakyReLU(0.1, inplace=True)
-
-    def forward(self, x):
-        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolo.py b/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolo.py
deleted file mode 100644
index 1c7078cd8..000000000
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolo.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import math
-from copy import deepcopy
-from pathlib import Path
-
-import torch
-import yaml  # for torch hub
-from torch import nn
-
-from custom_nodes.facerestore.facelib.detection.yolov5face.models.common import (
-    C3,
-    NMS,
-    SPP,
-    AutoShape,
-    Bottleneck,
-    BottleneckCSP,
-    Concat,
-    Conv,
-    DWConv,
-    Focus,
-    ShuffleV2Block,
-    StemBlock,
-)
-from custom_nodes.facerestore.facelib.detection.yolov5face.models.experimental import CrossConv, MixConv2d
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.autoanchor import check_anchor_order
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.general import make_divisible
-from custom_nodes.facerestore.facelib.detection.yolov5face.utils.torch_utils import copy_attr, fuse_conv_and_bn
-
-
-class Detect(nn.Module):
-    stride = None  # strides computed during build
-    export = False  # onnx export
-
-    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
-        super().__init__()
-        self.nc = nc  # number of classes
-        self.no = nc + 5 + 10  # number of outputs per anchor
-        self.nl = len(anchors)  # number of detection layers
-        self.na = len(anchors[0]) // 2  # number of anchors
-        self.grid = [torch.zeros(1)] * self.nl  # init grid
-        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-        self.register_buffer("anchors", a)  # shape(nl,na,2)
-        self.register_buffer("anchor_grid", a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
-
-    def forward(self, x):
-        z = []  # inference output
-        if self.export:
-            for i in range(self.nl):
-                x[i] = self.m[i](x[i])
-            return x
-        for i in range(self.nl):
-            x[i] = self.m[i](x[i])  # conv
-            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-            if not self.training:  # inference
-                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                y = torch.full_like(x[i], 0)
-                y[..., [0, 1, 2, 3, 4, 15]] = x[i][..., [0, 1, 2, 3, 4, 15]].sigmoid()
-                y[..., 5:15] = x[i][..., 5:15]
-
-                y[..., 0:2] = (y[..., 0:2] * 2.0 - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
-                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-
-                y[..., 5:7] = (
-                    y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
-                )  # landmark x1 y1
-                y[..., 7:9] = (
-                    y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
-                )  # landmark x2 y2
-                y[..., 9:11] = (
-                    y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
-                )  # landmark x3 y3
-                y[..., 11:13] = (
-                    y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
-                )  # landmark x4 y4
-                y[..., 13:15] = (
-                    y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
-                )  # landmark x5 y5
-
-                z.append(y.view(bs, -1, self.no))
-
-        return x if self.training else (torch.cat(z, 1), x)
-
-    @staticmethod
-    def _make_grid(nx=20, ny=20):
-        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing="ij")  # for pytorch>=1.10
-        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
-class Model(nn.Module):
-    def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None):  # model, input channels, number of classes
-        super().__init__()
-        self.yaml_file = Path(cfg).name
-        with Path(cfg).open(encoding="utf8") as f:
-            self.yaml = yaml.safe_load(f)  # model dict
-
-        # Define model
-        ch = self.yaml["ch"] = self.yaml.get("ch", ch)  # input channels
-        if nc and nc != self.yaml["nc"]:
-            self.yaml["nc"] = nc  # override yaml value
-
-        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
-        self.names = [str(i) for i in range(self.yaml["nc"])]  # default names
-
-        # Build strides, anchors
-        m = self.model[-1]  # Detect()
-        if isinstance(m, Detect):
-            s = 128  # 2x min stride
-            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
-            m.anchors /= m.stride.view(-1, 1, 1)
-            check_anchor_order(m)
-            self.stride = m.stride
-            self._initialize_biases()  # only run once
-
-    def forward(self, x):
-        return self.forward_once(x)  # single-scale inference, train
-
-    def forward_once(self, x):
-        y = []  # outputs
-        for m in self.model:
-            if m.f != -1:  # if not from previous layer
-                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
-
-            x = m(x)  # run
-            y.append(x if m.i in self.save else None)  # save output
-
-        return x
-
-    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
-        # https://arxiv.org/abs/1708.02002 section 3.3
-        m = self.model[-1]  # Detect() module
-        for mi, s in zip(m.m, m.stride):  # from
-            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def _print_biases(self):
-        m = self.model[-1]  # Detect() module
-        for mi in m.m:  # from
-            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
-            print(("%6g Conv2d.bias:" + "%10.3g" * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
-
-    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-        print("Fusing layers... ")
-        for m in self.model.modules():
-            if isinstance(m, Conv) and hasattr(m, "bn"):
-                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
-                delattr(m, "bn")  # remove batchnorm
-                m.forward = m.fuseforward  # update forward
-            elif type(m) is nn.Upsample:
-                m.recompute_scale_factor = None  # torch 1.11.0 compatibility
-        return self
-
-    def nms(self, mode=True):  # add or remove NMS module
-        present = isinstance(self.model[-1], NMS)  # last layer is NMS
-        if mode and not present:
-            print("Adding NMS... ")
-            m = NMS()  # module
-            m.f = -1  # from
-            m.i = self.model[-1].i + 1  # index
-            self.model.add_module(name=str(m.i), module=m)  # add
-            self.eval()
-        elif not mode and present:
-            print("Removing NMS... ")
-            self.model = self.model[:-1]  # remove
-        return self
-
-    def autoshape(self):  # add autoShape module
-        print("Adding autoShape... ")
-        m = AutoShape(self)  # wrap model
-        copy_attr(m, self, include=("yaml", "nc", "hyp", "names", "stride"), exclude=())  # copy attributes
-        return m
-
-
-def parse_model(d, ch):  # model_dict, input_channels(3)
-    anchors, nc, gd, gw = d["anchors"], d["nc"], d["depth_multiple"], d["width_multiple"]
-    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
-    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
-
-    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-    for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
-        m = eval(m) if isinstance(m, str) else m  # eval strings
-        for j, a in enumerate(args):
-            try:
-                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-            except:
-                pass
-
-        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
-        if m in [
-            Conv,
-            Bottleneck,
-            SPP,
-            DWConv,
-            MixConv2d,
-            Focus,
-            CrossConv,
-            BottleneckCSP,
-            C3,
-            ShuffleV2Block,
-            StemBlock,
-        ]:
-            c1, c2 = ch[f], args[0]
-
-            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
-
-            args = [c1, c2, *args[1:]]
-            if m in [BottleneckCSP, C3]:
-                args.insert(2, n)
-                n = 1
-        elif m is nn.BatchNorm2d:
-            args = [ch[f]]
-        elif m is Concat:
-            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
-        elif m is Detect:
-            args.append([ch[x + 1] for x in f])
-            if isinstance(args[1], int):  # number of anchors
-                args[1] = [list(range(args[1] * 2))] * len(f)
-        else:
-            c2 = ch[f]
-
-        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
-        t = str(m)[8:-2].replace("__main__.", "")  # module type
-        np = sum(x.numel() for x in m_.parameters())  # number params
-        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-        layers.append(m_)
-        ch.append(c2)
-    return nn.Sequential(*layers), sorted(save)
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5l.yaml b/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5l.yaml
deleted file mode 100644
index 0532b0e22..000000000
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5l.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# parameters
-nc: 1  # number of classes
-depth_multiple: 1.0  # model depth multiple
-width_multiple: 1.0  # layer channel multiple
-
-# anchors
-anchors:
-  - [4,5,  8,10,  13,16]  # P3/8
-  - [23,29,  43,55,  73,105]  # P4/16
-  - [146,217,  231,300,  335,433]  # P5/32
-
-# YOLOv5 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, StemBlock, [64, 3, 2]],  # 0-P1/2
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 2-P3/8
-   [-1, 9, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 4-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 6-P5/32
-   [-1, 1, SPP, [1024, [3,5,7]]],
-   [-1, 3, C3, [1024, False]],  # 8
-  ]
-
-# YOLOv5 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 5], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 12
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 3], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 16 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 13], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 19 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 9], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 22 (P5/32-large)
-
-   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
\ No newline at end of file
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5n.yaml b/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5n.yaml
deleted file mode 100644
index caba6bed6..000000000
--- a/custom_nodes/facerestore/facelib/detection/yolov5face/models/yolov5n.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# parameters
-nc: 1  # number of classes
-depth_multiple: 1.0  # model depth multiple
-width_multiple: 1.0  # layer channel multiple
-
-# anchors
-anchors:
-  - [4,5,  8,10,  13,16]  # P3/8
-  - [23,29,  43,55,  73,105]  # P4/16
-  - [146,217,  231,300,  335,433]  # P5/32
-
-# YOLOv5 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, StemBlock, [32, 3, 2]],  # 0-P2/4
-   [-1, 1, ShuffleV2Block, [128, 2]],  # 1-P3/8
-   [-1, 3, ShuffleV2Block, [128, 1]],  # 2
-   [-1, 1, ShuffleV2Block, [256, 2]],  # 3-P4/16
-   [-1, 7, ShuffleV2Block, [256, 1]],  # 4
-   [-1, 1, ShuffleV2Block, [512, 2]],  # 5-P5/32
-   [-1, 3, ShuffleV2Block, [512, 1]],  # 6
-  ]
-
-# YOLOv5 head
-head:
-  [[-1, 1, Conv, [128, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P4
-   [-1, 1, C3, [128, False]],  # 10
-
-   [-1, 1, Conv, [128, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 2], 1, Concat, [1]],  # cat backbone P3
-   [-1, 1, C3, [128, False]],  # 14 (P3/8-small)
-
-   [-1, 1, Conv, [128, 3, 2]],
-   [[-1, 11], 1, Concat, [1]],  # cat head P4
-   [-1, 1, C3, [128, False]],  # 17 (P4/16-medium)
-
-   [-1, 1, Conv, [128, 3, 2]],
-   [[-1, 7], 1, Concat, [1]],  # cat head P5
-   [-1, 1, C3, [128, False]],  # 20 (P5/32-large)
-
-   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
diff --git a/custom_nodes/facerestore/facelib/detection/yolov5face/utils/__init__.py b/custom_nodes/facerestore/facelib/detection/yolov5face/utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/folder_paths.py b/folder_paths.py
index bd713eb5c..01f47a437 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -30,6 +30,7 @@ folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"),
 folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions)
 
 folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes")], [])
+folder_names_and_paths["comfy_extras"] = ([os.path.join(base_path, "comfy_extras")], [])
 
 output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
 
diff --git a/im_bw.png b/im_bw.png
new file mode 100644
index 0000000000000000000000000000000000000000..e32dc2b67684746520daf793bad97d37aea66989
GIT binary patch
literal 2970
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/image_gray1.png b/image_gray1.png
new file mode 100644
index 0000000000000000000000000000000000000000..f75361bfb6039b4eb4321ece0c92c8521a214ead
GIT binary patch
literal 5730
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/image_gray2.png b/image_gray2.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd38b12cf8c281a5392ac58fa76c1a89009e81ae
GIT binary patch
literal 5417
[base85-encoded binary data omitted]

literal 0
HcmV?d00001
diff --git a/nodes.py b/nodes.py
index a82b9730a..b62850a09 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1217,9 +1217,23 @@ def load_custom_nodes():
         if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
         load_custom_node(module_path)
 
+def load_comfy_extras_nodes():
+    node_paths = folder_paths.get_folder_paths("comfy_extras")
+    for comfy_extras_path in node_paths:
+        possible_modules = os.listdir(comfy_extras_path)
+        if "__pycache__" in possible_modules:
+            possible_modules.remove("__pycache__")
+
+        for possible_module in possible_modules:
+            module_path = os.path.join(comfy_extras_path, possible_module)
+            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
+            load_custom_node(module_path)
+
+
 def init_custom_nodes():
+    load_comfy_extras_nodes()
     load_custom_nodes()
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "silver_custom.py"))
+    # load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
+    # load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
+    # load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
+    # load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "silver_custom.py"))
diff --git a/opencv_image.png b/opencv_image.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa704166d2a40c4c581962054b0cb036da7b4ff1
GIT binary patch
literal 4701
[base85-encoded binary data omitted]

literal 0
HcmV?d00001
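The new load_comfy_extras_nodes() mirrors load_custom_nodes(): everything under comfy_extras/ except __pycache__ is handed to load_custom_node(), so plain .py files (silver_custom.py) and package directories (facerestore/) are picked up automatically instead of being listed by hand. A rough standalone sketch of that discovery-and-import convention follows; this is not ComfyUI's actual loader, the helper names are hypothetical, and the import step assumes a plain .py module.

# Hypothetical sketch of comfy_extras discovery; not the real load_custom_node().
import importlib.util
import os

def discover_node_modules(extras_dir):
    # Same scan as load_comfy_extras_nodes: skip __pycache__ and non-.py files,
    # keep .py files and package directories.
    for name in sorted(os.listdir(extras_dir)):
        if name == "__pycache__":
            continue
        path = os.path.join(extras_dir, name)
        if os.path.isfile(path) and os.path.splitext(path)[1] != ".py":
            continue
        yield path

def load_node_mappings(module_path):
    # Import one module (assumes a plain .py file) and read the
    # NODE_CLASS_MAPPINGS dict that nodes like ExpandImageMask export.
    name = os.path.splitext(os.path.basename(module_path))[0]
    spec = importlib.util.spec_from_file_location(name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, "NODE_CLASS_MAPPINGS", {})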