Abhishek Gola
committed
Commit · b473270
Parent(s): cc14b30

Updated yunet usage and added .gitattributes
Files changed:
- .gitattributes +26 -0
- .gitignore +9 -0
- demo.py +4 -5
- yunet.py +55 -0
.gitattributes ADDED
@@ -0,0 +1,26 @@
+
+# Caffe
+*.caffemodel filter=lfs diff=lfs merge=lfs -text
+
+# Tensorflow
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pbtxt filter=lfs diff=lfs merge=lfs -text
+
+# Torch
+*.t7 filter=lfs diff=lfs merge=lfs -text
+*.net filter=lfs diff=lfs merge=lfs -text
+
+# Darknet
+*.weights filter=lfs diff=lfs merge=lfs -text
+
+# ONNX
+*.onnx filter=lfs diff=lfs merge=lfs -text
+
+# NPY
+*.npy filter=lfs diff=lfs merge=lfs -text
+
+# Images
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
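Every rule above routes matching binary artifacts (model weights and images) through Git LFS, so the repository history stores lightweight pointers rather than the files themselves. As a side illustration, here is a small, hypothetical Python helper (not part of this commit) that lists which patterns a .gitattributes file hands off to LFS:

from pathlib import Path

def lfs_patterns(path=".gitattributes"):
    # An LFS rule looks like: "*.onnx filter=lfs diff=lfs merge=lfs -text"
    patterns = []
    for line in Path(path).read_text().splitlines():
        parts = line.split()
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

print(lfs_patterns())  # e.g. ['*.caffemodel', '*.pb', '*.pbtxt', ..., '*.webp']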
.gitignore ADDED
@@ -0,0 +1,9 @@
+*.pyc
+**/__pycache__
+**/__pycache__/**
+
+.vscode
+
+build/
+**/build
+**/build/**
demo.py CHANGED
@@ -1,10 +1,9 @@
-import sys
 import argparse
-import copy
 import datetime
 
 import numpy as np
 import cv2 as cv
+from huggingface_hub import hf_hub_download
 
 # Check OpenCV version
 opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
@@ -12,10 +11,10 @@ assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"),
     "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python"
 
 from facial_fer_model import FacialExpressionRecog
-
-sys.path.append('../face_detection_yunet')
 from yunet import YuNet
 
+yunet_model_path = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
+
 # Valid combinations of backends and targets
 backend_target_pairs = [
     [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
@@ -87,7 +86,7 @@ if __name__ == '__main__':
     backend_id = backend_target_pairs[args.backend_target][0]
     target_id = backend_target_pairs[args.backend_target][1]
 
-    detect_model = YuNet(modelPath=
+    detect_model = YuNet(modelPath=yunet_model_path)
 
     fer_model = FacialExpressionRecog(modelPath=args.model,
                                       backendId=backend_id,
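The net effect of the demo.py change: instead of appending a sibling directory to sys.path and loading the YuNet weights from a hard-coded relative path, the script now resolves the ONNX file through the Hugging Face Hub and imports the wrapper from the vendored yunet.py added below. A minimal sketch of the new loading flow, assuming huggingface_hub is installed (python3 -m pip install huggingface_hub):

from huggingface_hub import hf_hub_download
from yunet import YuNet

# The file is downloaded once and cached; later runs reuse the cached copy.
yunet_model_path = hf_hub_download(repo_id="opencv/face_detection_yunet",
                                   filename="face_detection_yunet_2023mar.onnx")
detect_model = YuNet(modelPath=yunet_model_path)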
yunet.py ADDED
@@ -0,0 +1,55 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+from itertools import product
+
+import numpy as np
+import cv2 as cv
+
+class YuNet:
+    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0):
+        self._modelPath = modelPath
+        self._inputSize = tuple(inputSize)  # [w, h]
+        self._confThreshold = confThreshold
+        self._nmsThreshold = nmsThreshold
+        self._topK = topK
+        self._backendId = backendId
+        self._targetId = targetId
+
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    def setInputSize(self, input_size):
+        self._model.setInputSize(tuple(input_size))
+
+    def infer(self, image):
+        # Forward
+        faces = self._model.detect(image)
+        return np.empty(shape=(0, 5)) if faces[1] is None else faces[1]
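For reference, a short usage sketch of the vendored wrapper on a single image. Here sample.jpg is a placeholder path, and the detection layout follows cv.FaceDetectorYN, which puts x, y, width, height in the first four columns of each row:

import cv2 as cv
from huggingface_hub import hf_hub_download
from yunet import YuNet

model_path = hf_hub_download(repo_id="opencv/face_detection_yunet",
                             filename="face_detection_yunet_2023mar.onnx")
model = YuNet(modelPath=model_path)

image = cv.imread("sample.jpg")  # placeholder; substitute a real image
h, w, _ = image.shape
model.setInputSize([w, h])       # FaceDetectorYN expects (width, height)
faces = model.infer(image)       # empty (0, 5) array when nothing is detected
for face in faces:
    x, y, bw, bh = face[:4].astype(int)
    cv.rectangle(image, (x, y), (x + bw, y + bh), (0, 255, 0), 2)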