Limit combinations of backends and targets in demos and benchmark (#145)
* Limit backend and target combinations in demos and benchmark
* Simpler version checking (see the sketch after the changed-file list below)
- demo.py +31 -26
- pphumanseg.py +3 -6
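The "simpler version checking" is a plain string comparison against "4.7.0" in the demos (see the demo.py diff below). For illustration only, here is a hedged sketch of an equivalent numeric check; the parse_version helper is hypothetical and not part of this PR:

# Hypothetical sketch only -- not part of this PR. A numeric variant of the
# version gate used in the demos, assuming cv.__version__ looks like "4.7.0"
# (optionally with a suffix such as "-dev").
import cv2 as cv

def parse_version(version):
    parts = []
    for token in version.split('.'):
        digits = ''.join(ch for ch in token if ch.isdigit())
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)

assert parse_version(cv.__version__) >= (4, 7, 0), \
    "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"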
demo.py CHANGED

@@ -11,33 +11,36 @@ import cv2 as cv
 
 from pphumanseg import PPHumanSeg
 
-try:
-    backends += [cv.dnn.DNN_BACKEND_TIMVX]
-    targets += [cv.dnn.DNN_TARGET_NPU]
-    help_msg_backends += "; {:d}: TIMVX"
-    help_msg_targets += "; {:d}: NPU"
-except:
-    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU for more information.')
+# Check OpenCV version
+assert cv.__version__ >= "4.7.0", \
+       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+# Valid combinations of backends and targets
+backend_target_pairs = [
+    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
+    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
+    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
+    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
+]
 
 parser = argparse.ArgumentParser(description='PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)')
-parser.add_argument('--input', '-i', type=str,
+parser.add_argument('--input', '-i', type=str,
+                    help='Usage: Set input path to a certain image, omit if using camera.')
+parser.add_argument('--model', '-m', type=str, default='human_segmentation_pphumanseg_2023mar.onnx',
+                    help='Usage: Set model path, defaults to human_segmentation_pphumanseg_2023mar.onnx.')
+parser.add_argument('--backend_target', '-bt', type=int, default=0,
+                    help='''Choose one of the backend-target pair to run this demo:
+                        {:d}: (default) OpenCV implementation + CPU,
+                        {:d}: CUDA + GPU (CUDA),
+                        {:d}: CUDA + GPU (CUDA FP16),
+                        {:d}: TIM-VX + NPU,
+                        {:d}: CANN + NPU
+                    '''.format(*[x for x in range(len(backend_target_pairs))]))
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Usage: Specify to save a file with results. Invalid in case of camera input.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
 args = parser.parse_args()
 
 def get_color_map_list(num_classes):

@@ -97,8 +100,10 @@ def visualize(image, result, weight=0.6, fps=None):
 
 
 if __name__ == '__main__':
+    backend_id = backend_target_pairs[args.backend_target][0]
+    target_id = backend_target_pairs[args.backend_target][1]
     # Instantiate PPHumanSeg
-    model = PPHumanSeg(modelPath=args.model, backendId=
+    model = PPHumanSeg(modelPath=args.model, backendId=backend_id, targetId=target_id)
 
     if args.input is not None:
         # Read image and resize to 192x192
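For reference, a minimal usage sketch of the new flag, mirroring how demo.py resolves the --backend_target index into a (backend, target) pair; the bounds check is illustrative and not something the demo itself enforces:

# Illustrative sketch mirroring how demo.py resolves --backend_target / -bt.
# Equivalent command lines would be, e.g.:
#   python demo.py -i image.jpg            # OpenCV backend + CPU (default)
#   python demo.py -i image.jpg -bt 1      # CUDA backend + CUDA target
import cv2 as cv
from pphumanseg import PPHumanSeg

backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU],
]

bt = 1  # value that would come from args.backend_target
assert 0 <= bt < len(backend_target_pairs)  # illustrative; demo.py does not check this
backend_id, target_id = backend_target_pairs[bt]
model = PPHumanSeg(modelPath='human_segmentation_pphumanseg_2023mar.onnx',
                   backendId=backend_id, targetId=target_id)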
pphumanseg.py CHANGED

@@ -28,12 +28,10 @@ class PPHumanSeg:
     def name(self):
         return self.__class__.__name__
 
-    def setBackend(self, backend_id):
-        self._backendId = backend_id
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
         self._model.setPreferableBackend(self._backendId)
-
-    def setTarget(self, target_id):
-        self._targetId = target_id
         self._model.setPreferableTarget(self._targetId)
 
     def _preprocess(self, image):

@@ -69,4 +67,3 @@ class PPHumanSeg:
 
         result = np.argmax(outputBlob, axis=1).astype(np.uint8)
         return result
-
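The combined setter replaces the separate backend and target setters, so a caller such as the benchmark can switch a model between allowed pairs in one step. A hedged sketch of such a caller; the loop and timing comment are illustrative, not the benchmark's actual code:

# Illustrative caller of PPHumanSeg.setBackendAndTarget; not the benchmark's actual code.
import cv2 as cv
from pphumanseg import PPHumanSeg

model = PPHumanSeg(modelPath='human_segmentation_pphumanseg_2023mar.onnx',
                   backendId=cv.dnn.DNN_BACKEND_OPENCV,
                   targetId=cv.dnn.DNN_TARGET_CPU)

for backend_id, target_id in [
    (cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU),
    (cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA),
]:
    model.setBackendAndTarget(backend_id, target_id)
    # ... run inference on a test image and measure time here ...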