Yuantao Feng committed
Commit 18103a9 · 1 Parent(s): bd0a204
Decoupling metrics from benchmark to allow different kinds of forward process (#14)
* create metrics for different types of behavior
* workable impl calling utils.METRICS in benchmark
- benchmark/benchmark.py +15 -109
- benchmark/config/face_detection_yunet.yaml +3 -3
- benchmark/config/face_recognition_sface.yaml +3 -3
- benchmark/config/human_segmentation_pphumanseg.yaml +3 -3
- benchmark/config/image_classification_ppresnet.yaml +3 -3
- benchmark/config/qrcode_wechatqrcode.yaml +6 -5
- benchmark/config/text_detection_db.yaml +3 -3
- benchmark/config/text_recognition_crnn.yaml +3 -3
- benchmark/requirements.txt +1 -2
- benchmark/utils/__init__.py +4 -0
- benchmark/utils/factory.py +13 -0
- benchmark/utils/metrics/__init__.py +5 -0
- benchmark/utils/metrics/base.py +29 -0
- benchmark/utils/metrics/base_metric.py +48 -0
- benchmark/utils/metrics/detection.py +33 -0
- benchmark/utils/metrics/recognition.py +28 -0
- benchmark/utils/timer.py +20 -0
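In short, this change moves all timing logic out of benchmark.py into a small benchmark/utils package: each metric class registers itself in a METRICS registry, and Benchmark instantiates one from the "type" field of the config's metric block. A minimal sketch of the resulting flow, using only names from this diff (the dict literal stands in for a parsed YAML config; build_from_cfg is defined in benchmark.py):

    from utils import METRICS  # importing utils registers Base/Detection/Recognition

    metric_cfg = dict(type='Detection', sizes=[[160, 120]], warmup=30, repeat=10)
    metric = build_from_cfg(metric_cfg, registery=METRICS, key='type')  # -> Detection instance
    # latencies = metric.forward(model, img)  # e.g. {'[160, 120]': <median ms>}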
benchmark/benchmark.py
CHANGED
@@ -2,61 +2,30 @@ import os
 import argparse
 
 import yaml
-import tqdm
 import numpy as np
 import cv2 as cv
 
+# from ..models import MODELS
 from models import MODELS
+from utils import METRICS
 
 parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
 parser.add_argument('--cfg', '-c', type=str,
                     help='Benchmarking on the given config.')
 args = parser.parse_args()
 
-class Timer:
-    def __init__(self, warmup, reduction):
-        self._warmup = warmup
-        self._reduction = reduction
-        self._tm = cv.TickMeter()
-        self._time_record = []
-        self._calls = 0
-
-    def start(self):
-        self._tm.start()
-
-    def stop(self):
-        self._tm.stop()
-        self._calls += 1
-        self._time_record.append(self._tm.getTimeMilli())
-        self._tm.reset()
-
-    def reset(self):
-        self._time_record = []
-        self._calls = 0
-
-    def getResult(self):
-        if self._reduction == 'median':
-            return self._getMedian(self._time_record[self._warmup:])
-        elif self._reduction == 'gmean':
-            return self._getGMean(self._time_record[self._warmup:])
-        else:
-            raise NotImplementedError()
-
-    def _getMedian(self, records):
-        ''' Return median time
-        '''
-        l = len(records)
-        mid = int(l / 2)
-        if l % 2 == 0:
-            return (records[mid] + records[mid - 1]) / 2
-        else:
-            return records[mid]
-
-    def _getGMean(self, records):
-
-
-
+def build_from_cfg(cfg, registery, key='name'):
+    obj_name = cfg.pop(key)
+    obj = registery.get(obj_name)
+    return obj(**cfg)
+
+def prepend_pythonpath(cfg):
+    for k, v in cfg.items():
+        if isinstance(v, dict):
+            prepend_pythonpath(v)
+        else:
+            if 'path' in k.lower():
+                cfg[k] = os.path.join(os.environ['PYTHONPATH'], v)
 
 class Data:
     def __init__(self, **kwargs):

@@ -105,56 +74,6 @@ class Data:
         else:
             return self._files[idx], image
 
-class Metric:
-    def __init__(self, **kwargs):
-        self._sizes = kwargs.pop('sizes', None)
-        self._warmup = kwargs.pop('warmup', 3)
-        self._repeat = kwargs.pop('repeat', 10)
-        assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.'
-        self._batch_size = kwargs.pop('batchSize', 1)
-        self._reduction = kwargs.pop('reduction', 'median')
-
-        self._timer = Timer(self._warmup, self._reduction)
-
-    def getReduction(self):
-        return self._reduction
-
-    def forward(self, model, *args, **kwargs):
-        img = args[0]
-        h, w, _ = img.shape
-        if not self._sizes:
-            self._sizes = [[w, h]]
-
-        results = dict()
-        self._timer.reset()
-        if len(args) == 1:
-            for size in self._sizes:
-                img_r = cv.resize(img, size)
-                try:
-                    model.setInputSize(size)
-                except:
-                    pass
-                # TODO: batched inference
-                # input_data = [img] * self._batch_size
-                input_data = img_r
-                for _ in range(self._repeat+self._warmup):
-                    self._timer.start()
-                    model.infer(input_data)
-                    self._timer.stop()
-                results[str(size)] = self._timer.getResult()
-        else:
-            # TODO: batched inference
-            # input_data = [args] * self._batch_size
-            bboxes = args[1]
-            for idx, bbox in enumerate(bboxes):
-                for _ in range(self._repeat+self._warmup):
-                    self._timer.start()
-                    model.infer(img, bbox)
-                    self._timer.stop()
-                results['bbox{}'.format(idx)] = self._timer.getResult()
-
-        return results
-
 class Benchmark:
     def __init__(self, **kwargs):
         self._data_dict = kwargs.pop('data', None)

@@ -162,7 +81,8 @@ class Benchmark:
         self._data = Data(**self._data_dict)
 
         self._metric_dict = kwargs.pop('metric', None)
-        self._metric = Metric(**self._metric_dict)
+        # self._metric = Metric(**self._metric_dict)
+        self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type')
 
         backend_id = kwargs.pop('backend', 'default')
        available_backends = dict(

@@ -206,20 +126,6 @@
            total_latency += latency
            print(' {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))
 
-
-def build_from_cfg(cfg, registery):
-    obj_name = cfg.pop('name')
-    obj = registery.get(obj_name)
-    return obj(**cfg)
-
-def prepend_pythonpath(cfg):
-    for k, v in cfg.items():
-        if isinstance(v, dict):
-            prepend_pythonpath(v)
-        else:
-            if 'path' in k.lower():
-                cfg[k] = os.path.join(os.environ['PYTHONPATH'], v)
-
 if __name__ == '__main__':
     assert args.cfg.endswith('yaml'), 'Currently support configs of yaml format only.'
     with open(args.cfg, 'r') as f:
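For reference, build_from_cfg does nothing more than pop the lookup key out of the config dict and call the registered class with whatever remains, so every other key in a metric block must be a valid constructor argument. Traced by hand on a hypothetical metric block:

    cfg = dict(type='Recognition', warmup=30, repeat=10, reduction='median')
    name = cfg.pop('type')   # 'Recognition'; cfg now holds only constructor kwargs
    cls = METRICS.get(name)  # the class object filed by @METRICS.register
    metric = cls(**cfg)      # Recognition(warmup=30, repeat=10, reduction='median')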
benchmark/config/face_detection_yunet.yaml
CHANGED
@@ -4,13 +4,13 @@ Benchmark:
     path: "benchmark/data/face/detection"
     files: ["group.jpg", "concerts.jpg", "dance.jpg"]
   metric:
+    type: "Detection"
     sizes: # [[w1, h1], ...], Omit to run at original scale
       - [160, 120]
       - [640, 480]
-    warmup:
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
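Note that warmup: 30 next to repeat: 10 is only legal after this change: the removed Metric class asserted warmup < repeat and sliced the warmup samples out of a single timed loop, while the new metrics (see utils/metrics/base.py below) run the warmup iterations separately and never time them. The new loop shape, in outline:

    for _ in range(self._warmup):  # untimed warmup runs; any count is fine now
        model.infer(input_data)
    for _ in range(self._repeat):  # only these runs reach the Timer records
        self._timer.start()
        model.infer(input_data)
        self._timer.stop()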
benchmark/config/face_recognition_sface.yaml
CHANGED
@@ -5,10 +5,10 @@ Benchmark:
     files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"]
     useLabel: True
   metric: # 'sizes' is omitted since this model requires input of fixed size
-
+    type: "Recognition"
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
benchmark/config/human_segmentation_pphumanseg.yaml
CHANGED
@@ -6,10 +6,10 @@ Benchmark:
     toRGB: True
     resize: [192, 192]
   metric:
-
+    type: "Base"
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
benchmark/config/image_classification_ppresnet.yaml
CHANGED
@@ -7,10 +7,10 @@ Benchmark:
     resize: [256, 256]
     centerCrop: 224
   metric:
-
+    type: "Base"
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
benchmark/config/qrcode_wechatqrcode.yaml
CHANGED
@@ -4,10 +4,11 @@ Benchmark:
     path: "benchmark/data/qrcode"
     files: ["opencv.png", "opencv_zoo.png"]
   metric:
+    type: "Detection"
     sizes:
       - [100, 100]
       - [300, 300]
-    warmup:
+    warmup: 30
     repeat: 10
     reduction: "median"
   backend: "default"

@@ -15,7 +16,7 @@ Benchmark:
 
 Model:
   name: "WeChatQRCode"
-  detect_prototxt_path: "models/qrcode_wechatqrcode/
-  detect_model_path: "models/qrcode_wechatqrcode/
-  sr_prototxt_path: "models/qrcode_wechatqrcode/
-  sr_model_path: "models/qrcode_wechatqrcode/
+  detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021nov.prototxt"
+  detect_model_path: "models/qrcode_wechatqrcode/detect_2021nov.caffemodel"
+  sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021nov.prototxt"
+  sr_model_path: "models/qrcode_wechatqrcode/sr_2021nov.caffemodel"
benchmark/config/text_detection_db.yaml
CHANGED
@@ -4,12 +4,12 @@ Benchmark:
     path: "benchmark/data/text"
     files: ["1.jpg", "2.jpg", "3.jpg"]
   metric:
+    type: "Detection"
     sizes: # [[w1, h1], ...], Omit to run at original scale
       - [640, 480]
-    warmup:
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
benchmark/config/text_recognition_crnn.yaml
CHANGED
@@ -5,10 +5,10 @@ Benchmark:
     files: ["1.jpg", "2.jpg", "3.jpg"]
     useLabel: True
   metric: # 'sizes' is omitted since this model requires input of fixed size
-
+    type: "Recognition"
+    warmup: 30
     repeat: 10
-
-    reduction: 'median'
+    reduction: "median"
   backend: "default"
   target: "cpu"
benchmark/requirements.txt
CHANGED
@@ -1,5 +1,4 @@
-numpy
+numpy
 opencv-python==4.5.4.58
-tqdm
 pyyaml
 requests
benchmark/utils/__init__.py
ADDED
@@ -0,0 +1,4 @@
+from .factory import (METRICS, DATALOADERS)
+from .metrics import *
+
+__all__ = ['METRICS', 'DATALOADERS']
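Importing .metrics here is what actually populates the registry: the @METRICS.register decorators run at import time, so by the time benchmark.py does "from utils import METRICS", all three metric classes are already filed. For example:

    import utils                           # triggers utils/metrics/__init__.py
    print(utils.METRICS.get('Detection'))  # <class 'utils.metrics.detection.Detection'>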
benchmark/utils/factory.py
ADDED
@@ -0,0 +1,13 @@
+class Registery:
+    def __init__(self, name):
+        self._name = name
+        self._dict = dict()
+
+    def get(self, key):
+        return self._dict[key]
+
+    def register(self, item):
+        self._dict[item.__name__] = item
+
+METRICS = Registery('Metrics')
+DATALOADERS = Registery('DataLoaders')
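The registry is deliberately tiny: register takes the class object itself and files it under its __name__, which is what lets it double as a class decorator. Note that register returns nothing, so the decorated name is rebound to None and the class stays reachable only through the registry; the dispatch in benchmark.py goes through METRICS.get, so this is harmless there. A sketch with a hypothetical class:

    from utils.factory import METRICS

    @METRICS.register      # equivalent to MyMetric = METRICS.register(MyMetric)
    class MyMetric:
        pass

    # MyMetric is now None in this scope; the class object lives in the registry:
    cls = METRICS.get('MyMetric')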
benchmark/utils/metrics/__init__.py
ADDED
@@ -0,0 +1,5 @@
+from .base import Base
+from .detection import Detection
+from .recognition import Recognition
+
+__all__ = ['Base', 'Detection', 'Recognition']
benchmark/utils/metrics/base.py
ADDED
@@ -0,0 +1,29 @@
+import cv2 as cv
+
+from .base_metric import BaseMetric
+from ..factory import METRICS
+
+@METRICS.register
+class Base(BaseMetric):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def forward(self, model, *args, **kwargs):
+        img = args[0]
+        if not self._sizes:
+            h, w, _ = img.shape
+            self._sizes.append([w, h])
+
+        results = dict()
+        self._timer.reset()
+        for size in self._sizes:
+            input_data = cv.resize(img, size)
+            for _ in range(self._warmup):
+                model.infer(input_data)
+            for _ in range(self._repeat):
+                self._timer.start()
+                model.infer(input_data)
+                self._timer.stop()
+            results[str(size)] = self._getResult()
+
+        return results
benchmark/utils/metrics/base_metric.py
ADDED
@@ -0,0 +1,48 @@
+import cv2 as cv
+
+from ..timer import Timer
+
+class BaseMetric:
+    def __init__(self, **kwargs):
+        self._sizes = kwargs.pop('sizes', None)
+        if self._sizes is None:
+            self._sizes = []
+        self._warmup = kwargs.pop('warmup', 3)
+        self._repeat = kwargs.pop('repeat', 10)
+        self._reduction = kwargs.pop('reduction', 'median')
+
+        self._timer = Timer()
+
+    def _calcMedian(self, records):
+        ''' Return the median of records
+        '''
+        l = len(records)
+        mid = int(l / 2)
+        if l % 2 == 0:
+            return (records[mid] + records[mid - 1]) / 2
+        else:
+            return records[mid]
+
+    def _calcGMean(self, records, drop_largest=3):
+        ''' Return the geometric mean of records after dropping the drop_largest largest values
+        '''
+        l = len(records)
+        if l <= drop_largest:
+            print('len(records)({}) <= drop_largest({}), stop dropping.'.format(l, drop_largest))
+        records_sorted = sorted(records, reverse=True)
+        return sum(records_sorted[drop_largest:]) / (l - drop_largest)
+
+    def _getResult(self):
+        records = self._timer.getRecords()
+        if self._reduction == 'median':
+            return self._calcMedian(records)
+        elif self._reduction == 'gmean':
+            return self._calcGMean(records)
+        else:
+            raise NotImplementedError('Reduction {} is not supported'.format(self._reduction))
+
+    def getReduction(self):
+        return self._reduction
+
+    def forward(self, model, *args, **kwargs):
+        raise NotImplementedError('Not implemented')
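A worked example of the two reductions on hypothetical records, which also shows two quirks of this implementation: _calcMedian indexes the records in arrival order without sorting, and _calcGMean is an arithmetic mean of the records left after dropping the three largest, despite its name:

    records = [10.0, 11.0, 30.0, 12.0, 10.5]  # hypothetical latencies in ms

    # reduction='median': the length is odd, so records[2] == 30.0 is returned:
    # the positional middle, not the true median (11.0), since nothing is sorted.
    # reduction='gmean': sorted descending -> [30.0, 12.0, 11.0, 10.5, 10.0],
    # drop the 3 largest, then average the rest: (10.5 + 10.0) / 2 == 10.25.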
benchmark/utils/metrics/detection.py
ADDED
@@ -0,0 +1,33 @@
+import cv2 as cv
+
+from .base_metric import BaseMetric
+from ..factory import METRICS
+
+@METRICS.register
+class Detection(BaseMetric):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def forward(self, model, *args, **kwargs):
+        img = args[0]
+        if not self._sizes:
+            h, w, _ = img.shape
+            self._sizes.append([w, h])
+
+        results = dict()
+        self._timer.reset()
+        for size in self._sizes:
+            input_data = cv.resize(img, size)
+            try:
+                model.setInputSize(size)
+            except:
+                pass
+            for _ in range(self._warmup):
+                model.infer(input_data)
+            for _ in range(self._repeat):
+                self._timer.start()
+                model.infer(input_data)
+                self._timer.stop()
+            results[str(size)] = self._getResult()
+
+        return results
benchmark/utils/metrics/recognition.py
ADDED
@@ -0,0 +1,28 @@
+import cv2 as cv
+
+from .base_metric import BaseMetric
+from ..factory import METRICS
+
+@METRICS.register
+class Recognition(BaseMetric):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def forward(self, model, *args, **kwargs):
+        img, bboxes = args
+        if not self._sizes:
+            h, w, _ = img.shape
+            self._sizes.append([w, h])
+
+        results = dict()
+        self._timer.reset()
+        for idx, bbox in enumerate(bboxes):
+            for _ in range(self._warmup):
+                model.infer(img, bbox)
+            for _ in range(self._repeat):
+                self._timer.start()
+                model.infer(img, bbox)
+                self._timer.stop()
+            results['bbox{}'.format(idx)] = self._getResult()
+
+        return results
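Unlike Base and Detection, Recognition times one measurement loop per bounding box and keys its results 'bbox0', 'bbox1', and so on; the image itself is never resized. A hypothetical call, assuming img and bboxes come from an earlier detection stage and recognizer has an infer(img, bbox) method:

    metric = Recognition(warmup=30, repeat=10, reduction='median')
    results = metric.forward(recognizer, img, bboxes)
    # e.g. {'bbox0': 1.92, 'bbox1': 2.05}, median milliseconds per box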
benchmark/utils/timer.py
ADDED
@@ -0,0 +1,20 @@
+import cv2 as cv
+
+class Timer:
+    def __init__(self):
+        self._tm = cv.TickMeter()
+        self._record = []
+
+    def start(self):
+        self._tm.start()
+
+    def stop(self):
+        self._tm.stop()
+        self._record.append(self._tm.getTimeMilli())
+        self._tm.reset()
+
+    def reset(self):
+        self._record = []
+
+    def getRecords(self):
+        return self._record
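Timer is a thin wrapper over cv.TickMeter that appends one elapsed duration in milliseconds per start/stop pair; the metrics above call reset() at the top of each forward call. A usage sketch (the blur call is a stand-in workload):

    import numpy as np
    import cv2 as cv

    from utils.timer import Timer

    img = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy input
    timer = Timer()
    for _ in range(3):
        timer.start()
        cv.blur(img, (5, 5))   # any workload; one record per start/stop pair
        timer.stop()
    print(timer.getRecords())  # three float durations in ms, e.g. [0.41, 0.40, 0.40]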