-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathspeed_test.py
128 lines (111 loc) · 4.52 KB
/
speed_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# ---- Testing images: every benchmarked model runs on the same input ----
test_imgs = ['data/coco/val2017/000000022935.jpg']
# Detector class names this benchmark accepts; checked against the first
# built model in main().
__supported_models__ = ['GumbelTwoStageDetector', 'MaskRCNN']
# ---- Testing models: one "config_path, checkpoint_path" entry per line.
# Blank lines are skipped, and main() ignores any line starting with '_'
# (a cheap way to comment an entry out without touching the parser).
all_models = \
"""
configs/mask_rcnn/vit-adapter-t-3x.py, pretrained/vit-adapter-t-3x.pth
configs/mask_rcnn/svit-adapter-t-0.5x-ftune.py, pretrained/svit-adapter-t-0.5x.pth
configs/mask_rcnn/vit-adapter-s-3x.py, pretrained/vit-adapter-s-3x.pth
configs/mask_rcnn/svit-adapter-s-0.33x-ftune.py, pretrained/svit-adapter-s-0.33x.pth
"""
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
import os.path as osp
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmcv.parallel import collate, scatter
import torch
import time
from global_storage.global_storage import __global_storage__
def parse_args():
    """Parse command-line options for the speed benchmark.

    Returns:
        argparse.Namespace with ``out``, ``device``, ``palette``,
        ``score_thr`` and ``async_test`` attributes.
    """
    parser = ArgumentParser()
    parser.add_argument('--out', type=str, default="demo", help='out dir')
    parser.add_argument('--device', default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette', default='coco',
                        choices=['coco', 'voc', 'citys', 'random'],
                        help='Color palette used for visualization')
    parser.add_argument('--score-thr', type=float, default=0.3,
                        help='bbox score threshold')
    parser.add_argument('--async-test', action='store_true',
                        help='whether to set async options for async inference.')
    return parser.parse_args()
def _iter_model_entries(spec):
    """Yield ``(config_path, checkpoint_or_None)`` pairs from the model table.

    Blank lines and lines starting with ``'_'`` (commented-out entries) are
    skipped; an empty checkpoint field is mapped to ``None`` so
    ``init_detector`` builds a randomly-initialized model instead of trying
    to load a checkpoint from the empty path.
    """
    for line in spec.split("\n"):
        if line.startswith('_') or line == '':
            continue
        config, checkpoint = [x.strip() for x in line.split(',')]
        yield config, (None if checkpoint == '' else checkpoint)


def _prepare_data(model, imgs):
    """Run ``imgs`` through ``model``'s test pipeline and scatter to its GPU.

    Returns the keyword dict expected by ``model(return_loss=False, ...)``.
    Raises RuntimeError when the model lives on CPU, since this benchmark
    only measures GPU throughput.
    """
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)
    datas = [test_pipeline(dict(img_info=dict(filename=img), img_prefix=None))
             for img in imgs]
    data = collate(datas, samples_per_gpu=len(imgs))
    # Unwrap the DataContainers so the dict can be **-expanded into the model.
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if not next(model.parameters()).is_cuda:
        # Explicit raise instead of `assert False`: still fires under -O.
        raise RuntimeError('CPU inference not supported for testing speed.')
    # scatter to specified GPU
    return scatter(data, [device])[0]


def main(args):
    """Benchmark inference throughput (imgs/s) for every model in ``all_models``.

    The test image is pre-processed once with the first model's pipeline so
    every benchmarked model receives byte-identical GPU-resident input, then
    each model runs WARM_UP untimed passes followed by REPEAT timed runs of
    N_TEST passes each.
    """
    imgs = test_imgs[0]
    WARM_UP = 100
    N_TEST = 200
    REPEAT = 2
    entries = list(_iter_model_entries(all_models))
    # Build the first model only to prepare the shared input batch.
    tmp_config, tmp_checkpoint = entries[0]
    model = init_detector(tmp_config, tmp_checkpoint, device=args.device)
    assert type(model).__name__ in __supported_models__
    # ---------------------------------------------------------
    # ----- prepare the same images ready for all models ------
    # ---------------------------------------------------------
    data = _prepare_data(model, imgs)
    # ---------------------------------------------------------
    # --------- test throughput for all models ------------
    # ---------------------------------------------------------
    for model_config, checkpoint in entries:
        model = init_detector(model_config, checkpoint, device=args.device)
        model.eval()
        print('speed (imgs/s):')
        with torch.no_grad():
            for _ in range(WARM_UP):
                model(return_loss=False, rescale=True, **data)
            for _ in range(REPEAT):
                # Drain any still-queued CUDA work (warm-up / previous run)
                # before reading the clock, so it is not billed to this run.
                torch.cuda.synchronize()
                start = time.time()
                for _ in range(N_TEST):
                    model(return_loss=False, rescale=True, **data)
                torch.cuda.synchronize()
                elapse = time.time() - start
                speed = N_TEST / elapse
                print(f'{model_config}: {speed:.3f}')
        print('\n')
if __name__ == '__main__':
    args = parse_args()
    # Async inference is not supported by this benchmark. Reject it with an
    # explicit raise rather than `assert` so the check survives `python -O`.
    if args.async_test:
        raise SystemExit('--async-test is not supported by speed_test.py')
    main(args)