15 files changed: +37 −19 lines changed

@@ -64,7 +64,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=True,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -84,6 +84,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,
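
For readers skimming the hunks, the caller-side change is the same in every benchmark file in this diff: the evaluate helper now returns three values (task metrics, speed/memory metrics, run hash) instead of bundling speed and memory into the results dict, and the new `speed_mem_metrics` dict is forwarded to the benchmark result under its own keyword argument. Below is a minimal, self-contained sketch of that contract; `fake_evaluate` and `build_result` are stand-ins for the library's real `evaluate_*` helpers and result object, not part of the codebase.

```python
# Illustrative only: `fake_evaluate` and `build_result` stand in for the
# library's evaluate_* helpers and its result object; they mimic the new
# three-value contract introduced in this diff.

def fake_evaluate(batch_size=32, avg_inference_time=0.05):
    task_metrics = {"Top 1 Accuracy": 0.761, "Top 5 Accuracy": 0.929}
    speed_mem_metrics = {
        'Tasks Per Second': batch_size / avg_inference_time,
        'Memory Allocated': 1_234_567,  # made-up number
    }
    run_hash = 'deadbeef'
    return task_metrics, speed_mem_metrics, run_hash


def build_result(results, speed_mem_metrics, run_hash):
    # Accuracy metrics and speed/memory metrics now travel in separate fields.
    return {
        'results': results,
        'speed_mem_metrics': speed_mem_metrics,
        'run_hash': run_hash,
    }


# New calling convention used by every benchmark() method in this diff:
test_results, speed_mem_metrics, run_hash = fake_evaluate()
print(build_result(test_results, speed_mem_metrics, run_hash))
```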

@@ -64,7 +64,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=True,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -84,6 +84,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -200,7 +200,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=pin_memory,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -220,6 +220,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -63,7 +63,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=True,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -83,6 +83,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -63,7 +63,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=True,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -83,6 +83,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -66,7 +66,7 @@ def benchmark(
             num_workers=num_workers,
             pin_memory=True,
         )
-        test_results, run_hash = evaluate_classification(
+        test_results, speed_mem_metrics, run_hash = evaluate_classification(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -86,6 +86,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -62,11 +62,14 @@ def evaluate_classification(

     end = time.time()

+    speed_mem_metrics = {
+        'Tasks Per Second': test_loader.batch_size / inference_time.avg,
+        'Memory Allocated': memory_allocated
+    }
+
     return (
         {"Top 1 Accuracy": top1.avg / 100,
-         "Top 5 Accuracy": top5.avg / 100,
-         'Tasks Per Second': test_loader.batch_size / inference_time.avg,
-         'Memory Allocated': memory_allocated},
+         "Top 5 Accuracy": top5.avg / 100}, speed_mem_metrics,
         run_hash,
     )
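
The net effect of the hunk above is a shape change in `evaluate_classification`'s return value: the first element now carries only the accuracy metrics, while throughput and memory move into a second dict. A quick before/after sketch with made-up numbers:

```python
# Made-up numbers, purely to show the shape change in the return value.

# Before: one dict mixing accuracy with speed/memory, plus the run hash.
old_style = (
    {"Top 1 Accuracy": 0.761,
     "Top 5 Accuracy": 0.929,
     'Tasks Per Second': 640.0,
     'Memory Allocated': 1_234_567},
    'deadbeef',
)

# After: accuracy metrics, speed/memory metrics, and run hash as three values.
new_style = (
    {"Top 1 Accuracy": 0.761, "Top 5 Accuracy": 0.929},
    {'Tasks Per Second': 640.0, 'Memory Allocated': 1_234_567},
    'deadbeef',
)

top1 = new_style[0]["Top 1 Accuracy"]
tasks_per_second = new_style[1]['Tasks Per Second']
```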

@@ -217,7 +217,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 91  # Number of classes for COCO Detection
-        test_results, run_hash = evaluate_detection_coco(
+        test_results, speed_mem_metrics, run_hash = evaluate_detection_coco(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -232,6 +232,7 @@ def benchmark(
             config=config,
             dataset='COCO minival',
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -249,7 +249,7 @@ def evaluate_detection_coco(
                       'Tasks Per Second': test_loader.batch_size / inference_time.avg,
                       'Memory Allocated': memory_allocated}

-    return ({**get_coco_metrics(coco_evaluator), **device_metrics}, run_hash)
+    return (get_coco_metrics(coco_evaluator), device_metrics, run_hash)


 def evaluate_detection_voc(
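
On the detection side, the old code merged the COCO metrics and the device metrics into a single dict with `{**a, **b}`; the new code keeps them separate and returns three values, so any caller that previously read `'Tasks Per Second'` out of `test_results` now takes it from the second element instead. An illustrative sketch with made-up values (`coco_metrics` here is a placeholder for what `get_coco_metrics(...)` produces):

```python
# Made-up metric dicts, purely to contrast the old and new return shapes.
coco_metrics = {'box AP': 0.421}       # placeholder for get_coco_metrics(...)
device_metrics = {'Tasks Per Second': 18.5, 'Memory Allocated': 2_345_678}
run_hash = 'deadbeef'

# Before: the two dicts were merged and returned alongside the run hash.
old_return = ({**coco_metrics, **device_metrics}, run_hash)

# After: they stay separate, so the caller unpacks three values.
new_return = (coco_metrics, device_metrics, run_hash)
test_results, speed_mem_metrics, run_hash = new_return
```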

@@ -85,7 +85,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 150  # Number of classes for ADE20K
-        test_results, run_hash = evaluate_segmentation(
+        test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -100,6 +100,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__ + " val",
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -85,7 +85,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 12  # Number of classes for CamVid
-        test_results = evaluate_segmentation(
+        test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -100,6 +100,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,
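
One side effect worth noting for this file and the Cityscapes and PASCALContext files that follow: judging from the PASCAL VOC hunk and the utils hunk at the end of this diff, `evaluate_segmentation` previously returned a `(metrics, run_hash)` pair, so the old single-target assignment here left `test_results` holding the whole tuple rather than the metrics dict. The three-way unpacking fixes that in passing. A toy illustration, using a stand-in `fake_evaluate_segmentation`:

```python
# Toy stand-in for evaluate_segmentation's old two-value return.
def fake_evaluate_segmentation():
    return {"Accuracy": 0.81, "Mean IOU": 0.42}, 'deadbeef'

# Old CamVid/Cityscapes/PASCALContext code assigned without unpacking,
# so test_results ended up as a (metrics, run_hash) tuple.
test_results = fake_evaluate_segmentation()
assert isinstance(test_results, tuple)

# Unpacking (as the new code does) yields the metrics dict itself.
metrics, run_hash = fake_evaluate_segmentation()
assert isinstance(metrics, dict)
```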

@@ -88,7 +88,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 19  # Number of classes for Cityscapes
-        test_results = evaluate_segmentation(
+        test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -103,6 +103,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -85,7 +85,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 59  # Number of classes for PASCALContext
-        test_results = evaluate_segmentation(
+        test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -100,6 +100,7 @@ def benchmark(
             config=config,
             dataset=cls.dataset.__name__,
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -130,7 +130,7 @@ def benchmark(
             collate_fn=collate_fn,
         )
         test_loader.no_classes = 21  # Number of classes for PASCALVOC
-        test_results, run_hash = evaluate_segmentation(
+        test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
             model=model,
             test_loader=test_loader,
             model_output_transform=model_output_transform,
@@ -144,6 +144,7 @@ def benchmark(
             config=config,
             dataset='PASCAL VOC %s %s' % (dataset_year, "val"),
             results=test_results,
+            speed_mem_metrics=speed_mem_metrics,
             pytorch_hub_id=pytorch_hub_url,
             model=paper_model_name,
             model_description=model_description,

@@ -182,8 +182,11 @@ def evaluate_segmentation(

     acc_global, acc, iu = confmat.compute()

+    speed_mem_metrics = {
+        'Tasks Per Second': test_loader.batch_size / inference_time.avg,
+        'Memory Allocated': memory_allocated}
+
     return {
         "Accuracy": acc_global.item(),
-        "Mean IOU": iu.mean().item(),
-        'Tasks Per Second': test_loader.batch_size / inference_time.avg,
-        'Memory Allocated': memory_allocated}, run_hash
+        "Mean IOU": iu.mean().item()}, \
+        speed_mem_metrics, run_hash
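
A small style note on this last hunk: the trailing backslash continues the `return` statement onto a second physical line; wrapping the returned tuple in parentheses is an equivalent alternative that avoids the explicit line continuation. A placeholder illustration (values made up):

```python
# Placeholder values, purely to show two equivalent ways of splitting the return.
speed_mem_metrics = {'Tasks Per Second': 12.0, 'Memory Allocated': 3_456_789}
run_hash = 'deadbeef'

def with_backslash():
    # Mirrors the style used in the hunk above: explicit line continuation.
    return {"Accuracy": 0.81,
            "Mean IOU": 0.42}, \
        speed_mem_metrics, run_hash

def with_parentheses():
    # Equivalent: parentheses make the continuation implicit.
    return ({"Accuracy": 0.81,
             "Mean IOU": 0.42},
            speed_mem_metrics, run_hash)

assert with_backslash() == with_parentheses()
```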