iou_metric.py
import os.path as osp
from copy import deepcopy
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence

from PIL import Image
from prettytable import PrettyTable
import numpy as np
import torch

from mmengine.logging import MMLogger, print_log
from mmseg.evaluation import IoUMetric as MMSEG_IoUMetric
from mmseg.registry import METRICS
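
# Usage sketch (not part of the original file): once this module is imported,
# the registered `CustomIoUMetric` can be selected from an MMSegmentation
# config in the usual way. The evaluator dict below is an assumed example:
# the module path in `custom_imports` is a placeholder for wherever this file
# lives, and the `iou_metrics` values simply choose which branches of
# `total_area_to_metrics` are evaluated.
#
#     custom_imports = dict(imports=['iou_metric'], allow_failed_imports=False)
#     val_evaluator = dict(type='CustomIoUMetric',
#                          iou_metrics=['mIoU', 'mFscore', 'Kappa'])
#     test_evaluator = val_evaluator
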
@METRICS.register_module()
class CustomIoUMetric(MMSEG_IoUMetric):

    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        num_classes = len(self.dataset_meta['classes'])
        palette = deepcopy(self.dataset_meta['palette'])
        # if data_samples[0].get('reduce_zero_label', False):
        #     palette.insert(0, [0, 0, 0])
        palette = [i for rgb in palette for i in rgb]
        for data_sample in data_samples:
            pred_label = data_sample['pred_sem_seg']['data'].squeeze()
            # format_only is only used for the test dataset, which has no
            # ground truth, so metric accumulation is skipped in that case.
            if not self.format_only:
                label = data_sample['gt_sem_seg']['data'].squeeze().to(
                    pred_label)
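                # `intersect_and_union` (inherited from IoUMetric) returns the
                # per-class area histograms (intersection, union, prediction,
                # ground truth); `compute_metrics` later aggregates these
                # 4-tuples over the whole dataset.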
                self.results.append(
                    self.intersect_and_union(pred_label, label, num_classes,
                                             self.ignore_index))
            # format_result
            if self.output_dir is not None:
                basename = osp.splitext(osp.basename(
                    data_sample['img_path']))[0]
                png_filename = osp.abspath(
                    osp.join(self.output_dir, f'{basename}.png'))
                output_mask = pred_label.cpu().numpy()
                # The index range of the official ADE20K dataset is 0 to 150,
                # but the index range of the output is 0 to 149 because
                # reduce_zero_label=True is set.
                # if data_sample.get('reduce_zero_label', False):
                #     output_mask = output_mask + 1
                output = Image.fromarray(output_mask.astype(np.uint8))
                output = output.convert('P')
                output.putpalette(palette)
                output.save(png_filename)

    def compute_metrics(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
                the metrics, and the values are the corresponding results. The
                keys mainly include aAcc, mIoU, mAcc, mDice, mFscore,
                mPrecision and mRecall, plus Kappa when it is requested.
        """
        logger: MMLogger = MMLogger.get_current_instance()
        if self.format_only:
            logger.info(
                f'results are saved to {osp.dirname(self.output_dir)}')
            return OrderedDict()
        # convert list of tuples to tuple of lists, e.g.
        # [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to
        # ([A_1, ..., A_n], ..., [D_1, ..., D_n])
        results = tuple(zip(*results))
        assert len(results) == 4

        total_area_intersect = sum(results[0])
        total_area_union = sum(results[1])
        total_area_pred_label = sum(results[2])
        total_area_label = sum(results[3])
        ret_metrics = self.total_area_to_metrics(
            total_area_intersect, total_area_union, total_area_pred_label,
            total_area_label, self.metrics, self.nan_to_num, self.beta)

        class_names = self.dataset_meta['classes']

        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        metrics = dict()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc' or key == 'Kappa':
                metrics[key] = val
            else:
                metrics['m' + key] = val

        # each class table
        ret_metrics.pop('aAcc', None)
        ret_metrics.pop('Kappa', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)

        return metrics

    @staticmethod
    def total_area_to_metrics(total_area_intersect: np.ndarray,
                              total_area_union: np.ndarray,
                              total_area_pred_label: np.ndarray,
                              total_area_label: np.ndarray,
                              metrics: List[str] = ['mIoU'],
                              nan_to_num: Optional[int] = None,
                              beta: int = 1):
        """Calculate evaluation metrics.

        Args:
            total_area_intersect (np.ndarray): The intersection of prediction
                and ground truth histogram on all classes.
            total_area_union (np.ndarray): The union of prediction and ground
                truth histogram on all classes.
            total_area_pred_label (np.ndarray): The prediction histogram on
                all classes.
            total_area_label (np.ndarray): The ground truth histogram on
                all classes.
            metrics (List[str] | str): Metrics to be evaluated. Options are
                'mIoU', 'mDice', 'mFscore' and 'Kappa'.
            nan_to_num (int, optional): If specified, NaN values will be
                replaced by the number defined by the user. Default: None.
            beta (int): Determines the weight of recall in the combined score.
                Default: 1.

        Returns:
            Dict[str, np.ndarray]: Per-category evaluation metrics,
                shape (num_classes, ).
        """

        def f_score(precision, recall, beta=1):
            """Calculate the f-score value.

            Args:
                precision (float | torch.Tensor): The precision value.
                recall (float | torch.Tensor): The recall value.
                beta (int): Determines the weight of recall in the combined
                    score. Default: 1.

            Returns:
                [torch.Tensor]: The f-score value.
            """
            score = (1 + beta**2) * (precision * recall) / (
                (beta**2 * precision) + recall)
            return score

        if isinstance(metrics, str):
            metrics = [metrics]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore', 'Kappa']
        if not set(metrics).issubset(set(allowed_metrics)):
            raise KeyError(f'metrics {metrics} is not supported')

        all_acc = total_area_intersect.sum() / total_area_label.sum()
        ret_metrics = OrderedDict({'aAcc': all_acc})
        for metric in metrics:
            if metric == 'mIoU':
                iou = total_area_intersect / total_area_union
                acc = total_area_intersect / total_area_label
                ret_metrics['IoU'] = iou
                ret_metrics['Acc'] = acc
            elif metric == 'mDice':
                dice = 2 * total_area_intersect / (
                    total_area_pred_label + total_area_label)
                acc = total_area_intersect / total_area_label
                ret_metrics['Dice'] = dice
                ret_metrics['Acc'] = acc
            elif metric == 'mFscore':
                precision = total_area_intersect / total_area_pred_label
                recall = total_area_intersect / total_area_label
                f_value = torch.tensor([
                    f_score(x[0], x[1], beta) for x in zip(precision, recall)
                ])
                ret_metrics['Fscore'] = f_value
                ret_metrics['Precision'] = precision
                ret_metrics['Recall'] = recall
            elif metric == 'Kappa':
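                # Cohen's kappa from the aggregated per-class counts: po is
                # the observed agreement (identical to the overall accuracy
                # aAcc) and pe is the agreement expected by chance, i.e. the
                # sum over classes of predicted marginal * label marginal
                # divided by the squared total pixel count.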
                po = all_acc
                pe = sum(total_area_pred_label *
                         total_area_label) / total_area_label.sum()**2
                kappa = (po - pe) / (1 - pe)
                ret_metrics['Kappa'] = kappa

        ret_metrics = {
            metric: value.numpy()
            for metric, value in ret_metrics.items()
        }
        if nan_to_num is not None:
            ret_metrics = OrderedDict({
                metric: np.nan_to_num(metric_value, nan=nan_to_num)
                for metric, metric_value in ret_metrics.items()
            })
        return ret_metrics
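

# Minimal sanity-check sketch (not part of the original file): it exercises
# `total_area_to_metrics` directly with synthetic per-class pixel counts for
# three hypothetical classes, assuming mmseg and its dependencies are
# installed so that the imports above succeed. The numbers are made up purely
# for illustration.
if __name__ == '__main__':
    # Per-class pixel counts for a made-up 3-class prediction.
    intersect = torch.tensor([80., 45., 60.])  # correctly predicted pixels
    pred = torch.tensor([100., 50., 70.])      # predicted pixels per class
    label = torch.tensor([90., 60., 70.])      # ground-truth pixels per class
    union = pred + label - intersect           # inclusion-exclusion

    demo_metrics = CustomIoUMetric.total_area_to_metrics(
        intersect, union, pred, label,
        metrics=['mIoU', 'mFscore', 'Kappa'])
    for name, value in demo_metrics.items():
        print(name, value)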