forked from facebookresearch/Detectron
-
Notifications
You must be signed in to change notification settings - Fork 0
/
voc_dataset_evaluator.py
179 lines (162 loc) · 6.95 KB
/
voc_dataset_evaluator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""PASCAL VOC dataset evaluation interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import os
import shutil
import uuid
from core.config import cfg
from datasets.dataset_catalog import DATASETS
from datasets.dataset_catalog import DEVKIT_DIR
from datasets.voc_eval import voc_eval
from utils.io import save_object
logger = logging.getLogger(__name__)
def evaluate_boxes(
    json_dataset,
    all_boxes,
    output_dir,
    use_salt=True,
    cleanup=True,
    use_matlab=False
):
    """Run PASCAL VOC evaluation on a set of detection results.

    Writes the per-class VOCdevkit result files, scores them with the
    Python evaluator, and optionally with the official MATLAB evaluator.
    With `cleanup`, each result file is copied into `output_dir` and then
    removed from the devkit results directory. `use_salt` appends a UUID
    to the result filenames so concurrent runs do not collide.
    """
    if use_salt:
        salt = '_{}'.format(uuid.uuid4())
    else:
        salt = ''
    result_files = _write_voc_results_files(json_dataset, all_boxes, salt)
    _do_python_eval(json_dataset, salt, output_dir)
    if use_matlab:
        _do_matlab_eval(json_dataset, salt, output_dir)
    if cleanup:
        for res_file in result_files:
            shutil.copy(res_file, output_dir)
            os.remove(res_file)
    return None
def _write_voc_results_files(json_dataset, all_boxes, salt):
    """Write one VOCdevkit-format detection results file per class.

    Args:
        json_dataset: dataset object; its roidb order must match the order
            of the VOC image set file
        all_boxes: all_boxes[cls_ind][im_ind] is an ndarray of detections
            with rows (x1, y1, x2, y2, score), or an empty list when the
            image has no detections for that class
        salt: string appended to the competition id to make the filenames
            unique per run

    Returns:
        List of paths to the files written.
    """
    filenames = []
    image_set_path = voc_info(json_dataset)['image_set_path']
    assert os.path.exists(image_set_path), \
        'Image set path does not exist: {}'.format(image_set_path)
    with open(image_set_path, 'r') as f:
        image_index = [x.strip() for x in f.readlines()]
    # Sanity check that order of images in json dataset matches order in the
    # image set
    roidb = json_dataset.get_roidb()
    for i, entry in enumerate(roidb):
        index = os.path.splitext(os.path.split(entry['image'])[1])[0]
        assert index == image_index[i]
    for cls_ind, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        logger.info('Writing VOC results for: {}'.format(cls))
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        filenames.append(filename)
        assert len(all_boxes[cls_ind]) == len(image_index)
        with open(filename, 'wt') as f:
            for im_ind, index in enumerate(image_index):
                dets = all_boxes[cls_ind][im_ind]
                # Empty detections are stored as [] rather than an ndarray;
                # use isinstance instead of type(...) == list (idiomatic and
                # robust to list subclasses)
                if isinstance(dets, list):
                    assert len(dets) == 0, \
                        'dets should be numpy.ndarray or empty list'
                    continue
                # the VOCdevkit expects 1-based indices
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                            format(index, dets[k, -1],
                                   dets[k, 0] + 1, dets[k, 1] + 1,
                                   dets[k, 2] + 1, dets[k, 3] + 1))
    return filenames
def _get_voc_results_file_template(json_dataset, salt):
    """Return a path template for per-class VOC results files.

    The returned string keeps a '{:s}' placeholder for the class name,
    e.g. VOCdevkit/results/VOC2007/Main/comp4<salt>_det_test_{:s}.txt
    """
    info = voc_info(json_dataset)
    # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
    basename = 'comp4{}_det_{}_{{:s}}.txt'.format(salt, info['image_set'])
    return os.path.join(
        info['devkit_path'], 'results', 'VOC' + info['year'], 'Main',
        basename)
def _do_python_eval(json_dataset, salt, output_dir='output'):
    """Compute per-class AP and mean AP with the (unofficial) Python
    VOC evaluator.

    For each foreground class, reads the previously written results file,
    runs voc_eval at IoU threshold 0.5, logs the AP, and pickles the
    precision/recall/AP triple to <output_dir>/<class>_pr.pkl.

    Args:
        json_dataset: dataset object providing `classes` and name metadata
        salt: filename salt used when the results files were written
        output_dir: directory where per-class PR pickles are saved
    """
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010 (11-point sampling before,
    # exact AUC after); a plain comparison replaces `True if ... else False`
    use_07_metric = int(year) < 2010
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # Plain iteration: the original used enumerate() but discarded the index
    for cls in json_dataset.classes:
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(
            json_dataset, salt).format(cls)
        rec, prec, ap = voc_eval(
            filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
def _do_matlab_eval(json_dataset, salt, output_dir='output'):
    """Score the results files with the official MATLAB VOC eval code.

    Spawns MATLAB via a shell command that cds into the devkit wrapper
    directory and invokes its voc_eval.m.
    """
    import subprocess
    logger.info('-----------------------------------------------------')
    logger.info('Computing results with the official MATLAB eval code.')
    logger.info('-----------------------------------------------------')
    info = voc_info(json_dataset)
    path = os.path.join(
        cfg.ROOT_DIR, 'lib', 'datasets', 'VOCdevkit-matlab-wrapper')
    # NOTE(review): the command is built as a single shell string and run
    # with shell=True; paths come from the trusted config/dataset catalog,
    # but any shell metacharacters in them would break (or be executed by)
    # this command.
    matlab_call = 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;' \
        .format(info['devkit_path'], 'comp4' + salt, info['image_set'],
                output_dir)
    cmd = 'cd {} && {:s} -nodisplay -nodesktop -r "dbstop if error; {}"' \
        .format(path, cfg.MATLAB, matlab_call)
    logger.info('Running:\n{}'.format(cmd))
    subprocess.call(cmd, shell=True)
def voc_info(json_dataset):
    """Derive VOC metadata from the dataset name.

    The dataset name is expected to look like 'voc_<year>_<image_set>'
    (e.g. 'voc_2007_test'); year and image set are sliced out of it.

    Returns a dict with: year, image_set, devkit_path, anno_path (an XML
    path template with a '{:s}' placeholder for the image index), and
    image_set_path.
    """
    dataset_name = json_dataset.name
    year = dataset_name[4:8]
    image_set = dataset_name[9:]
    devkit_path = DATASETS[dataset_name][DEVKIT_DIR]
    assert os.path.exists(devkit_path), \
        'Devkit directory {} not found'.format(devkit_path)
    voc_root = os.path.join(devkit_path, 'VOC' + year)
    return dict(
        year=year,
        image_set=image_set,
        devkit_path=devkit_path,
        anno_path=os.path.join(voc_root, 'Annotations', '{:s}.xml'),
        image_set_path=os.path.join(
            voc_root, 'ImageSets', 'Main', image_set + '.txt'))