From fc35429bf042e348091c4015e784635f103ae7f0 Mon Sep 17 00:00:00 2001
From: ATRI SAXENA <30728764+AtriSaxena@users.noreply.github.com>
Date: Mon, 27 Apr 2020 18:35:46 +0530
Subject: [PATCH] Report average recall and precision in eval.py

Many people are looking for the average recall and precision alongside
mean AP (see issue #352), so it is better to report them in the
evaluation code.
---
 eval.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/eval.py b/eval.py
index 4a8c672fd..6d1df6205 100644
--- a/eval.py
+++ b/eval.py
@@ -163,6 +163,8 @@ def write_voc_results_file(all_boxes, dataset):
 def do_python_eval(output_dir='output', use_07=True):
     cachedir = os.path.join(devkit_path, 'annotations_cache')
     aps = []
+    recs = []   # per-class recall summaries (named to avoid shadowing voc_eval's rec)
+    precs = []  # per-class precision summaries
     # The PASCAL VOC metric changed in 2010
     use_07_metric = use_07
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
@@ -174,10 +176,14 @@ def do_python_eval(output_dir='output', use_07=True):
            filename, annopath, imgsetpath.format(set_type), cls, cachedir,
            ovthresh=0.5, use_07_metric=use_07_metric)
         aps += [ap]
+        recs += [np.mean(rec)]    # mean of this class's recall curve
+        precs += [np.mean(prec)]  # mean of this class's precision curve
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
+    print('Average Recall = {:.4f}'.format(np.mean(recs)))
+    print('Average Precision = {:.4f}'.format(np.mean(precs)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
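
Note for reviewers: below is a minimal, self-contained sketch of the
aggregation this patch performs, using synthetic per-class curves in
place of real voc_eval() output (which needs the VOC data on disk). The
class names, the random curve shapes, and the choice of summarizing
each class by the mean of its recall/precision curve are illustrative
assumptions for this sketch, not part of the upstream code.

    import numpy as np

    np.random.seed(0)

    aps, recs, precs = [], [], []
    for cls in ('aeroplane', 'bicycle', 'bird'):  # placeholder class list
        # voc_eval() returns (rec, prec, ap): recall/precision at each
        # ranked detection plus the class AP. Simulated here.
        n = np.random.randint(50, 200)
        rec = np.sort(np.random.rand(n))          # recall is non-decreasing
        prec = np.sort(np.random.rand(n))[::-1]   # precision roughly decreases
        ap = float(np.trapz(prec, rec))           # crude area-under-curve stand-in

        aps += [ap]
        recs += [np.mean(rec)]    # summarize the class by its mean recall
        precs += [np.mean(prec)]  # ...and its mean precision
        print('AP for {} = {:.4f}'.format(cls, ap))

    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('Average Recall = {:.4f}'.format(np.mean(recs)))
    print('Average Precision = {:.4f}'.format(np.mean(precs)))

np.mean(rec) averages over the whole curve; if the maximum recall
achieved per class is wanted instead, rec[-1] would be the usual
alternative summary.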