From feea9c1a65c73475803847c83545b5e7ee6c528c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 7 Mar 2020 10:26:08 -0800
Subject: [PATCH] P and R evaluated at 0.5 score

---
 utils/utils.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/utils/utils.py b/utils/utils.py
index b2dad445..d59527c9 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -188,6 +188,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
     unique_classes = np.unique(target_cls)

     # Create Precision-Recall curve and compute AP for each class
+    pr_score = 0.5  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
     s = [len(unique_classes), tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
     ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
     for ci, c in enumerate(unique_classes):
@@ -204,18 +205,18 @@ def ap_per_class(tp, conf, pred_cls, target_cls):

             # Recall
             recall = tpc / (n_gt + 1e-16)  # recall curve
-            r[ci] = recall[-1]
+            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

             # Precision
             precision = tpc / (tpc + fpc)  # precision curve
-            p[ci] = precision[-1]
+            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

             # AP from recall-precision curve
             for j in range(tp.shape[1]):
                 ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

             # Plot
-            # fig, ax = plt.subplots(1, 1, figsize=(4, 4))
+            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
             # ax.plot(recall, precision)
             # ax.set_xlabel('Recall')
             # ax.set_ylabel('Precision')
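
Note on the np.interp change above: instead of reporting P and R at the last (lowest-confidence) point of each curve, the patch interpolates them at a fixed confidence of pr_score = 0.5. Because np.interp requires its xp argument to be increasing while conf[i] is sorted in descending order, both x and xp are negated before interpolating. The following is a minimal standalone sketch of that negation trick; the confidence and curve values are invented for illustration and are not taken from the repository.

import numpy as np

# Illustrative (hypothetical) per-class curves, sorted by descending confidence
conf = np.array([0.9, 0.8, 0.6, 0.4, 0.2])            # prediction confidences (decreasing)
recall_curve = np.array([0.1, 0.2, 0.4, 0.5, 0.6])    # cumulative recall at each confidence
precision_curve = np.array([1.0, 1.0, 0.8, 0.7, 0.6]) # cumulative precision at each confidence

pr_score = 0.5  # confidence at which to report P and R

# Negate x and xp so xp is increasing, as np.interp requires
r_at_score = np.interp(-pr_score, -conf, recall_curve)     # recall at conf = 0.5
p_at_score = np.interp(-pr_score, -conf, precision_curve)  # precision at conf = 0.5

print(r_at_score, p_at_score)  # 0.45 0.75, linearly interpolated between conf 0.6 and 0.4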