mAP corrected to per-class

Glenn Jocher 2018-09-10 15:12:13 +02:00
parent 6116acb8c2
commit e7dab5a42f
2 changed files with 66 additions and 19 deletions

test.py

@@ -55,7 +55,11 @@ print('Compute mAP...')
 outputs = []
 targets = None
-APs = []
+mAPs = []
+TP = []
+confidence = []
+pred_class = []
+target_class = []
 for batch_i, (imgs, targets) in enumerate(dataloader):
     imgs = imgs.to(device)
@@ -78,7 +82,7 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
     if detections is None:
         # If there are no detections but there are annotations mask as zero AP
         if annotations.size(0) != 0:
-            APs.append(0)
+            mAPs.append(0)
         continue

     # Get detections sorted by decreasing confidence scores
@@ -107,22 +111,17 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
             else:
                 correct.append(0)

-        # Extract true and false positives
-        true_positives = np.array(correct)
-        false_positives = 1 - true_positives
-
-        # Compute cumulative false positives and true positives
-        false_positives = np.cumsum(false_positives)
-        true_positives = np.cumsum(true_positives)
-
-        # Compute recall and precision at all ranks
-        recall = true_positives / annotations.size(0) if annotations.size(0) else true_positives
-        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
-
-        # Compute average precision
-        AP = compute_ap(recall, precision)
-        APs.append(AP)
+        # Compute Average Precision (AP) per class
+        AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=annotations[:, 0])
+
+        # Compute mean AP for this image
+        mAP = AP.mean()
+
+        # Append image mAP to list of validation mAPs
+        mAPs.append(mAP)

         # Print image mAP and running mean mAP
-        print("+ Sample [%d/%d] AP: %.4f (%.4f)" % (len(APs), len(dataloader) * opt.batch_size, AP, np.mean(APs)))
+        print('+ Sample [%d/%d] AP: %.4f (%.4f)' % (len(mAPs), len(dataloader) * opt.batch_size, AP, np.mean(mAPs)))

-print("Mean Average Precision: %.4f" % np.mean(APs))
+print('Mean Average Precision: %.4f' % np.mean(mAPs))
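
For reference, a minimal sketch of what the new per-class path computes on toy inputs (all values invented for illustration; assumes ap_per_class and compute_ap from the utils change below are in scope):

    import numpy as np

    # One toy image: 3 detections over 2 classes, one ground truth per class
    tp = [0, 1, 1]            # per-detection TP flags (first detection is a false positive)
    conf = [0.9, 0.8, 0.7]    # objectness scores, already descending
    pred_cls = [0, 0, 1]      # predicted class per detection
    target_cls = [0, 1]       # ground-truth class per annotation

    AP = ap_per_class(tp=tp, conf=conf, pred_cls=pred_cls, target_cls=target_cls)
    print(AP)                 # [0.5, 1.0] -- one AP per class present
    print(AP.mean())          # 0.75 -- the per-image mAP appended to mAPs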

utils/utils.py

@@ -79,6 +79,53 @@ def xywh2xyxy(x):  # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
     return y


+def ap_per_class(tp, conf, pred_cls, target_cls):
+    """ Compute the average precision, given the recall and precision curves.
+    Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
+    # Arguments
+        tp:         True positives (list).
+        conf:       Objectness value from 0-1 (list).
+        pred_cls:   Predicted object classes (list).
+        target_cls: True object classes (list).
+    # Returns
+        The average precision as computed in py-faster-rcnn.
+    """
+    # lists/pytorch to numpy
+    tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(pred_cls), np.array(target_cls)
+
+    # Sort by objectness
+    i = np.argsort(-conf)
+    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+    # Find unique classes
+    unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
+
+    # Create Precision-Recall curve and compute AP for each class
+    ap = []
+    for c in unique_classes:
+        i = pred_cls == c
+        nGT = sum(target_cls == c)  # Number of ground truth objects
+
+        if sum(i) == 0:
+            ap.append(0)
+        else:
+            # Accumulate FPs and TPs
+            fpa = np.cumsum(1 - tp[i])
+            tpa = np.cumsum(tp[i])
+
+            # Recall
+            recall = tpa / (nGT + 1e-16)
+
+            # Precision
+            precision = tpa / (tpa + fpa)
+
+            # AP from recall-precision curve
+            ap.append(compute_ap(recall, precision))
+
+    return np.array(ap)
+
+
 def compute_ap(recall, precision):
     """ Compute the average precision, given the recall and precision curves.
     Code originally from https://github.com/rbgirshick/py-faster-rcnn.
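
To make the arithmetic in the per-class loop concrete, a hand-traced toy run for a single class (3 sorted detections, 2 ground-truth boxes; numbers invented):

    import numpy as np

    tp_c = np.array([1, 0, 1])     # TP flags for this class, best confidence first
    nGT = 2                        # ground-truth boxes of this class

    fpa = np.cumsum(1 - tp_c)      # [0, 1, 1] false positives seen at each rank
    tpa = np.cumsum(tp_c)          # [1, 1, 2] true positives seen at each rank

    recall = tpa / (nGT + 1e-16)   # [0.5, 0.5, 1.0]
    precision = tpa / (tpa + fpa)  # [1.0, 0.5, 0.667]
    # compute_ap(recall, precision) then integrates this curve:
    # 0.5 * 1.0 + 0.5 * (2/3) = 0.8333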
@@ -90,6 +137,7 @@ def compute_ap(recall, precision):
     """
     # correct AP calculation
     # first append sentinel values at the end
     mrec = np.concatenate(([0.], recall, [1.]))
     mpre = np.concatenate(([0.], precision, [0.]))
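
The remainder of the function is not shown in this hunk; a self-contained sketch assuming it follows the py-faster-rcnn all-point scheme that the docstring cites:

    import numpy as np

    def compute_ap_sketch(recall, precision):
        # Sentinels so the curve starts at recall 0 and ends at recall 1
        mrec = np.concatenate(([0.], recall, [1.]))
        mpre = np.concatenate(([0.], precision, [0.]))

        # Precision envelope: make precision monotonically decreasing
        for i in range(mpre.size - 2, -1, -1):
            mpre[i] = max(mpre[i], mpre[i + 1])

        # Sum precision over the points where recall changes
        i = np.where(mrec[1:] != mrec[:-1])[0]
        return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

    # Curve from the trace above: 0.5 * 1.0 + 0.5 * (2/3)
    print(compute_ap_sketch(np.array([0.5, 0.5, 1.0]),
                            np.array([1.0, 0.5, 2 / 3])))  # ~0.8333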
@@ -175,15 +223,15 @@ def build_targets(pred_boxes, pred_conf, pred_cls, target, anchor_wh, nA, nC, nG
         # Select best iou_pred and anchor
         iou_anch_best, a = iou_anch.max(0)  # best anchor [0-2] for each target

-        # Select best IOU target-anchor combo in case multiple targets want same anchor
+        # Select best unique target-anchor combinations
         if nTb > 1:
             iou_order = np.argsort(-iou_anch_best)  # best to worst

-            # Unique anchor selection (slow but retains original order)
+            # Unique anchor selection (slower but retains original order)
             u = torch.cat((gi, gj, a), 0).view(3, -1).numpy()
             _, first_unique = np.unique(u[:, iou_order], axis=1, return_index=True)  # first unique indices

-            # Unique anchor selection (fast but does not retain order) TODO: update to retain original order
+            # Unique anchor selection (faster but does not retain order) TODO: update to retain original order
             # u = gi.float() * 0.4361538773074043 + gj.float() * 0.28012496588736746 + a.float() * 0.6627147212460307
             # _, first_unique_sorted = np.unique(u[iou_order], return_index=True)  # first unique indices
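
A toy illustration of the unique-selection trick above (grid cells and anchor indices invented): stacking (gi, gj, a) as columns and calling np.unique with return_index keeps, for each (cell, anchor) combination, its first occurrence in the best-to-worst IoU ordering:

    import numpy as np

    gi = np.array([4, 4, 7])   # grid x cell per target
    gj = np.array([2, 2, 5])   # grid y cell per target
    a = np.array([1, 1, 0])    # best anchor index per target

    iou_order = np.array([2, 0, 1])   # target indices sorted by descending IoU
    u = np.stack((gi, gj, a))         # shape (3, nT), one column per target

    _, first_unique = np.unique(u[:, iou_order], axis=1, return_index=True)
    print(first_unique)  # [1 0] -- positions in iou_order of each first unique column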