mAP corrected to per-class

parent 6116acb8c2
commit e7dab5a42f

test.py | 31

@@ -55,7 +55,11 @@ print('Compute mAP...')
 outputs = []
 targets = None
-APs = []
+mAPs = []
+TP = []
+confidence = []
+pred_class = []
+target_class = []
 for batch_i, (imgs, targets) in enumerate(dataloader):
     imgs = imgs.to(device)

@@ -78,7 +82,7 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
         if detections is None:
             # If there are no detections but there are annotations mask as zero AP
             if annotations.size(0) != 0:
-                APs.append(0)
+                mAPs.append(0)
             continue

         # Get detections sorted by decreasing confidence scores
@@ -107,22 +111,17 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
             else:
                 correct.append(0)

-        # Extract true and false positives
-        true_positives = np.array(correct)
-        false_positives = 1 - true_positives
+        # Compute Average Precision (AP) per class
+        AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=annotations[:, 0])

-        # Compute cumulative false positives and true positives
-        false_positives = np.cumsum(false_positives)
-        true_positives = np.cumsum(true_positives)
+        # Compute mean AP for this image
+        mAP = AP.mean()

-        # Compute recall and precision at all ranks
-        recall = true_positives / annotations.size(0) if annotations.size(0) else true_positives
-        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
+        # Append image mAP to list of validation mAPs
+        mAPs.append(mAP)

-        # Compute average precision
-        AP = compute_ap(recall, precision)
-        APs.append(AP)
+        # Print image mAP and running mean mAP
+        print('+ Sample [%d/%d] AP: %.4f (%.4f)' % (len(mAPs), len(dataloader) * opt.batch_size, AP, np.mean(mAPs)))

-        print("+ Sample [%d/%d] AP: %.4f (%.4f)" % (len(APs), len(dataloader) * opt.batch_size, AP, np.mean(APs)))

-print("Mean Average Precision: %.4f" % np.mean(APs))
+print('Mean Average Precision: %.4f' % np.mean(mAPs))

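For reference, the new per-image scoring path calls the per-class helper directly: correct is a 0/1 flag per detection, column 4 of detections is the objectness score, column 6 the predicted class, and column 0 of annotations the ground-truth class. A minimal standalone sketch with made-up numbers (the import path below is an assumption, not part of this commit):

import numpy as np
from utils import ap_per_class  # assumed import path; adjust to wherever ap_per_class lives in the repo

# Toy image: three detections, two ground-truth boxes (classes 0 and 1)
correct = [1, 0, 1]                    # tp flag per detection (1 = matched a ground-truth box)
conf = np.array([0.9, 0.8, 0.7])       # stands in for detections[:, 4]
pred_cls = np.array([0, 1, 1])         # stands in for detections[:, 6]
target_cls = np.array([0, 1])          # stands in for annotations[:, 0]

AP = ap_per_class(tp=correct, conf=conf, pred_cls=pred_cls, target_cls=target_cls)
mAP = AP.mean()                        # mean over the classes present in this image
print(AP, mAP)                         # expected roughly [1.0, 0.5] and 0.75 for these toy numbers
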
@@ -79,6 +79,53 @@ def xywh2xyxy(x):  # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
     return y


+def ap_per_class(tp, conf, pred_cls, target_cls):
+    """ Compute the average precision, given the recall and precision curves.
+    Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
+    # Arguments
+        tp: True positives (list).
+        conf: Objectness value from 0-1 (list).
+        pred_cls: Predicted object classes (list).
+        target_cls: True object classes (list).
+    # Returns
+        The average precision as computed in py-faster-rcnn.
+    """
+
+    # lists/pytorch to numpy
+    tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(pred_cls), np.array(target_cls)
+
+    # Sort by objectness
+    i = np.argsort(-conf)
+    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+    # Find unique classes
+    unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
+
+    # Create Precision-Recall curve and compute AP for each class
+    ap = []
+    for c in unique_classes:
+        i = pred_cls == c
+        nGT = sum(target_cls == c)  # Number of ground truth objects
+
+        if sum(i) == 0:
+            ap.append(0)
+        else:
+            # Accumulate FPs and TPs
+            fpa = np.cumsum(1 - tp[i])
+            tpa = np.cumsum(tp[i])
+
+            # Recall
+            recall = tpa / (nGT + 1e-16)
+
+            # Precision
+            precision = tpa / (tpa + fpa)
+
+            # AP from recall-precision curve
+            ap.append(compute_ap(recall, precision))
+
+    return np.array(ap)
+
+
 def compute_ap(recall, precision):
     """ Compute the average precision, given the recall and precision curves.
     Code originally from https://github.com/rbgirshick/py-faster-rcnn.

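To make the per-class bookkeeping in ap_per_class concrete, here is the same cumulative-TP/FP arithmetic worked by hand for a single class (toy numbers, not from the repository):

import numpy as np

# Detections of one class, already sorted by descending objectness
tp = np.array([1, 0, 1, 0])      # 1 = detection matched a ground-truth box
nGT = 3                          # ground-truth boxes of this class

fpa = np.cumsum(1 - tp)          # cumulative false positives: [0, 1, 1, 2]
tpa = np.cumsum(tp)              # cumulative true positives:  [1, 1, 2, 2]

recall = tpa / (nGT + 1e-16)     # [0.333, 0.333, 0.667, 0.667]
precision = tpa / (tpa + fpa)    # [1.0, 0.5, 0.667, 0.5]
# compute_ap(recall, precision) then integrates this precision-recall curve into a single AP
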
@@ -90,6 +137,7 @@ def compute_ap(recall, precision):
     """
     # correct AP calculation
     # first append sentinel values at the end
+
     mrec = np.concatenate(([0.], recall, [1.]))
     mpre = np.concatenate(([0.], precision, [0.]))

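The sentinel values mentioned above give the precision-recall curve fixed endpoints at recall 0 and 1 before it is integrated. A self-contained sketch of that standard py-faster-rcnn style calculation, which should mirror what compute_ap does (written as compute_ap_sketch to make clear it is an illustration, not the repository code):

import numpy as np

def compute_ap_sketch(recall, precision):
    # Append sentinel values so the curve is defined at recall 0 and recall 1
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Make precision monotonically decreasing (the envelope of the PR curve)
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Sum rectangle areas wherever recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

# Continuing the toy numbers from the previous sketch:
recall = np.array([1/3, 1/3, 2/3, 2/3])
precision = np.array([1.0, 0.5, 2/3, 0.5])
print(compute_ap_sketch(recall, precision))  # ~0.556
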
@@ -175,15 +223,15 @@ def build_targets(pred_boxes, pred_conf, pred_cls, target, anchor_wh, nA, nC, nG
     # Select best iou_pred and anchor
     iou_anch_best, a = iou_anch.max(0)  # best anchor [0-2] for each target

-    # Select best IOU target-anchor combo in case multiple targets want same anchor
+    # Select best unique target-anchor combinations
     if nTb > 1:
         iou_order = np.argsort(-iou_anch_best)  # best to worst

-        # Unique anchor selection (slow but retains original order)
+        # Unique anchor selection (slower but retains original order)
         u = torch.cat((gi, gj, a), 0).view(3, -1).numpy()
         _, first_unique = np.unique(u[:, iou_order], axis=1, return_index=True)  # first unique indices

-        # Unique anchor selection (fast but does not retain order) TODO: update to retain original order
+        # Unique anchor selection (faster but does not retain order) TODO: update to retain original order
         # u = gi.float() * 0.4361538773074043 + gj.float() * 0.28012496588736746 + a.float() * 0.6627147212460307
         # _, first_unique_sorted = np.unique(u[iou_order], return_index=True)  # first unique indices

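A toy illustration of the unique-anchor selection above: targets are ranked best-to-worst by anchor IoU, their (gi, gj, a) triples are stacked as columns, and np.unique(..., axis=1, return_index=True) keeps only the first (highest-IoU) target claiming each grid-cell/anchor slot. The numbers and variable names below are made up for illustration:

import numpy as np
import torch

# (grid x, grid y, anchor index) for four targets; targets 0 and 2 claim the same slot (5, 4, 1)
gi = torch.tensor([5, 7, 5, 3])
gj = torch.tensor([4, 2, 4, 9])
a = torch.tensor([1, 0, 1, 2])
iou_anch_best = np.array([0.6, 0.9, 0.8, 0.7])

iou_order = np.argsort(-iou_anch_best)             # best to worst: [1, 2, 3, 0]
u = torch.cat((gi, gj, a), 0).view(3, -1).numpy()  # one (gi, gj, a) column per target
_, first_unique = np.unique(u[:, iou_order], axis=1, return_index=True)
keep = iou_order[first_unique]                     # indices of the targets that survive
print(keep)                                        # [3 2 1]: target 0, the lower-IoU duplicate, is dropped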