Glenn Jocher 2018-09-10 15:50:37 +02:00
parent 873abaeef4
commit c1492ae4fb
4 changed files with 9 additions and 19 deletions

detect.py

@@ -18,7 +18,7 @@ parser.add_argument('-txt_out', type=bool, default=False)
parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('-class_path', type=str, default='data/coco.names', help='path to class label file')
parser.add_argument('-conf_thres', type=float, default=0.99, help='object confidence threshold')
parser.add_argument('-conf_thres', type=float, default=0.9, help='object confidence threshold')
parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('-batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('-img_size', type=int, default=32 * 13, help='size of each image dimension')
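For context on what the two changed thresholds control, here is a minimal, illustrative sketch (not the repo's own non_max_suppression): detections below the objectness confidence threshold are dropped first, then boxes overlapping a higher-scoring box by more than the IoU threshold are suppressed, using torchvision's reference NMS.

```python
import torch
from torchvision.ops import nms  # reference NMS, used here purely for illustration

def filter_and_suppress(boxes, scores, conf_thres=0.9, nms_thres=0.45):
    """Keep boxes whose confidence exceeds conf_thres, then drop any box that
    overlaps a higher-scoring kept box by more than nms_thres IoU."""
    keep = scores > conf_thres                         # confidence threshold
    boxes, scores = boxes[keep], scores[keep]
    idx = nms(boxes, scores, iou_threshold=nms_thres)  # boxes are (x1, y1, x2, y2)
    return boxes[idx], scores[idx]
```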
@@ -34,7 +34,7 @@ def detect(opt):
model = Darknet(opt.cfg, opt.img_size)
#weights_path = 'checkpoints/yolov3.weights'
weights_path = 'checkpoints/latest.pt'
weights_path = 'checkpoints/yolov3.pt'
if weights_path.endswith('.weights'): # saved in darknet format
load_weights(model, weights_path)
else: # endswith('.pt'), saved in pytorch format
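The '.pt' branch itself is cut off by the hunk boundary. A rough sketch of the two loading paths follows; the import path for load_weights and the checkpoint layout (a dict holding a 'model' state_dict) are assumptions, not taken from this diff.

```python
import torch
from models import load_weights  # assumed module path for the repo helper shown above

def load_any_weights(model, weights_path):
    """Sketch only; the '.pt' checkpoint layout ('model' key) is assumed."""
    if weights_path.endswith('.weights'):    # saved in darknet format
        load_weights(model, weights_path)
    else:                                    # endswith('.pt'), saved in pytorch format
        checkpoint = torch.load(weights_path, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    return model
```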

test.py

@@ -48,18 +48,11 @@ model.to(device).eval()
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
dataloader = load_images_and_labels(test_path, batch_size=opt.batch_size, img_size=opt.img_size)
n_gt = 0
correct = 0
print('Compute mAP...')
outputs = []
correct = 0
targets = None
mAPs = []
TP = []
confidence = []
pred_class = []
target_class = []
outputs, mAPs, TP, confidence, pred_class, target_class = [], [], [], [], [], []
for batch_i, (imgs, targets) in enumerate(dataloader):
imgs = imgs.to(device)
@@ -67,9 +60,6 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
output = model(imgs)
output = non_max_suppression(output, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)
# import matplotlib.pyplot as plt
# plt.imshow(imgs[1][0])
# Compute average precision for each sample
for sample_i in range(len(targets)):
correct = []
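The body of this per-sample loop (elided between hunks) fills the correct list that later feeds ap_per_class. An illustrative version of that kind of matching, not taken from the repo, might look like this: each detection counts as a true positive if it overlaps a not-yet-matched ground-truth box by more than an IoU threshold.

```python
import torch
from torchvision.ops import box_iou  # used only for this sketch

def mark_true_positives(pred_boxes, gt_boxes, iou_thres=0.5):
    """Mark each detection 1 if it matches an unmatched ground-truth box with
    IoU > iou_thres, else 0 (detections assumed sorted by confidence)."""
    if gt_boxes.numel() == 0:
        return [0] * len(pred_boxes)
    correct, matched = [], set()
    for box in pred_boxes:                                 # (x1, y1, x2, y2) rows
        ious = box_iou(box.unsqueeze(0), gt_boxes).squeeze(0)
        best_iou, best_gt = ious.max(0)
        if best_iou > iou_thres and best_gt.item() not in matched:
            correct.append(1)
            matched.add(best_gt.item())
        else:
            correct.append(0)
    return correct
```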
@@ -112,7 +102,8 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
correct.append(0)
# Compute Average Precision (AP) per class
AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=annotations[:, 0])
target_cls = annotations[:, 0] if annotations.size(0) > 1 else annotations[0]
AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=target_cls)
# Compute mean AP for this image
mAP = AP.mean()
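The new guard picks the class ids whether the sample has one label or several. A tiny illustration, assuming labels are rows of [class, x, y, w, h] and that a single label can arrive squeezed to a flat 5-vector (an assumption about the elided data path):

```python
import torch

# Several labels: an (N, 5) tensor of [class, x, y, w, h] rows,
# so the class ids are the first column.
multi = torch.tensor([[0., 0.50, 0.50, 0.20, 0.20],
                      [2., 0.30, 0.30, 0.10, 0.10]])
print(multi[:, 0])   # tensor([0., 2.])

# A single label squeezed to a flat 5-vector (assumed case the guard handles):
single = torch.tensor([1., 0.40, 0.40, 0.20, 0.20])
print(single[0])     # tensor(1.)
```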
@@ -123,5 +114,4 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
# Print image mAP and running mean mAP
print('+ Sample [%d/%d] AP: %.4f (%.4f)' % (len(mAPs), len(dataloader) * opt.batch_size, mAP, np.mean(mAPs)))
print('Mean Average Precision: %.4f' % np.mean(mAPs))


@@ -11,4 +11,4 @@ gsutil cp gs://ultralytics/fresh9_5_e201.pt yolov3/checkpoints
python3 detect.py
# Test
python3 test.py -img_size 416 -weights_path checkpoints/latest.pt
python3 test.py -img_size 416 -weights_path checkpoints/yolov3.weights


@@ -105,7 +105,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
ap = []
for c in unique_classes:
i = pred_cls == c
nGT = sum(target_cls == c) # Number of ground truth objects
n_gt = sum(target_cls == c) # Number of ground truth objects
if sum(i) == 0:
ap.append(0)
@@ -115,7 +115,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
tpa = np.cumsum(tp[i])
# Recall
recall = tpa / (nGT + 1e-16)
recall = tpa / (n_gt + 1e-16)
# Precision
precision = tpa / (tpa + fpa)
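Putting the renamed n_gt together with the cumulative counts above, a condensed, self-contained sketch of the per-class AP loop looks roughly like the following; plain trapezoidal integration stands in for the repo's own AP computation, which is not shown in this hunk.

```python
import numpy as np

def ap_per_class_sketch(tp, conf, pred_cls, target_cls):
    """Per-class average precision from binary TP marks, detection confidences,
    predicted classes, and ground-truth classes (illustrative only)."""
    order = np.argsort(-np.asarray(conf))          # rank detections by confidence
    tp = np.asarray(tp, dtype=float)[order]
    pred_cls = np.asarray(pred_cls)[order]
    target_cls = np.asarray(target_cls)
    ap = []
    for c in np.unique(target_cls):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()             # number of ground truth objects
        if i.sum() == 0:
            ap.append(0.0)
            continue
        tpa = np.cumsum(tp[i])                     # cumulative true positives
        fpa = np.cumsum(1 - tp[i])                 # cumulative false positives
        recall = tpa / (n_gt + 1e-16)
        precision = tpa / (tpa + fpa)
        ap.append(np.trapz(precision, recall))     # stand-in for the repo's AP integral
    return np.array(ap)
```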