From c1492ae4fb3aa3614f8478ae71fc10003f9a2b9d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 10 Sep 2018 15:50:37 +0200
Subject: [PATCH] updates

---
 detect.py      |  4 ++--
 test.py        | 18 ++++--------------
 utils/gcp.sh   |  2 +-
 utils/utils.py |  4 ++--
 4 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/detect.py b/detect.py
index 76c6d3c2..8603ed9f 100755
--- a/detect.py
+++ b/detect.py
@@ -18,7 +18,7 @@ parser.add_argument('-txt_out', type=bool, default=False)
 
 parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
 parser.add_argument('-class_path', type=str, default='data/coco.names', help='path to class label file')
-parser.add_argument('-conf_thres', type=float, default=0.99, help='object confidence threshold')
+parser.add_argument('-conf_thres', type=float, default=0.9, help='object confidence threshold')
 parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
 parser.add_argument('-batch_size', type=int, default=1, help='size of the batches')
 parser.add_argument('-img_size', type=int, default=32 * 13, help='size of each image dimension')
@@ -34,7 +34,7 @@ def detect(opt):
     model = Darknet(opt.cfg, opt.img_size)
 
     #weights_path = 'checkpoints/yolov3.weights'
-    weights_path = 'checkpoints/latest.pt'
+    weights_path = 'checkpoints/yolov3.pt'
     if weights_path.endswith('.weights'):  # saved in darknet format
         load_weights(model, weights_path)
     else:  # endswith('.pt'), saved in pytorch format
diff --git a/test.py b/test.py
index 730ad5b6..389e96a0 100644
--- a/test.py
+++ b/test.py
@@ -48,18 +48,11 @@ model.to(device).eval()
 # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
 dataloader = load_images_and_labels(test_path, batch_size=opt.batch_size, img_size=opt.img_size)
 
-n_gt = 0
-correct = 0
-
 print('Compute mAP...')
 
-outputs = []
+correct = 0
 targets = None
-mAPs = []
-TP = []
-confidence = []
-pred_class = []
-target_class = []
+outputs, mAPs, TP, confidence, pred_class, target_class = [], [], [], [], [], []
 
 for batch_i, (imgs, targets) in enumerate(dataloader):
     imgs = imgs.to(device)
@@ -67,9 +60,6 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
         output = model(imgs)
         output = non_max_suppression(output, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)
 
-    # import matplotlib.pyplot as plt
-    # plt.imshow(imgs[1][0])
-
     # Compute average precision for each sample
     for sample_i in range(len(targets)):
         correct = []
@@ -112,7 +102,8 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
                 correct.append(0)
 
         # Compute Average Precision (AP) per class
-        AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=annotations[:, 0])
+        target_cls = annotations[:, 0] if annotations.size(0) > 1 else annotations[0]
+        AP = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6], target_cls=target_cls)
 
         # Compute mean AP for this image
         mAP = AP.mean()
@@ -123,5 +114,4 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
 
         # Print image mAP and running mean mAP
         print('+ Sample [%d/%d] AP: %.4f (%.4f)' % (len(mAPs), len(dataloader) * opt.batch_size, mAP, np.mean(mAPs)))
-
 print('Mean Average Precision: %.4f' % np.mean(mAPs))
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 2c9da943..50e1b9a5 100644
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -11,4 +11,4 @@ gsutil cp gs://ultralytics/fresh9_5_e201.pt yolov3/checkpoints
 python3 detect.py
 
 # Test
-python3 test.py -img_size 416 -weights_path checkpoints/latest.pt
+python3 test.py -img_size 416 -weights_path checkpoints/yolov3.weights
diff --git a/utils/utils.py b/utils/utils.py
index ad39a26d..121f352e 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -105,7 +105,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
     ap = []
     for c in unique_classes:
         i = pred_cls == c
-        nGT = sum(target_cls == c)  # Number of ground truth objects
+        n_gt = sum(target_cls == c)  # Number of ground truth objects
 
         if sum(i) == 0:
             ap.append(0)
@@ -115,7 +115,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
             tpa = np.cumsum(tp[i])
 
             # Recall
-            recall = tpa / (nGT + 1e-16)
+            recall = tpa / (n_gt + 1e-16)
 
             # Precision
             precision = tpa / (tpa + fpa)
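
Note (not part of the patch): the utils/utils.py hunks above only rename nGT to n_gt; the surrounding logic ranks detections by confidence, accumulates per-class TPs/FPs, and integrates precision over recall. The standalone sketch below illustrates that computation. It is not the repository's exact code: the function name ap_per_class_sketch, the toy inputs, and the precision-envelope integration (a stand-in for the repo's compute_ap helper, which this diff does not show) are illustrative assumptions.

import numpy as np

def ap_per_class_sketch(tp, conf, pred_cls, target_cls):
    # tp: 1/0 flag per detection, conf: detection confidence,
    # pred_cls / target_cls: class ids of predictions / ground truth.
    tp, conf, pred_cls = map(np.asarray, (tp, conf, pred_cls))
    target_cls = np.asarray(target_cls)

    # Rank detections by decreasing confidence
    order = np.argsort(-conf)
    tp, pred_cls = tp[order], pred_cls[order]

    ap = []
    for c in np.unique(target_cls):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # number of ground-truth objects (the renamed n_gt)
        if i.sum() == 0:
            ap.append(0.0)
            continue

        # Accumulate FPs and TPs down the ranked list
        fpa = np.cumsum(1 - tp[i])
        tpa = np.cumsum(tp[i])

        recall = tpa / (n_gt + 1e-16)
        precision = tpa / (tpa + fpa)

        # Integrate precision over recall using a precision envelope
        # (assumed stand-in for the repo's compute_ap helper)
        mrec = np.concatenate(([0.0], recall, [1.0]))
        mpre = np.concatenate(([0.0], precision, [0.0]))
        mpre = np.maximum.accumulate(mpre[::-1])[::-1]
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap.append(np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]))
    return np.array(ap)

# Toy example: three class-0 detections scored against two class-0 ground-truth boxes
print(ap_per_class_sketch(tp=[1, 0, 1], conf=[0.9, 0.8, 0.7], pred_cls=[0, 0, 0], target_cls=[0, 0]))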