From a284fc921db7b87710651238e5e0951c08c59ff1 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 8 Sep 2018 14:46:22 +0200
Subject: [PATCH] updates

---
 detect.py         | 2 +-
 test.py           | 5 ++++-
 utils/datasets.py | 3 +--
 utils/utils.py    | 2 +-
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/detect.py b/detect.py
index b2acb320..76c6d3c2 100755
--- a/detect.py
+++ b/detect.py
@@ -133,7 +133,7 @@ def detect(opt):
                 # Add the bbox to the plot
                 label = '%s %.2f' % (classes[int(cls_pred)], conf)
                 color = bbox_colors[int(np.where(unique_classes == int(cls_pred))[0])]
-                plot_one_box([x1, y1, x2, y2], img, label=label, color=color, line_thickness=3)
+                plot_one_box([x1, y1, x2, y2], img, label=label, color=color)

         if opt.plot_flag:
             # Save generated image with detections
diff --git a/test.py b/test.py
index f0faeab1..9d54421a 100644
--- a/test.py
+++ b/test.py
@@ -8,7 +8,7 @@ parser = argparse.ArgumentParser()
 parser.add_argument('-batch_size', type=int, default=32, help='size of each image batch')
 parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
 parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='path to data config file')
-parser.add_argument('-weights_path', type=str, default='checkpoints/yolov3.weights', help='path to weights file')
+parser.add_argument('-weights_path', type=str, default='checkpoints/yolov3.pt', help='path to weights file')
 parser.add_argument('-class_path', type=str, default='data/coco.names', help='path to class label file')
 parser.add_argument('-iou_thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
 parser.add_argument('-conf_thres', type=float, default=0.5, help='object confidence threshold')
@@ -63,6 +63,9 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
     output = model(imgs)
     output = non_max_suppression(output, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)

+    # import matplotlib.pyplot as plt
+    # plt.imshow(imgs[1][0])
+
     # Compute average precision for each sample
     for sample_i in range(len(targets)):
         correct = []
diff --git a/utils/datasets.py b/utils/datasets.py
index 385384eb..0ecdfa5e 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -89,8 +89,7 @@ class load_images_and_labels():  # for training

     def __iter__(self):
         self.count = -1
-        self.shuffled_vector = np.random.permutation(self.nF)  # shuffled vector
-        # self.shuffled_vector = np.arange(self.nF)  # not shuffled
+        self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
         return self

     def __next__(self):
diff --git a/utils/utils.py b/utils/utils.py
index 44367b81..3b6c0a65 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -40,7 +40,7 @@ def xview_class_weights(indices):  # weights of each class in the training set,

 def plot_one_box(x, img, color=None, label=None, line_thickness=None):
     # Plots one bounding box on image img
-    tl = line_thickness or round(0.003 * max(img.shape[0:2]))  # line thickness
+    tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1  # line thickness
     color = color or [random.randint(0, 255) for _ in range(3)]
     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
     cv2.rectangle(img, c1, c2, color, thickness=tl)
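
The utils/utils.py hunk changes the default box line thickness from round(0.003 * max(h, w))
to round(0.002 * max(h, w)) + 1, and detect.py now relies on that default instead of passing
line_thickness=3. Below is a minimal standalone Python sketch of the helper after this change,
assuming the visible hunk shows the full signature; the cv2.putText label rendering and the
dummy-image usage are illustrative assumptions, not copied from the repository.

    import random

    import cv2
    import numpy as np


    def plot_one_box(x, img, color=None, label=None, line_thickness=None):
        # Plots one bounding box on image img; thickness scales with image size
        tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1  # line thickness
        color = color or [random.randint(0, 255) for _ in range(3)]
        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
        cv2.rectangle(img, c1, c2, color, thickness=tl)
        if label:
            # Assumed label rendering, for illustration only
            cv2.putText(img, label, (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,
                        tl / 3, color, thickness=max(tl - 1, 1))


    # Usage on a dummy 416x416 image: detect.py no longer passes line_thickness=3,
    # so the size-scaled default applies (here round(0.002 * 416) + 1 = 2).
    img = np.zeros((416, 416, 3), dtype=np.uint8)
    plot_one_box([50, 60, 200, 220], img, label='person 0.87')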