diff --git a/test.py b/test.py
index bdbb258e..9d827339 100644
--- a/test.py
+++ b/test.py
@@ -176,14 +176,17 @@ def test(
         map = cocoEval.stats[1]  # update mAP to pycocotools mAP
 
     # Return results
-    return mp, mr, map, mf1, loss / len(dataloader)
+    maps = np.zeros(nc)
+    for i, c in enumerate(ap_class):
+        maps[c] = ap[i]
+    return (mp, mr, map, mf1, loss / len(dataloader)), maps
 
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(prog='test.py')
     parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
-    parser.add_argument('--data-cfg', type=str, default='data/coco.data', help='coco.data file path')
+    parser.add_argument('--data-cfg', type=str, default='data/coco_64img.data', help='coco.data file path')
     parser.add_argument('--weights', type=str, default='weights/yolov3-spp.weights', help='path to weights file')
     parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
     parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
diff --git a/train.py b/train.py
index f2d8d8ea..548b32b9 100644
--- a/train.py
+++ b/train.py
@@ -23,6 +23,7 @@ hyp = {'xy': 0.5,  # xy loss gain
        'weight_decay': 0.0005,  # optimizer weight decay
        }
 
+
 # Original
 # hyp = {'xy': 0.5,  # xy loss gain
 #        'wh': 0.0625,  # wh loss gain
@@ -36,7 +37,6 @@ hyp = {'xy': 0.5,  # xy loss gain
 #        }
 
 
-
 def train(
         cfg,
         data_cfg,
@@ -119,7 +119,7 @@ def train(
     # plt.savefig('LR.png', dpi=300)
 
     # Dataset
-    dataset = LoadImagesAndLabels(train_path, img_size, batch_size, augment=True)
+    dataset = LoadImagesAndLabels(train_path, img_size, batch_size, augment=True, image_weighting=False)
 
     # Initialize distributed training
     if torch.cuda.device_count() > 1:
@@ -147,6 +147,7 @@ def train(
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
     model_info(model)
     nb = len(dataloader)
+    maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0)  # P, R, mAP, F1, test_loss
     n_burnin = min(round(nb / 5 + 1), 1000)  # burn-in batches
     for f in glob.glob('train_batch*.jpg') + glob.glob('test_batch*.jpg'):
@@ -165,6 +166,9 @@ def train(
                 if int(name.split('.')[1]) < cutoff:  # if layer < 75
                     p.requires_grad = False if epoch == 0 else True
 
+        # Update image weights (optional)
+        dataset.image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=1 - maps)
+
         mloss = torch.zeros(5).to(device)  # mean losses
         for i, (imgs, targets, _, _) in enumerate(dataloader):
             imgs = imgs.to(device)
@@ -218,10 +222,10 @@ def train(
                 print('multi_scale img_size = %g' % dataset.img_size)
 
         # Calculate mAP (always test final epoch, skip first 5 if opt.nosave)
-        if not (opt.notest or (opt.nosave and epoch < 5)) or epoch == epochs - 1:
+        if not (opt.notest or (opt.nosave and epoch < 10)) or epoch == epochs - 1:
             with torch.no_grad():
-                results = test.test(cfg, data_cfg, batch_size=batch_size, img_size=img_size, model=model,
-                                    conf_thres=0.1)
+                results, maps = test.test(cfg, data_cfg, batch_size=batch_size, img_size=img_size, model=model,
+                                          conf_thres=0.1)
 
         # Write epoch results
         with open('results.txt', 'a') as file:
diff --git a/utils/datasets.py b/utils/datasets.py
index a1f435f2..58b7780e 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -130,12 +130,13 @@ class LoadWebcam:  # for inference
 
 
 class LoadImagesAndLabels(Dataset):  # for training/testing
-    def __init__(self, path, img_size=416, batch_size=16, augment=False, rect=True):
+    def __init__(self, path, img_size=416, batch_size=16, augment=False, rect=True, image_weighting=False):
         with open(path, 'r') as f:
             img_files = f.read().splitlines()
             self.img_files = list(filter(lambda x: len(x) > 0, img_files))
 
         n = len(self.img_files)
+        self.n = n
         assert n > 0, 'No images found in %s' % path
         self.img_size = img_size
         self.augment = augment
@@ -145,9 +146,11 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                             replace('.bmp', '.txt').
                             replace('.png', '.txt') for x in self.img_files]
 
+        self.image_weighting = image_weighting
+        self.rect = False if image_weighting else rect
+
         # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
-        self.pad_rectangular = rect
-        if self.pad_rectangular:
+        if self.rect:
             from PIL import Image
             bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
             nb = bi[-1] + 1  # number of batches
@@ -200,6 +203,9 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         return len(self.img_files)
 
     def __getitem__(self, index):
+        if self.image_weighting:
+            index = random.choices(range(self.n), weights=self.image_weights, k=1)[0]  # random weighted index
+
         img_path = self.img_files[index]
         label_path = self.label_files[index]
 
@@ -230,7 +236,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
 
         # Letterbox
         h, w, _ = img.shape
-        if self.pad_rectangular:
+        if self.rect:
             new_shape = self.batch_shapes[self.batch[index]]
             img, ratio, padw, padh = letterbox(img, new_shape=new_shape, mode='rect')
         else:
@@ -389,7 +395,7 @@ def random_affine(img, targets=(), degrees=(-10, 10), translate=(.1, .1), scale=
         h = xy[:, 3] - xy[:, 1]
         area = w * h
         ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
-        i = (w > 2) & (h > 2) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
+        i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
         targets = targets[i]
         targets[:, 1:5] = xy[i]
 
diff --git a/utils/utils.py b/utils/utils.py
index c27d911c..60b225d9 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -61,6 +61,15 @@ def labels_to_class_weights(labels, nc=80):
     return torch.Tensor(weights)
 
 
+def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
+    # Produces image weights based on class mAPs
+    n = len(labels)
+    class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
+    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
+    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
+    return image_weights
+
+
 def coco_class_weights():  # frequency of each class in coco train2014
     n = [187437, 4955, 30920, 6033, 3838, 4332, 3160, 7051, 7677, 9167, 1316, 1372, 833, 6757, 7355, 3302, 3776, 4671,
          6769, 5706, 3908, 903, 3686, 3596, 6200, 7920, 8779, 4505, 4272, 1862, 4698, 1962, 4403, 6659, 2402, 2689,
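
Note (not part of the patch): a minimal standalone sketch of how the image weighting introduced above is intended to behave, using made-up toy labels and per-class mAP values. It condenses labels_to_image_weights (with plain int in place of np.int) together with the weighted index draw that __getitem__ now performs when image_weighting is enabled.

import random
import numpy as np

def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Weight each image by the summed class weights of the objects it contains
    n = len(labels)
    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
    return (class_weights.reshape(1, nc) * class_counts).sum(1)

# Toy data: 3 images, 2 classes. Each label row is [class, x, y, w, h].
labels = [np.array([[0, .5, .5, .1, .1]]),                 # image 0: one class-0 object
          np.array([[1, .5, .5, .1, .1]]),                 # image 1: one class-1 object
          np.array([[0, .5, .5, .1, .1],
                    [1, .5, .5, .1, .1]])]                 # image 2: one object of each class
maps = np.array([0.8, 0.2])                                # hypothetical per-class mAP from test.test()
weights = labels_to_image_weights(labels, nc=2, class_weights=1 - maps)
# weights -> [0.2, 0.8, 1.0]: images containing the weaker class (1) are sampled more often
index = random.choices(range(len(labels)), weights=weights, k=1)[0]  # weighted draw, as in __getitem__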