diff --git a/train.py b/train.py
index 8c8e5b15..bc2a3e3b 100644
--- a/train.py
+++ b/train.py
@@ -64,9 +64,9 @@ def train(
     torch.backends.cudnn.benchmark = True  # unsuitable for multiscale

     # Configure run
-    data_cfg = parse_data_cfg(data_cfg)
-    train_path = data_cfg['train']
-    nc = int(data_cfg['classes'])  # number of classes
+    data_dict = parse_data_cfg(data_cfg)
+    train_path = data_dict['train']
+    nc = int(data_dict['classes'])  # number of classes

     # Initialize model
     model = Darknet(cfg, img_size).to(device)
@@ -276,12 +276,12 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--epochs', type=int, default=273, help='number of epochs')
-    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
+    parser.add_argument('--batch-size', type=int, default=4, help='size of each image batch')
     parser.add_argument('--accumulate', type=int, default=1, help='accumulate gradient x batches before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
-    parser.add_argument('--data-cfg', type=str, default='data/coco.data', help='coco.data file path')
+    parser.add_argument('--data-cfg', type=str, default='data/coco_10img.data', help='coco.data file path')
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
-    parser.add_argument('--img-size', type=int, default=416, help='pixels')
+    parser.add_argument('--img-size', type=int, default=320, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
     parser.add_argument('--transfer', action='store_true', help='transfer learning flag')
     parser.add_argument('--num-workers', type=int, default=2, help='number of Pytorch DataLoader workers')