diff --git a/train.py b/train.py
index efa92980..052aaf11 100644
--- a/train.py
+++ b/train.py
@@ -76,7 +76,7 @@ def train(
     if multi_scale:
         img_size = round((img_size / 32) * 1.5) * 32  # initiate with maximum multi_scale size
-        opt.num_workers = 0  # bug https://github.com/ultralytics/yolov3/issues/174
+        # opt.num_workers = 0  # bug https://github.com/ultralytics/yolov3/issues/174
     else:
         torch.backends.cudnn.benchmark = True  # unsuitable for multiscale
@@ -308,9 +308,9 @@ if __name__ == '__main__':
     parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
     parser.add_argument('--accumulate', type=int, default=1, help='accumulate gradient x batches before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
-    parser.add_argument('--data-cfg', type=str, default='data/coco.data', help='coco.data file path')
+    parser.add_argument('--data-cfg', type=str, default='data/coco_32img.data', help='coco.data file path')
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
-    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
+    parser.add_argument('--img-size', type=int, default=320, help='inference size (pixels)')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
     parser.add_argument('--transfer', action='store_true', help='transfer learning flag')
     parser.add_argument('--num-workers', type=int, default=4, help='number of Pytorch DataLoader workers')
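The first hunk removes the workaround that forced single-process data loading whenever --multi-scale was set, so the --num-workers value (default 4) now reaches the PyTorch DataLoader. A minimal, self-contained sketch of that effect follows; the dataset and values are stand-ins for illustration, not the repository's code.

# sketch only: shows how num_workers now flows into the DataLoader when
# multi-scale training is enabled, instead of being overridden to 0
import torch
from torch.utils.data import DataLoader, TensorDataset

if __name__ == '__main__':
    num_workers = 4  # value of --num-workers; previously forced to 0 under multi_scale
    dataset = TensorDataset(torch.zeros(32, 3, 320, 320))  # stand-in for the real image dataset

    dataloader = DataLoader(dataset,
                            batch_size=16,
                            num_workers=num_workers,  # worker processes are used again
                            shuffle=True)

    for (imgs,) in dataloader:
        pass  # the training loop would consume batches here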