diff --git a/README.md b/README.md
index cbb37de9..fd529607 100755
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ Python 3.7 or later with the following `pip3 install -U -r requirements.txt` pac
 
 **Start Training:** Run `train.py` to begin training after downloading COCO data with `data/get_coco_dataset.sh` and specifying COCO path on line 37 (local) or line 39 (cloud). Training runs about 1 hour per COCO epoch on a 1080 Ti.
 
-**Resume Training:** Run `train.py -resume 1` to resume training from the most recently saved checkpoint `latest.pt`.
+**Resume Training:** Run `train.py --resume` to resume training from the most recently saved checkpoint `latest.pt`.
 
 Each epoch trains on 120,000 images from the train and validate COCO sets, and tests on 5000 images from the COCO validate set. An Nvidia GTX 1080 Ti will process about 10-15 epochs/day depending on image size and augmentation (13 epochs/day at 416 pixels with default augmentation). Loss plots for the bounding boxes, objectness and class confidence should appear similar to results shown here (results in progress to 160 epochs, will update).
 
diff --git a/train.py b/train.py
index 5bc9cf9f..38f86035 100644
--- a/train.py
+++ b/train.py
@@ -224,7 +224,7 @@ if __name__ == '__main__':
     parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
     parser.add_argument('--multi-scale', default=False, help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
-    parser.add_argument('--resume', default=False, help='resume training flag')
+    parser.add_argument('--resume', action='store_true', help='resume training flag')
     parser.add_argument('--report', default=False, help='report TP, FP, FN, P and R per batch (slower)')
     parser.add_argument('--freeze-darknet53', default=False, help='freeze darknet53.conv.74 layers for first epoch')
     parser.add_argument('--var', type=float, default=0, help='optional test variable')
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 130f3249..32647259 100755
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -4,7 +4,7 @@
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3 && python3 train.py
 
 # Resume
-python3 train.py --resume 1
+python3 train.py --resume
 
 # Detect
 gsutil cp gs://ultralytics/yolov3.pt yolov3/weights
@@ -24,7 +24,7 @@ python3 test.py --img_size 416 --weights weights/backup5.pt --nms_thres 0.45
 # Download and Resume
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
 wget https://storage.googleapis.com/ultralytics/yolov3.pt -O weights/latest.pt
-python3 train.py --img_size 416 --batch_size 16 --epochs 1 --resume 1
+python3 train.py --img_size 416 --batch_size 16 --epochs 1 --resume
 python3 test.py --img_size 416 --weights weights/latest.pt --conf_thres 0.5
 
 # Copy latest.pt to bucket
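
The substantive change in this patch is the argparse definition of `--resume` in train.py; the README and gcp.sh edits just update its call sites to match. Below is a minimal standalone sketch (my own illustration, not code from the patch) of why the old `default=False` form needed a value like `--resume 1` while the new `action='store_true'` form takes a bare `--resume`:

```python
import argparse

# Old definition: default=False with no action means '--resume' expects a
# value, and any non-empty string it receives (even '0') is truthy.
old = argparse.ArgumentParser()
old.add_argument('--resume', default=False, help='resume training flag')
assert old.parse_args(['--resume', '1']).resume == '1'   # a string, not a bool
assert bool(old.parse_args(['--resume', '0']).resume)    # '0' is still truthy

# New definition: store_true makes '--resume' a proper boolean switch.
new = argparse.ArgumentParser()
new.add_argument('--resume', action='store_true', help='resume training flag')
assert new.parse_args(['--resume']).resume is True   # flag present -> True
assert new.parse_args([]).resume is False            # flag absent  -> False
```

With `store_true`, a bare `old.parse_args(['--resume'])` would have been a usage error ("expected one argument"), which is why every invocation in the README and gcp.sh had to drop the trailing `1`.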