From ab9ee6aa9ad0919761e6e0003b908a3b41218440 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 23 Nov 2018 19:45:39 +0100
Subject: [PATCH] updates

---
 test.py      |  2 +-
 train.py     | 14 +++++++++-----
 utils/gcp.sh |  2 +-
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/test.py b/test.py
index 89e00a52..4d739993 100644
--- a/test.py
+++ b/test.py
@@ -5,7 +5,7 @@ from utils.datasets import *
 from utils.utils import *
 
 parser = argparse.ArgumentParser(prog='test.py')
-parser.add_argument('-batch_size', type=int, default=64, help='size of each image batch')
+parser.add_argument('-batch_size', type=int, default=32, help='size of each image batch')
 parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
 parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='path to data config file')
 parser.add_argument('-weights_path', type=str, default='weights/yolov3.pt', help='path to weights file')
diff --git a/train.py b/train.py
index dbfd8749..2f200ac8 100644
--- a/train.py
+++ b/train.py
@@ -14,6 +14,7 @@ parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='cfg file p
 parser.add_argument('-img_size', type=int, default=32 * 13, help='size of each image dimension')
 parser.add_argument('-resume', default=False, help='resume training flag')
 parser.add_argument('-batch_report', default=False, help='report TP, FP, FN, P and R per batch (slower)')
+parser.add_argument('-optimizer', default='SGD', help='Optimizer')
 opt = parser.parse_args()
 print(opt)
 
@@ -68,9 +69,10 @@ def main(opt):
         #     p.requires_grad = False
 
         # Set optimizer
-        # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
-        optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
-                                    lr=1e-3, momentum=.9, weight_decay=5e-4)
+        if opt.optimizer == 'Adam':
+            optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4, weight_decay=5e-4)
+        else:
+            optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3, momentum=.9, weight_decay=5e-4)
 
         start_epoch = checkpoint['epoch'] + 1
         if checkpoint['optimizer'] is not None:
@@ -91,8 +93,10 @@ def main(opt):
         model.to(device).train()
 
         # Set optimizer
-        # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=5e-4)
-        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9, weight_decay=5e-4)
+        if opt.optimizer == 'Adam':
+            optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4, weight_decay=5e-4)
+        else:
+            optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3, momentum=.9, weight_decay=5e-4)
 
     # Set scheduler
     # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[54, 61], gamma=0.1)
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 691e1257..b041bf46 100644
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -11,7 +11,7 @@ gsutil cp gs://ultralytics/yolov3.pt yolov3/weights
 python3 detect.py
 
 # Test
-python3 test.py -img_size 416 -weights_path weights/latest.pt -conf_thres 0.5
+python3 test.py -img_size 416 -weights_path weights/latest.pt
 
 # Download and Test
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3