Fix argument parser bad practice

Keep parsing inside the __main__ block and call functions with explicit arguments

Use a double dash (--) for long argument names; a single dash (-) is reserved for short options
Guillermo García 2018-12-05 14:31:08 +01:00
parent 5a566454f5
commit c807c16b79
5 changed files with 188 additions and 113 deletions
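A minimal sketch of the pattern this commit applies (names are illustrative, not the repo's exact code): parsing stays under the __main__ guard, the worker is an ordinary importable function with explicit parameters, and long options take a double dash. For boolean switches, action='store_true' is also safer than type=bool, since argparse passes any non-empty string (including 'False') through bool() and gets True.

import argparse

def detect(cfg_path, image_folder, conf_thres=0.5, save_images=False):
    # Importable entry point: no module-level `opt`, every input is explicit.
    print(cfg_path, image_folder, conf_thres, save_images)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Double dash for long names; argparse maps --conf-thres to opt.conf_thres.
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--image-folder', type=str, default='data/samples', help='path to images')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--save-images', action='store_true', help='save annotated images')
    opt = parser.parse_args()
    detect(opt.cfg, opt.image_folder, conf_thres=opt.conf_thres, save_images=opt.save_images)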

README.md

@@ -56,7 +56,7 @@ Checkpoints are saved in `/checkpoints` directory. Run `detect.py` to apply trai
Run `test.py` to validate the official YOLOv3 weights `checkpoints/yolov3.weights` against the 5000 validation images. You should obtain a mAP of .581 using this repo (https://github.com/ultralytics/yolov3), compared to .579 as reported in darknet (https://arxiv.org/abs/1804.02767).
Run `test.py -weights_path checkpoints/latest.pt` to validate against the latest training checkpoint.
Run `test.py --weights checkpoints/latest.pt` to validate against the latest training checkpoint.
# Contact

detect.py

@@ -5,45 +5,39 @@ from models import *
from utils.datasets import *
from utils.utils import *
f_path = os.path.dirname(os.path.realpath(__file__)) + '/'
parser = argparse.ArgumentParser()
# Get data configuration
parser.add_argument('-image_folder', type=str, default='data/samples', help='path to images')
parser.add_argument('-output_folder', type=str, default='output', help='path to outputs')
parser.add_argument('-plot_flag', type=bool, default=True)
parser.add_argument('-txt_out', type=bool, default=False)
parser.add_argument('-cfg', type=str, default=f_path + 'cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('-class_path', type=str, default=f_path + 'data/coco.names', help='path to class label file')
parser.add_argument('-conf_thres', type=float, default=0.50, help='object confidence threshold')
parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('-batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('-img_size', type=int, default=32 * 13, help='size of each image dimension')
opt = parser.parse_args()
print(opt)
from utils import torch_utils
def main(opt):
def detect(
net_config_path,
images_path,
weights_file_path='weights/yolov3.pt',
output='output',
batch_size=16,
img_size=416,
conf_thres=0.3,
nms_thres=0.45,
save_txt=False,
save_images=False,
class_path='data/coco.names',
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
os.system('rm -rf ' + opt.output_folder)
os.makedirs(opt.output_folder, exist_ok=True)
os.system('rm -rf ' + output)
os.makedirs(output, exist_ok=True)
# Load model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
weights_path = f_path + 'weights/yolov3.pt'
if weights_path.endswith('.pt'): # pytorch format
if weights_path.endswith('weights/yolov3.pt') and not os.path.isfile(weights_path):
os.system('wget https://storage.googleapis.com/ultralytics/yolov3.pt -O ' + weights_path)
if weights_file_path.endswith('.pt'): # pytorch format
if weights_file_path.endswith('weights/yolov3.pt') and not os.path.isfile(weights_file_path):
os.system('wget https://storage.googleapis.com/ultralytics/yolov3.pt -O ' + weights_file_path)
else: # darknet format
load_weights(model, weights_path)
load_weights(model, weights_file_path)
checkpoint = torch.load(weights_path, map_location='cpu')
checkpoint = torch.load(weights_file_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
del checkpoint
@@ -61,8 +55,8 @@ def main(opt):
model.to(device).eval()
# Set Dataloader
classes = load_classes(opt.class_path) # Extracts class labels from file
dataloader = load_images(opt.image_folder, batch_size=opt.batch_size, img_size=opt.img_size)
classes = load_classes(class_path) # Extracts class labels from file
dataloader = load_images(images_path, batch_size=batch_size, img_size=img_size)
imgs = [] # Stores image paths
img_detections = [] # Stores detections for each image index
@@ -73,10 +67,10 @@ def main(opt):
# Get detections
with torch.no_grad():
pred = model(torch.from_numpy(img).unsqueeze(0).to(device))
pred = pred[pred[:, :, 4] > opt.conf_thres]
pred = pred[pred[:, :, 4] > conf_thres]
if len(pred) > 0:
detections = non_max_suppression(pred.unsqueeze(0), opt.conf_thres, opt.nms_thres)
detections = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)
img_detections.extend(detections)
imgs.extend(img_paths)
@@ -93,15 +87,15 @@ def main(opt):
for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
print("image %g: '%s'" % (img_i, path))
if opt.plot_flag:
if save_images:
img = cv2.imread(path)
# The amount of padding that was added
pad_x = max(img.shape[0] - img.shape[1], 0) * (opt.img_size / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (opt.img_size / max(img.shape))
pad_x = max(img.shape[0] - img.shape[1], 0) * (img_size / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (img_size / max(img.shape))
# Image height and width after padding is removed
unpad_h = opt.img_size - pad_y
unpad_w = opt.img_size - pad_x
unpad_h = img_size - pad_y
unpad_w = img_size - pad_x
# Draw bounding boxes and labels of detections
if detections is not None:
@@ -109,7 +103,7 @@ def main(opt):
bbox_colors = random.sample(color_list, len(unique_classes))
# write results to .txt file
results_img_path = os.path.join(opt.output_folder, path.split('/')[-1])
results_img_path = os.path.join(output, path.split('/')[-1])
results_txt_path = results_img_path + '.txt'
if os.path.isfile(results_txt_path):
os.remove(results_txt_path)
@@ -129,24 +123,55 @@ def main(opt):
x1, y1, x2, y2 = max(x1, 0), max(y1, 0), max(x2, 0), max(y2, 0)
# write to file
if opt.txt_out:
if save_txt:
with open(results_txt_path, 'a') as file:
file.write(('%g %g %g %g %g %g \n') % (x1, y1, x2, y2, cls_pred, cls_conf * conf))
if opt.plot_flag:
if save_images:
# Add the bbox to the plot
label = '%s %.2f' % (classes[int(cls_pred)], conf)
color = bbox_colors[int(np.where(unique_classes == int(cls_pred))[0])]
plot_one_box([x1, y1, x2, y2], img, label=label, color=color)
if opt.plot_flag:
if save_images:
# Save generated image with detections
cv2.imwrite(results_img_path.replace('.bmp', '.jpg').replace('.tif', '.jpg'), img)
if platform == 'darwin': # MacOS (local)
os.system('open ' + opt.output_folder)
os.system('open ' + output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Get data configuration
parser.add_argument('--image-folder', type=str, default='data/samples', help='path to images')
parser.add_argument('--output-folder', type=str, default='output', help='path to outputs')
parser.add_argument('--plot-flag', type=bool, default=True)
parser.add_argument('--txt-out', type=bool, default=False)
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--class-path', type=str, default='data/coco.names', help='path to class label file')
parser.add_argument('--conf-thres', type=float, default=0.50, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('--batch-size', type=int, default=1, help='size of the batches')
parser.add_argument('--img-size', type=int, default=32 * 13, help='size of each image dimension')
opt = parser.parse_args()
print(opt)
torch.cuda.empty_cache()
main(opt)
init_seeds()
detect(
opt.cfg,
opt.image_folder,
output=opt.output_folder,
batch_size=opt.batch_size,
img_size=opt.img_size,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
save_txt=opt.txt_out,
save_images=opt.plot_flag,
class_path=opt.class_path,
)
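With parsing under the __main__ guard, detect() can now be imported and called from other code as well as run from the command line; a minimal usage sketch (argument values are illustrative):

from detect import detect

detect('cfg/yolov3.cfg', 'data/samples', output='output', conf_thres=0.5, save_images=True)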

test.py

@@ -6,47 +6,44 @@ from utils.utils import *
from utils import torch_utils
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('-batch_size', type=int, default=32, help='size of each image batch')
parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('-weights_path', type=str, default='weights/yolov3.pt', help='path to weights file')
parser.add_argument('-class_path', type=str, default='data/coco.names', help='path to class label file')
parser.add_argument('-iou_thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('-conf_thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('-n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')
parser.add_argument('-img_size', type=int, default=416, help='size of each image dimension')
opt = parser.parse_args()
print(opt, end='\n\n')
def main(opt):
def test(
net_config_path,
data_config_path,
weights_file_path,
class_path=None,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
n_cpus=0,
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
# Configure run
data_config = parse_data_config(opt.data_config_path)
data_config = parse_data_config(data_config_path)
nC = int(data_config['classes']) # number of classes (80 for COCO)
test_path = data_config['valid']
# Initiate model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
# Load weights
if opt.weights_path.endswith('.pt'): # pytorch format
checkpoint = torch.load(opt.weights_path, map_location='cpu')
if weights_file_path.endswith('.pt'): # pytorch format
checkpoint = torch.load(weights_file_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
del checkpoint
else: # darknet format
load_weights(model, opt.weights_path)
load_weights(model, weights_file_path)
model.to(device).eval()
# Get dataloader
# dataset = load_images_with_labels(test_path)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
dataloader = load_images_and_labels(test_path, batch_size=opt.batch_size, img_size=opt.img_size)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_cpus)
dataloader = load_images_and_labels(test_path, batch_size=batch_size, img_size=img_size)
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class = [], [], [], [], [], [], [], []
@@ -55,7 +52,7 @@ def main(opt):
with torch.no_grad():
output = model(imgs.to(device))
output = non_max_suppression(output, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)
output = non_max_suppression(output, conf_thres=conf_thres, nms_thres=nms_thres)
# Compute average precision for each sample
for sample_i, (labels, detections) in enumerate(zip(targets, output)):
@@ -80,7 +77,7 @@ def main(opt):
target_cls = labels[:, 0]
# Extract target boxes as (x1, y1, x2, y2)
target_boxes = xywh2xyxy(labels[:, 1:5]) * opt.img_size
target_boxes = xywh2xyxy(labels[:, 1:5]) * img_size
detected = []
for *pred_bbox, conf, obj_conf, obj_pred in detections:
@@ -91,7 +88,7 @@ def main(opt):
# Extract index of largest overlap
best_i = np.argmax(iou)
# If overlap exceeds threshold and classification is correct mark as correct
if iou[best_i] > opt.iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
if iou[best_i] > iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
correct.append(1)
detected.append(best_i)
else:
@@ -121,9 +118,10 @@ def main(opt):
# Print mAP per class
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP') + '\n\nmAP Per Class:')
classes = load_classes(opt.class_path) # Extracts class labels from file
for i, c in enumerate(classes):
print('%15s: %-.4f' % (c, AP_accum[i] / AP_accum_count[i]))
if class_path:
classes = load_classes(class_path) # Extracts class labels from file
for i, c in enumerate(classes):
print('%15s: %-.4f' % (c, AP_accum[i] / AP_accum_count[i]))
# Return mAP
return mean_mAP, mean_R, mean_P
@@ -131,6 +129,31 @@ def main(opt):
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
parser.add_argument('--data-config-path', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('--weights', type=str, default='weights/yolov3.pt', help='path to weights file')
parser.add_argument('--class-path', type=str, default='data/coco.names', help='path to class label file')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('--n-cpus', type=int, default=0, help='number of cpu threads to use during batch generation')
parser.add_argument('--img-size', type=int, default=416, help='size of each image dimension')
opt = parser.parse_args()
print(opt, end='\n\n')
init_seeds()
mAP = main(opt)
mAP = test(
opt.cfg,
opt.data_config_path,
opt.weights,
class_path=opt.class_path,
batch_size=opt.batch_size,
img_size=opt.img_size,
iou_thres=opt.iou_thres,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
n_cpus=opt.n_cpus,
)
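test() likewise becomes importable; train.py below now calls it directly instead of clearing sys.argv before the import. A minimal usage sketch (paths are illustrative):

from test import test

mAP, mean_R, mean_P = test('cfg/yolov3.cfg', 'cfg/coco.data', 'weights/latest.pt', img_size=416)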

train.py

@@ -8,51 +8,48 @@ from utils.utils import *
from utils import torch_utils
parser = argparse.ArgumentParser()
parser.add_argument('-epochs', type=int, default=100, help='number of epochs')
parser.add_argument('-batch_size', type=int, default=16, help='size of each image batch')
parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='data config file path')
parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('-multi_scale', default=False, help='random image sizes per batch 320 - 608')
parser.add_argument('-img_size', type=int, default=32 * 13, help='pixels')
parser.add_argument('-resume', default=False, help='resume training flag')
parser.add_argument('-batch_report', default=False, help='report TP, FP, FN, P and R per batch (slower)')
parser.add_argument('-freeze_darknet53', default=False, help='freeze darknet53.conv.74 layers for first epoch')
parser.add_argument('-var', type=float, default=0, help='optional test variable')
opt = parser.parse_args()
if opt.multi_scale: # pass maximum multi_scale size
opt.img_size = 608
print(opt)
# Import test.py to get mAP after each epoch
sys.argv[1:] = [] # delete any train.py command-line arguments before they reach test.py
import test # must follow sys.argv[1:] = []
import test
def main(opt):
def train(
net_config_path,
data_config_path,
img_size=416,
resume=False,
epochs=100,
batch_size=16,
report=False,
multi_scale=False,
freeze_backbone=True,
var=0,
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
if not opt.multi_scale:
if not multi_scale:
torch.backends.cudnn.benchmark = True
os.makedirs('weights', exist_ok=True)
# Configure run
data_config = parse_data_config(opt.data_config_path)
data_config = parse_data_config(data_config_path)
num_classes = int(data_config['classes'])
train_path = '../coco/trainvalno5k.txt'
# Initialize model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
# Get dataloader
dataloader = load_images_and_labels(train_path, batch_size=opt.batch_size, img_size=opt.img_size,
multi_scale=opt.multi_scale, augment=True)
if multi_scale: # pass maximum multi_scale size
img_size = 608
dataloader = load_images_and_labels(train_path, batch_size=batch_size, img_size=img_size,
multi_scale=multi_scale, augment=True)
lr0 = 0.001
if opt.resume:
if resume:
checkpoint = torch.load('weights/latest.pt', map_location='cpu')
model.load_state_dict(checkpoint['model'])
@@ -103,7 +100,7 @@ def main(opt):
mean_recall, mean_precision = 0, 0
print('%11s' * 16 % (
'Epoch', 'Batch', 'x', 'y', 'w', 'h', 'conf', 'cls', 'total', 'P', 'R', 'nTargets', 'TP', 'FP', 'FN', 'time'))
for epoch in range(opt.epochs):
for epoch in range(epochs):
epoch += start_epoch
# Update scheduler (automatic)
@@ -118,7 +115,7 @@ def main(opt):
g['lr'] = lr
# Freeze darknet53.conv.74 layers for first epoch
if opt.freeze_darknet53:
if freeze_backbone is not False:
if epoch == 0:
for i, (name, p) in enumerate(model.named_parameters()):
if int(name.split('.')[1]) < 75: # if layer < 75
@@ -143,7 +140,7 @@ def main(opt):
g['lr'] = lr
# Compute loss, compute gradient, update parameters
loss = model(imgs.to(device), targets, batch_report=opt.batch_report, var=opt.var)
loss = model(imgs.to(device), targets, batch_report=report, var=var)
loss.backward()
# accumulated_batches = 1 # accumulate gradient for 4 batches before stepping optimizer
@@ -156,7 +153,7 @@ def main(opt):
for key, val in model.losses.items():
rloss[key] = (rloss[key] * ui + val) / (ui + 1)
if opt.batch_report:
if report:
TP, FP, FN = metrics
metrics += model.losses['metrics']
@@ -173,7 +170,7 @@ def main(opt):
mean_recall = recall[k].mean()
s = ('%11s%11s' + '%11.3g' * 14) % (
'%g/%g' % (epoch, opt.epochs - 1), '%g/%g' % (i, len(dataloader) - 1), rloss['x'],
'%g/%g' % (epoch, epochs - 1), '%g/%g' % (i, len(dataloader) - 1), rloss['x'],
rloss['y'], rloss['w'], rloss['h'], rloss['conf'], rloss['cls'],
rloss['loss'], mean_precision, mean_recall, model.losses['nT'], model.losses['TP'],
model.losses['FP'], model.losses['FN'], time.time() - t1)
@@ -201,8 +198,13 @@ def main(opt):
os.system('cp weights/latest.pt weights/backup' + str(epoch) + '.pt')
# Calculate mAP
test.opt.weights_path = 'weights/latest.pt'
mAP, R, P = test.main(test.opt)
mAP, R, P = test.test(
net_config_path,
data_config_path,
'weights/latest.pt',
batch_size=batch_size,
img_size=img_size,
)
# Write epoch results
with open('results.txt', 'a') as file:
@@ -215,7 +217,32 @@ def main(opt):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100, help='number of epochs')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--data-config-path', type=str, default='cfg/coco.data', help='data config file path')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--multi-scale', default=False, help='random image sizes per batch 320 - 608')
parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
parser.add_argument('--resume', default=False, help='resume training flag')
parser.add_argument('--report', default=False, help='report TP, FP, FN, P and R per batch (slower)')
parser.add_argument('--freeze-darknet53', default=False, help='freeze darknet53.conv.74 layers for first epoch')
parser.add_argument('--var', type=float, default=0, help='optional test variable')
opt = parser.parse_args()
print(opt, end='\n\n')
init_seeds()
torch.cuda.empty_cache()
main(opt)
train(
opt.cfg,
opt.data_config_path,
img_size=opt.img_size,
resume=opt.resume,
epochs=opt.epochs,
batch_size=opt.batch_size,
report=opt.report,
multi_scale=opt.multi_scale,
freeze_backbone=opt.freeze_darknet53,
var=opt.var,
)

utils/gcp.sh Normal file → Executable file

@@ -4,28 +4,28 @@
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3 && python3 train.py
# Resume
python3 train.py -resume 1
python3 train.py --resume 1
# Detect
gsutil cp gs://ultralytics/yolov3.pt yolov3/weights
python3 detect.py
# Test
python3 test.py -img_size 416 -weights_path weights/latest.pt
python3 test.py --img-size 416 --weights weights/latest.pt
# Test Darknet
python3 test.py -img_size 416 -weights_path ../darknet/backup/yolov3.backup
python3 test.py --img-size 416 --weights ../darknet/backup/yolov3.backup
# Download and Test
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
wget https://pjreddie.com/media/files/yolov3.weights -P weights
python3 test.py -img_size 416 -weights_path weights/backup5.pt -nms_thres 0.45
python3 test.py --img-size 416 --weights weights/backup5.pt --nms-thres 0.45
# Download and Resume
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
wget https://storage.googleapis.com/ultralytics/yolov3.pt -O weights/latest.pt
python3 train.py -img_size 416 -batch_size 16 -epochs 1 -resume 1
python3 test.py -img_size 416 -weights_path weights/latest.pt -conf_thres 0.5
python3 train.py --img-size 416 --batch-size 16 --epochs 1 --resume 1
python3 test.py --img-size 416 --weights weights/latest.pt --conf-thres 0.5
# Copy latest.pt to bucket
gsutil cp yolov3/weights/latest.pt gs://ultralytics
@@ -36,6 +36,6 @@ wget https://storage.googleapis.com/ultralytics/latest.pt
# Testing
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
python3 train.py -epochs 3 -var 64
python3 train.py --epochs 3 --var 64
sudo shutdown