Merge pull request #45 from guigarfr/argparse

Argparse PR
Glenn Jocher committed 2018-12-10 12:47:31 +01:00 (via GitHub), commit 362b41436a
7 changed files with 266 additions and 134 deletions


@@ -21,7 +21,7 @@ Python 3.7 or later with the following `pip3 install -U -r requirements.txt` pac
**Start Training:** Run `train.py` to begin training after downloading COCO data with `data/get_coco_dataset.sh` and specifying COCO path on line 37 (local) or line 39 (cloud). Training runs about 1 hour per COCO epoch on a 1080 Ti.
**Resume Training:** Run `train.py -resume 1` to resume training from the most recently saved checkpoint `latest.pt`.
**Resume Training:** Run `train.py --resume` to resume training from the most recently saved checkpoint `latest.pt`.
Each epoch trains on 120,000 images from the train and validate COCO sets, and tests on 5000 images from the COCO validate set. An Nvidia GTX 1080 Ti will process about 10-15 epochs/day depending on image size and augmentation (13 epochs/day at 416 pixels with default augmentation). Loss plots for the bounding boxes, objectness and class confidence should appear similar to results shown here (results in progress to 160 epochs, will update).
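Note: to clarify what `--resume` actually restores, the `latest.pt` checkpoint saved and reloaded by `train.py` (see the `train.py` diff below) is an ordinary `torch.save` dict. A minimal, self-contained sketch of that round trip, with `nn.Linear` standing in for the real `Darknet` model:

```python
import torch
import torch.nn as nn

# Stand-ins for the real Darknet model and its SGD optimizer.
model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

# Keys follow the checkpoint dict visible in the train.py diff below.
checkpoint = {'best_loss': float('inf'),
              'model': model.state_dict(),
              'optimizer': optimizer.state_dict()}
torch.save(checkpoint, 'latest.pt')

# Resume path: reload the dict and restore the model (as shown in the diff);
# restoring the optimizer the same way is an assumption, not shown in the hunk.
checkpoint = torch.load('latest.pt', map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
del checkpoint
```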
@@ -56,7 +56,7 @@ Checkpoints are saved in `/checkpoints` directory. Run `detect.py` to apply trai
Run `test.py` to validate the official YOLOv3 weights `checkpoints/yolov3.weights` against the 5000 validation images. You should obtain a mAP of .581 using this repo (https://github.com/ultralytics/yolov3), compared to .579 as reported in darknet (https://arxiv.org/abs/1804.02767).
Run `test.py -weights_path checkpoints/latest.pt` to validate against the latest training checkpoint.
Run `test.py --weights checkpoints/latest.pt` to validate against the latest training checkpoint.
# Contact
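Note: with this PR the validation above can also be driven from Python. Based on the new `test()` signature further down in this diff, a call would look roughly like this (a sketch; it assumes the repo's `cfg/` files and the COCO validation images referenced by `cfg/coco.data` are in place):

```python
from test import test  # test.py from this repo

# Argument order and defaults mirror the new test() signature and its __main__ block.
mAP = test(
    'cfg/yolov3.cfg',      # net_config_path
    'cfg/coco.data',       # data_config_path
    'weights/latest.pt',   # weights_file_path
    batch_size=32,
    img_size=416,
    iou_thres=0.5,
    conf_thres=0.3,
    nms_thres=0.45,
)
```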

detect.py

@@ -5,45 +5,42 @@ from models import *
from utils.datasets import *
from utils.utils import *
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')
f_path = os.path.dirname(os.path.realpath(__file__)) + '/'
parser = argparse.ArgumentParser()
# Get data configuration
parser.add_argument('-image_folder', type=str, default='data/samples', help='path to images')
parser.add_argument('-output_folder', type=str, default='output', help='path to outputs')
parser.add_argument('-plot_flag', type=bool, default=True)
parser.add_argument('-txt_out', type=bool, default=False)
parser.add_argument('-cfg', type=str, default=f_path + 'cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('-class_path', type=str, default=f_path + 'data/coco.names', help='path to class label file')
parser.add_argument('-conf_thres', type=float, default=0.50, help='object confidence threshold')
parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('-batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('-img_size', type=int, default=32 * 13, help='size of each image dimension')
opt = parser.parse_args()
print(opt)
from utils import torch_utils
def main(opt):
os.system('rm -rf ' + opt.output_folder)
os.makedirs(opt.output_folder, exist_ok=True)
def detect(
net_config_path,
data_config_path,
images_path,
weights_file_path='weights/yolov3.pt',
output='output',
batch_size=16,
img_size=416,
conf_thres=0.3,
nms_thres=0.45,
save_txt=False,
save_images=False,
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
os.system('rm -rf ' + output)
os.makedirs(output, exist_ok=True)
data_config = parse_data_config(data_config_path)
# Load model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
weights_path = f_path + 'weights/yolov3.pt'
if weights_path.endswith('.pt'): # pytorch format
if weights_path.endswith('weights/yolov3.pt') and not os.path.isfile(weights_path):
os.system('wget https://storage.googleapis.com/ultralytics/yolov3.pt -O ' + weights_path)
checkpoint = torch.load(weights_path, map_location='cpu')
if weights_file_path.endswith('.pt'): # pytorch format
if weights_file_path.endswith('weights/yolov3.pt') and not os.path.isfile(weights_file_path):
os.system('wget https://storage.googleapis.com/ultralytics/yolov3.pt -O ' + weights_file_path)
checkpoint = torch.load(weights_file_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
del checkpoint
else: # darknet format
load_weights(model, weights_path)
load_weights(model, weights_file_path)
# current = model.state_dict()
# saved = checkpoint['model']
@@ -59,8 +56,8 @@ def main(opt):
model.to(device).eval()
# Set Dataloader
classes = load_classes(opt.class_path) # Extracts class labels from file
dataloader = load_images(opt.image_folder, batch_size=opt.batch_size, img_size=opt.img_size)
classes = load_classes(data_config['names']) # Extracts class labels from file
dataloader = load_images(images_path, batch_size=batch_size, img_size=img_size)
imgs = [] # Stores image paths
img_detections = [] # Stores detections for each image index
@@ -71,10 +68,10 @@ def main(opt):
# Get detections
with torch.no_grad():
pred = model(torch.from_numpy(img).unsqueeze(0).to(device))
pred = pred[pred[:, :, 4] > opt.conf_thres]
pred = pred[pred[:, :, 4] > conf_thres]
if len(pred) > 0:
detections = non_max_suppression(pred.unsqueeze(0), opt.conf_thres, opt.nms_thres)
detections = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)
img_detections.extend(detections)
imgs.extend(img_paths)
@@ -91,15 +88,15 @@ def main(opt):
for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
print("image %g: '%s'" % (img_i, path))
if opt.plot_flag:
if save_images:
img = cv2.imread(path)
# The amount of padding that was added
pad_x = max(img.shape[0] - img.shape[1], 0) * (opt.img_size / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (opt.img_size / max(img.shape))
pad_x = max(img.shape[0] - img.shape[1], 0) * (img_size / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (img_size / max(img.shape))
# Image height and width after padding is removed
unpad_h = opt.img_size - pad_y
unpad_w = opt.img_size - pad_x
unpad_h = img_size - pad_y
unpad_w = img_size - pad_x
# Draw bounding boxes and labels of detections
if detections is not None:
@@ -107,7 +104,7 @@ def main(opt):
bbox_colors = random.sample(color_list, len(unique_classes))
# write results to .txt file
results_img_path = os.path.join(opt.output_folder, path.split('/')[-1])
results_img_path = os.path.join(output, path.split('/')[-1])
results_txt_path = results_img_path + '.txt'
if os.path.isfile(results_txt_path):
os.remove(results_txt_path)
@@ -127,24 +124,55 @@ def main(opt):
x1, y1, x2, y2 = max(x1, 0), max(y1, 0), max(x2, 0), max(y2, 0)
# write to file
if opt.txt_out:
if save_txt:
with open(results_txt_path, 'a') as file:
file.write(('%g %g %g %g %g %g \n') % (x1, y1, x2, y2, cls_pred, cls_conf * conf))
if opt.plot_flag:
if save_images:
# Add the bbox to the plot
label = '%s %.2f' % (classes[int(cls_pred)], conf)
color = bbox_colors[int(np.where(unique_classes == int(cls_pred))[0])]
plot_one_box([x1, y1, x2, y2], img, label=label, color=color)
if opt.plot_flag:
if save_images:
# Save generated image with detections
cv2.imwrite(results_img_path.replace('.bmp', '.jpg').replace('.tif', '.jpg'), img)
if platform == 'darwin': # MacOS (local)
os.system('open ' + opt.output_folder)
os.system('open ' + output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Get data configuration
parser.add_argument('--image-folder', type=str, default='data/samples', help='path to images')
parser.add_argument('--output-folder', type=str, default='output', help='path to outputs')
parser.add_argument('--plot-flag', type=bool, default=True)
parser.add_argument('--txt-out', type=bool, default=False)
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data-config', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('--conf-thres', type=float, default=0.50, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('--batch-size', type=int, default=1, help='size of the batches')
parser.add_argument('--img-size', type=int, default=32 * 13, help='size of each image dimension')
opt = parser.parse_args()
print(opt)
torch.cuda.empty_cache()
main(opt)
init_seeds()
detect(
opt.cfg,
opt.data_config,
opt.image_folder,
output=opt.output_folder,
batch_size=opt.batch_size,
img_size=opt.img_size,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
save_txt=opt.txt_out,
save_images=opt.plot_flag,
)
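Note on the flags above (carried over from the pre-PR code rather than introduced here): `--plot-flag` and `--txt-out` use `type=bool`, which argparse cannot actually turn off from the command line, because `bool(s)` is `True` for any non-empty string. A small demonstration, plus the `store_true`/`store_false` pattern that train.py's new `--resume`/`--report`/`--freeze` flags already use (`--no-plot` below is a hypothetical name, not part of this PR):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--plot-flag', type=bool, default=True)
opt = parser.parse_args(['--plot-flag', 'False'])
print(opt.plot_flag)   # True: bool('False') is True for any non-empty string

# store_false avoids the issue: default True, flag switches it off.
parser2 = argparse.ArgumentParser()
parser2.add_argument('--no-plot', dest='plot_flag', action='store_false',
                     help='hypothetical flag; disables saving annotated images')
print(parser2.parse_args([]).plot_flag)             # True by default
print(parser2.parse_args(['--no-plot']).plot_flag)  # False when passed
```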

test.py

@@ -4,47 +4,45 @@ from models import *
from utils.datasets import *
from utils.utils import *
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('-batch_size', type=int, default=32, help='size of each image batch')
parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('-weights_path', type=str, default='weights/yolov3.pt', help='path to weights file')
parser.add_argument('-class_path', type=str, default='data/coco.names', help='path to class label file')
parser.add_argument('-iou_thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('-conf_thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('-n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')
parser.add_argument('-img_size', type=int, default=416, help='size of each image dimension')
opt = parser.parse_args()
print(opt, end='\n\n')
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')
from utils import torch_utils
def main(opt):
def test(
net_config_path,
data_config_path,
weights_file_path,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
n_cpus=0,
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
# Configure run
data_config = parse_data_config(opt.data_config_path)
data_config = parse_data_config(data_config_path)
nC = int(data_config['classes']) # number of classes (80 for COCO)
test_path = data_config['valid']
# Initiate model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
# Load weights
if opt.weights_path.endswith('.pt'): # pytorch format
checkpoint = torch.load(opt.weights_path, map_location='cpu')
if weights_file_path.endswith('.pt'): # pytorch format
checkpoint = torch.load(weights_file_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
del checkpoint
else: # darknet format
load_weights(model, opt.weights_path)
load_weights(model, weights_file_path)
model.to(device).eval()
# Get dataloader
# dataset = load_images_with_labels(test_path)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
dataloader = load_images_and_labels(test_path, batch_size=opt.batch_size, img_size=opt.img_size)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_cpus)
dataloader = load_images_and_labels(test_path, batch_size=batch_size, img_size=img_size)
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class = [], [], [], [], [], [], [], []
@@ -53,7 +51,7 @@ def main(opt):
with torch.no_grad():
output = model(imgs.to(device))
output = non_max_suppression(output, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)
output = non_max_suppression(output, conf_thres=conf_thres, nms_thres=nms_thres)
# Compute average precision for each sample
for sample_i, (labels, detections) in enumerate(zip(targets, output)):
@@ -78,7 +76,7 @@ def main(opt):
target_cls = labels[:, 0]
# Extract target boxes as (x1, y1, x2, y2)
target_boxes = xywh2xyxy(labels[:, 1:5]) * opt.img_size
target_boxes = xywh2xyxy(labels[:, 1:5]) * img_size
detected = []
for *pred_bbox, conf, obj_conf, obj_pred in detections:
@@ -89,7 +87,7 @@ def main(opt):
# Extract index of largest overlap
best_i = np.argmax(iou)
# If overlap exceeds threshold and classification is correct mark as correct
if iou[best_i] > opt.iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
if iou[best_i] > iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
correct.append(1)
detected.append(best_i)
else:
@@ -119,7 +117,7 @@ def main(opt):
# Print mAP per class
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP') + '\n\nmAP Per Class:')
classes = load_classes(opt.class_path) # Extracts class labels from file
classes = load_classes(data_config['names']) # Extracts class labels from file
for i, c in enumerate(classes):
print('%15s: %-.4f' % (c, AP_accum[i] / AP_accum_count[i]))
@@ -128,4 +126,30 @@ def main(opt):
if __name__ == '__main__':
mAP = main(opt)
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='path to model config file')
parser.add_argument('--data-config', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('--weights', type=str, default='weights/yolov3.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.45, help='iou threshold for non-maximum suppression')
parser.add_argument('--n-cpus', type=int, default=0, help='number of cpu threads to use during batch generation')
parser.add_argument('--img-size', type=int, default=416, help='size of each image dimension')
opt = parser.parse_args()
print(opt, end='\n\n')
init_seeds()
mAP = test(
opt.cfg,
opt.data_config,
opt.weights,
batch_size=opt.batch_size,
img_size=opt.img_size,
iou_thres=opt.iou_thres,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
n_cpus=opt.n_cpus,
)

train.py

@@ -6,57 +6,59 @@ from models import *
from utils.datasets import *
from utils.utils import *
parser = argparse.ArgumentParser()
parser.add_argument('-epochs', type=int, default=100, help='number of epochs')
parser.add_argument('-batch_size', type=int, default=16, help='size of each image batch')
parser.add_argument('-data_config_path', type=str, default='cfg/coco.data', help='data config file path')
parser.add_argument('-cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('-multi_scale', default=False, help='random image sizes per batch 320 - 608')
parser.add_argument('-img_size', type=int, default=32 * 13, help='pixels')
parser.add_argument('-resume', default=False, help='resume training flag')
parser.add_argument('-batch_report', default=False, help='report TP, FP, FN, P and R per batch (slower)')
parser.add_argument('-freeze_darknet53', default=False, help='freeze darknet53.conv.74 layers for first epoch')
parser.add_argument('-var', type=float, default=0, help='optional test variable')
opt = parser.parse_args()
if opt.multi_scale: # pass maximum multi_scale size
opt.img_size = 608
print(opt)
from utils import torch_utils
# Import test.py to get mAP after each epoch
sys.argv[1:] = [] # delete any train.py command-line arguments before they reach test.py
import test # must follow sys.argv[1:] = []
import test
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')
DARKNET_WEIGHTS_FILENAME = 'darknet53.conv.74'
DARKNET_WEIGHTS_URL = 'https://pjreddie.com/media/files/{}'.format(
DARKNET_WEIGHTS_FILENAME
)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
if cuda:
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
if not opt.multi_scale:
def train(
net_config_path,
data_config_path,
img_size=416,
resume=False,
epochs=100,
batch_size=16,
weights_path='weights',
report=False,
multi_scale=False,
freeze_backbone=True,
var=0,
):
device = torch_utils.select_device()
print("Using device: \"{}\"".format(device))
if not multi_scale:
torch.backends.cudnn.benchmark = True
def main(opt):
os.makedirs('weights', exist_ok=True)
os.makedirs(weights_path, exist_ok=True)
latest_weights_file = os.path.join(weights_path, 'latest.pt')
best_weights_file = os.path.join(weights_path, 'best.pt')
# Configure run
data_config = parse_data_config(opt.data_config_path)
data_config = parse_data_config(data_config_path)
num_classes = int(data_config['classes'])
train_path = '../coco/trainvalno5k.txt'
train_path = data_config['train']
# Initialize model
model = Darknet(opt.cfg, opt.img_size)
model = Darknet(net_config_path, img_size)
# Get dataloader
dataloader = load_images_and_labels(train_path, batch_size=opt.batch_size, img_size=opt.img_size,
multi_scale=opt.multi_scale, augment=True)
if multi_scale: # pass maximum multi_scale size
img_size = 608
dataloader = load_images_and_labels(train_path, batch_size=batch_size, img_size=img_size,
multi_scale=multi_scale, augment=True)
lr0 = 0.001
if opt.resume:
checkpoint = torch.load('weights/latest.pt', map_location='cpu')
if resume:
checkpoint = torch.load(latest_weights_file, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if torch.cuda.device_count() > 1:
@@ -85,9 +87,13 @@ def main(opt):
best_loss = float('inf')
# Initialize model with darknet53 weights (optional)
if not os.path.isfile('weights/darknet53.conv.74'):
os.system('wget https://pjreddie.com/media/files/darknet53.conv.74 -P weights')
load_weights(model, 'weights/darknet53.conv.74')
def_weight_file = os.path.join(weights_path, DARKNET_WEIGHTS_FILENAME)
if not os.path.isfile(def_weight_file):
os.system('wget {} -P {}'.format(
DARKNET_WEIGHTS_URL,
weights_path))
assert os.path.isfile(def_weight_file)
load_weights(model, def_weight_file)
if torch.cuda.device_count() > 1:
raise Exception('Multi-GPU not currently supported: https://github.com/ultralytics/yolov3/issues/21')
@@ -106,7 +112,7 @@ def main(opt):
mean_recall, mean_precision = 0, 0
print('%11s' * 16 % (
'Epoch', 'Batch', 'x', 'y', 'w', 'h', 'conf', 'cls', 'total', 'P', 'R', 'nTargets', 'TP', 'FP', 'FN', 'time'))
for epoch in range(opt.epochs):
for epoch in range(epochs):
epoch += start_epoch
# Update scheduler (automatic)
@@ -121,7 +127,7 @@ def main(opt):
g['lr'] = lr
# Freeze darknet53.conv.74 layers for first epoch
if opt.freeze_darknet53:
if freeze_backbone is not False:
if epoch == 0:
for i, (name, p) in enumerate(model.named_parameters()):
if int(name.split('.')[1]) < 75: # if layer < 75
@@ -146,7 +152,7 @@ def main(opt):
g['lr'] = lr
# Compute loss, compute gradient, update parameters
loss = model(imgs.to(device), targets, batch_report=opt.batch_report, var=opt.var)
loss = model(imgs.to(device), targets, batch_report=report, var=var)
loss.backward()
# accumulated_batches = 1 # accumulate gradient for 4 batches before stepping optimizer
@@ -159,7 +165,7 @@ def main(opt):
for key, val in model.losses.items():
rloss[key] = (rloss[key] * ui + val) / (ui + 1)
if opt.batch_report:
if report:
TP, FP, FN = metrics
metrics += model.losses['metrics']
@@ -176,7 +182,7 @@ def main(opt):
mean_recall = recall[k].mean()
s = ('%11s%11s' + '%11.3g' * 14) % (
'%g/%g' % (epoch, opt.epochs - 1), '%g/%g' % (i, len(dataloader) - 1), rloss['x'],
'%g/%g' % (epoch, epochs - 1), '%g/%g' % (i, len(dataloader) - 1), rloss['x'],
rloss['y'], rloss['w'], rloss['h'], rloss['conf'], rloss['cls'],
rloss['loss'], mean_precision, mean_recall, model.losses['nT'], model.losses['TP'],
model.losses['FP'], model.losses['FN'], time.time() - t1)
@@ -193,19 +199,32 @@ def main(opt):
'best_loss': best_loss,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(checkpoint, 'weights/latest.pt')
torch.save(checkpoint, latest_weights_file)
# Save best checkpoint
if best_loss == loss_per_target:
os.system('cp weights/latest.pt weights/best.pt')
os.system('cp {} {}'.format(
latest_weights_file,
best_weights_file,
))
# Save backup weights every 5 epochs
if (epoch > 0) & (epoch % 5 == 0):
os.system('cp weights/latest.pt weights/backup' + str(epoch) + '.pt')
backup_file_name = 'backup{}.pt'.format(epoch)
backup_file_path = os.path.join(weights_path, backup_file_name)
os.system('cp {} {}'.format(
latest_weights_file,
backup_file_path,
))
# Calculate mAP
test.opt.weights_path = 'weights/latest.pt'
mAP, R, P = test.main(test.opt)
mAP, R, P = test.test(
net_config_path,
data_config_path,
latest_weights_file,
batch_size=batch_size,
img_size=img_size,
)
# Write epoch results
with open('results.txt', 'a') as file:
@@ -217,5 +236,35 @@ def main(opt):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100, help='number of epochs')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--data-config', type=str, default='cfg/coco.data', help='path to data config file')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--multi-scale', default=False, help='random image sizes per batch 320 - 608')
parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
parser.add_argument('--weights-path', type=str, default='weights', help='path to store weights')
parser.add_argument('--resume', action='store_true', help='resume training flag')
parser.add_argument('--report', action='store_true', help='report TP, FP, FN, P and R per batch (slower)')
parser.add_argument('--freeze', action='store_true', help='freeze darknet53.conv.74 layers for first epoch')
parser.add_argument('--var', type=float, default=0, help='optional test variable')
opt = parser.parse_args()
print(opt, end='\n\n')
init_seeds()
torch.cuda.empty_cache()
main(opt)
train(
opt.cfg,
opt.data_config,
img_size=opt.img_size,
resume=opt.resume,
epochs=opt.epochs,
batch_size=opt.batch_size,
weights_path=opt.weights_path,
report=opt.report,
multi_scale=opt.multi_scale,
freeze_backbone=opt.freeze,
var=opt.var,
)
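Note: the defaults differ between the two entry points above. `train()` itself declares `freeze_backbone=True`, while the CLI's `--freeze` is a `store_true` flag defaulting to `False`, and the guard inside `train()` is `if freeze_backbone is not False:`. So the darknet53 backbone is frozen for the first epoch by default only when `train()` is called directly from Python, not when `train.py` is run from the command line. A tiny stub (hypothetical `train_stub`) illustrating the difference:

```python
def train_stub(freeze_backbone=True):
    # Mirrors the guard in train(): anything other than the literal False freezes.
    return 'frozen for epoch 0' if freeze_backbone is not False else 'not frozen'

print(train_stub())                       # direct call, default True -> frozen
print(train_stub(freeze_backbone=False))  # CLI without --freeze      -> not frozen
print(train_stub(freeze_backbone=True))   # CLI with --freeze         -> frozen
```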

utils/gcp.sh (Normal file → Executable file)

@@ -4,28 +4,28 @@
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3 && python3 train.py
# Resume
python3 train.py -resume 1
python3 train.py --resume
# Detect
gsutil cp gs://ultralytics/yolov3.pt yolov3/weights
python3 detect.py
# Test
python3 test.py -img_size 416 -weights_path weights/latest.pt
python3 test.py --img_size 416 --weights weights/latest.pt
# Test Darknet
python3 test.py -img_size 416 -weights_path ../darknet/backup/yolov3.backup
python3 test.py --img_size 416 --weights ../darknet/backup/yolov3.backup
# Download and Test
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
wget https://pjreddie.com/media/files/yolov3.weights -P weights
python3 test.py -img_size 416 -weights_path weights/backup5.pt -nms_thres 0.45
python3 test.py --img_size 416 --weights weights/backup5.pt --nms_thres 0.45
# Download and Resume
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
wget https://storage.googleapis.com/ultralytics/yolov3.pt -O weights/latest.pt
python3 train.py -img_size 416 -batch_size 16 -epochs 1 -resume 1
python3 test.py -img_size 416 -weights_path weights/latest.pt -conf_thres 0.5
python3 train.py --img_size 416 --batch_size 16 --epochs 1 --resume
python3 test.py --img_size 416 --weights weights/latest.pt --conf_thres 0.5
# Copy latest.pt to bucket
gsutil cp yolov3/weights/latest.pt gs://ultralytics
@@ -36,6 +36,6 @@ wget https://storage.googleapis.com/ultralytics/latest.pt
# Testing
sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
python3 train.py -epochs 3 -var 64
python3 train.py --epochs 3 --var 64
sudo shutdown

utils/torch_utils.py (new file)

@@ -0,0 +1,23 @@
import torch
def check_cuda():
return torch.cuda.is_available()
CUDA_AVAILABLE = check_cuda()
def init_seeds(seed=0):
torch.manual_seed(seed)
if CUDA_AVAILABLE:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def select_device(force_cpu=False):
if force_cpu:
device = torch.device('cpu')
else:
device = torch.device('cuda:0' if CUDA_AVAILABLE else 'cpu')
return device
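Note: this new helper module replaces the per-script `cuda`/`device` boilerplate removed from `detect.py`, `test.py`, and `train.py` above. A minimal usage sketch (assuming the repo root is on `PYTHONPATH`):

```python
import torch
from utils import torch_utils
from utils.utils import init_seeds  # also seeds random and numpy before delegating to torch_utils

init_seeds(0)                               # reproducible runs
device = torch_utils.select_device()        # cuda:0 if available, otherwise cpu
print('Using device: "{}"'.format(device))

x = torch.zeros(1, 3, 416, 416).to(device)  # e.g. a 416x416 RGB input batch
```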


@@ -5,11 +5,19 @@ import numpy as np
import torch
import torch.nn.functional as F
from utils import torch_utils
# Set printoptions
torch.set_printoptions(linewidth=1320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch_utils.init_seeds(seed=seed)
def load_classes(path):
"""
Loads class labels at 'path'