From d661fba8ae27b2c08b3ccc804099dc75beeadb93 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 11:48:50 +0200
Subject: [PATCH 01/24] updates

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index b791d7e4..306a8c64 100644
--- a/train.py
+++ b/train.py
@@ -70,7 +70,7 @@ def train(
             cutoff = load_darknet_weights(model, weights + 'yolov3-tiny.conv.15')
 
         # Set optimizer
-        optimizer = torch.optim.SGD(model.parameters(), lr=lr0, momentum=.9)
+        optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=lr0, momentum=.9)
 
     if torch.cuda.device_count() > 1:
         model = nn.DataParallel(model)
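The one-line change in PATCH 01 hands the optimizer only the parameters that still require gradients, which matters once backbone layers are frozen for transfer learning (PyTorch versions of this era reject frozen parameters passed to an optimizer outright). A minimal sketch of the same pattern, using a hypothetical two-layer module as a stand-in for the repository's Darknet class:

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 2))  # stand-in for Darknet
    for p in model[0].parameters():
        p.requires_grad = False  # freeze the "backbone" half

    # hand SGD only the parameters that still require gradients
    optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()),
                                lr=0.001, momentum=0.9)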
From 03791babfb3462cba287155d45107124283717c4 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 12:08:55 +0200
Subject: [PATCH 02/24] Merge branch 'master' of
 /Users/glennjocher/PycharmProjects/yolov3 with conflicts.

---
 train.py     | 45 +++++++++++++++++++++++----------------------
 utils/gcp.sh |  2 +-
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/train.py b/train.py
index 27647173..db453b4c 100644
--- a/train.py
+++ b/train.py
@@ -7,6 +7,7 @@ from utils.datasets import *
 from utils.utils import *
 
 
+# @profile
 def train(
     cfg,
     data_cfg,
@@ -34,47 +35,39 @@ def train(
     # Initialize model
     model = Darknet(cfg, img_size).to(device)
 
+    # Optimizer
+    lr0 = 0.001  # initial learning rate
+    optimizer = torch.optim.SGD(model.parameters(), lr=lr0, momentum=.9)
+
     # Get dataloader
     dataloader = LoadImagesAndLabels(train_path, batch_size, img_size, augment=True)
-    # dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size, num_workers=0)
+    # from torch.utils.data import DataLoader
+    # dataloader = DataLoader(dataloader, batch_size=batch_size, num_workers=1)
 
-    lr0 = 0.001  # initial learning rate
     cutoff = -1  # backbone reaches to cutoff layer
     start_epoch = 0
     best_loss = float('inf')
-    if resume:
-        checkpoint = torch.load(latest, map_location=device)
-
-        # Load weights to resume from
+    if resume:  # Load previously saved PyTorch model
+        checkpoint = torch.load(latest, map_location=device)  # load checkpoin
         model.load_state_dict(checkpoint['model'])
-
-        # Transfer learning (train only YOLO layers)
-        # for i, (name, p) in enumerate(model.named_parameters()):
-        #     p.requires_grad = True if (p.shape[0] == 255) else False
-
-        # Set optimizer
-        optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=lr0, momentum=.9)
-
         start_epoch = checkpoint['epoch'] + 1
+
         if checkpoint['optimizer'] is not None:
             optimizer.load_state_dict(checkpoint['optimizer'])
             best_loss = checkpoint['best_loss']
-
         del checkpoint  # current, saved
-    else:
-        # Initialize model with backbone (optional)
+
+    else:  # Initialize model with backbone (optional)
         if cfg.endswith('yolov3.cfg'):
             cutoff = load_darknet_weights(model, weights + 'darknet53.conv.74')
         elif cfg.endswith('yolov3-tiny.cfg'):
             cutoff = load_darknet_weights(model, weights + 'yolov3-tiny.conv.15')
 
-        # Set optimizer
-        optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=lr0, momentum=.9)
-
     if torch.cuda.device_count() > 1:
         model = nn.DataParallel(model)
-    model.to(device).train()
+
+    # # Transfer learning (train only YOLO layers)
+    for i, (name, p) in enumerate(model.named_parameters()):
+        p.requires_grad = True if (p.shape[0] == 255) else False
 
     # Set scheduler
     # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[54, 61], gamma=0.1)
@@ -110,6 +103,11 @@ def train(
         ui = -1
         rloss = defaultdict(float)
         for i, (imgs, targets, _, _) in enumerate(dataloader):
+
+            if targets.shape[1] == 100:  # multithreaded forced to 100
+                targets = targets.view((-1, 6))
+                targets = targets[targets[:, 5].nonzero().squeeze()]
+
             targets = targets.to(device)
             nT = targets.shape[0]
             if nT == 0:  # if no targets continue
                 continue
@@ -157,6 +155,9 @@ def train(
             dataloader.img_size = random.choice(range(10, 20)) * 32
             print('multi_scale img_size = %g' % dataloader.img_size)
 
+        if i == 10:
+            return
+
         # Update best loss
         if rloss['total'] < best_loss:
             best_loss = rloss['total']
@@ -191,7 +192,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--epochs', type=int, default=270, help='number of epochs')
-    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
+    parser.add_argument('--batch-size', type=int, default=2, help='size of each image batch')
     parser.add_argument('--accumulate', type=int, default=1, help='accumulate gradient x batches before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
     parser.add_argument('--data-cfg', type=str, default='cfg/coco.data', help='coco.data file path')
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 5cc27849..6d46f236 100755
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -9,7 +9,7 @@ sudo shutdown
 # Start
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
 cp -r weights yolov3
-cd yolov3 && python3 train.py --batch-size 26
+cd yolov3 && python3 train.py --batch-size 64 --multi_scale
 
 # Resume
 python3 train.py --resume

From aecf840701039ff46b16fd352d1dc9a892ec3578 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 12:09:54 +0200
Subject: [PATCH 03/24] updates

---
 train.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/train.py b/train.py
index db453b4c..572ff973 100644
--- a/train.py
+++ b/train.py
@@ -7,7 +7,6 @@ from utils.datasets import *
 from utils.utils import *
 
 
-# @profile
 def train(
     cfg,
     data_cfg,
@@ -155,9 +154,6 @@ def train(
             dataloader.img_size = random.choice(range(10, 20)) * 32
             print('multi_scale img_size = %g' % dataloader.img_size)
 
-        if i == 10:
-            return
-
         # Update best loss
         if rloss['total'] < best_loss:
             best_loss = rloss['total']
@@ -192,7 +188,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--epochs', type=int, default=270, help='number of epochs')
-    parser.add_argument('--batch-size', type=int, default=2, help='size of each image batch')
+    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
     parser.add_argument('--accumulate', type=int, default=1, help='accumulate gradient x batches before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--data-cfg', type=str, default='cfg/coco.data', help='coco.data file path')

From 2856af5036de44188a18928368dbd726b42bbacb Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 12:11:08 +0200
Subject: [PATCH 04/24] updates

---
 train.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/train.py b/train.py
index 572ff973..f37bae87 100644
--- a/train.py
+++ b/train.py
@@ -64,9 +64,9 @@ def train(
     if torch.cuda.device_count() > 1:
         model = nn.DataParallel(model)
 
-    # # Transfer learning (train only YOLO layers)
-    for i, (name, p) in enumerate(model.named_parameters()):
-        p.requires_grad = True if (p.shape[0] == 255) else False
+    # Transfer learning (train only YOLO layers)
+    # for i, (name, p) in enumerate(model.named_parameters()):
+    #     p.requires_grad = True if (p.shape[0] == 255) else False
 
     # Set scheduler
     # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[54, 61], gamma=0.1)

From be38caf2841c1614a850224b4861101a86f24ab3 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 12:13:09 +0200
Subject: [PATCH 05/24] updates

---
 train.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/train.py b/train.py
index f37bae87..9efbb7f4 100644
--- a/train.py
+++ b/train.py
@@ -103,11 +103,10 @@ def train(
         rloss = defaultdict(float)
         for i, (imgs, targets, _, _) in enumerate(dataloader):
-
             if targets.shape[1] == 100:  # multithreaded 100-size block
                 targets = targets.view((-1, 6))
                 targets = targets[targets[:, 5].nonzero().squeeze()]
 
-            targets = targets.to(device)
             nT = targets.shape[0]
             if nT == 0:  # if no targets continue
                 continue
@@ -122,7 +121,7 @@ def train(
             pred = model(imgs.to(device))
 
             # Build targets
-            target_list = build_targets(model, targets, pred)
+            target_list = build_targets(model, targets.to(device), pred)
 
             # Compute loss
             loss, loss_dict = compute_loss(pred, target_list)

From 56d5b2fcc05e8828821558b493831e789ec1130a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 13:00:24 +0200
Subject: [PATCH 06/24] Update README.md

---
 README.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 258e26af..50de72b3 100755
--- a/README.md
+++ b/README.md
@@ -80,11 +80,14 @@ GPUs | `batch_size` | speed | COCO epoch
 
 Run `detect.py` to apply trained weights to an image, such as `zidane.jpg` from the `data/samples` folder:
 
-**YOLOv3:** `detect.py --cfg cfg/yolov3.cfg --weights weights/yolov3.pt`
-
+**YOLOv3:** `python3 detect.py --cfg cfg/yolov3.cfg --weights weights/yolov3.pt`
+
-**YOLOv3-tiny:** `detect.py --cfg cfg/yolov3-tiny.cfg --weights weights/yolov3-tiny.pt`
-
+**YOLOv3-tiny:** `python3 detect.py --cfg cfg/yolov3-tiny.cfg --weights weights/yolov3-tiny.pt`
+
+
+**YOLOv3-SPP:** `python3 detect.py --cfg cfg/yolov3-spp.cfg --weights weights/yolov3-spp.pt`
+
 
 ## Webcam

From 0bb3cc100a3c05c22f444aab788ad2470895227b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 13:01:07 +0200
Subject: [PATCH 07/24] Update README.md

---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 50de72b3..ff7d9134 100755
--- a/README.md
+++ b/README.md
@@ -80,13 +80,13 @@ GPUs | `batch_size` | speed | COCO epoch
 
 Run `detect.py` to apply trained weights to an image, such as `zidane.jpg` from the `data/samples` folder:
 
-**YOLOv3:** `python3 detect.py --cfg cfg/yolov3.cfg --weights weights/yolov3.pt`
+**YOLOv3:** `python3 detect.py --cfg cfg/yolov3.cfg --weights weights/yolov3.weights`
 
-**YOLOv3-tiny:** `python3 detect.py --cfg cfg/yolov3-tiny.cfg --weights weights/yolov3-tiny.pt`
+**YOLOv3-tiny:** `python3 detect.py --cfg cfg/yolov3-tiny.cfg --weights weights/yolov3-tiny.weights`
 
-**YOLOv3-SPP:** `python3 detect.py --cfg cfg/yolov3-spp.cfg --weights weights/yolov3-spp.pt`
+**YOLOv3-SPP:** `python3 detect.py --cfg cfg/yolov3-spp.cfg --weights weights/yolov3-spp.weights`
 
 ## Webcam

From 70fe2204b4250c238be4c32e65f8038a297059cf Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 14:48:40 +0200
Subject: [PATCH 08/24] multi_thread dataloader

---
 models.py         |  3 ---
 train.py          | 19 ++++++++++++-------
 utils/datasets.py | 11 ++++++++---
 utils/utils.py    |  2 +-
 4 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/models.py b/models.py
index ef417603..67e8a83d 100755
--- a/models.py
+++ b/models.py
@@ -174,9 +174,6 @@ class Darknet(nn.Module):
         self.module_defs[0]['cfg'] = cfg_path
         self.module_defs[0]['height'] = img_size
         self.hyperparams, self.module_list = create_modules(self.module_defs)
-        self.img_size = img_size
-        self.loss_names = ['loss', 'xy', 'wh', 'conf', 'cls', 'nT']
-        self.losses = []
 
     def forward(self, x, var=None):
         img_size = x.shape[-1]
diff --git a/train.py b/train.py
index 9efbb7f4..52f22b3e 100644
--- a/train.py
+++ b/train.py
@@ -1,6 +1,8 @@
 import argparse
 import time
 
+from torch.utils.data import DataLoader
+
 import test  # Import test.py to get mAP after each epoch
 from models import *
 from utils.datasets import *
@@ -17,6 +19,7 @@ def train(
         accumulate=1,
         multi_scale=False,
         freeze_backbone=False,
+        num_workers=0
 ):
     weights = 'weights' + os.sep
     latest = weights + 'latest.pt'
@@ -38,10 +41,11 @@ def train(
     lr0 = 0.001  # initial learning rate
     optimizer = torch.optim.SGD(model.parameters(), lr=lr0, momentum=.9)
 
-    # Get dataloader
-    dataloader = LoadImagesAndLabels(train_path, batch_size, img_size, augment=True)
-    # from torch.utils.data import DataLoader
-    # dataloader = DataLoader(dataloader, batch_size=batch_size, num_workers=1)
+    # Dataloader
+    if num_workers > 0:
+        cv2.setNumThreads(0)  # to prevent OpenCV from multithreading
+    dataset = LoadImagesAndLabels(train_path, img_size=img_size, augment=True)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
 
     cutoff = -1  # backbone reaches to cutoff layer
     start_epoch = 0
     best_loss = float('inf')
@@ -102,7 +106,6 @@ def train(
         ui = -1
         rloss = defaultdict(float)
         for i, (imgs, targets, _, _) in enumerate(dataloader):
-
             if targets.shape[1] == 100:  # multithreaded 100-size block
                 targets = targets.view((-1, 6))
                 targets = targets[targets[:, 5].nonzero().squeeze()]
@@ -150,8 +153,8 @@ def train(
 
             # Multi-Scale training (320 - 608 pixels) every 10 batches
             if multi_scale and (i + 1) % 10 == 0:
-                dataloader.img_size = random.choice(range(10, 20)) * 32
-                print('multi_scale img_size = %g' % dataloader.img_size)
+                dataset.img_size = random.choice(range(10, 20)) * 32
+                print('multi_scale img_size = %g' % dataset.img_size)
@@ -194,6 +197,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
+    parser.add_argument('--num_workers', type=int, default=0, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')
@@ -208,4 +212,5 @@ if __name__ == '__main__':
         batch_size=opt.batch_size,
         accumulate=opt.accumulate,
         multi_scale=opt.multi_scale,
+        num_workers=opt.num_workers
     )
diff --git a/utils/datasets.py b/utils/datasets.py
index 31fc166b..2280c6a1 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -7,7 +7,6 @@ import cv2
 import numpy as np
 import torch
 
-# from torch.utils.data import Dataset
 from utils.utils import xyxy2xywh
@@ -114,10 +113,11 @@ class LoadImagesAndLabels:  # for training
     def __getitem__(self, index):
         imgs, labels0, img_paths, img_shapes = self.load_images(index, index + 1)
 
-        labels0[:,0] = index % self.batch_size
+        labels0[:, 0] = index % self.batch_size
         labels = torch.zeros(100, 6)
         labels[:min(len(labels0), 100)] = labels0  # max 100 labels per image
+
         return imgs.squeeze(0), labels, img_paths, img_shapes
 
     def __next__(self):
@@ -225,7 +225,12 @@ class LoadImagesAndLabels:  # for training
         img_all = np.ascontiguousarray(img_all, dtype=np.float32)  # uint8 to float32
         img_all /= 255.0  # 0 - 255 to 0.0 - 1.0
 
-        labels_all = torch.from_numpy(np.concatenate(labels_all, 0))
+        if len(labels_all) > 0:
+            labels_all = np.concatenate(labels_all, 0)
+        else:
+            labels_all = np.zeros((1, 6), dtype='float32')
+
+        labels_all = torch.from_numpy(labels_all)
         return torch.from_numpy(img_all), labels_all, img_paths, img_shapes
 
     def __len__(self):
diff --git a/utils/utils.py b/utils/utils.py
index 1e5e691f..7c3d9929 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -40,7 +40,7 @@ def model_info(model):
     print('\n%5s %38s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
     for i, (name, p) in enumerate(model.named_parameters()):
         name = name.replace('module_list.', '')
-        print('%5g %38s %9s %12g %20s %12.3g %12.3g' % (
+        print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (
             i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
     print('Model Summary: %g layers, %g parameters, %g gradients' % (i + 1, n_p, n_g))
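PATCH 08 swaps the hand-rolled batch iterator for torch.utils.data.DataLoader, so image loading and augmentation can run in worker processes; the cv2.setNumThreads(0) call follows the patch's own comment about preventing OpenCV from spinning up its own threads inside those workers. A self-contained sketch of the same pipeline shape, with a toy dataset standing in for LoadImagesAndLabels (the main-guard is needed on platforms that spawn workers):

    import torch
    from torch.utils.data import DataLoader, Dataset

    class ToyDataset(Dataset):  # stand-in for LoadImagesAndLabels
        def __len__(self):
            return 8

        def __getitem__(self, i):
            # one image plus a fixed-size (100, 6) label block, as in the patch
            return torch.zeros(3, 416, 416), torch.zeros(100, 6)

    if __name__ == '__main__':
        loader = DataLoader(ToyDataset(), batch_size=4, num_workers=2)
        for imgs, labels in loader:
            print(imgs.shape, labels.shape)  # [4, 3, 416, 416] and [4, 100, 6]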
From a3067e7978d64c7290868b5685b04d7cda5005e4 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 14:57:41 +0200
Subject: [PATCH 09/24] multi_thread dataloader

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 52f22b3e..0e8f1e28 100644
--- a/train.py
+++ b/train.py
@@ -197,7 +197,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
-    parser.add_argument('--num_workers', type=int, default=0, help='number of Pytorch DataLoader workers')
+    parser.add_argument('--num_workers', type=int, default=4, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')

From a024286ec12145c64bf8733f0842386cfce6d953 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 15:05:20 +0200
Subject: [PATCH 10/24] updates

---
 utils/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/utils.py b/utils/utils.py
index 7c3d9929..de79eac6 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -37,7 +37,7 @@ def model_info(model):
     # Plots a line-by-line description of a PyTorch model
     n_p = sum(x.numel() for x in model.parameters())  # number parameters
     n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
-    print('\n%5s %38s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+    print('\n%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
     for i, (name, p) in enumerate(model.named_parameters()):
         name = name.replace('module_list.', '')
         print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (

From aa95302880b9787916aa99e5ad9eba947f19073a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 15:15:26 +0200
Subject: [PATCH 11/24] Update README.md

---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index ff7d9134..5d1261aa 100755
--- a/README.md
+++ b/README.md
@@ -64,7 +64,7 @@ HS**V** Intensity | +/- 50%
 ## Speed
 
 https://cloud.google.com/deep-learning-vm/
-**Machine type:** n1-highmem-4 (4 vCPUs, 26 GB memory)
+**Machine type:** n1-standard-8 (8 vCPUs, 30 GB memory)
 **CPU platform:** Intel Skylake
 **GPUs:** 1-4 x NVIDIA Tesla P100
 **HDD:** 100 GB SSD
@@ -72,9 +72,9 @@
 GPUs | `batch_size` | speed | COCO epoch
 --- |---| --- | ---
 (P100) | (images) | (s/batch) | (min/epoch)
-1 | 16 | 0.54s | 66min
-2 | 32 | 0.99s | 61min
-4 | 64 | 1.61s | 49min
+1 | 16 | 0.39s | 48min
+2 | 32 | 0.48s | 29min
+4 | 64 | 0.65s | 20min
 
 # Inference

From d047062074456a5618e1135d54368826892ffc17 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 17:26:31 +0200
Subject: [PATCH 12/24] updates

---
 train.py          | 2 +-
 utils/datasets.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/train.py b/train.py
index 0e8f1e28..4f621388 100644
--- a/train.py
+++ b/train.py
@@ -51,7 +51,7 @@ def train(
     start_epoch = 0
     best_loss = float('inf')
     if resume:  # Load previously saved PyTorch model
-        checkpoint = torch.load(latest, map_location=device)  # load checkpoin
+        checkpoint = torch.load(latest, map_location=device)  # load checkpoint
         model.load_state_dict(checkpoint['model'])
         start_epoch = checkpoint['epoch'] + 1
         if checkpoint['optimizer'] is not None:
diff --git a/utils/datasets.py b/utils/datasets.py
index 2280c6a1..655072dc 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -108,7 +108,7 @@ class LoadImagesAndLabels:  # for training
 
     def __iter__(self):
         self.count = -1
-        self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
+        #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
         return self
 
     def __getitem__(self, index):
@@ -133,8 +133,8 @@ class LoadImagesAndLabels:  # for training
     def load_images(self, ia, ib):
         img_all, labels_all, img_paths, img_shapes = [], [], [], []
         for index, files_index in enumerate(range(ia, ib)):
-            img_path = self.img_files[self.shuffled_vector[files_index]]
-            label_path = self.label_files[self.shuffled_vector[files_index]]
+            img_path = self.img_files[files_index]
+            label_path = self.label_files[files_index]
 
             img = cv2.imread(img_path)  # BGR
             assert img is not None, 'File Not Found ' + img_path

From 8ebb4da5ccd8c4cef48058a6d95b93699944a037 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 17:28:26 +0200
Subject: [PATCH 13/24] updates

---
 utils/datasets.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index 655072dc..90b67e54 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -104,7 +104,6 @@ class LoadImagesAndLabels:  # for training
         self.batch_size = batch_size
         self.img_size = img_size
         self.augment = augment
-        iter(self)
 
     def __iter__(self):
         self.count = -1

From 1e62e9418580daea3297d8b5cc91995b77b8db87 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 18:30:14 +0200
Subject: [PATCH 14/24] Update train.py

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 4f621388..41f54eae 100644
--- a/train.py
+++ b/train.py
@@ -197,7 +197,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
-    parser.add_argument('--num_workers', type=int, default=4, help='number of Pytorch DataLoader workers')
+    parser.add_argument('--num_workers', type=int, default=0, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')

From 6aef4e6a78a86ec93ea2dfb5721d4c373a50cb0d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 22:41:12 +0200
Subject: [PATCH 15/24] updates

---
 test.py           |  21 +++--
 train.py          |  30 ++++---
 utils/datasets.py | 195 +++++++++++++++++-----------------------
 utils/utils.py    |   3 +
 4 files changed, 109 insertions(+), 140 deletions(-)

diff --git a/test.py b/test.py
index b614eae6..a4bba9a3 100644
--- a/test.py
+++ b/test.py
@@ -3,6 +3,8 @@ import json
 import time
 from pathlib import Path
 
+from torch.utils.data import DataLoader
+
 from models import *
 from utils.datasets import *
 from utils.utils import *
@@ -39,16 +41,21 @@ def test(
 
     model.to(device).eval()
 
-    # Get dataloader
-    # dataloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path), batch_size=batch_size)
-    dataloader = LoadImagesAndLabels(test_path, batch_size=batch_size, img_size=img_size)
+    # Dataloader
+    dataset = LoadImagesAndLabels(test_path, img_size=img_size)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
 
     mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
     print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
     mP, mR, mAPs, TP, jdict = [], [], [], [], []
     AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
     coco91class = coco80_to_coco91_class()
-    for (imgs, targets, paths, shapes) in dataloader:
+    for imgs, targets, paths, shapes in dataloader:
+        # Unpad and collate targets
+        for j, t in enumerate(targets):
+            t[:, 0] = j
+        targets = torch.cat([t[t[:, 5].nonzero()] for t in targets], 0).squeeze(1)
+
         targets = targets.to(device)
         t = time.time()
         output = model(imgs.to(device))
@@ -71,7 +78,7 @@ def test(
             if save_json:  # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                 box = detections[:, :4].clone()  # xyxy
-                scale_coords(img_size, box, shapes[si])  # to original shape
+                scale_coords(img_size, box, (shapes[0][si], shapes[1][si]))  # to original shape
                 box = xyxy2xywh(box)  # xywh
                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
@@ -129,7 +136,7 @@ def test(
 
         # Print image mAP and running mean mAP
         print(('%11s%11s' + '%11.3g' * 4 + 's') %
-              (seen, dataloader.nF, mean_P, mean_R, mean_mAP, time.time() - t))
+              (seen, len(dataset), mean_P, mean_R, mean_mAP, time.time() - t))
 
     # Print mAP per class
     print('\nmAP Per Class:')
@@ -139,7 +146,7 @@ def test(
 
     # Save JSON
     if save_json:
-        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.img_files]
+        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataset.img_files]
         with open('results.json', 'w') as file:
             json.dump(jdict, file)
diff --git a/train.py b/train.py
index 4f621388..4406dfca 100644
--- a/train.py
+++ b/train.py
@@ -42,10 +42,8 @@ def train(
     optimizer = torch.optim.SGD(model.parameters(), lr=lr0, momentum=.9)
 
     # Dataloader
-    if num_workers > 0:
-        cv2.setNumThreads(0)  # to prevent OpenCV from multithreading
     dataset = LoadImagesAndLabels(train_path, img_size=img_size, augment=True)
-    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
 
     cutoff = -1  # backbone reaches to cutoff layer
     start_epoch = 0
     best_loss = float('inf')
@@ -102,17 +100,28 @@ def train(
                 if int(name.split('.')[1]) < cutoff:  # if layer < 75
                     p.requires_grad = False if (epoch == 0) else True
 
-        ui = -1
         rloss = defaultdict(float)
         for i, (imgs, targets, _, _) in enumerate(dataloader):
-            if targets.shape[1] == 100:  # multithreaded 100-size block
-                targets = targets.view((-1, 6))
-                targets = targets[targets[:, 5].nonzero().squeeze()]
+            # Unpad and collate targets
+            for j, t in enumerate(targets):
+                t[:, 0] = j
+            targets = torch.cat([t[t[:, 5].nonzero()] for t in targets], 0).squeeze(1)
 
-            nT = targets.shape[0]
+            nT = len(targets)
             if nT == 0:  # if no targets continue
                 continue
 
+            # Plot images with bounding boxes
+            plot_images = False
+            if plot_images:
+                import matplotlib.pyplot as plt
+                plt.figure(figsize=(10, 10))
+                for ip in range(batch_size):
+                    labels = xywh2xyxy(targets[targets[:, 0] == ip, 2:6]).numpy() * img_size
+                    plt.subplot(3, 3, ip + 1).imshow(imgs[ip].numpy().transpose(1, 2, 0))
+                    plt.plot(labels[:, [0, 2, 2, 0, 0]].T, labels[:, [1, 1, 3, 3, 1]].T, '.-')
+                    plt.axis('off')
+
             # SGD burn-in
             if (epoch == 0) and (i <= n_burnin):
                 lr = lr0 * (i / n_burnin) ** 4
@@ -141,9 +150,8 @@ def train(
             optimizer.zero_grad()
 
             # Running epoch-means of tracked metrics
-            ui += 1
             for key, val in loss_dict.items():
-                rloss[key] = (rloss[key] * ui + val) / (ui + 1)
+                rloss[key] = (rloss[key] * i + val) / (i + 1)
 
             s = ('%8s%12s' + '%10.3g' * 7) % (
                 '%g/%g' % (epoch, epochs - 1),
@@ -197,7 +205,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
-    parser.add_argument('--num_workers', type=int, default=4, help='number of Pytorch DataLoader workers')
+    parser.add_argument('--num_workers', type=int, default=0, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')
diff --git a/utils/datasets.py b/utils/datasets.py
index 90b67e54..6ccefcc3 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -6,6 +6,7 @@ import random
 import cv2
 import numpy as np
 import torch
+from torch.utils.data import Dataset
 
 from utils.utils import xyxy2xywh
@@ -88,152 +89,102 @@ class LoadWebcam:  # for inference
         return 0
 
 
-class LoadImagesAndLabels:  # for training
-    def __init__(self, path, batch_size=1, img_size=608, augment=False):
+class LoadImagesAndLabels(Dataset):  # for training/testing
+    def __init__(self, path, img_size=416, augment=False):
         with open(path, 'r') as file:
             self.img_files = file.read().splitlines()
             self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
-
-        self.nF = len(self.img_files)  # number of image files
-        self.nB = math.ceil(self.nF / batch_size)  # number of batches
-        assert self.nF > 0, 'No images found in %s' % path
-
+        assert len(self.img_files) > 0, 'No images found in %s' % path
+        self.img_size = img_size
+        self.augment = augment
         self.label_files = [x.replace('images', 'labels').replace('.png', '.txt').replace('.jpg', '.txt')
                             for x in self.img_files]
 
-        self.batch_size = batch_size
-        self.img_size = img_size
-        self.augment = augment
-
-    def __iter__(self):
-        self.count = -1
-        #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
-        return self
+    def __len__(self):
+        return len(self.img_files)
 
     def __getitem__(self, index):
-        imgs, labels0, img_paths, img_shapes = self.load_images(index, index + 1)
+        img_path = self.img_files[index]
+        label_path = self.label_files[index]
 
-        labels0[:, 0] = index % self.batch_size
-        labels = torch.zeros(100, 6)
-        labels[:min(len(labels0), 100)] = labels0  # max 100 labels per image
+        img = cv2.imread(img_path)  # BGR
+        assert img is not None, 'File Not Found ' + img_path
 
-        return imgs.squeeze(0), labels, img_paths, img_shapes
+        augment_hsv = True
+        if self.augment and augment_hsv:
+            # SV augmentation by 50%
+            fraction = 0.50
+            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+            S = img_hsv[:, :, 1].astype(np.float32)
+            V = img_hsv[:, :, 2].astype(np.float32)
 
-    def __next__(self):
-        self.count += 1  # batches
-        if self.count >= self.nB:
-            raise StopIteration
+            a = (random.random() * 2 - 1) * fraction + 1
+            S *= a
+            if a > 1:
+                np.clip(S, a_min=0, a_max=255, out=S)
 
-        ia = self.count * self.batch_size  # start index
-        ib = min(ia + self.batch_size, self.nF)  # end index
+            a = (random.random() * 2 - 1) * fraction + 1
+            V *= a
+            if a > 1:
+                np.clip(V, a_min=0, a_max=255, out=V)
 
-        return self.load_images(ia, ib)
+            img_hsv[:, :, 1] = S.astype(np.uint8)
+            img_hsv[:, :, 2] = V.astype(np.uint8)
+            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
 
-    def load_images(self, ia, ib):
-        img_all, labels_all, img_paths, img_shapes = [], [], [], []
-        for index, files_index in enumerate(range(ia, ib)):
-            img_path = self.img_files[files_index]
-            label_path = self.label_files[files_index]
+        h, w, _ = img.shape
+        img, ratio, padw, padh = letterbox(img, height=self.img_size)
 
-            img = cv2.imread(img_path)  # BGR
-            assert img is not None, 'File Not Found ' + img_path
+        # Load labels
+        if os.path.isfile(label_path):
+            # labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 5)  # SLOWER
+            with open(label_path, 'r') as file:
+                lines = file.read().splitlines()
+            labels0 = np.array([x.split() for x in lines], dtype=np.float32)
 
-            augment_hsv = True
-            if self.augment and augment_hsv:
-                # SV augmentation by 50%
-                fraction = 0.50
-                img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
-                S = img_hsv[:, :, 1].astype(np.float32)
-                V = img_hsv[:, :, 2].astype(np.float32)
+            # Normalized xywh to pixel xyxy format
+            labels = labels0.copy()
+            labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw
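+            labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh
+            labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw
+            labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh
+        else:
+            labels = np.array([])

-                a = (random.random() * 2 - 1) * fraction + 1
-                S *= a
-                if a > 1:
-                    np.clip(S, a_min=0, a_max=255, out=S)
+        # Augment image and labels
+        if self.augment:
+            img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.90, 1.10))

-                a = (random.random() * 2 - 1) * fraction + 1
-                V *= a
-                if a > 1:
-                    np.clip(V, a_min=0, a_max=255, out=V)
+        nL = len(labels)
+        if nL > 0:
+            # convert xyxy to xywh
+            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5].copy()) / self.img_size

-                img_hsv[:, :, 1] = S.astype(np.uint8)
-                img_hsv[:, :, 2] = V.astype(np.uint8)
-                cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
+        if self.augment:
+            # random left-right flip
+            lr_flip = True
+            if lr_flip & (random.random() > 0.5):
+                img = np.fliplr(img)
+                if nL > 0:
+                    labels[:, 1] = 1 - labels[:, 1]

-                h, w, _ = img.shape
-                img, ratio, padw, padh = letterbox(img, height=self.img_size)
+            # random up-down flip
+            ud_flip = False
+            if ud_flip & (random.random() > 0.5):
+                img = np.flipud(img)
+                if nL > 0:
+                    labels[:, 2] = 1 - labels[:, 2]

-            # Load labels
-            if os.path.isfile(label_path):
-                # labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 5)  # SLOWER
-                with open(label_path, 'r') as file:
-                    lines = file.read().splitlines()
-                labels0 = np.array([x.split() for x in lines], dtype=np.float32)
-
-                # Normalized xywh to pixel xyxy format
-                labels = labels0.copy()
-                labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw
-                labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh
-                labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw
-                labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh
-            else:
-                labels = np.array([])
-
-            # Augment image and labels
-            if self.augment:
-                img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.90, 1.10))
-
-            plotFlag = False
-            if plotFlag:
-                import matplotlib.pyplot as plt
-                plt.figure(figsize=(10, 10)) if index == 0 else None
-                plt.subplot(4, 4, index + 1).imshow(img[:, :, ::-1])
-                plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
-                plt.axis('off')
-
-            nL = len(labels)
-            if nL > 0:
-                # convert xyxy to xywh
-                labels[:, 1:5] = xyxy2xywh(labels[:, 1:5].copy()) / self.img_size
-
-            if self.augment:
-                # random left-right flip
-                lr_flip = True
-                if lr_flip & (random.random() > 0.5):
-                    img = np.fliplr(img)
-                    if nL > 0:
-                        labels[:, 1] = 1 - labels[:, 1]
-
-                # random up-down flip
-                ud_flip = False
-                if ud_flip & (random.random() > 0.5):
-                    img = np.flipud(img)
-                    if nL > 0:
-                        labels[:, 2] = 1 - labels[:, 2]
-
-            if nL > 0:
-                labels = np.concatenate((np.zeros((nL, 1), dtype='float32') + index, labels), 1)
-                labels_all.append(labels)
-
-            img_all.append(img)
-            img_paths.append(img_path)
-            img_shapes.append((h, w))
+        labels_out = np.zeros((100, 6), dtype=np.float32)
+        if nL > 0:
+            labels_out[:nL, 1:] = labels  # max 100 labels per image

         # Normalize
-        img_all = np.stack(img_all)[:, :, :, ::-1].transpose(0, 3, 1, 2)  # list to np.array and BGR to RGB
-        img_all = np.ascontiguousarray(img_all, dtype=np.float32)  # uint8 to float32
-        img_all /= 255.0  # 0 - 255 to 0.0 - 1.0
+        img = img[:, :, ::-1].transpose(2, 0, 1)  # list to np.array and BGR to RGB
+        img = np.ascontiguousarray(img, dtype=np.float32)  # uint8 to float32
+        img /= 255.0  # 0 - 255 to 0.0 - 1.0

-        if len(labels_all) > 0:
-            labels_all = np.concatenate(labels_all, 0)
-        else:
-            labels_all = np.zeros((1, 6), dtype='float32')
-
-        labels_all = torch.from_numpy(labels_all)
-        return torch.from_numpy(img_all), labels_all, img_paths, img_shapes
-
-    def __len__(self):
-        return self.nB  # number of batches
+        return torch.from_numpy(img), torch.from_numpy(labels_out), img_path, (h, w)

 def letterbox(img, height=416, color=(127.5, 127.5, 127.5)):  # resize a rectangular image to a padded square
diff --git a/utils/utils.py b/utils/utils.py
index de79eac6..1f4e03b0 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -15,6 +15,9 @@ from utils import torch_utils
 torch.set_printoptions(linewidth=1320, precision=5, profile='long')
 np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
 
+# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
+cv2.setNumThreads(0)
+
 
 def float3(x):  # format floats to 3 decimals
     return float(format(x, '.3f'))

PATCH 15 standardizes on a fixed-size label block: __getitem__ pads each image's labels to a (100, 6) tensor so the default collate function can stack a batch, and train.py/test.py then recover a flat (n, 6) target tensor by writing the image index into column 0 and dropping padding rows. A self-contained sketch of that round trip (column layout as assumed from the patch: image index, class, x, y, w, h):

    import torch

    def unpad(padded):  # padded: (batch, 100, 6), as produced by default collate
        for j, t in enumerate(padded):
            t[:, 0] = j  # record which image each row came from
        # rows whose height column (index 5) is zero are padding
        return torch.cat([t[t[:, 5].nonzero()] for t in padded], 0).squeeze(1)

    batch = torch.zeros(2, 100, 6)
    batch[0, :3, 1:] = torch.rand(3, 5)  # image 0: 3 labels
    batch[1, :1, 1:] = torch.rand(1, 5)  # image 1: 1 label
    print(unpad(batch).shape)  # torch.Size([4, 6])

One side effect worth noting: a real label whose height happened to be exactly zero would be discarded along with the padding.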
From 176851f83a5bae8976e642afed2fade4d927eea5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 22:49:57 +0200
Subject: [PATCH 16/24] updates

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 4406dfca..12638c0a 100644
--- a/train.py
+++ b/train.py
@@ -43,7 +43,7 @@ def train(
 
     # Dataloader
     dataset = LoadImagesAndLabels(train_path, img_size=img_size, augment=True)
-    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
 
     cutoff = -1  # backbone reaches to cutoff layer
     start_epoch = 0

From 20beee0c5b0396bdc1b6601d82e9499e22f91ca0 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 21 Mar 2019 22:51:22 +0200
Subject: [PATCH 17/24] updates

---
 train.py     | 2 +-
 utils/gcp.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 12638c0a..c130089c 100644
--- a/train.py
+++ b/train.py
@@ -205,7 +205,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
-    parser.add_argument('--num_workers', type=int, default=0, help='number of Pytorch DataLoader workers')
+    parser.add_argument('--num-workers', type=int, default=0, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 6d46f236..03ac0111 100755
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -9,7 +9,7 @@ sudo shutdown
 # Start
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
 cp -r weights yolov3
-cd yolov3 && python3 train.py --batch-size 64 --multi_scale
+cd yolov3 && python3 train.py --batch-size 16 --num-workers 4
 
 # Resume
 python3 train.py --resume

From 943db40f1ad600cdcdc40ff06588cd9c9bf2523f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 00:41:43 +0200
Subject: [PATCH 18/24] updates

---
 test.py  | 2 +-
 train.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/test.py b/test.py
index a4bba9a3..5d0ca98a 100644
--- a/test.py
+++ b/test.py
@@ -43,7 +43,7 @@ def test(
 
     # Dataloader
     dataset = LoadImagesAndLabels(test_path, img_size=img_size)
-    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
diff --git a/train.py b/train.py
index c130089c..0791c5db 100644
--- a/train.py
+++ b/train.py
@@ -205,7 +205,7 @@ if __name__ == '__main__':
     parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
     parser.add_argument('--img-size', type=int, default=32 * 13, help='pixels')
     parser.add_argument('--resume', action='store_true', help='resume training flag')
-    parser.add_argument('--num-workers', type=int, default=0, help='number of Pytorch DataLoader workers')
+    parser.add_argument('--num-workers', type=int, default=4, help='number of Pytorch DataLoader workers')
     opt = parser.parse_args()
     print(opt, end='\n\n')

From 476724be2dec4b2cb26c05595a2a7d85ef9997bd Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 12:50:25 +0200
Subject: [PATCH 19/24] updates

---
 test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test.py b/test.py
index 5d0ca98a..a4bba9a3 100644
--- a/test.py
+++ b/test.py
@@ -43,7 +43,7 @@ def test(
 
     # Dataloader
     dataset = LoadImagesAndLabels(test_path, img_size=img_size)
-    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
 
     mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
     print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))

From cd188dbde67890e49df7163af4b9316c164e9d4b Mon Sep 17 00:00:00 2001
From: WannaSeaU <1473628258@QQ.COM>
Date: Fri, 22 Mar 2019 18:59:09 +0800
Subject: [PATCH 20/24] Empty label file may cause index error

---
 utils/datasets.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index 6ccefcc3..92b59f5b 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -141,13 +141,16 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
             with open(label_path, 'r') as file:
                 lines = file.read().splitlines()
             labels0 = np.array([x.split() for x in lines], dtype=np.float32)
-
-            # Normalized xywh to pixel xyxy format
-            labels = labels0.copy()
-            labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw
-            labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh
-            labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw
-            labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh
+            # If label file is empty
+            if labels0.size is 0:
+                labels = np.array([])
+            else:
+                # Normalized xywh to pixel xyxy format
+                labels = labels0.copy()
+                labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw
+                labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh
+                labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw
+                labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh
         else:
             labels = np.array([])

From 3532ee038f06083ca0c615bde54fd3180a45ea4a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 14:52:58 +0200
Subject: [PATCH 21/24] Update datasets.py

---
 utils/datasets.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index 92b59f5b..49703a91 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -137,20 +137,20 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
 
         # Load labels
         if os.path.isfile(label_path):
-            # labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 5)  # SLOWER
             with open(label_path, 'r') as file:
                 lines = file.read().splitlines()
-            labels0 = np.array([x.split() for x in lines], dtype=np.float32)
-            # If label file is empty
-            if labels0.size is 0:
+
+            x = np.array([x.split() for x in lines], dtype=np.float32)
+            if x.size is 0:
+                # Empty labels file
                 labels = np.array([])
             else:
                 # Normalized xywh to pixel xyxy format
-                labels = labels0.copy()
-                labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw
-                labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh
-                labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw
-                labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh
+                labels = x.copy()
+                labels[:, 1] = ratio * w * (x[:, 1] - x[:, 3] / 2) + padw
+                labels[:, 2] = ratio * h * (x[:, 2] - x[:, 4] / 2) + padh
+                labels[:, 3] = ratio * w * (x[:, 1] + x[:, 3] / 2) + padw
+                labels[:, 4] = ratio * h * (x[:, 2] + x[:, 4] / 2) + padh
         else:
             labels = np.array([])
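The guard introduced in PATCHES 20-21 matters because an empty label file produces a zero-size array that cannot be indexed with x[:, 1]. A small sketch of the failure mode and the check (note that `x.size is 0` happens to evaluate True under CPython's small-integer caching, but `x.size == 0` is the robust spelling and avoids the SyntaxWarning newer Pythons emit):

    import numpy as np

    lines = []  # what file.read().splitlines() returns for an empty label file
    x = np.array([l.split() for l in lines], dtype=np.float32)
    print(x.shape, x.size)  # (0,) 0 -- so x[:, 1] would raise IndexError

    if x.size == 0:
        labels = np.array([])
    else:
        labels = x.copy()  # then normalized xywh -> pixel xyxy, as in the patch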
From 75d8cbdd5f50c18eb85bcb5494d827e0dfc609c0 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 14:56:43 +0200
Subject: [PATCH 22/24] updates

---
 utils/datasets.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index 49703a91..285e51f9 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -161,7 +161,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         nL = len(labels)
         if nL > 0:
             # convert xyxy to xywh
-            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5].copy()) / self.img_size
+            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) / self.img_size
 
         if self.augment:
             # random left-right flip

From b31f8fb017b288f1bc8bd48f318625dc7bad6956 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 15:08:03 +0200
Subject: [PATCH 23/24] updates

---
 train.py     | 8 +++-----
 utils/gcp.sh | 7 ++++---
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/train.py b/train.py
index 0791c5db..da1a07fe 100644
--- a/train.py
+++ b/train.py
@@ -64,6 +64,7 @@ def train(
             cutoff = load_darknet_weights(model, weights + 'yolov3-tiny.conv.15')
 
     if torch.cuda.device_count() > 1:
+        print('WARNING: MultiGPU Issue: https://github.com/ultralytics/yolov3/issues/146')
         model = nn.DataParallel(model)
 
     # Transfer learning (train only YOLO layers)
@@ -88,10 +89,7 @@ def train(
         # scheduler.step()
 
         # Update scheduler (manual)
-        if epoch > 250:
-            lr = lr0 / 10
-        else:
-            lr = lr0
+        lr = lr0 / 10 if epoch > 250 else lr0
         for x in optimizer.param_groups:
             x['lr'] = lr
 
@@ -119,7 +117,7 @@ def train(
                     plt.figure(figsize=(10, 10))
                     for ip in range(batch_size):
                         labels = xywh2xyxy(targets[targets[:, 0] == ip, 2:6]).numpy() * img_size
-                        plt.subplot(3, 3, ip + 1).imshow(imgs[ip].numpy().transpose(1, 2, 0))
+                        plt.subplot(4, 4, ip + 1).imshow(imgs[ip].numpy().transpose(1, 2, 0))
                         plt.plot(labels[:, [0, 2, 2, 0, 0]].T, labels[:, [1, 1, 3, 3, 1]].T, '.-')
                         plt.axis('off')
 
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 03ac0111..1b58f373 100755
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -9,14 +9,15 @@ sudo shutdown
 # Start
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
 cp -r weights yolov3
-cd yolov3 && python3 train.py --batch-size 16 --num-workers 4
+cd yolov3 && python3 train.py --batch-size 16 --epochs 1
+sudo shutdown
 
 # Resume
 python3 train.py --resume
 
 # Detect
-gsutil cp gs://ultralytics/yolov3.pt yolov3/weights
-python3 detect.py
+sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
+cd yolov3 && python3 detect.py
 
 # Clone branch
 sudo rm -rf yolov3 && git clone -b multi_gpu --depth 1 https://github.com/ultralytics/yolov3

From 4114d5e9c9c1540dd0d466f0af3797bfec73c68f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 22 Mar 2019 17:53:06 +0200
Subject: [PATCH 24/24] updates

---
 test.py      |  2 +-
 utils/gcp.sh | 27 +++++++++------------------
 2 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/test.py b/test.py
index a4bba9a3..5d0ca98a 100644
--- a/test.py
+++ b/test.py
@@ -43,7 +43,7 @@ def test(
 
     # Dataloader
     dataset = LoadImagesAndLabels(test_path, img_size=img_size)
-    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
+    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
 
     mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
     print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
diff --git a/utils/gcp.sh b/utils/gcp.sh
index 1b58f373..50274913 100755
--- a/utils/gcp.sh
+++ b/utils/gcp.sh
@@ -6,7 +6,7 @@ bash yolov3/data/get_coco_dataset.sh
 sudo rm -rf cocoapi && git clone https://github.com/cocodataset/cocoapi && cd cocoapi/PythonAPI && make && cd ../.. && cp -r cocoapi/PythonAPI/pycocotools yolov3
 sudo shutdown
 
-# Start
+# Train
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
 cp -r weights yolov3
 cd yolov3 && python3 train.py --batch-size 16 --epochs 1
@@ -16,30 +16,21 @@ sudo shutdown
 python3 train.py --resume
 
 # Detect
-sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
-cd yolov3 && python3 detect.py
+python3 detect.py
 
-# Clone branch
+# Clone a branch
 sudo rm -rf yolov3 && git clone -b multi_gpu --depth 1 https://github.com/ultralytics/yolov3
-cd yolov3 && python3 train.py --batch-size 26
-
-sudo rm -rf yolov3 && git clone -b multigpu --depth 1 https://github.com/alexpolichroniadis/yolov3
-cp coco.data yolov3/cfg
-cd yolov3 && python3 train.py --batch-size 26
 
 # Test
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
 sudo rm -rf cocoapi && git clone https://github.com/cocodataset/cocoapi && cd cocoapi/PythonAPI && make && cd ../.. && cp -r cocoapi/PythonAPI/pycocotools yolov3
 cd yolov3 && python3 test.py --save-json --conf-thres 0.005
 
-# Test Darknet
+# Test Darknet training
 python3 test.py --img_size 416 --weights ../darknet/backup/yolov3.backup
 
-# Download and Resume
-sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
+# Download with wget
 wget https://storage.googleapis.com/ultralytics/yolov3.pt -O weights/latest.pt
-python3 train.py --img_size 416 --batch_size 16 --epochs 1 --resume
-python3 test.py --img_size 416 --weights weights/latest.pt --conf_thres 0.5
 
 # Copy latest.pt to bucket
 gsutil cp yolov3/weights/latest.pt gs://ultralytics
@@ -48,8 +39,8 @@ gsutil cp yolov3/weights/latest.pt gs://ultralytics
 gsutil cp gs://ultralytics/latest.pt yolov3/weights/latest.pt
 wget https://storage.googleapis.com/ultralytics/latest.pt
 
-# Testing
-sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3 && cd yolov3
-python3 train.py --epochs 3 --var 64
+# Trade Studies
+sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
+cp -r weights yolov3
+cd yolov3 && python3 train.py --batch-size 16 --epochs 1
 sudo shutdown
-