From 8521c3cff952e9ae5f32ff69580bfdae726afb6f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 27 Apr 2020 15:22:36 -0700
Subject: [PATCH] cleanup

---
 train.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/train.py b/train.py
index ddeda1a7..d32f59e0 100644
--- a/train.py
+++ b/train.py
@@ -249,7 +249,7 @@ def train():
                     if 'momentum' in x:
                         x['momentum'] = np.interp(ni, [0, n_burn], [0.9, hyp['momentum']])
 
-            # Multi-Scale training
+            # Multi-Scale
             if opt.multi_scale:
                 if ni / accumulate % 1 == 0:  #  adjust img_size (67% - 150%) every 1 batch
                     img_size = random.randrange(grid_min, grid_max + 1) * gs
@@ -258,38 +258,36 @@ def train():
                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                     imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
 
-            # Run model
+            # Forward
             pred = model(imgs)
 
-            # Compute loss
+            # Loss
             loss, loss_items = compute_loss(pred, targets, model)
             if not torch.isfinite(loss):
                 print('WARNING: non-finite loss, ending training ', loss_items)
                 return results
 
-            # Scale loss by nominal batch_size of 64
-            loss *= batch_size / 64
-
-            # Compute gradient
+            # Backward
+            loss *= batch_size / 64  # scale loss
             if mixed_precision:
                 with amp.scale_loss(loss, optimizer) as scaled_loss:
                     scaled_loss.backward()
             else:
                 loss.backward()
 
-            # Optimize accumulated gradient
+            # Optimize
             if ni % accumulate == 0:
                 optimizer.step()
                 optimizer.zero_grad()
                 ema.update(model)
 
-            # Print batch results
+            # Print
             mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
             mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
             s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
             pbar.set_description(s)
 
-            # Plot images with bounding boxes
+            # Plot
             if ni < 1:
                 f = 'train_batch%g.png' % i  # filename
                 plot_images(imgs=imgs, targets=targets, paths=paths, fname=f)
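
Note (not part of the patch): the Multi-Scale block touched by the first hunk picks a random training resolution that is a multiple of the grid stride and stretches the batch to it. A minimal sketch of that idea, assuming a (N, C, H, W) image tensor; the grid_min/grid_max defaults below are illustrative, not the repo's values (the repo derives them from img_size elsewhere in train.py).

    import math
    import random
    import torch
    import torch.nn.functional as F

    def multi_scale_resize(imgs, gs=32, grid_min=10, grid_max=19):
        # pick a new target size as a random multiple of the grid stride gs
        img_size = random.randrange(grid_min, grid_max + 1) * gs
        sf = img_size / max(imgs.shape[2:])  # scale factor vs. current longest side
        if sf != 1:
            # stretch both spatial dims, rounded up to the nearest gs-multiple
            ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]
            imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
        return imgs, img_size

    imgs, img_size = multi_scale_resize(torch.zeros(2, 3, 416, 416))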
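
The Backward/Optimize hunks keep the existing gradient-accumulation scheme: the loss is scaled by batch_size / 64 so that gradients accumulated over several small batches approximate one nominal 64-image step, and the optimizer only steps every `accumulate` batches. A self-contained sketch of that pattern with a placeholder model and data; the optimizer, learning rate, and loop bounds are illustrative only.

    import torch
    import torch.nn.functional as F

    nominal_bs = 64
    batch_size = 16
    accumulate = max(round(nominal_bs / batch_size), 1)  # batches per optimizer step

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for ni in range(1, 2 * accumulate + 1):  # ni = integrated batch counter
        x, y = torch.randn(batch_size, 10), torch.randn(batch_size, 1)
        loss = F.mse_loss(model(x), y)
        loss *= batch_size / nominal_bs  # scale so accumulated grads match one nominal step
        loss.backward()
        if ni % accumulate == 0:  # optimize once per `accumulate` batches
            optimizer.step()
            optimizer.zero_grad()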
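
The ema.update(model) call after each optimizer step maintains an exponential moving average of the model weights. The repo's EMA class is not shown in this hunk, so the following is only a generic sketch of a weight EMA with an illustrative decay value, not the repo's implementation.

    import copy
    import torch

    class SimpleEMA:
        def __init__(self, model, decay=0.999):
            self.ema = copy.deepcopy(model).eval()  # shadow model holding averaged weights
            self.decay = decay
            for p in self.ema.parameters():
                p.requires_grad_(False)

        @torch.no_grad()
        def update(self, model):
            msd = model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    # v <- decay * v + (1 - decay) * current weight
                    v.mul_(self.decay).add_(msd[k].detach(), alpha=1.0 - self.decay)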