From a9e42a16f13a9d492d3cb048cd21eca31fb86266 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Jul 2019 19:48:29 +0200
Subject: [PATCH] updates

---
 train.py       |  8 ++++----
 utils/utils.py | 10 +++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/train.py b/train.py
index c8aeac52..2133115c 100644
--- a/train.py
+++ b/train.py
@@ -31,8 +31,8 @@ def train(
         data_cfg,
         img_size=416,
         epochs=100,  # 500200 batches at bs 16, 117263 images = 273 epochs
-        batch_size=8,
-        accumulate=8,  # effective bs = batch_size * accumulate = 8 * 8 = 64
+        batch_size=16,
+        accumulate=4,  # effective bs = batch_size * accumulate = 16 * 4 = 64
         freeze_backbone=False,
 ):
     init_seeds()
@@ -302,8 +302,8 @@ def print_mutation(hyp, results):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--epochs', type=int, default=100, help='number of epochs')
-    parser.add_argument('--batch-size', type=int, default=8, help='batch size')
-    parser.add_argument('--accumulate', type=int, default=8, help='number of batches to accumulate before optimizing')
+    parser.add_argument('--batch-size', type=int, default=16, help='batch size')
+    parser.add_argument('--accumulate', type=int, default=4, help='number of batches to accumulate before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
     parser.add_argument('--data-cfg', type=str, default='data/coco_64img.data', help='coco.data file path')
     parser.add_argument('--single-scale', action='store_true', help='train at fixed size (no multi-scale)')
diff --git a/utils/utils.py b/utils/utils.py
index 5698d6e4..bdfdef38 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -281,7 +281,7 @@ def compute_loss(p, targets, model, giou_loss=True):  # predictions, targets, model
     MSE = nn.MSELoss()
     BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]))
     BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]))
-    # CE = nn.CrossEntropyLoss()  # (weight=model.class_weights)
+    CE = nn.CrossEntropyLoss()  # (weight=model.class_weights)
 
     # Compute losses
     bs = p[0].shape[0]  # batch size
@@ -304,10 +304,10 @@ def compute_loss(p, targets, model, giou_loss=True):  # predictions, targets, model
             lxy += (k * h['xy']) * MSE(torch.sigmoid(pi[..., 0:2]), txy[i])  # xy loss
             lwh += (k * h['wh']) * MSE(pi[..., 2:4], twh[i])  # wh yolo loss
 
-            tclsm = torch.zeros_like(pi[..., 5:])
-            tclsm[range(len(b)), tcls[i]] = 1.0
-            lcls += (k * h['cls']) * BCEcls(pi[..., 5:], tclsm)  # cls loss (BCE)
-            # lcls += (k * h['cls']) * CE(pi[..., 5:], tcls[i])  # cls loss (CE)
+            # tclsm = torch.zeros_like(pi[..., 5:])
+            # tclsm[range(len(b)), tcls[i]] = 1.0
+            # lcls += (k * h['cls']) * BCEcls(pi[..., 5:], tclsm)  # cls loss (BCE)
+            lcls += (k * h['cls']) * CE(pi[..., 5:], tcls[i])  # cls loss (CE)
 
             # Append targets to text file
             # with open('targets.txt', 'a') as file:
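
Note (editor's sketch, not part of the patch): the train.py change relies on gradient accumulation, stepping the optimizer once every `accumulate` batches so the effective batch size is batch_size * accumulate = 16 * 4 = 64. A minimal illustration of that pattern follows; `model`, `optimizer`, and `dataloader` are hypothetical placeholders, not names from this repository.

    import torch

    def train_epoch(model, optimizer, dataloader, accumulate=4):
        # Illustrative gradient-accumulation loop: gradients from `accumulate`
        # consecutive batches are summed before a single optimizer step, so
        # batch_size=16 with accumulate=4 behaves like one batch of 64.
        optimizer.zero_grad()
        for i, (imgs, targets) in enumerate(dataloader):
            loss = model(imgs, targets)  # assumed to return a scalar loss
            loss.backward()  # backward() adds into .grad; it does not overwrite
            if (i + 1) % accumulate == 0:
                optimizer.step()  # one step per `accumulate` batches
                optimizer.zero_grad()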
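
Note (editor's sketch, not part of the patch): the utils/utils.py hunks swap the classification loss from multi-label BCE over a one-hot target matrix to single-label cross-entropy over integer class indices. A standalone comparison of the two formulations, with `pcls` standing in for pi[..., 5:] and `tcls_i` for tcls[i]; the shapes are illustrative only.

    import torch
    import torch.nn as nn

    n_targets, n_classes = 8, 80  # illustrative shapes
    pcls = torch.randn(n_targets, n_classes)  # raw class logits, as in pi[..., 5:]
    tcls_i = torch.randint(0, n_classes, (n_targets,))  # integer class indices

    # BCE path (disabled by the patch): build a one-hot target matrix and
    # apply an independent sigmoid per class (multi-label formulation)
    tclsm = torch.zeros_like(pcls)
    tclsm[range(n_targets), tcls_i] = 1.0
    loss_bce = nn.BCEWithLogitsLoss()(pcls, tclsm)

    # CE path (enabled by the patch): softmax over classes, integer targets
    # consumed directly (single-label formulation)
    loss_ce = nn.CrossEntropyLoss()(pcls, tcls_i)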