From abf59f156538761bdb41dda43b034f94efdc23b1 Mon Sep 17 00:00:00 2001
From: glenn-jocher
Date: Thu, 4 Jul 2019 22:10:46 +0200
Subject: [PATCH] updates

---
 train.py       | 10 +++++-----
 utils/utils.py | 16 ++++++++--------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/train.py b/train.py
index 0b66205f..13cc0d47 100644
--- a/train.py
+++ b/train.py
@@ -18,8 +18,8 @@ hyp = {'giou': 1.008,  # giou loss gain
        'wh': 0.1845,  # wh loss gain
        'cls': 16.94,  # cls loss gain
        'cls_pw': 6.215,  # cls BCELoss positive_weight
-       'conf': 10.61,  # conf loss gain
-       'conf_pw': 4.272,  # conf BCELoss positive_weight
+       'obj': 10.61,  # obj loss gain
+       'obj_pw': 4.272,  # obj BCELoss positive_weight
        'iou_t': 0.251,  # iou target-anchor training threshold
        'lr0': 0.001,  # initial learning rate
        'lrf': -4.,  # final learning rate = lr0 * (10 ** lrf)
@@ -34,8 +34,8 @@ hyp = {'giou': 1.008,  # giou loss gain
 # 'wh': 0.10,  # wh loss gain
 # 'cls': 0.035,  # cls loss gain
 # 'cls_pw': 79.0,  # cls BCELoss positive_weight
-# 'conf': 1.61,  # conf loss gain
-# 'conf_pw': 3.53,  # conf BCELoss positive_weight
+# 'obj': 1.61,  # obj loss gain
+# 'obj_pw': 3.53,  # obj BCELoss positive_weight
 # 'iou_t': 0.29,  # iou target-anchor training threshold
 # 'lr0': 0.001,  # initial learning rate
 # 'lrf': -4.,  # final learning rate = lr0 * (10 ** lrf)
@@ -174,7 +174,7 @@ def train(
     for epoch in range(start_epoch, epochs):
         model.train()
         print(('\n%8s%12s' + '%10s' * 7) %
-              ('Epoch', 'Batch', 'xy', 'wh', 'conf', 'cls', 'total', 'targets', 'img_size'))
+              ('Epoch', 'Batch', 'xy', 'wh', 'obj', 'cls', 'total', 'targets', 'img_size'))

         # Update scheduler
         scheduler.step()
diff --git a/utils/utils.py b/utils/utils.py
index 4f8fe604..57d16b36 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -273,27 +273,27 @@ def wh_iou(box1, box2):

 def compute_loss(p, targets, model, giou_loss=False):  # predictions, targets, model
     ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
-    lxy, lwh, lcls, lconf = ft([0]), ft([0]), ft([0]), ft([0])
+    lxy, lwh, lcls, lobj = ft([0]), ft([0]), ft([0]), ft([0])
     txy, twh, tcls, tbox, indices, anchor_vec = build_targets(model, targets)
     h = model.hyp  # hyperparameters

     # Define criteria
     MSE = nn.MSELoss()
-    CE = nn.CrossEntropyLoss()  # (weight=model.class_weights)
     BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]))
-    BCEconf = nn.BCEWithLogitsLoss(pos_weight=ft([h['conf_pw']]))
+    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]))
+    # CE = nn.CrossEntropyLoss()  # (weight=model.class_weights)

     # Compute losses
     bs = p[0].shape[0]  # batch size
     k = bs / 64  # loss gain
     for i, pi0 in enumerate(p):  # layer i predictions, i
         b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
-        tconf = torch.zeros_like(pi0[..., 0])  # conf
+        tobj = torch.zeros_like(pi0[..., 0])  # target obj

         # Compute losses
         if len(b):  # number of targets
             pi = pi0[b, a, gj, gi]  # predictions closest to anchors
-            tconf[b, a, gj, gi] = 1.0  # conf
+            tobj[b, a, gj, gi] = 1.0  # obj
             # pi[..., 2:4] = torch.sigmoid(pi[..., 2:4])  # wh power loss (uncomment)

             if giou_loss:
@@ -313,10 +313,10 @@ def compute_loss(p, targets, model, giou_loss=False):  # predictions, targets, m
             # with open('targets.txt', 'a') as file:
             #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

-        lconf += (k * h['conf']) * BCEconf(pi0[..., 4], tconf)  # obj_conf loss
-    loss = lxy + lwh + lconf + lcls
+        lobj += (k * h['obj']) * BCEobj(pi0[..., 4], tobj)  # obj loss
+    loss = lxy + lwh + lobj + lcls

-    return loss, torch.cat((lxy, lwh, lconf, lcls, loss)).detach()
+    return loss, torch.cat((lxy, lwh, lobj, lcls, loss)).detach()


 def build_targets(model, targets):
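Note (not part of the patch): a minimal, standalone sketch of what the renamed BCEobj criterion does. It assumes only that `obj_pw` takes the value from the hyp dict above; the logits and targets below are made-up illustrations, not values from the repository.

import torch
import torch.nn as nn

obj_pw = 4.272  # 'obj_pw' value from the hyp dict in train.py

# Hypothetical objectness logits for four grid cells and their targets
# (1.0 marks cells that were assigned a target, as tobj does in compute_loss).
pred_obj = torch.tensor([2.0, -1.0, 0.5, -3.0])  # raw logits, pre-sigmoid
tobj = torch.tensor([1.0, 0.0, 1.0, 0.0])

BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([obj_pw]))
plain = nn.BCEWithLogitsLoss()

# pos_weight multiplies only the positive-target terms of the BCE loss,
# so under-predicted object cells are penalized more heavily than background.
print(BCEobj(pred_obj, tobj), plain(pred_obj, tobj))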