diff --git a/detect.py b/detect.py
index 4189bd86..051ca6fc 100644
--- a/detect.py
+++ b/detect.py
@@ -111,7 +111,7 @@ def detect(save_txt=False, save_img=False):
                     s += '%g %ss, ' % (n, names[int(c)])  # add to string

                 # Write results
-                for *xyxy, conf, _, cls in det:
+                for *xyxy, conf, cls in det:
                     if save_txt:  # Write to file
                         with open(save_path + '.txt', 'a') as file:
                             file.write(('%g ' * 6 + '\n') % (*xyxy, cls, conf))
diff --git a/models.py b/models.py
index 65bc20c2..d540981a 100755
--- a/models.py
+++ b/models.py
@@ -149,8 +149,10 @@ class YOLOLayer(nn.Module):
         self.anchors = torch.Tensor(anchors)
         self.na = len(anchors)  # number of anchors (3)
         self.nc = nc  # number of classes (80)
+        self.no = nc + 5  # number of outputs
         self.nx = 0  # initialize number of x gridpoints
         self.ny = 0  # initialize number of y gridpoints
+        self.oi = [0, 1, 2, 3] + list(range(5, self.no))  # output indices
         self.arc = arc

         if ONNX_EXPORT:  # grids must be computed in __init__
@@ -168,7 +170,7 @@ class YOLOLayer(nn.Module):
                 create_grids(self, img_size, (nx, ny), p.device, p.dtype)

         # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85)  # (bs, anchors, grid, grid, classes + xywh)
-        p = p.view(bs, self.na, self.nc + 5, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction
+        p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction

         if self.training:
             return p
@@ -180,18 +182,18 @@ class YOLOLayer(nn.Module):
             grid_xy = self.grid_xy.repeat((1, self.na, 1, 1, 1)).view(1, m, 2)
             anchor_wh = self.anchor_wh.repeat((1, 1, self.nx, self.ny, 1)).view(1, m, 2) / ngu

-            p = p.view(m, 5 + self.nc)
+            p = p.view(m, self.no)
             xy = torch.sigmoid(p[..., 0:2]) + grid_xy[0]  # x, y
             wh = torch.exp(p[..., 2:4]) * anchor_wh[0]  # width, height
             p_conf = torch.sigmoid(p[:, 4:5])  # Conf
-            p_cls = F.softmax(p[:, 5:5 + self.nc], 1) * p_conf  # SSD-like conf
+            p_cls = F.softmax(p[:, 5:self.no], 1) * p_conf  # SSD-like conf
             return torch.cat((xy / ngu[0], wh, p_conf, p_cls), 1).t()

-            # p = p.view(1, m, 5 + self.nc)
+            # p = p.view(1, m, self.no)
             # xy = torch.sigmoid(p[..., 0:2]) + grid_xy  # x, y
             # wh = torch.exp(p[..., 2:4]) * anchor_wh  # width, height
             # p_conf = torch.sigmoid(p[..., 4:5])  # Conf
-            # p_cls = p[..., 5:5 + self.nc]
+            # p_cls = p[..., 5:self.no]
             # # Broadcasting only supported on first dimension in CoreML. See onnx-coreml/_operators.py
             # # p_cls = F.softmax(p_cls, 2) * p_conf  # SSD-like conf
             # p_cls = torch.exp(p_cls).permute((2, 1, 0))
@@ -219,8 +221,11 @@ class YOLOLayer(nn.Module):
             if self.nc == 1:
                 io[..., 5] = 1  # single-class model https://github.com/ultralytics/yolov3/issues/235

-            # reshape from [1, 3, 13, 13, 85] to [1, 507, 85]
-            return io.view(bs, -1, 5 + self.nc), p
+            # compute conf
+            io[..., 5:] *= io[..., 4:5]  # conf = obj_conf * cls_conf
+
+            # reshape from [1, 3, 13, 13, 85] to [1, 507, 84], remove obj_conf
+            return io[..., self.oi].view(bs, -1, self.no - 1), p


 class Darknet(nn.Module):
diff --git a/test.py b/test.py
index 7472fa98..7e38acf8 100644
--- a/test.py
+++ b/test.py
@@ -114,7 +114,7 @@ def test(cfg,
                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                 for di, d in enumerate(pred):
                     jdict.append({'image_id': image_id,
-                                  'category_id': coco91class[int(d[6])],
+                                  'category_id': coco91class[int(d[5])],
                                   'bbox': [floatn(x, 3) for x in box[di]],
                                   'score': floatn(d[4], 5)})

@@ -133,7 +133,7 @@ def test(cfg,
                 tbox[:, [1, 3]] *= height

                 # Search for correct predictions
-                for i, (*pbox, pconf, pcls_conf, pcls) in enumerate(pred):
+                for i, (*pbox, _, pcls) in enumerate(pred):

                     # Break if all targets already located in image
                     if len(detected) == nl:
@@ -154,7 +154,7 @@ def test(cfg,
                             correct[i] = iou > iou_thres

             # Append statistics (correct, conf, pcls, tcls)
-            stats.append((correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls))
+            stats.append((correct, pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

     # Compute statistics
     stats = [np.concatenate(x, 0) for x in list(zip(*stats))]  # to numpy
@@ -209,7 +209,7 @@ def test(cfg,
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(prog='test.py')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
-    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')
+    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
     parser.add_argument('--weights', type=str, default='weights/yolov3-spp.weights', help='path to weights file')
     parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
     parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
diff --git a/utils/utils.py b/utils/utils.py
index d7b27171..c3c3e926 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -464,7 +464,7 @@ def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):
     Removes detections with lower object confidence score than 'conf_thres'
     Non-Maximum Suppression to further filter detections.
     Returns detections with shape:
-        (x1, y1, x2, y2, object_conf, class_conf, class)
+        (x1, y1, x2, y2, conf, class)
     """
     # NMS method https://github.com/ultralytics/yolov3/issues/679 'OR', 'AND', 'MERGE', 'VISION', 'VISION_BATCHED'
     method = 'MERGE' if conf_thres <= 0.01 else 'VISION'  # MERGE is highest mAP, VISION is fastest
@@ -474,47 +474,35 @@ def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):

     output = [None] * len(prediction)
     for image_i, pred in enumerate(prediction):
-        # Experiment: Prior class size rejection
-        # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
-        # a = w * h  # area
-        # ar = w / (h + 1e-16)  # aspect ratio
-        # n = len(w)
-        # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar)
-        # shape_likelihood = np.zeros((n, 60), dtype=np.float32)
-        # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1)
-        # from scipy.stats import multivariate_normal
-        # for c in range(60):
-        #     shape_likelihood[:, c] =
-        #         multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2])
+        # Duplicate ambiguous
+        # b = pred[pred[:, 5:].sum(1) > 1.1]
+        # if len(b):
+        #     b[range(len(b)), 5 + b[:, 5:].argmax(1)] = 0
+        #     pred = torch.cat((pred, b), 0)

         # Multiply conf by class conf to get combined confidence
-        class_conf, class_pred = pred[:, 5:].max(1)
-        pred[:, 4] *= class_conf
+        conf, cls = pred[:, 4:].max(1)

         # # Merge classes (optional)
-        # class_pred[(class_pred.view(-1,1) == torch.LongTensor([2, 3, 5, 6, 7]).view(1,-1)).any(1)] = 2
+        # cls[(cls.view(-1,1) == torch.LongTensor([2, 3, 5, 6, 7]).view(1,-1)).any(1)] = 2
         #
         # # Remove classes (optional)
-        # pred[class_pred != 2, 4] = 0.0
+        # pred[cls != 2, 4] = 0.0

         # Select only suitable predictions
-        i = (pred[:, 4] > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & (pred[:, 2:4] < max_wh).all(1) & \
-            torch.isfinite(pred).all(1)
+        i = (conf > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & (pred[:, 2:4] < max_wh).all(1) & torch.isfinite(
+            pred).all(1)
         pred = pred[i]

         # If none are remaining => process next image
         if len(pred) == 0:
             continue

-        # Select predicted classes
-        class_conf = class_conf[i]
-        class_pred = class_pred[i].unsqueeze(1).float()
-
         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
         pred[:, :4] = xywh2xyxy(pred[:, :4])

-        # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)
-        pred = torch.cat((pred[:, :5], class_conf.unsqueeze(1), class_pred), 1)
+        # Detections ordered as (x1y1x2y2, conf, cls)
+        pred = torch.cat((pred[:, :4], conf[i].unsqueeze(1), cls[i].unsqueeze(1).float()), 1)

         # Get detections sorted by decreasing confidence scores
         pred = pred[(-pred[:, 4]).argsort()]
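Note: the snippet below is a minimal standalone sketch (not part of the commit) of the confidence folding and obj_conf column drop that the YOLOLayer change above performs, run on a dummy tensor; the names nc, no, oi and io mirror the diff, the shapes are illustrative only.

import torch

nc = 80                                 # number of classes
no = nc + 5                             # x, y, w, h, obj_conf + class scores
oi = [0, 1, 2, 3] + list(range(5, no))  # output indices: keep box, drop obj_conf (column 4)

io = torch.rand(1, 3, 13, 13, no)       # dummy inference output: [bs, anchors, ny, nx, no]
io[..., 5:] *= io[..., 4:5]             # conf = obj_conf * cls_conf
out = io[..., oi].view(1, -1, no - 1)   # [1, 507, 84]: x, y, w, h + 80 combined confidences
print(out.shape)                        # torch.Size([1, 507, 84])

Downstream, non_max_suppression then reads the combined confidence directly (conf, cls = pred[:, 4:].max(1)) and returns 6-column detections (x1, y1, x2, y2, conf, cls), which is why detect.py now unpacks *xyxy, conf, cls and test.py indexes the class at d[5] instead of d[6].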