Glenn Jocher 2019-12-19 18:09:13 -08:00
parent ad73ce4334
commit f5cd3596f5
4 changed files with 30 additions and 37 deletions

detect.py  View File

@@ -111,7 +111,7 @@ def detect(save_txt=False, save_img=False):
                 s += '%g %ss, ' % (n, names[int(c)])  # add to string

             # Write results
-            for *xyxy, conf, _, cls in det:
+            for *xyxy, conf, cls in det:
                 if save_txt:  # Write to file
                     with open(save_path + '.txt', 'a') as file:
                         file.write(('%g ' * 6 + '\n') % (*xyxy, cls, conf))
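For context: after this change the detection rows coming out of NMS carry 6 values (x1, y1, x2, y2, conf, cls) instead of 7, so the placeholder for the dropped class_conf column disappears from the unpacking. A minimal sketch with a fabricated detection tensor (not repo data):

    # Fabricated 6-column detections under the new layout: x1, y1, x2, y2, conf, cls
    import torch

    det = torch.tensor([[100., 50., 200., 150., 0.92, 0.],
                        [300., 80., 380., 160., 0.75, 16.]])

    for *xyxy, conf, cls in det:  # same unpacking as the updated loop above
        print(('%g ' * 6) % (*xyxy, cls, conf))  # one detection per line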

models.py  View File

@@ -149,8 +149,10 @@ class YOLOLayer(nn.Module):
         self.anchors = torch.Tensor(anchors)
         self.na = len(anchors)  # number of anchors (3)
         self.nc = nc  # number of classes (80)
+        self.no = nc + 5  # number of outputs
         self.nx = 0  # initialize number of x gridpoints
         self.ny = 0  # initialize number of y gridpoints
+        self.oi = [0, 1, 2, 3] + list(range(5, self.no))  # output indices
         self.arc = arc

         if ONNX_EXPORT:  # grids must be computed in __init__
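A standalone sanity check (assuming nc=80, as in the comment above) of what the new oi index list selects: every output column except index 4, the objectness score:

    nc = 80
    no = nc + 5                             # x, y, w, h, obj, then class scores
    oi = [0, 1, 2, 3] + list(range(5, no))  # output indices, skipping obj at 4
    assert len(oi) == no - 1 and 4 not in oi
    print(oi[:6])  # [0, 1, 2, 3, 5, 6] -> box coords, then class columns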
@@ -168,7 +170,7 @@ class YOLOLayer(nn.Module):
             create_grids(self, img_size, (nx, ny), p.device, p.dtype)

         # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85)  # (bs, anchors, grid, grid, classes + xywh)
-        p = p.view(bs, self.na, self.nc + 5, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction
+        p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction

         if self.training:
             return p
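A toy shape walk-through of the reshape above, now driven by self.no; the sizes (bs=1, na=3, 13x13 grid, nc=80) are assumed from the inline comment:

    import torch

    bs, na, no, ny, nx = 1, 3, 85, 13, 13
    p = torch.randn(bs, na * no, ny, nx)  # raw conv output: [1, 255, 13, 13]
    p = p.view(bs, na, no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
    print(p.shape)  # torch.Size([1, 3, 13, 13, 85])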
@@ -180,18 +182,18 @@ class YOLOLayer(nn.Module):
             grid_xy = self.grid_xy.repeat((1, self.na, 1, 1, 1)).view(1, m, 2)
             anchor_wh = self.anchor_wh.repeat((1, 1, self.nx, self.ny, 1)).view(1, m, 2) / ngu

-            p = p.view(m, 5 + self.nc)
+            p = p.view(m, self.no)
             xy = torch.sigmoid(p[..., 0:2]) + grid_xy[0]  # x, y
             wh = torch.exp(p[..., 2:4]) * anchor_wh[0]  # width, height
             p_conf = torch.sigmoid(p[:, 4:5])  # Conf
-            p_cls = F.softmax(p[:, 5:5 + self.nc], 1) * p_conf  # SSD-like conf
+            p_cls = F.softmax(p[:, 5:self.no], 1) * p_conf  # SSD-like conf
             return torch.cat((xy / ngu[0], wh, p_conf, p_cls), 1).t()

-            # p = p.view(1, m, 5 + self.nc)
+            # p = p.view(1, m, self.no)
             # xy = torch.sigmoid(p[..., 0:2]) + grid_xy  # x, y
             # wh = torch.exp(p[..., 2:4]) * anchor_wh  # width, height
             # p_conf = torch.sigmoid(p[..., 4:5])  # Conf
-            # p_cls = p[..., 5:5 + self.nc]
+            # p_cls = p[..., 5:self.no]
             # # Broadcasting only supported on first dimension in CoreML. See onnx-coreml/_operators.py
             # # p_cls = F.softmax(p_cls, 2) * p_conf  # SSD-like conf
             # p_cls = torch.exp(p_cls).permute((2, 1, 0))
@@ -219,8 +221,11 @@ class YOLOLayer(nn.Module):
             if self.nc == 1:
                 io[..., 5] = 1  # single-class model https://github.com/ultralytics/yolov3/issues/235

-            # reshape from [1, 3, 13, 13, 85] to [1, 507, 85]
-            return io.view(bs, -1, 5 + self.nc), p
+            # compute conf
+            io[..., 5:] *= io[..., 4:5]  # conf = obj_conf * cls_conf
+
+            # reshape from [1, 3, 13, 13, 85] to [1, 507, 84], remove obj_conf
+            return io[..., self.oi].view(bs, -1, self.no - 1), p


 class Darknet(nn.Module):
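A self-contained sketch (random stand-in tensor, shapes assumed from the comments above) of the two new inference steps: class scores are pre-multiplied by objectness, then the obj_conf column is dropped via the oi index list, so callers now receive 84 columns per box instead of 85:

    import torch

    bs, na, ny, nx, nc = 1, 3, 13, 13, 80
    no = nc + 5
    oi = [0, 1, 2, 3] + list(range(5, no))

    io = torch.rand(bs, na, ny, nx, no)     # stand-in for decoded predictions
    io[..., 5:] *= io[..., 4:5]             # conf = obj_conf * cls_conf
    out = io[..., oi].view(bs, -1, no - 1)  # drop obj_conf, flatten anchors/grid
    print(out.shape)                        # torch.Size([1, 507, 84])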

test.py  View File

@@ -114,7 +114,7 @@ def test(cfg,
                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner

                 for di, d in enumerate(pred):
                     jdict.append({'image_id': image_id,
-                                  'category_id': coco91class[int(d[6])],
+                                  'category_id': coco91class[int(d[5])],
                                   'bbox': [floatn(x, 3) for x in box[di]],
                                   'score': floatn(d[4], 5)})
@@ -133,7 +133,7 @@ def test(cfg,
                 tbox[:, [1, 3]] *= height

                 # Search for correct predictions
-                for i, (*pbox, pconf, pcls_conf, pcls) in enumerate(pred):
+                for i, (*pbox, _, pcls) in enumerate(pred):

                     # Break if all targets already located in image
                     if len(detected) == nl:
@@ -154,7 +154,7 @@ def test(cfg,
                         correct[i] = iou > iou_thres

             # Append statistics (correct, conf, pcls, tcls)
-            stats.append((correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls))
+            stats.append((correct, pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

     # Compute statistics
     stats = [np.concatenate(x, 0) for x in list(zip(*stats))]  # to numpy
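Both index changes in this file follow from the 84-column layer output: after NMS the detections are now (x1, y1, x2, y2, conf, cls), so confidence stays in column 4 while the class id moves from column 6 to column 5. A sketch with fabricated rows:

    import torch

    # fabricated NMS output rows: x1, y1, x2, y2, conf, cls
    pred = torch.tensor([[10., 20., 50., 60., 0.9, 3.],
                         [15., 25., 55., 65., 0.4, 7.]])
    print(pred[:, 4])  # confidences, same column as before
    print(pred[:, 5])  # class ids, previously pred[:, 6]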
@@ -209,7 +209,7 @@ def test(cfg,
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(prog='test.py')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
-    parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')
+    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
     parser.add_argument('--weights', type=str, default='weights/yolov3-spp.weights', help='path to weights file')
     parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
     parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')

utils/utils.py  View File

@@ -464,7 +464,7 @@ def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):
     Removes detections with lower object confidence score than 'conf_thres'
     Non-Maximum Suppression to further filter detections.
     Returns detections with shape:
-        (x1, y1, x2, y2, object_conf, class_conf, class)
+        (x1, y1, x2, y2, object_conf, conf, class)
     """

     # NMS method https://github.com/ultralytics/yolov3/issues/679 'OR', 'AND', 'MERGE', 'VISION', 'VISION_BATCHED'
     method = 'MERGE' if conf_thres <= 0.01 else 'VISION'  # MERGE is highest mAP, VISION is fastest
@@ -474,47 +474,35 @@ def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):
     output = [None] * len(prediction)
     for image_i, pred in enumerate(prediction):
-        # Experiment: Prior class size rejection
-        # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
-        # a = w * h  # area
-        # ar = w / (h + 1e-16)  # aspect ratio
-        # n = len(w)
-        # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar)
-        # shape_likelihood = np.zeros((n, 60), dtype=np.float32)
-        # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1)
-        # from scipy.stats import multivariate_normal
-        # for c in range(60):
-        #     shape_likelihood[:, c] =
-        #         multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2])
+        # Duplicate ambiguous
+        # b = pred[pred[:, 5:].sum(1) > 1.1]
+        # if len(b):
+        #     b[range(len(b)), 5 + b[:, 5:].argmax(1)] = 0
+        #     pred = torch.cat((pred, b), 0)

         # Multiply conf by class conf to get combined confidence
-        class_conf, class_pred = pred[:, 5:].max(1)
-        pred[:, 4] *= class_conf
+        conf, cls = pred[:, 4:].max(1)

         # # Merge classes (optional)
-        # class_pred[(class_pred.view(-1,1) == torch.LongTensor([2, 3, 5, 6, 7]).view(1,-1)).any(1)] = 2
+        # cls[(cls.view(-1,1) == torch.LongTensor([2, 3, 5, 6, 7]).view(1,-1)).any(1)] = 2
         # #
         # # Remove classes (optional)
-        # pred[class_pred != 2, 4] = 0.0
+        # pred[cls != 2, 4] = 0.0

         # Select only suitable predictions
-        i = (pred[:, 4] > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & (pred[:, 2:4] < max_wh).all(1) & \
-            torch.isfinite(pred).all(1)
+        i = (conf > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & (pred[:, 2:4] < max_wh).all(1) & torch.isfinite(
+            pred).all(1)
         pred = pred[i]

         # If none are remaining => process next image
         if len(pred) == 0:
             continue

-        # Select predicted classes
-        class_conf = class_conf[i]
-        class_pred = class_pred[i].unsqueeze(1).float()
-
         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
         pred[:, :4] = xywh2xyxy(pred[:, :4])

-        # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)
-        pred = torch.cat((pred[:, :5], class_conf.unsqueeze(1), class_pred), 1)
+        # Detections ordered as (x1y1x2y2, conf, cls)
+        pred = torch.cat((pred[:, :4], conf[i].unsqueeze(1), cls[i].unsqueeze(1).float()), 1)

         # Get detections sorted by decreasing confidence scores
         pred = pred[(-pred[:, 4]).argsort()]
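A compact standalone sketch of the refactored selection logic (random inputs; the nc, min_wh, max_wh, and conf_thres values are assumptions, not the repo's). Because every class column already carries combined confidence, one max over pred[:, 4:] yields both the best score and its class, replacing the old obj_conf * class_conf multiply:

    import torch

    nc, min_wh, max_wh, conf_thres = 80, 2, 4096, 0.3
    pred = torch.rand(100, 4 + nc)         # fabricated: x, y, w, h, per-class conf
    pred[:, 2:4] = pred[:, 2:4] * 100 + 3  # keep w, h inside (min_wh, max_wh)

    conf, cls = pred[:, 4:].max(1)  # best combined confidence and its class index
    i = (conf > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & \
        (pred[:, 2:4] < max_wh).all(1) & torch.isfinite(pred).all(1)

    # columns: 4 box values (xywh here; the repo converts to xyxy first), conf, cls
    det = torch.cat((pred[i, :4], conf[i].unsqueeze(1), cls[i].unsqueeze(1).float()), 1)
    print(det.shape)  # torch.Size([k, 6])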