updates

parent 178e1a346b
commit 6e1ff541c9

models.py
@@ -254,9 +254,14 @@ class YOLOLayer(nn.Module):
             xy = torch.sigmoid(p[..., 0:2]) + self.grid_xy  # x, y
             width_height = torch.exp(p[..., 2:4]) * self.anchor_wh  # width, height
             p_conf = torch.sigmoid(p[..., 4:5])  # Conf
-            # p_cls = torch.sigmoid(p[..., 5:85])  # Class
-            p_cls = F.softmax(p[..., 5:85], 2) * p_conf  # SSD-like conf
-            # p_cls = torch.exp(p[..., 5:85]) / torch.exp(p[..., 5:85]).sum(2).unsqueeze(2)  # * p_conf  # F.softmax() equivalent
+            p_cls = p[..., 5:85]
+            # Broadcasting only supported on first dimension in CoreML. See onnx-coreml/_operators.py
+            # p_cls = F.softmax(p_cls, 2) * p_conf  # SSD-like conf
+            p_cls = torch.exp(p_cls).permute(2, 1, 0)
+            p_cls = p_cls / p_cls.sum(0).unsqueeze(0) * p_conf.permute(2, 1, 0)  # F.softmax() equivalent
+            p_cls = p_cls.permute(2, 1, 0)

             return torch.cat((xy / nG, width_height, p_conf, p_cls), 2).squeeze().t()

         p[..., 0] = torch.sigmoid(p[..., 0]) + self.grid_x  # x
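The added lines replace F.softmax over the class dimension with a hand-rolled equivalent, because onnx-coreml only broadcasts along the first dimension (see the comment referencing onnx-coreml/_operators.py): the class axis is permuted to dim 0, exponentiated, normalised by its sum, scaled by the objectness confidence, and permuted back. A minimal standalone sketch checking that the rewrite matches the original expression; the 1x507x80 shapes are assumed from the output sizes noted later in this commit:

import torch
import torch.nn.functional as F

p_cls = torch.randn(1, 507, 80)   # class logits: 1 image x 507 boxes x 80 classes (assumed shapes)
p_conf = torch.rand(1, 507, 1)    # objectness confidence

ref = F.softmax(p_cls, 2) * p_conf                         # original SSD-like conf

x = torch.exp(p_cls).permute(2, 1, 0)                      # 80 x 507 x 1, class dim first
x = x / x.sum(0).unsqueeze(0) * p_conf.permute(2, 1, 0)    # divide/multiply broadcast on dim 0 only
out = x.permute(2, 1, 0)                                   # back to 1 x 507 x 80

print(torch.allclose(ref, out, atol=1e-6))                 # True: the two formulations agree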
@@ -68,13 +68,14 @@ def main():
     spec.neuralNetwork.preprocessing[0].featureName = '0'

     yolov3_model.save(name + '.mlmodel')
+    # yolov3_model.visualize_spec()
     print(spec.description)

     # 2.5. Try to Predict:
     from PIL import Image
     img = Image.open('../yolov3/data/samples/zidane_416.jpg')
-    out = yolov3_model.predict({'0': img})
-    print(out['141'].shape, out['143'].shape)
+    out = yolov3_model.predict({'0': img}, useCPUOnly=True)
+    print(out['148'].shape, out['150'].shape)

     # 3. Create NMS protobuf
     import numpy as np
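The predict call now runs on CPU only, and the raw output feature names move from '141'/'143' to '148'/'150' because the exported graph changed. As a hedged aside (not part of this commit), the current output names can be read from the saved spec rather than hard-coded; the file name below is illustrative:

import coremltools

model = coremltools.models.MLModel('yolov3.mlmodel')   # hypothetical file name
spec = model.get_spec()
print([o.name for o in spec.description.output])       # e.g. ['148', '150']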
@@ -106,15 +107,15 @@ def main():
     del ma_type.shape[:]

     nms = nms_spec.nonMaximumSuppression
-    nms.confidenceInputFeatureName = '141'  # 1x507x80
-    nms.coordinatesInputFeatureName = '143'  # 1x507x4
+    nms.confidenceInputFeatureName = '148'  # 1x507x80
+    nms.coordinatesInputFeatureName = '150'  # 1x507x4
     nms.confidenceOutputFeatureName = 'confidence'
     nms.coordinatesOutputFeatureName = 'coordinates'
     nms.iouThresholdInputFeatureName = 'iouThreshold'
     nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'

     nms.iouThreshold = 0.6
-    nms.confidenceThreshold = 0.9
+    nms.confidenceThreshold = 0.3
     nms.pickTop.perClass = True

     labels = np.loadtxt('../yolov3/data/coco.names', dtype=str, delimiter='\n')
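The hunk above points the NonMaximumSuppression spec at the renamed network outputs and relaxes the default confidence threshold from 0.9 to 0.3. A short sketch of how such a spec is typically finished, continuing from the nms_spec, nms and labels variables in the diff (this wiring is the common coremltools pattern, not code confirmed to be in this commit; the output file name is illustrative):

import coremltools

nms.stringClassLabels.vector.extend(labels)        # attach the COCO class names to the NMS spec

nms_model = coremltools.models.MLModel(nms_spec)   # wrap the protobuf spec in an MLModel
nms_model.save('yolov3_nms.mlmodel')               # illustrative file name

# iouThreshold / confidenceThreshold are declared as input features above, so a
# pipeline chaining the network with this NMS model can override the defaults
# (0.6 / 0.3) on every predict() call.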