diff --git a/detect.py b/detect.py
index a79b8021..d6419838 100644
--- a/detect.py
+++ b/detect.py
@@ -43,7 +43,13 @@ def detect(save_txt=False, save_img=False):
     # Export mode
     if ONNX_EXPORT:
         img = torch.zeros((1, 3) + img_size)  # (1, 3, 320, 192)
-        torch.onnx.export(model, img, 'weights/export.onnx', verbose=True)
+        torch.onnx.export(model, img, 'weights/export.onnx', verbose=False, opset_version=11)
+
+        # Validate exported model
+        import onnx
+        model = onnx.load('weights/export.onnx')  # Load the ONNX model
+        onnx.checker.check_model(model)  # Check that the IR is well formed
+        print(onnx.helper.printable_graph(model.graph))  # Print a human-readable representation of the graph
         return

     # Half precision
@@ -148,7 +154,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
    parser.add_argument('--data', type=str, default='data/coco.data', help='coco.data file path')
-    parser.add_argument('--weights', type=str, default='weights/yolov3-spp.weights', help='path to weights file')
+    parser.add_argument('--weights', type=str, default='weights/last49.pt', help='path to weights file')
     parser.add_argument('--source', type=str, default='data/samples', help='source')  # input file/folder, 0 for webcam
     parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder
     parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
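
A minimal sketch (not part of this diff) of how the exported graph could be exercised outside PyTorch, assuming onnxruntime is installed; the path 'weights/export.onnx' and the (1, 3, 320, 192) dummy-input shape come from the export call above, everything else is illustrative.

    # Sanity-check the exported ONNX model with onnxruntime (assumed installed)
    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession('weights/export.onnx')    # load the exported graph
    input_name = sess.get_inputs()[0].name                # input name assigned by the exporter
    dummy = np.zeros((1, 3, 320, 192), dtype=np.float32)  # same shape as the export dummy input
    outputs = sess.run(None, {input_name: dummy})         # run all graph outputs
    print([o.shape for o in outputs])                     # inspect output shapes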