diff --git a/detect.py b/detect.py
index 64a7867d..d4ad635e 100644
--- a/detect.py
+++ b/detect.py
@@ -77,7 +77,6 @@ def detect(save_img=False):
     # Run inference
     t0 = time.time()
     for path, img, im0s, vid_cap in dataset:
-        t = time.time()
         img = torch.from_numpy(img).to(device)
         img = img.half() if half else img.float()  # uint8 to fp16/32
         img /= 255.0  # 0 - 255 to 0.0 - 1.0
@@ -85,7 +84,9 @@ def detect(save_img=False):
             img = img.unsqueeze(0)
 
         # Inference
+        t1 = torch_utils.time_synchronized()
         pred = model(img)[0].float() if half else model(img)[0]
+        t2 = torch_utils.time_synchronized()
 
         # Apply NMS
         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
@@ -123,7 +124,7 @@ def detect(save_img=False):
                         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])
 
             # Print time (inference + NMS)
-            print('%sDone. (%.3fs)' % (s, time.time() - t))
+            print('%sDone. (%.3fs)' % (s, t2 - t1))
 
             # Stream results
             if view_img:
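
Note on the change: CUDA kernels launch asynchronously, so a plain time.time() taken on the host can under-report how long the GPU actually spent on the forward pass. Synchronizing before reading the clock makes the t1/t2 interval bracket the inference step reliably. The helper is assumed to follow the common pattern below; this is a sketch, not necessarily the exact code in utils/torch_utils.py.

    import time
    import torch

    def time_synchronized():
        # Wait for all queued CUDA work to finish before reading the clock,
        # so the measured interval reflects completed GPU computation.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return time.time()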