From 1430a1e4083609ab197cf1947a12ab8692b20593 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 4 Mar 2020 10:26:35 -0800
Subject: [PATCH] updates

---
 test.py              | 10 +++++-----
 utils/torch_utils.py |  6 ++++++
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/test.py b/test.py
index 52a29061..1c04af78 100644
--- a/test.py
+++ b/test.py
@@ -84,18 +84,18 @@ def test(cfg,
         # Disable gradients
         with torch.no_grad():
             # Run model
-            t = time.time()
+            t = torch_utils.time_synchronized()
             inf_out, train_out = model(imgs)  # inference and training outputs
-            t0 += time.time() - t
+            t0 += torch_utils.time_synchronized() - t
 
             # Compute loss
             if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                 loss += compute_loss(train_out, targets, model)[1][:3].cpu()  # GIoU, obj, cls
 
             # Run NMS
-            t = time.time()
-            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
-            t1 += time.time() - t
+            t = torch_utils.time_synchronized()
+            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)  # nms
+            t1 += torch_utils.time_synchronized() - t
 
         # Statistics per image
         for si, pred in enumerate(output):
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index a93b79d1..869575de 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -1,4 +1,5 @@
 import os
+import time
 
 import torch
 import torch.backends.cudnn as cudnn
@@ -40,6 +41,11 @@ def select_device(device='', apex=False, batch_size=None):
     return torch.device('cuda:0' if cuda else 'cpu')
 
 
+def time_synchronized():
+    torch.cuda.synchronize() if torch.cuda.is_available() else None
+    return time.time()
+
+
 def fuse_conv_and_bn(conv, bn):
     # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
     with torch.no_grad():
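
Context for the change above: CUDA kernels launch asynchronously, so reading time.time() right after a GPU call measures mostly launch overhead, not the actual compute time. The patch therefore swaps time.time() for a torch_utils.time_synchronized() helper that waits for queued CUDA work to finish before reading the clock. Below is a minimal, self-contained sketch of that pattern, not part of the patch itself; the toy Conv2d model, input shape, and device selection are illustrative stand-ins for the detection model and images used in test.py.

    # Sketch (assumption: illustrative model and input, not the repo's detection model)
    import time

    import torch


    def time_synchronized():
        # Wait for all queued CUDA kernels to finish before reading the wall clock
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return time.time()


    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model = torch.nn.Conv2d(3, 64, 3, padding=1).to(device).eval()  # stand-in model
    x = torch.randn(16, 3, 416, 416, device=device)  # stand-in image batch

    with torch.no_grad():
        t = time_synchronized()
        y = model(x)  # forward pass being timed
        print('inference: %.1f ms' % ((time_synchronized() - t) * 1e3))

On a CPU-only machine the synchronize call is skipped and this reduces to plain time.time(), which is why the helper in utils/torch_utils.py guards the call with torch.cuda.is_available().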