Glenn Jocher 2020-03-04 10:26:35 -08:00
parent 35eae3ace9
commit 1430a1e408
2 changed files with 11 additions and 5 deletions

test.py

@@ -84,18 +84,18 @@ def test(cfg,
         # Disable gradients
         with torch.no_grad():
             # Run model
-            t = time.time()
+            t = torch_utils.time_synchronized()
             inf_out, train_out = model(imgs)  # inference and training outputs
-            t0 += time.time() - t
+            t0 += torch_utils.time_synchronized() - t

             # Compute loss
             if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                 loss += compute_loss(train_out, targets, model)[1][:3].cpu()  # GIoU, obj, cls

             # Run NMS
-            t = time.time()
-            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
-            t1 += time.time() - t
+            t = torch_utils.time_synchronized()
+            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)  # nms
+            t1 += torch_utils.time_synchronized() - t

         # Statistics per image
         for si, pred in enumerate(output):

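Why the change: PyTorch CUDA operations execute asynchronously, so a bare time.time() taken right after a GPU call can return before the queued kernels have finished, under-reporting inference and NMS latency. The new time_synchronized() helper calls torch.cuda.synchronize() before reading the clock, so the measured interval includes the actual GPU execution. A minimal sketch of the difference (requires a CUDA device; the Linear model and tensor names are illustrative, not from this commit):

import time
import torch

def time_synchronized():
    # block until all queued CUDA kernels finish, then read the wall clock
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()

model = torch.nn.Linear(4096, 4096).cuda()  # placeholder workload
imgs = torch.randn(256, 4096, device='cuda')

# warm-up so one-time CUDA initialization does not skew the comparison
for _ in range(3):
    _ = model(imgs)
torch.cuda.synchronize()

t = time.time()
_ = model(imgs)            # kernel is only queued here, not finished
naive = time.time() - t    # clock read before the GPU work completes

t = time_synchronized()
_ = model(imgs)
synced = time_synchronized() - t  # includes the actual GPU execution time

print(f'naive: {naive * 1e3:.3f} ms  synchronized: {synced * 1e3:.3f} ms')

On CPU-only machines the guard on torch.cuda.is_available() makes the helper degrade to plain time.time(), so callers need no device checks of their own.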
utils/torch_utils.py

@@ -1,4 +1,5 @@
 import os
+import time

 import torch
 import torch.backends.cudnn as cudnn
@@ -40,6 +41,11 @@ def select_device(device='', apex=False, batch_size=None):
     return torch.device('cuda:0' if cuda else 'cpu')


+def time_synchronized():
+    torch.cuda.synchronize() if torch.cuda.is_available() else None
+    return time.time()
+
+
 def fuse_conv_and_bn(conv, bn):
     # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
     with torch.no_grad():
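Taken together, test.py accumulates the synchronized intervals into t0 (inference) and t1 (NMS) across its evaluation loop. A hedged sketch of that accumulation pattern, assuming the helper is importable as utils.torch_utils per the repo layout; the model, batch, and iteration count are placeholders, not names from this commit:

import torch
from utils import torch_utils  # assumed import path, matching how test.py references the helper

# Placeholder model/input; any module and batch work the same way.
model = torch.nn.Conv2d(3, 16, 3).eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
batch = torch.randn(8, 3, 64, 64, device=device)

t0, n = 0.0, 10
with torch.no_grad():
    for _ in range(n):
        t = torch_utils.time_synchronized()
        _ = model(batch)                           # forward pass, as in test.py
        t0 += torch_utils.time_synchronized() - t  # accumulate synchronized wall time

print(f'average forward time: {t0 / n * 1e3:.2f} ms over {n} runs')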