import os

import torch


def init_seeds(seed=0):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Remove randomness (may be slower on Tesla GPUs) https://pytorch.org/docs/stable/notes/randomness.html
    if seed == 0:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
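

# Usage sketch (illustrative helper, not part of the original file): two calls to
# init_seeds() with the same seed should yield identical draws from torch's RNG.
def _check_init_seeds():
    init_seeds(0)
    a = torch.rand(3)
    init_seeds(0)
    b = torch.rand(3)
    assert torch.equal(a, b), 'init_seeds(0) should make torch.rand reproducible'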


def select_device(device='', apex=False):
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request:  # if device requested other than 'cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availability

    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2  # bytes to MB
        ng = torch.cuda.device_count()
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        cuda_str = 'Using CUDA ' + ('Apex ' if apex else '')  # apex for mixed precision https://github.com/NVIDIA/apex
        for i in range(ng):
            if i == 1:  # after the first line, indent with spaces to align device entries
                cuda_str = ' ' * len(cuda_str)
            print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                  (cuda_str, i, x[i].name, x[i].total_memory / c))
    else:
        print('Using CPU')

    print('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
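

# Usage sketch (hypothetical helper, not part of the original file): select_device()
# pins CUDA_VISIBLE_DEVICES before returning, so '0,1' exposes exactly those GPUs,
# while '' falls back to 'cuda:0' if available, else CPU.
def _demo_select_device():
    device = select_device('')  # '' -> first available GPU, or CPU
    x = torch.zeros(1, device=device)  # new tensors land on the selected device
    print(x.device)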


def fuse_conv_and_bn(conv, bn):
    # Fuse a Conv2d layer and its trailing BatchNorm2d into a single equivalent Conv2d
    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    with torch.no_grad():
        # init
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    bias=True)

        # prepare filters
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

        # prepare spatial bias
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            b_conv = torch.zeros(conv.weight.size(0))
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(b_conv + b_bn)

        return fusedconv
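

# Self-check sketch (hypothetical helper, not part of the original file): with the
# BatchNorm in eval mode, the fused layer should reproduce bn(conv(x)) to within
# floating-point tolerance.
def _check_fuse_conv_and_bn():
    conv = torch.nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1, bias=False)
    bn = torch.nn.BatchNorm2d(8).eval()  # eval() so running stats are used
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 16, 16)
    assert torch.allclose(fused(x), bn(conv(x)), atol=1e-5), 'fused output mismatch'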


def model_info(model, report='summary'):
    # Prints a line-by-line description of a PyTorch model
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if report == 'full':
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
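

# Usage sketch (hypothetical helper, not part of the original file): works with any
# torch.nn model; report='full' adds a per-parameter table above the summary line.
def _demo_model_info():
    model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3), torch.nn.BatchNorm2d(16))
    model_info(model, report='full')  # per-parameter table + summary
    model_info(model)  # summary line only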