import os

import torch


def init_seeds(seed=0):
    torch.manual_seed(seed)

    # Remove randomness (may be slower on Tesla GPUs)  # https://pytorch.org/docs/stable/notes/randomness.html
    if seed == 0:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
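
# Usage sketch (illustrative, not part of the original module): a seed of 0 also
# switches cuDNN into deterministic mode, so repeated runs produce identical results.
#     init_seeds(0)  # reproducible runs (deterministic cuDNN, benchmark off)
#     init_seeds(7)  # seeded, but cuDNN autotuning left at its defaults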


def select_device(device='', apex=False, batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request:  # if device requested other than 'cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device  # check availability

    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2  # bytes to MB
        ng = torch.cuda.device_count()
        if ng > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = 'Using CUDA ' + ('Apex ' if apex else '')  # Apex for mixed precision https://github.com/NVIDIA/apex
        for i in range(ng):
            if i == 1:
                s = ' ' * len(s)
            print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                  (s, i, x[i].name, x[i].total_memory / c))
    else:
        print('Using CPU')

    print('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
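
# Usage sketch (illustrative; device strings follow the comment above, and 'model'
# is assumed to be an existing nn.Module):
#     device = select_device('')                     # first available GPU, else CPU
#     device = select_device('0,1', batch_size=16)   # two GPUs; batch size must divide evenly
#     model = model.to(device)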


def fuse_conv_and_bn(conv, bn):
    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    with torch.no_grad():
        # init
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    bias=True)

        # prepare filters
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

        # prepare spatial bias
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            b_conv = torch.zeros(conv.weight.size(0))
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(b_conv + b_bn)

        return fusedconv
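
# Verification sketch (illustrative; assumes an adjacent Conv2d/BatchNorm2d pair in
# eval() mode, so the fused layer should reproduce the two-layer output):
#     conv, bn = torch.nn.Conv2d(3, 16, 3, padding=1), torch.nn.BatchNorm2d(16)
#     conv.eval(), bn.eval()
#     x = torch.randn(1, 3, 32, 32)
#     fused = fuse_conv_and_bn(conv, bn)
#     print(torch.allclose(bn(conv(x)), fused(x), atol=1e-6))  # expect True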


def model_info(model, report='summary'):
    # Prints a line-by-line description of a PyTorch model
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if report == 'full':
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
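
# Usage sketch (illustrative; 'model' is assumed to be any nn.Module):
#     model_info(model)                  # one-line summary
#     model_info(model, report='full')   # per-parameter table (name, shape, mu, sigma)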


def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output
    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch#torchvision
    model = pretrainedmodels.__dict__[name](num_classes=1000, pretrained='imagenet')

    # Display model properties
    for x in ['model.input_size', 'model.input_space', 'model.input_range', 'model.mean', 'model.std']:
        print(x + ' =', eval(x))

    # Reshape output to n classes
    filters = model.last_linear.weight.shape[1]
    model.last_linear.bias = torch.nn.Parameter(torch.zeros(n))
    model.last_linear.weight = torch.nn.Parameter(torch.zeros(n, filters))
    model.last_linear.out_features = n
    return model
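
# Usage sketch (illustrative; requires the optional 'pretrainedmodels' package):
#     model = load_classifier(name='resnet101', n=2)  # ImageNet weights, new 2-class head
#     # the reshaped last_linear starts at zero, so the new head must be (re)trained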


from collections import defaultdict
from torch.optim import Optimizer


class Lookahead(Optimizer):
    # Lookahead optimizer wrapper (https://arxiv.org/abs/1907.08610):
    # the wrapped (fast) optimizer takes k steps, then the slow weights are
    # moved a fraction alpha toward the fast weights and copied back.
    def __init__(self, optimizer, k=5, alpha=0.5):
        self.optimizer = optimizer  # inner (fast) optimizer
        self.k = k  # synchronization period
        self.alpha = alpha  # slow-weights step size
        self.param_groups = self.optimizer.param_groups
        self.state = defaultdict(dict)
        self.fast_state = self.optimizer.state
        for group in self.param_groups:
            group["counter"] = 0

    def update(self, group):
        # Interpolate the slow weights toward the fast weights, then copy back into the model
        for fast in group["params"]:
            param_state = self.state[fast]
            if "slow_param" not in param_state:
                param_state["slow_param"] = torch.zeros_like(fast.data)
                param_state["slow_param"].copy_(fast.data)
            slow = param_state["slow_param"]
            slow += (fast.data - slow) * self.alpha
            fast.data.copy_(slow)

    def update_lookahead(self):
        for group in self.param_groups:
            self.update(group)

    def step(self, closure=None):
        # One fast step; every k steps the slow weights are synchronized via update()
        loss = self.optimizer.step(closure)
        for group in self.param_groups:
            if group["counter"] == 0:
                self.update(group)
            group["counter"] += 1
            if group["counter"] >= self.k:
                group["counter"] = 0
        return loss

    def state_dict(self):
        fast_state_dict = self.optimizer.state_dict()
        slow_state = {
            (id(k) if isinstance(k, torch.Tensor) else k): v
            for k, v in self.state.items()
        }
        fast_state = fast_state_dict["state"]
        param_groups = fast_state_dict["param_groups"]
        return {
            "fast_state": fast_state,
            "slow_state": slow_state,
            "param_groups": param_groups,
        }

    def load_state_dict(self, state_dict):
        slow_state_dict = {
            "state": state_dict["slow_state"],
            "param_groups": state_dict["param_groups"],
        }
        fast_state_dict = {
            "state": state_dict["fast_state"],
            "param_groups": state_dict["param_groups"],
        }
        super(Lookahead, self).load_state_dict(slow_state_dict)
        self.optimizer.load_state_dict(fast_state_dict)
        self.fast_state = self.optimizer.state

    def add_param_group(self, param_group):
        param_group["counter"] = 0
        self.optimizer.add_param_group(param_group)
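
# Usage sketch (illustrative; 'model' and 'loss' are assumed to exist, and any
# torch.optim optimizer can serve as the inner/fast optimizer):
#     base = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
#     optimizer = Lookahead(base, k=5, alpha=0.5)  # sync slow weights every k steps
#     base.zero_grad()
#     loss.backward()
#     optimizer.step()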