Glenn Jocher 2020-01-17 10:55:30 -08:00
parent 1bc50ebfab
commit a8e1390028
4 changed files with 12 additions and 11 deletions

View File

@@ -25,7 +25,7 @@ def detect(save_img=False):
     if weights.endswith('.pt'):  # pytorch format
         model.load_state_dict(torch.load(weights, map_location=device)['model'])
     else:  # darknet format
-        _ = load_darknet_weights(model, weights)
+        load_darknet_weights(model, weights)
 
     # Second-stage classifier
     classify = False
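Note: the branch above follows the standard PyTorch checkpoint pattern (torch.load plus load_state_dict), falling back to the repo's darknet loader for non-.pt files; after this commit the loader's return value is simply ignored. A minimal standalone sketch of that dispatch, with the helper name, device argument, and import path chosen here for illustration:

import torch
from models import load_darknet_weights  # module path assumed, not shown in this diff

def load_any_weights(model, weights, device='cpu'):
    # Illustrative helper (not part of the repo): pick a loader by file extension.
    if weights.endswith('.pt'):  # PyTorch checkpoint saved as {'model': state_dict, ...}
        ckpt = torch.load(weights, map_location=device)
        model.load_state_dict(ckpt['model'])
    else:  # darknet-format weights, e.g. '*.weights' or '*.conv.*'
        load_darknet_weights(model, weights)  # called for its side effect only
    return model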

View File

@@ -351,8 +351,6 @@ def load_darknet_weights(self, weights, cutoff=-1):
             conv_layer.weight.data.copy_(conv_w)
             ptr += num_w
 
-    return cutoff
-
 
 def save_weights(self, path='model.weights', cutoff=-1):
     # Converts a PyTorch model to Darket format (*.pt to *.weights)
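With the return statement removed, load_darknet_weights is now called purely for its side effect of copying values into the model, and the callers elsewhere in this commit drop their `_ =` and `cutoff =` assignments accordingly. For orientation, darknet *.weights files are generally a small integer header followed by raw float32 values; a hedged sketch of reading that layout with NumPy (the field layout is assumed from common darknet tooling, not shown in this diff):

import numpy as np

# Assumed layout: 3 x int32 version fields, one int64 'images seen' counter,
# then every remaining value is a float32 parameter consumed in layer order.
with open('yolov3.weights', 'rb') as f:            # path is illustrative
    version = np.fromfile(f, dtype=np.int32, count=3)
    seen = np.fromfile(f, dtype=np.int64, count=1)
    values = np.fromfile(f, dtype=np.float32)      # flat parameter buffer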

View File

@@ -35,7 +35,7 @@ def test(cfg,
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(torch.load(weights, map_location=device)['model'])
        else:  # darknet format
-           _ = load_darknet_weights(model, weights)
+           load_darknet_weights(model, weights)
 
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)

View File

@@ -84,12 +84,15 @@ def train():
     model = Darknet(cfg, arc=opt.arc).to(device)
 
     # Optimizer
-    pg0, pg1 = [], []  # optimizer parameter groups
+    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
     for k, v in dict(model.named_parameters()).items():
-        if 'Conv2d.weight' in k:
-            pg1 += [v]  # parameter group 1 (apply weight_decay)
+        print(k)
+        if '.bias' in k:
+            pg2 += [v]  # biases
+        elif 'Conv2d.weight' in k:
+            pg1 += [v]  # apply weight_decay
         else:
-            pg0 += [v]  # parameter group 0
+            pg0 += [v]  # all else
 
     if opt.adam:
         optimizer = optim.Adam(pg0, lr=hyp['lr0'])
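The new grouping splits parameters three ways so weight decay can be applied selectively: biases go to pg2, convolution weights to pg1 (the only group that will receive weight_decay), and everything else, such as BatchNorm weights, to pg0. A minimal sketch of the same name-based split on a toy block whose sub-modules are registered under type-style names (assumed to mirror how the repo's Darknet modules appear in named_parameters(); the toy block itself is illustrative):

import torch.nn as nn

# Toy block registered so keys look like 'Conv2d.weight', 'BatchNorm2d.weight',
# 'BatchNorm2d.bias', matching the substring checks used above.
block = nn.Sequential()
block.add_module('Conv2d', nn.Conv2d(3, 16, 3, bias=False))
block.add_module('BatchNorm2d', nn.BatchNorm2d(16))

pg0, pg1, pg2 = [], [], []  # other weights, conv weights (decayed), biases
for k, v in dict(block.named_parameters()).items():
    if '.bias' in k:
        pg2 += [v]             # biases -> no weight decay
    elif 'Conv2d.weight' in k:
        pg1 += [v]             # conv weights -> weight decay applied later
    else:
        pg0 += [v]             # e.g. BatchNorm2d.weight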
@@ -97,12 +100,12 @@ def train():
     else:
         optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
     optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
-    del pg0, pg1
+    optimizer.add_param_group({'params': pg2})  # add pg2
+    del pg0, pg1, pg2
 
     # https://github.com/alphadl/lookahead.pytorch
     # optimizer = torch_utils.Lookahead(optimizer, k=5, alpha=0.5)
 
-    cutoff = -1  # backbone reaches to cutoff layer
     start_epoch = 0
     best_fitness = float('inf')
     attempt_download(weights)
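With the groups built, pg0 seeds the optimizer and the other two are attached afterwards: pg1 gets weight_decay, while pg2 is added with no extra options and therefore inherits the optimizer defaults (lr, momentum, and weight_decay=0 for an SGD constructed without it). A short sketch of that wiring, continuing from the toy grouping sketch above; the hyperparameter values are placeholders, not taken from this diff:

import torch.optim as optim

hyp = {'lr0': 0.01, 'momentum': 0.9, 'weight_decay': 5e-4}  # illustrative values

optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # decay conv weights only
optimizer.add_param_group({'params': pg2})  # biases inherit lr/momentum, keep weight_decay at 0
del pg0, pg1, pg2  # the optimizer holds references; the temporary lists are no longer needed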
@@ -134,7 +137,7 @@ def train():
     elif len(weights) > 0:  # darknet format
         # possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.
-        cutoff = load_darknet_weights(model, weights)
+        load_darknet_weights(model, weights)
 
     # Scheduler https://github.com/ultralytics/yolov3/issues/238
     # lf = lambda x: 1 - x / epochs  # linear ramp to zero
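The commented-out linear ramp referenced above maps directly onto PyTorch's LambdaLR, which scales each parameter group's base lr by the lambda's value at the current epoch. A hedged sketch of that idea, reusing the optimizer from the previous sketch; the epoch count is a placeholder:

from torch.optim.lr_scheduler import LambdaLR

epochs = 300                       # placeholder total epoch count
lf = lambda x: 1 - x / epochs      # linear ramp from 1.0 down to 0.0
scheduler = LambdaLR(optimizer, lr_lambda=lf)

# Typical usage: call scheduler.step() once per epoch, after optimizer.step().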