updates
commit b025b3123e
parent 308eda38fd
train.py
2
train.py
|
@@ -138,7 +138,7 @@ def train(cfg,
     # lf = lambda x: 10 ** (hyp['lrf'] * x / epochs)  # exp ramp
     # lf = lambda x: 1 - 10 ** (hyp['lrf'] * (1 - x / epochs))  # inverse exp ramp
     # scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
-    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(opt.epochs * x) for x in (0.8, 0.9)], gamma=0.1)
+    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(opt.epochs * x) for x in [0.8]], gamma=0.1)
     scheduler.last_epoch = start_epoch - 1

     # # Plot lr schedule
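
Note: this narrows the MultiStepLR milestones from (0.8, 0.9) to [0.8], so the learning rate now decays 10x once, at 80% of training, rather than again at 90%. A minimal runnable sketch of the same setup; the model, base lr, and epoch count are stand-ins, not values from this commit:

    # Sketch only: lr=0.01 and epochs=100 are assumed, not taken from the commit.
    import torch
    from torch.optim import lr_scheduler

    model = torch.nn.Linear(10, 2)  # stand-in model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    epochs = 100

    # One 10x decay at 80% of training; the old tuple (0.8, 0.9) added a second decay at 90%.
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[round(epochs * x) for x in [0.8]],
                                         gamma=0.1)

    for epoch in range(epochs):
        optimizer.step()  # placeholder for one epoch of training
        scheduler.step()  # lr stays 0.01 through epoch 79, then 0.001 from epoch 80 on

The unchanged context line scheduler.last_epoch = start_epoch - 1 is the resume trick here: it fast-forwards the scheduler so a restarted run picks up the already-decayed rate on its next step().
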
@@ -18,7 +18,7 @@ def select_device(force_cpu=False):
     if cuda:
         try:  # Mixed precision training https://github.com/NVIDIA/apex
             from apex import amp
-            apex_str = 'with Apex '
+            apex_str = 'Apex '
         except:
             apex_str = ''

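
Note: only the status string changes here ('with Apex ' becomes 'Apex '); the surrounding probe-for-Apex pattern is untouched. A self-contained sketch of that pattern, with the bare except narrowed to ImportError and an assumed print format (the real function's message and device handling may differ):

    # Sketch only: the device string and print format are assumptions, not the commit's code.
    import torch

    def select_device(force_cpu=False):
        cuda = torch.cuda.is_available() and not force_cpu
        device = torch.device('cuda:0' if cuda else 'cpu')
        apex_str = ''
        if cuda:
            try:  # mixed precision training https://github.com/NVIDIA/apex
                from apex import amp  # noqa: F401  (the import itself is the capability probe)
                apex_str = 'Apex '
            except ImportError:  # Apex not installed: fall back to full precision
                pass
            print('Using CUDA %sdevice' % apex_str)
        else:
            print('Using CPU')
        return device
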