Adam optimizer
parent 538e5741c6
commit 664cbaab09
train.py (9 changed lines)
@@ -62,8 +62,9 @@ def main(opt):
         # p.requires_grad = False
 
         # Set optimizer
-        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
-        # optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3, momentum=.9, weight_decay=5e-4, nesterov=True)
+        # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
+        optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3, momentum=.9,
+                                    weight_decay=5e-4, nesterov=True)
 
         start_epoch = checkpoint['epoch'] + 1
         if checkpoint['optimizer'] is not None:
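This first hunk touches the resume branch: the optimizer is rebuilt over only the trainable parameters, then the saved epoch and optimizer state are restored. A minimal, self-contained sketch of how that path reads after the change; the Linear model and in-memory checkpoint below are placeholders, not part of train.py:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)  # stand-in for the real detection model

    # Pretend an earlier run saved model and optimizer state at epoch 10.
    prev_opt = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9,
                               weight_decay=5e-4, nesterov=True)
    checkpoint = {'epoch': 10, 'model': model.state_dict(),
                  'optimizer': prev_opt.state_dict()}

    model.load_state_dict(checkpoint['model'])

    # Only parameters with requires_grad=True reach the optimizer, so layers
    # frozen via p.requires_grad = False are excluded from updates.
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                lr=1e-3, momentum=.9, weight_decay=5e-4, nesterov=True)

    start_epoch = checkpoint['epoch'] + 1
    if checkpoint['optimizer'] is not None:
        # Caveat: state saved by Adam cannot be loaded into SGD (and vice versa);
        # the hyperparameter keys and per-parameter buffers differ between the two.
        optimizer.load_state_dict(checkpoint['optimizer'])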
@@ -84,8 +85,8 @@ def main(opt):
         model.to(device).train()
 
         # Set optimizer
-        optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=5e-4)
-        # optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9, weight_decay=5e-4, nesterov=True)
+        # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=5e-4)
+        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9, weight_decay=5e-4, nesterov=True)
 
         # Set scheduler
         # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[54, 61], gamma=0.1)
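The second hunk is the train-from-scratch branch, where the MultiStepLR scheduler stays commented out. A rough sketch of that path with the scheduler enabled, using a placeholder model, batch, and loss that are not part of this commit:

    import torch
    import torch.nn as nn

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = nn.Linear(4, 2)  # placeholder for the real model
    model.to(device).train()

    # Set optimizer: the SGD line enabled by this hunk.
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9,
                                weight_decay=5e-4, nesterov=True)

    # Set scheduler: if uncommented, the learning rate is multiplied by 0.1
    # at epochs 54 and 61.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[54, 61], gamma=0.1)

    for epoch in range(3):                    # stand-in for the real epoch loop
        x = torch.randn(8, 4, device=device)  # placeholder batch
        loss = model(x).pow(2).mean()         # placeholder loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()                      # one scheduler step per epoch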