From bd498ae776cd5ccad9c111311e5ad8698c5c33d3 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 20 Nov 2019 13:14:24 -0800
Subject: [PATCH] updates

---
 train.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 1cde2012..dc8dce05 100644
--- a/train.py
+++ b/train.py
@@ -173,7 +173,7 @@ def train():
         model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
 
     # Initialize distributed training
-    if torch.cuda.device_count() > 1:
+    if device.type != 'cpu' and torch.cuda.device_count() > 1:
         dist.init_process_group(backend='nccl',  # 'distributed backend'
                                 init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                 world_size=1,  # number of nodes for distributed training
@@ -418,6 +418,8 @@ if __name__ == '__main__':
     opt.weights = last if opt.resume else opt.weights
     print(opt)
     device = torch_utils.select_device(opt.device, apex=mixed_precision)
+    if device.type == 'cpu':
+        mixed_precision = False
 
     # scale hyp['obj'] by img_size (evolved at 416)
     hyp['obj'] *= opt.img_size / 416.
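
Note: a minimal standalone sketch of the guard pattern this patch introduces: Apex mixed precision and the NCCL process group are both CUDA-only, so each is skipped when the selected device is the CPU. The pick_device helper below is a hypothetical stand-in for torch_utils.select_device; the init_method address, world_size, and rank mirror the values already in train.py.

import torch
import torch.distributed as dist

def pick_device():
    # Hypothetical stand-in for torch_utils.select_device:
    # fall back to CPU when CUDA is unavailable.
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

mixed_precision = True
device = pick_device()

# Apex/AMP mixed precision requires CUDA, so disable it on CPU-only machines.
if device.type == 'cpu':
    mixed_precision = False

# The NCCL backend is CUDA-only; guarding on device.type keeps CPU runs
# from crashing in init_process_group when no GPU is present.
if device.type != 'cpu' and torch.cuda.device_count() > 1:
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:9999',
                            world_size=1,
                            rank=0)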