From ce4ea0c332baf1701f6e0d988f8f7ab630903f6e Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 24 Jul 2019 18:22:59 +0200
Subject: [PATCH] updates

---
 train.py             | 1 +
 utils/torch_utils.py | 8 +-------
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/train.py b/train.py
index e21f0728..be7ffb3d 100644
--- a/train.py
+++ b/train.py
@@ -162,6 +162,7 @@ def train(cfg,
     # Mixed precision training https://github.com/NVIDIA/apex
     if mixed_precision:
         model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
+        print('Using Apex')
 
     # Initialize distributed training
     if torch.cuda.device_count() > 1:
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index b61050fa..6dabe6ae 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -16,17 +16,11 @@ def select_device(force_cpu=False):
     if not cuda:
         print('Using CPU')
     if cuda:
-        try:  # Mixed precision training https://github.com/NVIDIA/apex
-            from apex import amp
-            apex_str = 'Apex '
-        except:
-            apex_str = ''
-
         torch.backends.cudnn.benchmark = True  # set False for reproducible results
         c = 1024 ** 2  # bytes to MB
         ng = torch.cuda.device_count()
         x = [torch.cuda.get_device_properties(i) for i in range(ng)]
-        cuda_str = 'Using CUDA ' + apex_str
+        cuda_str = 'Using CUDA '
         for i in range(0, ng):
             if i == 1:
                 # torch.cuda.set_device(0)  # OPTIONAL: Set GPU ID
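
Note (not part of the patch): the removed try/except in select_device() only probed whether Apex could be imported so the device banner could say 'Apex'; since train.py already calls amp.initialize() behind a mixed_precision flag, the availability check is expected to live with the training code instead. A minimal sketch of such a guard, assuming a module-level mixed_precision flag in train.py (the flag name and placement are assumptions, not confirmed by this diff):

    # Sketch only: Apex availability guard at the top of train.py (assumed layout).
    mixed_precision = True
    try:  # Mixed precision training https://github.com/NVIDIA/apex
        from apex import amp
    except ImportError:
        mixed_precision = False  # fall back to FP32 training if Apex is unavailable

    # Later, once the model and optimizer exist (as in the hunk above):
    # if mixed_precision:
    #     model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
    #     print('Using Apex')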