select GPU0 if multiple available
commit c828f5459f (parent ee4abc8cdf)

train.py (14 lines changed)
@@ -43,11 +43,11 @@ def train(
     if resume:
         checkpoint = torch.load(latest, map_location='cpu')

+        # Load weights to resume from
         model.load_state_dict(checkpoint['model'])
-        if torch.cuda.device_count() > 1:
-            raise Exception('Multi-GPU issue: https://github.com/ultralytics/yolov3/issues/21')
-            # print('Using ', torch.cuda.device_count(), ' GPUs')
-            # model = nn.DataParallel(model)
+
+        # if torch.cuda.device_count() > 1:
+        #     model = nn.DataParallel(model)
         model.to(device).train()

         # # Transfer learning (train only YOLO layers)
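The context lines above show the resume path: the checkpoint is loaded onto the CPU first (map_location='cpu'), the weights are restored, and only then is the model moved to the selected device. A minimal self-contained sketch of that pattern; the nn.Linear stand-in and the 'latest.pt' filename are placeholders rather than the repo's actual model and checkpoint path:

import torch
import torch.nn as nn

# Illustrative resume pattern: save a checkpoint, load it back onto the CPU,
# restore the weights, then move the model to the target device.
model = nn.Linear(10, 2)  # placeholder for the real Darknet model
torch.save({'model': model.state_dict()}, 'latest.pt')  # pretend an earlier run saved this

checkpoint = torch.load('latest.pt', map_location='cpu')
model.load_state_dict(checkpoint['model'])

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device).train()

Loading onto the CPU first keeps resume independent of how many GPUs the original run used; the weights only move to the GPU in the final .to(device) call.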
@@ -72,10 +72,8 @@ def train(
         # Initialize model with darknet53 weights (optional)
         load_darknet_weights(model, os.path.join(weights, 'darknet53.conv.74'))

-        if torch.cuda.device_count() > 1:
-            raise Exception('Multi-GPU not currently supported: https://github.com/ultralytics/yolov3/issues/21')
-            # print('Using ', torch.cuda.device_count(), ' GPUs')
-            # model = nn.DataParallel(model)
+        # if torch.cuda.device_count() > 1:
+        #     model = nn.DataParallel(model)
         model.to(device).train()

         # Set optimizer
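Both train.py hunks drop the hard multi-GPU Exception and leave the nn.DataParallel wrapper commented out, so training now proceeds on a single GPU. For reference, a minimal sketch of what re-enabling that commented-out path would look like once issue #21 is resolved; TinyNet is a placeholder module, not the repo's Darknet:

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    # Placeholder for the real model.
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 16, 3, padding=1)

    def forward(self, x):
        return self.conv(x)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = TinyNet()

# The commented-out lines, uncommented: DataParallel replicates the module on
# every visible GPU and splits each input batch along dim 0.
if torch.cuda.device_count() > 1:
    print('Using ', torch.cuda.device_count(), ' GPUs')
    model = nn.DataParallel(model)

model.to(device).train()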
utils/torch_utils.py

@@ -6,7 +6,6 @@ def init_seeds(seed=0):
     if torch.cuda.is_available():
         torch.cuda.manual_seed(seed)
         torch.cuda.manual_seed_all(seed)
-        # torch.cuda.set_device(0)  # OPTIONAL: Set your GPU if multiple available


 def select_device(force_cpu=False):
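The hunk above trims a leftover comment from a seeding helper. A self-contained sketch of that kind of helper is below; the torch.cuda calls mirror the context lines in the diff, while seeding Python's random module and NumPy as well is an added assumption for completeness, not something shown here:

import random

import numpy as np
import torch

def init_seeds(seed=0):
    # Seed every RNG that can influence training so runs are repeatable.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)      # current GPU
        torch.cuda.manual_seed_all(seed)  # all GPUs, relevant on multi-GPU machines

init_seeds(0)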
@@ -14,5 +13,11 @@ def select_device(force_cpu=False):
         device = torch.device('cpu')
     else:
         device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+        if torch.cuda.device_count() > 1:
+            print('WARNING Using GPU0 Only. Multi-GPU issue: https://github.com/ultralytics/yolov3/issues/21')
+            torch.cuda.set_device(0)  # OPTIONAL: Set your GPU if multiple available
+            # print('Using ', torch.cuda.device_count(), ' GPUs')
+
     print('Using ' + str(device) + '\n')
     return device
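Taken together, the added lines make the helper pin work to GPU 0 on multi-GPU machines instead of raising. A standalone sketch of the patched behaviour, assuming the force_cpu branch outside the hunk is unchanged:

import torch

def select_device(force_cpu=False):
    # Sketch of the patched helper: CPU on request, otherwise cuda:0, and on
    # multi-GPU machines pin the default CUDA device to GPU 0 with a warning.
    if force_cpu:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        if torch.cuda.device_count() > 1:
            print('WARNING Using GPU0 Only. Multi-GPU issue: '
                  'https://github.com/ultralytics/yolov3/issues/21')
            torch.cuda.set_device(0)  # subsequent CUDA work defaults to GPU 0

    print('Using ' + str(device) + '\n')
    return device

device = select_device()           # e.g. prints "Using cuda:0" on a GPU machine
x = torch.zeros(1, device=device)  # new tensors land on the pinned device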