clean up train.py

parent b7d039737a
commit 0cc8f2be01

@@ -139,7 +139,7 @@ class YOLOLayer(nn.Module):
         if targets is not None:
             MSELoss = nn.MSELoss(size_average=True)
             BCEWithLogitsLoss = nn.BCEWithLogitsLoss(size_average=True)
-            CrossEntropyLoss = nn.CrossEntropyLoss()
+            # CrossEntropyLoss = nn.CrossEntropyLoss()

             if requestPrecision:
                 gx = self.grid_x[:, :, :nG, :nG]
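
Note: the `size_average=True` argument kept here was deprecated in PyTorch 0.4.1 in favor of a single `reduction` argument. A minimal sketch of the equivalent construction on a current PyTorch (`reduction='mean'` matches the old behavior):

    import torch
    import torch.nn as nn

    # size_average=True averaged the loss over all elements;
    # reduction='mean' is the modern equivalent.
    MSELoss = nn.MSELoss(reduction='mean')
    BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='mean')

    # quick check: both reduce to a scalar mean
    pred, target = torch.randn(4, 3), torch.rand(4, 3)
    print(MSELoss(pred, target).item(), BCEWithLogitsLoss(pred, target).item())
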
@@ -176,7 +176,7 @@ class YOLOLayer(nn.Module):
             lx, ly, lw, lh, lcls, lconf = FT([0]), FT([0]), FT([0]), FT([0]), FT([0]), FT([0])

             # Add confidence loss for background anchors (noobj)
-            #lconf += k * BCEWithLogitsLoss(pred_conf[~mask], mask[~mask].float())
+            # lconf += k * BCEWithLogitsLoss(pred_conf[~mask], mask[~mask].float())

         # Sum loss components
         loss = lx + ly + lw + lh + lconf + lcls
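
The commented-out line penalizes the objectness score of background ("noobj") anchors. A self-contained sketch of that masking pattern, with made-up shapes and a hypothetical weight `k` (nothing here beyond the expression itself comes from the repo):

    import torch
    import torch.nn as nn

    bce = nn.BCEWithLogitsLoss()           # mean reduction by default
    k = 1.0                                # hypothetical loss weight

    pred_conf = torch.randn(2, 3, 13, 13)  # raw objectness logits
    mask = torch.zeros(2, 3, 13, 13, dtype=torch.bool)
    mask[0, 1, 6, 6] = True                # one anchor assigned a target

    # ~mask selects background anchors; their objectness target is 0.0,
    # which is exactly what mask[~mask].float() yields
    lconf = k * bce(pred_conf[~mask], mask[~mask].float())
    print(lconf.item())
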
@@ -244,8 +244,8 @@ class Darknet(nn.Module):

         if is_training:
             self.losses['nT'] /= 3
-            self.losses['TC'] /= 3
-            metrics = torch.zeros(4, len(self.losses['FPe']))  # TP, FP, FN, target_count
+            self.losses['TC'] /= 3  # target category
+            metrics = torch.zeros(3, len(self.losses['FPe']))  # TP, FP, FN

             ui = np.unique(self.losses['TC'])[1:]
             for i in ui:
@@ -253,7 +253,6 @@ class Darknet(nn.Module):
                 metrics[0, i] = (self.losses['TP'][j] > 0).sum().float()  # TP
                 metrics[1, i] = (self.losses['FP'][j] > 0).sum().float()  # FP
                 metrics[2, i] = (self.losses['FN'][j] == 3).sum().float()  # FN
-            metrics[3] = metrics.sum(0)
             metrics[1] += self.losses['FPe']

             self.losses['TP'] = metrics[0].sum()
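
With the redundant fourth row dropped, the per-class counters live in a 3 x num_classes tensor whose rows unpack one statistic each. A toy sketch of that layout, with made-up counts:

    import torch

    num_classes = 4
    metrics = torch.zeros(3, num_classes)  # rows: TP, FP, FN

    metrics[0, 0] += 5   # 5 true positives for class 0
    metrics[1, 0] += 2   # 2 false positives for class 0
    metrics[2, 2] += 3   # 3 false negatives for class 2

    TP, FP, FN = metrics  # iterating dim 0 gives one 1-D row per statistic
    print(TP.sum().item(), FP.sum().item(), FN.sum().item())  # 5.0 2.0 3.0
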
train.py (26 changed lines)
@@ -87,6 +87,7 @@ def main(opt):

     modelinfo(model)
     t0, t1 = time.time(), time.time()
+    mean_recall, mean_precision = 0, 0
     print('%10s' * 16 % (
         'Epoch', 'Batch', 'x', 'y', 'w', 'h', 'conf', 'cls', 'total', 'P', 'R', 'nTargets', 'TP', 'FP', 'FN', 'time'))
     for epoch in range(opt.epochs):
@@ -112,7 +113,8 @@ def main(opt):

         ui = -1
         rloss = defaultdict(float)  # running loss
-        metrics = torch.zeros(4, num_classes)
+        metrics = torch.zeros(3, num_classes)
+        optimizer.zero_grad()
         for i, (imgs, targets) in enumerate(dataloader):
             if sum([len(x) for x in targets]) < 1:  # if no targets continue
                 continue
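
Hoisting `optimizer.zero_grad()` above the batch loop pairs with the step-then-zero sequence in the next hunk: gradients are cleared only after an optimizer step, which is the arrangement that lets the step later be made conditional for gradient accumulation. A minimal skeleton under assumed stand-in names (`model` and `dataloader` here are placeholders, not the repo's objects):

    import torch
    import torch.nn.functional as F

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    dataloader = [(torch.randn(4, 10), torch.randn(4, 1))] * 8

    accumulated_batches = 4                      # step every 4 batches
    optimizer.zero_grad()                        # clear once up front
    for i, (x, y) in enumerate(dataloader):
        F.mse_loss(model(x), y).backward()       # grads accumulate across batches
        if (i + 1) % accumulated_batches == 0 or i == len(dataloader) - 1:
            optimizer.step()
            optimizer.zero_grad()                # clear only after stepping
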
@@ -125,37 +127,37 @@ def main(opt):

             # Compute loss, compute gradient, update parameters
             loss = model(imgs.to(device), targets, requestPrecision=True)
-            optimizer.zero_grad()
             loss.backward()

+            # accumulated_batches = 4  # accumulate gradient for 4 batches before stepping optimizer
+            # if ((i+1) % accumulated_batches == 0) or (i == len(dataloader) - 1):
             optimizer.step()
+            optimizer.zero_grad()

             # Compute running epoch-means of tracked metrics
             ui += 1
             metrics += model.losses['metrics']
+            TP, FP, FN = metrics
             for key, val in model.losses.items():
                 rloss[key] = (rloss[key] * ui + val) / (ui + 1)

             # Precision
-            precision = metrics[0] / (metrics[0] + metrics[1] + 1e-16)
-            k = (metrics[0] + metrics[1]) > 0
+            precision = TP / (TP + FP)
+            k = (TP + FP) > 0
             if k.sum() > 0:
                 mean_precision = precision[k].mean()
-            else:
-                mean_precision = 0

             # Recall
-            recall = metrics[0] / (metrics[0] + metrics[2] + 1e-16)
-            k = (metrics[0] + metrics[2]) > 0
+            recall = TP / (TP + FN)
+            k = (TP + FN) > 0
             if k.sum() > 0:
                 mean_recall = recall[k].mean()
-            else:
-                mean_recall = 0

             s = ('%10s%10s' + '%10.3g' * 14) % (
                 '%g/%g' % (epoch, opt.epochs - 1), '%g/%g' % (i, len(dataloader) - 1), rloss['x'],
                 rloss['y'], rloss['w'], rloss['h'], rloss['conf'], rloss['cls'],
-                rloss['loss'], mean_precision, mean_recall, model.losses['nT'], model.losses['TP'],
-                model.losses['FP'], model.losses['FN'], time.time() - t1)
+                rloss['loss'], mean_precision, mean_recall, model.losses['nT'], TP.sum(),
+                FP.sum(), FN.sum(), time.time() - t1)
             t1 = time.time()
             print(s)
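
Swapping the `1e-16` epsilon for the `k` mask changes how empty classes are treated: classes with no predictions (or no targets) are excluded from the mean outright instead of contributing zero terms, and the `mean_recall, mean_precision = 0, 0` initialization added earlier covers the case where nothing passes the mask. A standalone sketch of the masked means, with made-up counts:

    import torch

    TP = torch.tensor([5., 0., 3.])
    FP = torch.tensor([1., 0., 2.])
    FN = torch.tensor([0., 0., 4.])

    precision = TP / (TP + FP)          # nan where TP + FP == 0
    k = (TP + FP) > 0                   # classes with any predictions
    mean_precision = precision[k].mean() if k.sum() > 0 else 0

    recall = TP / (TP + FN)
    k = (TP + FN) > 0
    mean_recall = recall[k].mean() if k.sum() > 0 else 0
    print(mean_precision, mean_recall)  # means over classes 0 and 2 only
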
utils/gcp.sh (13 changed lines)
@@ -11,21 +11,22 @@ gsutil cp gs://ultralytics/fresh9_5_e201.pt yolov3/checkpoints
 python3 detect.py

 # Test
-python3 test.py -img_size 416 -weights_path checkpoints/latest.pt -conf_thresh 0.5
+python3 test.py -img_size 416 -weights_path checkpoints/latest.pt -conf_thres 0.5

 # Download and Test
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
-cd yolov3
-cd checkpoints
+cd yolov3/checkpoints
 wget https://pjreddie.com/media/files/yolov3.weights
 cd ..
 python3 test.py -img_size 416 -weights_path checkpoints/backup5.pt -nms_thres 0.45

 # Download and Resume
 sudo rm -rf yolov3 && git clone https://github.com/ultralytics/yolov3
-cd yolov3
-cd checkpoints
+cd yolov3/checkpoints
 wget https://storage.googleapis.com/ultralytics/yolov3.pt
 cp yolov3.pt latest.pt
 cd ..
-python3 train.py -img_size 416 -epochs 1 -resume 1
+python3 train.py -img_size 416 -batch_size 12 -epochs 1 -resume 1
+python3 test.py -img_size 416 -weights_path checkpoints/latest.pt -conf_thres 0.5