updates

commit 809667404f
parent a46e500f9e

test.py (27 changed lines)
@@ -16,7 +16,7 @@ parser.add_argument('-nms_thres', type=float, default=0.45, help='iou threshold
 parser.add_argument('-n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')
 parser.add_argument('-img_size', type=int, default=416, help='size of each image dimension')
 opt = parser.parse_args()
-print(opt)
+print(opt, end='\n\n')
 
 cuda = torch.cuda.is_available()
 device = torch.device('cuda:0' if cuda else 'cpu')
@@ -49,10 +49,8 @@ def main(opt):
     # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
     dataloader = load_images_and_labels(test_path, batch_size=opt.batch_size, img_size=opt.img_size)
 
-    print('Compute mAP...')
-
-    mAP = 0
-    outputs, mAPs, TP, confidence, pred_class, target_class = [], [], [], [], [], []
+    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
+    outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class = [], [], [], [], [], [], [], []
     AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
     for batch_i, (imgs, targets) in enumerate(dataloader):
         imgs = imgs.to(device)
@@ -107,22 +105,25 @@ def main(opt):
                     correct.append(0)
 
             # Compute Average Precision (AP) per class
-            AP, AP_class = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6],
+            AP, AP_class, R, P = ap_per_class(tp=correct, conf=detections[:, 4], pred_cls=detections[:, 6],
                                         target_cls=target_cls)
 
             # Accumulate AP per class
             AP_accum_count += np.bincount(AP_class, minlength=nC)
             AP_accum += np.bincount(AP_class, minlength=nC, weights=AP)
 
-            # Compute mean AP for this image
-            mAP = AP.mean()
-
-            # Append image mAP to list
-            mAPs.append(mAP)
+            # Compute mean AP across all classes in this image, and append to image list
+            mAPs.append(AP.mean())
+            mR.append(R.mean())
+            mP.append(P.mean())
+
+            # Means of all images
             mean_mAP = np.mean(mAPs)
+            mean_R = np.mean(mR)
+            mean_P = np.mean(mP)
 
             # Print image mAP and running mean mAP
-            print('Image %d/%d AP: %.4f (%.4f)' % (len(mAPs), len(dataloader) * opt.batch_size, mAP, mean_mAP))
+            print(('%11s%11s' + '%11.3g' * 3) % (len(mAPs), len(dataloader) * opt.batch_size, mean_P, mean_R, mean_mAP))
 
     # Print mAP per class
     classes = load_classes(opt.class_path)  # Extracts class labels from file
@@ -130,8 +131,8 @@ def main(opt):
         print('%15s: %-.4f' % (c, AP_accum[i] / AP_accum_count[i]))
 
     # Print mAP
-    print('Mean Average Precision: %.4f' % mean_mAP)
-    return mean_mAP
+    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
+    return mean_mAP, mean_R, mean_P
 
 
 if __name__ == '__main__':
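Note: the test.py changes replace the single running mAP scalar with per-image mean AP, recall, and precision lists, whose running means are printed as fixed-width columns after each image. A minimal sketch of that bookkeeping, using hypothetical arrays in place of real ap_per_class() output:

import numpy as np

# Hypothetical stand-ins for one image's per-class AP, recall, precision
per_image = [(np.array([0.6, 0.8]), np.array([0.7, 0.9]), np.array([0.5, 0.7]))]

mAPs, mR, mP = [], [], []
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))  # '%11s' * 5 repeats the 11-char field
for AP, R, P in per_image:
    mAPs.append(AP.mean())  # mean AP over the classes present in this image
    mR.append(R.mean())     # mean recall over those classes
    mP.append(P.mean())     # mean precision over those classes

    # Running means over all images seen so far
    mean_mAP, mean_R, mean_P = np.mean(mAPs), np.mean(mR), np.mean(mP)
    print(('%11s%11s' + '%11.3g' * 3) % (len(mAPs), len(per_image), mean_P, mean_R, mean_mAP))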
train.py (6 changed lines)

@@ -125,7 +125,7 @@ def main(opt):
                 g['lr'] = lr
 
             # Compute loss, compute gradient, update parameters
-            loss = model(imgs.to(device), targets, requestPrecision=True)
+            loss = model(imgs.to(device), targets, requestPrecision=False)
             loss.backward()
 
             # accumulated_batches = 1  # accumulate gradient for 4 batches before stepping optimizer
@@ -183,11 +183,11 @@ def main(opt):
         # Calculate mAP
         import test
         test.opt.weights_path = 'weights/latest.pt'
-        mAP = test.main(test.opt)
+        mAP, R, P = test.main(test.opt)
 
         # Write epoch results
         with open('results.txt', 'a') as file:
-            file.write(s + '%11.3g' % mAP + '\n')
+            file.write(s + '%11.3g' * 3 % (mAP, P, R) + '\n')
 
     # Save final model
     dt = time.time() - t0
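Note: train.py now unpacks all three metrics from test.main() and appends them to results.txt as three fixed-width columns ('%11.3g' * 3 binds before %, so it expands to three format fields). A minimal sketch of the write, with hypothetical metric values and a stand-in for the loss summary string s built earlier in the epoch loop:

# Hypothetical values standing in for test.main(test.opt) output and for 's'
mAP, R, P = 0.512, 0.604, 0.433
s = ('%11s' + '%11.3g') % ('0/99', 1.23)  # stand-in for the epoch/loss summary

with open('results.txt', 'a') as file:
    # '%11.3g' * 3 appends three fixed-width columns: mAP, P, R
    file.write(s + '%11.3g' * 3 % (mAP, P, R) + '\n')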
utils/utils.py

@@ -14,20 +14,20 @@ def load_classes(path):
     """
     Loads class labels at 'path'
     """
-    fp = open(path, "r")
-    names = fp.read().split("\n")[:-1]
+    fp = open(path, 'r')
+    names = fp.read().split('\n')[:-1]
     return names
 
 
 def model_info(model):  # Plots a line-by-line description of a PyTorch model
-    nP = sum(x.numel() for x in model.parameters())  # number parameters
-    nG = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
+    n_p = sum(x.numel() for x in model.parameters())  # number parameters
+    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
     print('\n%4s %70s %9s %12s %20s %12s %12s' % ('', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
     for i, (name, p) in enumerate(model.named_parameters()):
         name = name.replace('module_list.', '')
         print('%4g %70s %9s %12g %20s %12g %12g' % (
             i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
-    print('\n%g layers, %g parameters, %g gradients' % (i + 1, nP, nG))
+    print('\nModel Summary: %g layers, %g parameters, %g gradients\n' % (i + 1, n_p, n_g))
 
 
 def class_weights():  # frequency of each class in coco train2014
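Note: model_info() keeps the same counting logic, just under PEP 8 names (n_p, n_g). A minimal sketch of the two sums on a toy model; the layers here are illustrative only, not the YOLO network:

import torch.nn as nn

# Toy model for illustration only
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))

n_p = sum(x.numel() for x in model.parameters())  # total parameter count
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # parameters that require gradients
print('Model Summary: %g parameters, %g gradients' % (n_p, n_g))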
@@ -104,7 +104,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
     unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
 
     # Create Precision-Recall curve and compute AP for each class
-    ap = []
+    ap, p, r = [], [], []
     for c in unique_classes:
         i = pred_cls == c
         n_gt = sum(target_cls == c)  # Number of ground truth objects
@@ -112,25 +112,27 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
 
         if (n_p == 0) and (n_gt == 0):
             continue
-        elif (np == 0) and (n_gt > 0):
-            ap.append(0)
-        elif (n_p > 0) and (n_gt == 0):
+        elif (n_p == 0) or (n_gt == 0):
             ap.append(0)
+            r.append(0)
+            p.append(0)
         else:
             # Accumulate FPs and TPs
-            fpa = np.cumsum(1 - tp[i])
-            tpa = np.cumsum(tp[i])
+            fpc = np.cumsum(1 - tp[i])
+            tpc = np.cumsum(tp[i])
 
             # Recall
-            recall = tpa / (n_gt + 1e-16)
+            recall_curve = tpc / (n_gt + 1e-16)
+            r.append(tpc[-1] / (n_gt + 1e-16))
 
             # Precision
-            precision = tpa / (tpa + fpa)
+            precision_curve = tpc / (tpc + fpc)
+            p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
 
             # AP from recall-precision curve
-            ap.append(compute_ap(recall, precision))
+            ap.append(compute_ap(recall_curve, precision_curve))
 
-    return np.array(ap), unique_classes.astype('int32')
+    return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(p)
 
 
 def compute_ap(recall, precision):
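Note: the reworked ap_per_class() merges the two degenerate elif branches (also fixing the old np/n_p typo) and derives each class's final recall and precision from the last point of the cumulative TP/FP curves, alongside the AP integral over the full curves. A standalone sketch of that logic for one class, with hypothetical inputs:

import numpy as np

# Hypothetical per-detection hit flags for one class, sorted by descending
# confidence: 1 = correct detection, 0 = false positive
tp = np.array([1, 1, 0, 1, 0])
n_gt = 4  # ground-truth object count for this class

fpc = np.cumsum(1 - tp)  # cumulative false positives
tpc = np.cumsum(tp)      # cumulative true positives

recall_curve = tpc / (n_gt + 1e-16)  # recall at each confidence cutoff
precision_curve = tpc / (tpc + fpc)  # precision at each confidence cutoff

r = tpc[-1] / (n_gt + 1e-16)       # final recall, all detections kept
p = tpc[-1] / (tpc[-1] + fpc[-1])  # final precision, all detections kept
print(recall_curve, precision_curve, r, p)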
@@ -431,7 +433,7 @@ def coco_class_count(path='/Users/glennjocher/downloads/DATA/coco/labels/train20
 
 
 def plot_results():
-    # Plot YOLO training results file "results.txt"
+    # Plot YOLO training results file 'results.txt'
     import numpy as np
     import matplotlib.pyplot as plt
     plt.figure(figsize=(16, 8))
@@ -445,4 +447,3 @@ def plot_results():
         plt.title(s[i])
         if i == 0:
             plt.legend()
-