training updates
commit 4120ac3aa6
parent c7f5b6cc21

train.py | 6 +++---
@@ -209,7 +209,7 @@ def train():
 
     # Start training
     nb = len(dataloader)  # number of batches
-    n_burn = max(3 * nb, 300)  # burn-in iterations, max(3 epochs, 300 iterations)
+    n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
     maps = np.zeros(nc)  # mAP per class
     # torch.autograd.set_detect_anomaly(True)
     results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
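For context, a minimal sketch of what the raised floor does to the burn-in length; the nb value below is made up for illustration:

# Illustration only: nb is a hypothetical number of batches per epoch.
nb = 120
n_burn_old = max(3 * nb, 300)  # 360 iterations under the previous floor
n_burn_new = max(3 * nb, 500)  # 500 iterations with the raised floor
print(n_burn_old, n_burn_new)  # 360 500

Only small datasets are affected: once 3 * nb exceeds 500, both expressions give the same result.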
@@ -234,8 +234,8 @@ def train():
             targets = targets.to(device)
 
             # Burn-in
-            if ni <= n_burn:
-                model.gr = np.interp(ni, [0, n_burn], [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
+            if ni <= n_burn * 2:
+                model.gr = np.interp(ni, [0, n_burn * 2], [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                 if ni == n_burn:  # burnin complete
                     print_model_biases(model)
 
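A minimal sketch of the doubled GIoU-ratio ramp, assuming an illustrative n_burn of 500; np.interp clamps to 1.0 once ni passes the end of the ramp:

import numpy as np

n_burn = 500  # illustrative value, see the hunk above
for ni in (0, n_burn, n_burn * 2, n_burn * 4):
    gr = np.interp(ni, [0, n_burn * 2], [0.0, 1.0])  # ramp model.gr from 0 to 1 over 2 * n_burn iterations
    print(ni, gr)  # 0 0.0, 500 0.5, 1000 1.0, 2000 1.0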
@@ -149,7 +149,7 @@ class ModelEMA:
         self.ema = deepcopy(model)
         self.ema.eval()
         self.updates = 0  # number of EMA updates
-        self.decay = lambda x: decay * (1 - math.exp(-x / 1000))  # decay exponential ramp (to help early epochs)
+        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
         self.device = device  # perform ema on different device from model if set
         if device:
             self.ema.to(device=device)
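A minimal sketch comparing the old and new EMA decay ramps; the base decay of 0.9999 is illustrative, not taken from this diff:

import math

decay = 0.9999  # illustrative base decay
old_ramp = lambda x: decay * (1 - math.exp(-x / 1000))
new_ramp = lambda x: decay * (1 - math.exp(-x / 2000))
for updates in (100, 1000, 5000):
    print(updates, round(old_ramp(updates), 4), round(new_ramp(updates), 4))
# 100  -> 0.0952 vs 0.0488
# 1000 -> 0.6321 vs 0.3934
# 5000 -> 0.9932 vs 0.9178

The longer time constant keeps the EMA tracking the raw weights more closely over roughly the first few thousand updates.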
@@ -688,11 +688,11 @@ def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
             shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images
 
 
-def kmean_anchors(path='../coco/train2017.txt', n=9, img_size=(608, 608)):
+def kmean_anchors(path='../coco/train2017.txt', n=12, img_size=(320, 1024)):
     # from utils.utils import *; _ = kmean_anchors()
-    # Produces a list of target kmeans suitable for use in *.cfg files
+    # Creates kmeans anchors for use in *.cfg files
     from utils.datasets import LoadImagesAndLabels
-    thr = 0.20  # IoU threshold
+    thr = 0.225  # IoU threshold
 
     def print_results(k):
         k = k[np.argsort(k.prod(1))]  # sort small to large
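The docstring comment above suggests the intended invocation; a usage sketch with the new defaults follows (it assumes a COCO-style label list exists at the default path):

# Usage sketch mirroring the docstring comment above.
from utils.utils import kmean_anchors

_ = kmean_anchors(path='../coco/train2017.txt', n=12, img_size=(320, 1024))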
@@ -709,7 +709,7 @@ def kmean_anchors(path='../coco/train2017.txt', n=9, img_size=(608, 608)):
     def fitness(k):  # mutation fitness
         iou = wh_iou(wh, torch.Tensor(k))  # iou
         max_iou = iou.max(1)[0]
-        return max_iou.mean()  # product
+        return (max_iou * (max_iou > thr).float()).mean()  # product
 
     # Get label wh
     wh = []
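A toy sketch of how the new fitness term differs from a plain mean; the IoU values are made up:

import torch

thr = 0.225  # IoU threshold from the hunk above
max_iou = torch.tensor([0.10, 0.30, 0.60])  # hypothetical best-anchor IoU per label box
old_fitness = max_iou.mean()                              # ~0.333, counts every box
new_fitness = (max_iou * (max_iou > thr).float()).mean()  # 0.30, boxes below thr contribute zero
print(old_fitness.item(), new_fitness.item())

Zeroing out boxes below the threshold pushes the anchor search toward sets that cover more boxes above thr, rather than sets that merely raise the average IoU.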