updates
parent 58d510df52
commit 1a8bbf600d
train.py: 4 changed lines
@@ -206,7 +206,7 @@ def train():
     model.arc = opt.arc  # attach yolo architecture
     model.hyp = hyp  # attach hyperparameters to model
     # model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
-    model_info(model, report='summary')  # 'full' or 'summary'
+    torch_utils.model_info(model, report='summary')  # 'full' or 'summary'
     nb = len(dataloader)
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
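Note: for the qualified call above to resolve, train.py must import the torch_utils module itself rather than only star-importing its contents. A minimal sketch of the import, assuming torch_utils sits in the repository's utils package (the exact path is an assumption, not shown in this hunk):

    from utils import torch_utils  # assumed location; makes torch_utils.model_info(...) available

    torch_utils.model_info(model, report='summary')  # 'full' or 'summary'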
@@ -387,7 +387,7 @@ if __name__ == '__main__':
     parser.add_argument('--batch-size', type=int, default=32)  # effective bs = batch_size * accumulate = 16 * 4 = 64
     parser.add_argument('--accumulate', type=int, default=2, help='batches to accumulate before optimizing')
     parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
-    parser.add_argument('--data', type=str, default='data/coco.data', help='*.data file path')
+    parser.add_argument('--data', type=str, default='data/coco_64img.data', help='*.data file path')
     parser.add_argument('--multi-scale', action='store_true', help='adjust (67% - 150%) img_size every 10 batches')
     parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
     parser.add_argument('--rect', action='store_true', help='rectangular training')
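Note: --accumulate controls gradient accumulation, so the effective batch size is batch_size * accumulate; with the defaults shown here that is 32 * 2 = 64 (the inline comment's "16 * 4 = 64" reflects older defaults but the same product). A minimal sketch of the accumulation pattern, not the repository's exact training loop; compute_loss and the optimizer setup are placeholders:

    for i, (imgs, targets) in enumerate(dataloader):
        loss = compute_loss(model(imgs), targets)  # placeholder loss function
        loss.backward()                            # gradients are summed across iterations
        if (i + 1) % opt.accumulate == 0:          # step once every `accumulate` batches
            optimizer.step()
            optimizer.zero_grad()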
@@ -64,3 +64,16 @@ def fuse_conv_and_bn(conv, bn):
         fusedconv.bias.copy_(b_conv + b_bn)

         return fusedconv
+
+
+def model_info(model, report='summary'):
+    # Plots a line-by-line description of a PyTorch model
+    n_p = sum(x.numel() for x in model.parameters())  # number parameters
+    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
+    if report is 'full':
+        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+        for i, (name, p) in enumerate(model.named_parameters()):
+            name = name.replace('module_list.', '')
+            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
+                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
+    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
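Note: a short usage sketch of the relocated helper, assuming it is importable as utils.torch_utils (the module path is inferred from the torch_utils.model_info call in train.py, not shown in this hunk):

    import torch.nn as nn
    from utils import torch_utils  # assumed module path

    net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
    torch_utils.model_info(net, report='summary')  # prints layer, parameter and gradient counts
    torch_utils.model_info(net, report='full')     # additionally prints one row per parameter tensor

One nit on the moved code: `report is 'full'` compares string identity and only works because CPython usually interns short literals; `report == 'full'` would be the safer comparison.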
@@ -41,19 +41,6 @@ def load_classes(path):
     return list(filter(None, names))  # filter removes empty strings (such as last line)


-def model_info(model, report='summary'):
-    # Plots a line-by-line description of a PyTorch model
-    n_p = sum(x.numel() for x in model.parameters())  # number parameters
-    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
-    if report is 'full':
-        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
-        for i, (name, p) in enumerate(model.named_parameters()):
-            name = name.replace('module_list.', '')
-            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
-                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
-    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
-
-
 def labels_to_class_weights(labels, nc=80):
     # Get class weights (inverse frequency) from training labels
     ni = len(labels)  # number of images
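Note: the surrounding context shows labels_to_class_weights, which per its comment derives inverse-frequency class weights from the training labels. A minimal sketch of that idea (illustrative only, not the repository's implementation; it assumes labels is a list with one (class, x, y, w, h) array per image):

    import numpy as np
    import torch

    def inverse_frequency_weights(labels, nc=80):
        classes = np.concatenate(labels, 0)[:, 0].astype(int)  # class index of every target
        counts = np.bincount(classes, minlength=nc)             # occurrences per class
        counts[counts == 0] = 1                                  # avoid division by zero for unseen classes
        weights = 1.0 / counts                                   # rarer classes get larger weights
        weights /= weights.sum()                                 # normalize so the weights sum to 1
        return torch.from_numpy(weights)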