detailed image sizes report

Glenn Jocher 2020-04-14 11:51:19 -07:00
parent 029e137bc2
commit 763cdd5ae2
2 changed files with 9 additions and 9 deletions

View File

@@ -60,7 +60,7 @@ def train():
batch_size = opt.batch_size
accumulate = opt.accumulate  # effective bs = batch_size * accumulate = 16 * 4 = 64
weights = opt.weights  # initial training weights
-imgsz_min, imgsz_max, img_size_test = opt.img_size  # img sizes (min, max, test)
+imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)

# Image Sizes
gs = 64  # (pixels) grid size
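
The three-element opt.img_size unpacked above (min, max, test) would typically come from a command-line flag that accepts one to three integers. The following sketch is only an assumption about how such a flag could be parsed and padded; the actual argparse setup is not part of this diff.

import argparse

parser = argparse.ArgumentParser()
# hypothetical flag: 1-3 integers interpreted as [min_train, max_train, test]
parser.add_argument('--img-size', nargs='+', type=int, default=[320, 640],
                    help='[min_train, max_train, test] image sizes')
opt = parser.parse_args()

# pad to exactly three values by repeating the last entry,
# so a single value serves as min, max and test size alike
opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size)))

imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)
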
@@ -71,9 +71,9 @@ def train():
imgsz_min //= 1.5
imgsz_max //= 0.667
grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
-imgsz_max = grid_max * gs  # initialize with maximum multi_scale size
-print('Using multi-scale %g - %g' % (grid_min * gs, imgsz_max))
-img_size = imgsz_max
+imgsz_min, imgsz_max = grid_min * gs, grid_max * gs
+print('Training image sizes %g - %g, testing image size %g' % (imgsz_min, imgsz_max, imgsz_test))
+img_size = imgsz_max  # initialize with max size

# Configure run
init_seeds()
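
To illustrate the arithmetic above, assume gs = 64 and a requested size of 416 for all three values, with multi-scale training enabled so the //= 1.5 and //= 0.667 scalings apply (the 416 input is an assumed example, not a value from this diff). Both ends of the range are snapped to whole multiples of the grid size:

gs = 64                                    # (pixels) grid size
imgsz_min = imgsz_max = imgsz_test = 416   # assumed example input

imgsz_min //= 1.5                          # 416 // 1.5   -> 277.0
imgsz_max //= 0.667                        # 416 // 0.667 -> 623.0
grid_min, grid_max = imgsz_min // gs, imgsz_max // gs  # 4.0 and 9.0 grid cells
imgsz_min, imgsz_max = grid_min * gs, grid_max * gs    # 256.0 and 576.0 pixels
print('Training image sizes %g - %g, testing image size %g' % (imgsz_min, imgsz_max, imgsz_test))
# -> Training image sizes 256 - 576, testing image size 416

Keeping both ends of the range on grid-size multiples means every size drawn from the multi-scale range stays divisible by gs.
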
@@ -192,7 +192,7 @@ def train():
collate_fn=dataset.collate_fn)

# Testloader
-testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, img_size_test, batch_size,
+testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
hyp=hyp,
rect=True,
cache_images=opt.cache_images,
@ -310,7 +310,7 @@ def train():
results, maps = test.test(cfg, results, maps = test.test(cfg,
data, data,
batch_size=batch_size, batch_size=batch_size,
img_size=img_size_test, img_size=imgsz_test,
model=ema.ema, model=ema.ema,
save_json=final_epoch and is_coco, save_json=final_epoch and is_coco,
single_cls=opt.single_cls, single_cls=opt.single_cls,
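
The renamed imgsz_test keeps evaluation at one fixed resolution while training varies inside [imgsz_min, imgsz_max]. The sketch below shows one common way such a range is consumed per batch; it is an assumption about usage, not code taken from this diff.

import random
import torch
import torch.nn.functional as F

gs = 64                                            # grid size, as in the diff
imgsz_min, imgsz_max, imgsz_test = 256, 576, 416   # assumed values (see the example above)
grid_min, grid_max = imgsz_min // gs, imgsz_max // gs

imgs = torch.zeros(4, 3, imgsz_max, imgsz_max)     # dummy batch allocated at the maximum size

# draw a random size from the multi-scale range and rescale the batch on the fly
img_size = random.randrange(grid_min, grid_max + 1) * gs
if img_size != imgs.shape[-1]:
    imgs = F.interpolate(imgs, size=img_size, mode='bilinear', align_corners=False)
print(imgs.shape)  # e.g. torch.Size([4, 3, 320, 320])

# evaluation, by contrast, always runs at the fixed imgsz_test resolution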

View File

@@ -573,9 +573,9 @@ def get_yolo_layers(model):
def print_model_biases(model):
# prints the bias neurons preceding each yolo layer
print('\nModel Bias Summary: %8s%18s%18s%18s' % ('layer', 'regression', 'objectness', 'classification'))
+try:
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for l in model.yolo_layers:  # print pretrained biases
-try:
if multi_gpu:
na = model.module.module_list[l].na  # number of anchors
b = model.module.module_list[l - 1][0].bias.view(na, -1)  # bias 3x85
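
The multi_gpu branch above is needed because nn.DataParallel and nn.DistributedDataParallel wrap the original network, so custom attributes such as module_list live under .module. A minimal sketch of that unwrapping pattern follows; DummyNet is purely illustrative and not part of the repository.

import torch
import torch.nn as nn

class DummyNet(nn.Module):  # illustrative stand-in for the YOLO model
    def __init__(self):
        super().__init__()
        self.module_list = nn.ModuleList([nn.Conv2d(3, 255, 1)])

    def forward(self, x):
        return self.module_list[0](x)

model = DummyNet()
if torch.cuda.device_count() > 1:  # wrap only when several GPUs are available
    model = nn.DataParallel(model)

# unwrap before touching custom attributes such as module_list
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
net = model.module if multi_gpu else model
bias = net.module_list[0].bias.view(1, -1)  # one bias row, analogous to print_model_biases
print(bias.shape)  # torch.Size([1, 255])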