diff --git a/test.py b/test.py
index fe6392bc..bdbb258e 100644
--- a/test.py
+++ b/test.py
@@ -191,7 +191,7 @@ if __name__ == '__main__':
     parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
     parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
     opt = parser.parse_args()
-    print(opt, end='\n\n')
+    print(opt)
 
     with torch.no_grad():
         mAP = test(
diff --git a/train.py b/train.py
index 5c0bf823..9d90c86f 100644
--- a/train.py
+++ b/train.py
@@ -294,7 +294,7 @@ if __name__ == '__main__':
     parser.add_argument('--evolve', action='store_true', help='run hyperparameter evolution')
     parser.add_argument('--var', default=0, type=int, help='debug variable')
     opt = parser.parse_args()
-    print(opt, end='\n\n')
+    print(opt)
 
     if opt.evolve:
         opt.notest = True  # save time by only testing final epoch
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 406d5ac7..fc8e2c43 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -25,6 +25,7 @@ def select_device(force_cpu=False):
                 print("           device%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                       (i, x[i].name, x[i].total_memory / c))
 
+    print('')  # skip a line
     return device
 
 
diff --git a/utils/utils.py b/utils/utils.py
index d00b8aa4..156da436 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -41,7 +41,7 @@
 def model_info(model):  # Plots a line-by-line description of a PyTorch model
     n_p = sum(x.numel() for x in model.parameters())  # number parameters
     n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
-    print('\n%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+    print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
     for i, (name, p) in enumerate(model.named_parameters()):
         name = name.replace('module_list.', '')
         print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (