From 4a923445bcd694aac97e2b76f0afbc7fe9cb8240 Mon Sep 17 00:00:00 2001
From: tomasz
Date: Fri, 11 Sep 2020 18:52:00 +0200
Subject: [PATCH] Update

---
 cfg/yolov4-21cls.cfg                    | 1155 +++++++++++++++++++++++
 our_scripts/config_bayes.yml            |  128 +--
 our_scripts/run_yolov3_process_bayes.py |   39 +-
 our_scripts/utils.py                    |   22 +-
 train.py                                |    2 +-
 utils/datasets.py                       |   14 +-
 6 files changed, 1251 insertions(+), 109 deletions(-)
 create mode 100644 cfg/yolov4-21cls.cfg

diff --git a/cfg/yolov4-21cls.cfg b/cfg/yolov4-21cls.cfg
new file mode 100644
index 00000000..e9d4d711
--- /dev/null
+++ b/cfg/yolov4-21cls.cfg
@@ -0,0 +1,1155 @@
+[net]
+# Testing
+#batch=1
+#subdivisions=1
+# Training
+batch=64
+subdivisions=8
+width=608
+height=608
+channels=3
+momentum=0.949
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.00261
+burn_in=1000
+max_batches = 500500
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+#cutmix=1
+mosaic=1
+
+#19:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-7
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-10
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-16
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = 85
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
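+
+# Note on the detection heads further down: each 1x1 conv feeding a [yolo]
+# layer uses filters = (classes + 5) * masks_per_head = (21 + 5) * 3 = 78,
+# which is where filters=78 comes from in this 21-class config.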
+
+[route]
+layers = 54
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+##########################
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=78
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
+classes=21
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.2
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=256
+activation=leaky
+
+[route]
+layers = -1, -16
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=78
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
+classes=21
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.1
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=512
+activation=leaky
+
+[route]
+layers = -1, -37
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=78
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
+classes=21
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
diff --git a/our_scripts/config_bayes.yml b/our_scripts/config_bayes.yml
index 05ae609b..0a92711c 100644
--- a/our_scripts/config_bayes.yml
+++ b/our_scripts/config_bayes.yml
@@ -1,29 +1,23 @@
 bayes:
-  iterations: 10
+  iterations: 1
 train:
   epochs:
     type: discrete
-    values: [30]
+    values: [400]
   batch-size:
     type: discrete
-    min: 1
-    max: 5
-    step: 1
+    values: [4]
   cfg: ./cfg/yolov3-spp-21cls.cfg
   data: ./data/widok_01_21.data
   multi-scale:
     type: discrete
-    values: [true, false]
+    values: [false]
   img-size-start:
     type: discrete
-    min: 512
-    max: 1088
-    step: 64
+    values: [896]
   img-size-end:
     type: discrete
-    min: 512
-    max: 1088
-    step: 64
+    values: [1344]
   rect:
     type: discrete
     values: [false]
@@ -33,104 +27,76 @@ train:
   evolve: false
   bucket:
   cache-images: false
-  weights: ./weights/yolov3-spp-ultralytics.pt
+  weights: ./experiments/model2/best.pt
   device: 1
   adam:
     type: discrete
     values: [true]
   single-cls: false
   snapshot-every:
-  freeze-layers: true
+  freeze-layers: false
   other-hyps:
     giou:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [3.54]
     cls:
-      type: continuous
-      min: 10.0
-      max: 100.0
+      type: discrete
+      values: [3.74]
    cls_pw:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [1.0]
    obj:
-      type: continuous
-      min: 10.0
-      max: 100.0
+      type: discrete
+      values: [64.3]
    obj_pw:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [1.0]
    iou_t:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.2]
    lr0:
-      type: continuous
-      min: 0.000001
-      max: 0.1
+      type: discrete
+      values: [0.01]
    lrf:
-      type: continuous
-      min: 0.000001
-      max: 0.1
+      type: discrete
+      values: [0.0005]
    momentum:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.937]
    weight_decay:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0005]
    fl_gamma:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [0.0]
    hsv_h:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0138]
    hsv_s:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.678]
    hsv_v:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.36]
    degrees:
-      type: continuous
-      min: 0.0
-      max: 30.0
+      type: discrete
+      values: [0.0]
    translate:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
    scale:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
    shear:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
 experiments:
   dir: ./experiments
 detect:
   source: ./data/widok_01_21/widok_01_21_test_labels.txt
-  test-img-size:
-    type: discrete
-    min: 512
-    max: 1088
-    step: 64
-  conf-thres:
-    type: continuous
-    min: 0.3
-    max: 0.6
-  iou-thres:
-    type: continuous
-    min: 0.3
-    max: 0.6
+  test-img-size: 1024
+  conf-thres: 0.45
+  iou-thres: 0.6
   classes:
   agnostic-nms:
   augment:
diff --git a/our_scripts/run_yolov3_process_bayes.py b/our_scripts/run_yolov3_process_bayes.py
index 7d598ceb..c72a3d2c 100644
--- a/our_scripts/run_yolov3_process_bayes.py
+++ b/our_scripts/run_yolov3_process_bayes.py
@@ -84,9 +84,9 @@ def call_detection_script(gaussian_hyps, weights_path, names_path, dir):
     --output {detect_output_dir}
     --names {names_path}
     --weights {weights_path}
-    --test-img-size {gaussian_hyps['test-img-size']}
-    --conf-thres {gaussian_hyps['conf-thres']}
-    --iou-thres {gaussian_hyps['iou-thres']}
+    --test-img-size {getattr(config.detect, 'test-img-size')}
+    --conf-thres {getattr(config.detect, 'conf-thres')}
+    --iou-thres {getattr(config.detect, 'iou-thres')}
     --save-txt"""
     cmd += f" --device {config.train.device}" if config.train.device else ""
     cmd = " ".join(cmd.split())
@@ -134,37 +134,34 @@ def yolov3(x):
         'translate': float(x[:, 22]),
         'scale': float(x[:, 23]),
         'shear': float(x[:, 24]),  # train hyps end index
-        'test-img-size': int(x[:, 25]),
-        'conf-thres': float(x[:, 26]),
-        'iou-thres': float(x[:, 27])
     }
-    line = ""
     try:
         call_training_script(bayes_hyps)
         weights_path, names_path, train_results_dir = move_training_results_to_experiments_dir()
         detect_output_dir = call_detection_script(bayes_hyps, weights_path, names_path, train_results_dir)
         conf_matrix_path = call_generate_confussion_matrix(detect_output_dir, names_path, train_results_dir)
-        y_dict = get_values_from_conff_matrix(conf_matrix_path)
+        y_dict = get_values_from_conff_matrix(conf_matrix_path)
 
-        # formula for computing the objective value
-        y_val = 1 - (y_dict['match'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['all labels']
+        # formula for computing the objective value
+        y_val = (1 - (y_dict['right'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['labeled']) / 30
 
         # write to the globally declared file
         line = "\t".join([bayes_hyps.__str__(), str(y_val)])
+        print('###### line ########')
+        print(line)
         bayes_params_file.writelines([line, '\n'])
         return y_val
     except:
         tb = traceback.format_exc()
+        y_max_val = 1
         print("An error occured during running training-detect-confussion process \n", tb)
-        print("Returning 1 from current bayessian iteration")
-        line = "\t".join([bayes_hyps.__str__(), str(1)])
-        return 1
-    finally:
+        print(f"Returning {y_max_val} from current bayesian iteration")
+        line = "\t".join([bayes_hyps.__str__(), str(y_max_val)])
         bayes_params_file.writelines([line, '\n'])
-
-
+        return y_max_val
 
+# at what resolution is detection run
 if __name__ == '__main__':
     bounds = config.get_bayes_bounds()
 
@@ -172,7 +169,15 @@ if __name__ == '__main__':
     # load the bayes_params files from previous experiments
     X, Y = load_previous_bayes_experiments(config.experiments.dir)
 
+    constraints = [
+        {
+            'name': 'img_size_constraint',
+            'constraint': '(x[:,3] - x[:,4])'  # img-size-start - img-size-end <= 0
+        }
+    ]
+
     bayes_optimizer = GPyOpt.methods.BayesianOptimization(f=yolov3, domain=bounds, X=X, Y=Y, verbosity=True,
-                                                          initial_design_numdata=2)
+                                                          initial_design_numdata=5, constraints=constraints, )
     bayes_optimizer.run_optimization(config.bayes.iterations, verbosity=True)
     bayes_params_file.close()
diff --git a/our_scripts/utils.py b/our_scripts/utils.py
index 2732f806..fce51029 100644
--- a/our_scripts/utils.py
+++ b/our_scripts/utils.py
@@ -2,9 +2,10 @@ import ast
 import io
 import os
 import subprocess
-import numpy as np
 from glob import glob
 
+import numpy as np
+
 
 def call_subprocess(cmd):
     process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
@@ -19,7 +20,7 @@ def call_subprocess(cmd):
 
 
 def get_values_from_conff_matrix(path):
-    lines = open(path, 'r').readlines()[:7]
+    lines = open(path, 'r').readlines()[:6]
     d = {}
     for l in lines:
         key, value, *_ = l.split("\t")
@@ -53,14 +54,15 @@ def get_bayes_params_as_dict(x):
         'translate': float(x[:, 22]),
         'scale': float(x[:, 23]),
         'shear': float(x[:, 24]),  # train hyps end index
-        'test-img-size': int(x[:, 25]),
-        'conf-thres': float(x[:, 26]),
-        'iou-thres': float(x[:, 27])
     }
 
 
 def load_previous_bayes_experiments(experiments_dir):
     paths = list(glob(os.path.join(experiments_dir, '*bayes_params.txt')))
+    if len(paths) == 0:
+        print("No bayes files found")
+        return None, None
+
     y_values = []
     x_values = []
 
@@ -74,8 +76,14 @@ def load_previous_bayes_experiments(experiments_dir):
                 bayes_values = dict_to_numpy(bayes_dict)
                 x_values.append(bayes_values)
                 y_values.append(float(y_val))
+
+                print("Loaded values from previous experiments ", dict_str, y_val)
", dict_str, y_val) except: raise Exception(f"Cannot parse line {line} from file {p}") + if not y_values or not x_values: + print("No bayes files found") + return None, None + return np.array(x_values), np.array(y_values).reshape((len(y_values), 1)) @@ -91,3 +99,7 @@ def dict_to_numpy(d): else: x.append(float(value)) return x + + +if __name__ == '__main__': + get_values_from_conff_matrix('/home/tomekb/yolov3/experiments/2020-08-17_02-05-43/confussion-matrix.tsv') \ No newline at end of file diff --git a/train.py b/train.py index 2d616d6c..60bbd24e 100644 --- a/train.py +++ b/train.py @@ -234,7 +234,7 @@ def train(hyp): nb = len(dataloader) # number of batches n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations) maps = np.zeros(nc) # mAP per class - # torch.autograd.set_detect_anomaly(True) + # torch.autograd.set_baddetect_anomaly(True) results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification' t0 = time.time() print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test)) diff --git a/utils/datasets.py b/utils/datasets.py index 3be391a6..f3962962 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -44,10 +44,14 @@ def exif_size(img): class LoadImages: # for inference def __init__(self, path, img_size=416): path = str(Path(path)) # os-agnostic - files = [f.strip() for f in open(path, 'r').readlines()] - - - + files = [] + if path.endswith("txt"): + files = [f.strip() for f in open(path, 'r').readlines()] + else: + if os.path.isdir(path): + files = sorted(glob.glob(os.path.join(path, '*.*'))) + elif os.path.isfile(path): + files = [path] images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats] videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats] @@ -198,7 +202,7 @@ class LoadStreams: # multiple IP or RTSP cameras self.sources = sources for i, s in enumerate(sources): # Start the thread to read frames from the video stream - print('%g/%g: %s... ' % (i + 1, n, s), end='') + print('%g/%g: %s... ' % (i + 1, n, s)) cap = cv2.VideoCapture(0 if s == '0' else s) assert cap.isOpened(), 'Failed to open %s' % s w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))