commit 4a923445bc by tomasz, 2020-09-11 18:52:00 +02:00 (parent 36fedf62c0)
6 changed files with 1251 additions and 109 deletions

cfg/yolov4-21cls.cfg: 1155 changed lines (diff suppressed because it is too large)

Bayes hyperparameter search configuration (YAML):

@@ -1,29 +1,23 @@
 bayes:
-  iterations: 10
+  iterations: 1
 train:
   epochs:
     type: discrete
-    values: [30]
+    values: [400]
   batch-size:
     type: discrete
-    min: 1
-    max: 5
-    step: 1
+    values: [4]
   cfg: ./cfg/yolov3-spp-21cls.cfg
   data: ./data/widok_01_21.data
   multi-scale:
     type: discrete
-    values: [true, false]
+    values: [false]
   img-size-start:
     type: discrete
-    min: 512
-    max: 1088
-    step: 64
+    values: [896]
   img-size-end:
     type: discrete
-    min: 512
-    max: 1088
-    step: 64
+    values: [1344]
   rect:
     type: discrete
     values: [false]
@@ -33,104 +27,76 @@ train:
   evolve: false
   bucket:
   cache-images: false
-  weights: ./weights/yolov3-spp-ultralytics.pt
+  weights: ./experiments/model2/best.pt
   device: 1
   adam:
     type: discrete
     values: [true]
   single-cls: false
   snapshot-every:
-  freeze-layers: true
+  freeze-layers: false
   other-hyps:
     giou:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [3.54]
     cls:
-      type: continuous
-      min: 10.0
-      max: 100.0
+      type: discrete
+      values: [3.74]
     cls_pw:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [1.0]
     obj:
-      type: continuous
-      min: 10.0
-      max: 100.0
+      type: discrete
+      values: [64.3]
     obj_pw:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [1.0]
     iou_t:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.2]
     lr0:
-      type: continuous
-      min: 0.000001
-      max: 0.1
+      type: discrete
+      values: [0.01]
     lrf:
-      type: continuous
-      min: 0.000001
-      max: 0.1
+      type: discrete
+      values: [0.0005]
     momentum:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.937]
     weight_decay:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0005]
     fl_gamma:
-      type: continuous
-      min: 0.0
-      max: 10.0
+      type: discrete
+      values: [0.0]
     hsv_h:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0138]
     hsv_s:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.678]
     hsv_v:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.36]
     degrees:
-      type: continuous
-      min: 0.0
-      max: 30.0
+      type: discrete
+      values: [0.0]
     translate:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
     scale:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
     shear:
-      type: continuous
-      min: 0.0
-      max: 1.0
+      type: discrete
+      values: [0.0]
 experiments:
   dir: ./experiments
 detect:
   source: ./data/widok_01_21/widok_01_21_test_labels.txt
-  test-img-size:
-    type: discrete
-    min: 512
-    max: 1088
-    step: 64
-  conf-thres:
-    type: continuous
-    min: 0.3
-    max: 0.6
-  iou-thres:
-    type: continuous
-    min: 0.3
-    max: 0.6
+  test-img-size: 1024
+  conf-thres: 0.45
+  iou-thres: 0.6
   classes:
   agnostic-nms:
   augment:
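After this change almost every searched parameter is pinned to a single discrete value, so the optimizer effectively explores only what is left free. The config is consumed by config.get_bayes_bounds(), which is called in the driver script below but not shown in this diff; a minimal sketch of how such entries typically map onto a GPyOpt domain, using a hypothetical helper yaml_param_to_gpyopt:

# Hypothetical illustration only: maps one YAML search entry to a GPyOpt domain dict.
# GPyOpt expects a list of {'name', 'type', 'domain'} entries, where 'discrete'
# domains are explicit value tuples and 'continuous' domains are (min, max) pairs.
def yaml_param_to_gpyopt(name, spec):
    if spec['type'] == 'discrete':
        return {'name': name, 'type': 'discrete', 'domain': tuple(spec['values'])}
    return {'name': name, 'type': 'continuous', 'domain': (spec['min'], spec['max'])}

# e.g. the new batch-size entry above would become:
# {'name': 'batch-size', 'type': 'discrete', 'domain': (4,)}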

Bayesian optimization driver script (Python):

@@ -84,9 +84,9 @@ def call_detection_script(gaussian_hyps, weights_path, names_path, dir):
                 --output {detect_output_dir}
                 --names {names_path}
                 --weights {weights_path}
-                --test-img-size {gaussian_hyps['test-img-size']}
-                --conf-thres {gaussian_hyps['conf-thres']}
-                --iou-thres {gaussian_hyps['iou-thres']}
+                --test-img-size {getattr(config.detect, 'test-img-size')}
+                --conf-thres {getattr(config.detect, 'conf-thres')}
+                --iou-thres {getattr(config.detect, 'iou-thres')}
                 --save-txt"""
     cmd += f" --device {config.train.device}" if config.train.device else ""
     cmd = " ".join(cmd.split())
@@ -134,37 +134,34 @@ def yolov3(x):
         'translate': float(x[:, 22]),
         'scale': float(x[:, 23]),
         'shear': float(x[:, 24]),  # train hyps end index
-        'test-img-size': int(x[:, 25]),
-        'conf-thres': float(x[:, 26]),
-        'iou-thres': float(x[:, 27])
     }
+    line = ""
     try:
         call_training_script(bayes_hyps)
         weights_path, names_path, train_results_dir = move_training_results_to_experiments_dir()
         detect_output_dir = call_detection_script(bayes_hyps, weights_path, names_path, train_results_dir)
         conf_matrix_path = call_generate_confussion_matrix(detect_output_dir, names_path, train_results_dir)
         y_dict = get_values_from_conff_matrix(conf_matrix_path)
         # formula for computing the objective value
-        y_val = 1 - (y_dict['match'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['all labels']
+        y_val = (1 - (y_dict['right'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['labeled']) / 30
         # write the result to the globally declared file
         line = "\t".join([bayes_hyps.__str__(), str(y_val)])
+        print('###### line ########')
+        print(line)
         bayes_params_file.writelines([line, '\n'])
         return y_val
     except:
         tb = traceback.format_exc()
+        y_max_val = 1
         print("An error occured during running training-detect-confussion process \n", tb)
-        print("Returning 1 from current bayessian iteration")
-        line = "\t".join([bayes_hyps.__str__(), str(1)])
-        bayes_params_file.writelines([line, '\n'])
-        return 1
+        print(f"Returning {y_max_val} from current bayessian iteration")
+        line = "\t".join([bayes_hyps.__str__(), str(y_max_val)])
+    finally:
+        bayes_params_file.writelines([line, '\n'])
+    return y_max_val

-# at what resolution detection is run

 if __name__ == '__main__':
     bounds = config.get_bayes_bounds()
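The new objective folds the confusion-matrix counts into a single score for GPyOpt to minimize: correct detections are rewarded, false positives and mistakes are penalized, the result is normalized by the number of labels, and the whole expression is scaled by 1/30. A self-contained sketch of that scoring, with invented counts for illustration:

def bayes_objective(counts):
    # Lower is better: weighted matches reduce the score, penalties raise it.
    score = (counts['right'] * 10
             - counts['false positives'] * 3
             - counts['mistakes'])
    return (1 - score / counts['labeled']) / 30

# Illustrative numbers only, not taken from any real confusion matrix:
example = {'right': 180, 'false positives': 12, 'mistakes': 8, 'labeled': 200}
print(bayes_objective(example))  # (1 - (1800 - 36 - 8) / 200) / 30 ≈ -0.2593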
@@ -172,7 +169,15 @@ if __name__ == '__main__':
     # load the bayes_params files from previous experiments
     X, Y = load_previous_bayes_experiments(config.experiments.dir)

+    constraints = [
+        {
+            'name': 'img_size_constraint',
+            'constraint': '(x[:,3] - x[:,4])'  # img-size-start - img-size-end <= 0
+        }
+    ]
+
     bayes_optimizer = GPyOpt.methods.BayesianOptimization(f=yolov3, domain=bounds, X=X, Y=Y, verbosity=True,
-                                                          initial_design_numdata=2)
+                                                          initial_design_numdata=5, constraints=constraints, )
     bayes_optimizer.run_optimization(config.bayes.iterations, verbosity=True)
     bayes_params_file.close()
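GPyOpt evaluates each constraint as a string expression over the candidate matrix x and treats a point as feasible when the expression is <= 0, so '(x[:,3] - x[:,4])' keeps img-size-start at or below img-size-end. A minimal sketch of the same mechanism on a toy problem (the toy domain and objective are assumptions, not the project's setup):

import GPyOpt
import numpy as np

# Toy objective: squared distance of (x0, x1) from (1, 2); must return an (n, 1) array.
def toy_objective(x):
    return np.sum((x - np.array([1.0, 2.0])) ** 2, axis=1, keepdims=True)

domain = [
    {'name': 'x0', 'type': 'continuous', 'domain': (0, 5)},
    {'name': 'x1', 'type': 'continuous', 'domain': (0, 5)},
]
# Feasible only where x0 - x1 <= 0, mirroring the img-size constraint above.
constraints = [{'name': 'order', 'constraint': 'x[:,0] - x[:,1]'}]

opt = GPyOpt.methods.BayesianOptimization(f=toy_objective, domain=domain,
                                          constraints=constraints,
                                          initial_design_numdata=5)
opt.run_optimization(max_iter=10)
print(opt.x_opt, opt.fx_opt)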

Shared helper utilities (Python):

@@ -2,9 +2,10 @@ import ast
 import io
 import os
 import subprocess
-import numpy as np
 from glob import glob
+import numpy as np
+

 def call_subprocess(cmd):
     process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
@@ -19,7 +20,7 @@ def call_subprocess(cmd):

 def get_values_from_conff_matrix(path):
-    lines = open(path, 'r').readlines()[:7]
+    lines = open(path, 'r').readlines()[:6]
     d = {}
     for l in lines:
         key, value, *_ = l.split("\t")
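The slice change ([:7] to [:6]) means one fewer line is consumed from the top of the confusion-matrix report. The report's exact layout is not part of this commit; a small illustration of the tab-separated shape this parser expects, with invented counts and an assumed numeric conversion:

import io

# Invented example of the first lines the parser consumes (key<TAB>value):
fake_report = io.StringIO(
    "right\t180\n"
    "false positives\t12\n"
    "mistakes\t8\n"
    "labeled\t200\n"
)

counts = {}
for l in fake_report.readlines()[:6]:
    key, value, *_ = l.split("\t")
    counts[key] = float(value)
print(counts)  # {'right': 180.0, 'false positives': 12.0, 'mistakes': 8.0, 'labeled': 200.0}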
@@ -53,14 +54,15 @@ def get_bayes_params_as_dict(x):
         'translate': float(x[:, 22]),
         'scale': float(x[:, 23]),
         'shear': float(x[:, 24]),  # train hyps end index
-        'test-img-size': int(x[:, 25]),
-        'conf-thres': float(x[:, 26]),
-        'iou-thres': float(x[:, 27])
     }


 def load_previous_bayes_experiments(experiments_dir):
     paths = list(glob(os.path.join(experiments_dir, '*bayes_params.txt')))
+    if len(paths) == 0:
+        print("No bayes files found")
+        return None, None
+
     y_values = []
     x_values = []
@@ -74,8 +76,14 @@ def load_previous_bayes_experiments(experiments_dir):
                 bayes_values = dict_to_numpy(bayes_dict)
                 x_values.append(bayes_values)
                 y_values.append(float(y_val))
+                print("Loaded values from prevous experiments ", dict_str, y_val)
             except:
                 raise Exception(f"Cannot parse line {line} from file {p}")

+    if not y_values or not x_values:
+        print("No bayes files found")
+        return None, None
+
     return np.array(x_values), np.array(y_values).reshape((len(y_values), 1))
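Each optimization step appends a line of the form "<python-dict-repr>\t<objective-value>" to a *bayes_params.txt file, and load_previous_bayes_experiments re-reads those lines to warm-start GPyOpt with X and Y. A minimal sketch of parsing one such line (the sample line is invented; the real files carry the full 25-key hyperparameter dict):

import ast

# Invented example of one stored line: repr of the hyperparameter dict, a tab, then the objective value.
sample = "{'epochs': 400, 'batch-size': 4, 'lr0': 0.01}\t-0.2593"

dict_str, y_str = sample.split("\t")
hyps = ast.literal_eval(dict_str)   # back to a Python dict
y = float(y_str)
print(hyps['lr0'], y)  # 0.01 -0.2593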
@@ -91,3 +99,7 @@ def dict_to_numpy(d):
         else:
             x.append(float(value))
     return x
+
+
+if __name__ == '__main__':
+    get_values_from_conff_matrix('/home/tomekb/yolov3/experiments/2020-08-17_02-05-43/confussion-matrix.tsv')

Training script (Python):

@@ -234,7 +234,7 @@ def train(hyp):
     nb = len(dataloader)  # number of batches
     n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
     maps = np.zeros(nc)  # mAP per class
-    # torch.autograd.set_detect_anomaly(True)
+    # torch.autograd.set_baddetect_anomaly(True)
     results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
     t0 = time.time()
     print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))

Dataset loading utilities (Python):

@@ -44,10 +44,14 @@ def exif_size(img):

 class LoadImages:  # for inference
     def __init__(self, path, img_size=416):
         path = str(Path(path))  # os-agnostic
-        files = [f.strip() for f in open(path, 'r').readlines()]
+        files = []
+        if path.endswith("txt"):
+            files = [f.strip() for f in open(path, 'r').readlines()]
+        else:
+            if os.path.isdir(path):
+                files = sorted(glob.glob(os.path.join(path, '*.*')))
+            elif os.path.isfile(path):
+                files = [path]

         images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
         videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
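With this change LoadImages accepts three kinds of sources: a .txt file listing image paths (as used by the detect source path in the config above), a directory, or a single file. A short illustration of the intent; the directory and single-image paths are placeholders, not files from the repository:

# Placeholders only; point these at real data to run.
dataset_from_list = LoadImages('./data/widok_01_21/widok_01_21_test_labels.txt', img_size=1024)  # txt file, one image path per line
dataset_from_dir = LoadImages('./data/samples', img_size=1024)                                   # every file in a directory
dataset_from_file = LoadImages('./data/samples/frame_0001.jpg', img_size=1024)                   # a single image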
@@ -198,7 +202,7 @@ class LoadStreams:  # multiple IP or RTSP cameras
         self.sources = sources
         for i, s in enumerate(sources):
             # Start the thread to read frames from the video stream
-            print('%g/%g: %s... ' % (i + 1, n, s), end='')
+            print('%g/%g: %s... ' % (i + 1, n, s))
             cap = cv2.VideoCapture(0 if s == '0' else s)
             assert cap.isOpened(), 'Failed to open %s' % s
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))