tomasz 2020-09-11 18:52:00 +02:00
parent 36fedf62c0
commit 4a923445bc
6 changed files with 1251 additions and 109 deletions

cfg/yolov4-21cls.cfg (1155 additions)

File diff suppressed because it is too large.

View File

@@ -1,29 +1,23 @@
bayes:
iterations: 10
iterations: 1
train:
epochs:
type: discrete
values: [30]
values: [400]
batch-size:
type: discrete
min: 1
max: 5
step: 1
values: [4]
cfg: ./cfg/yolov3-spp-21cls.cfg
data: ./data/widok_01_21.data
multi-scale:
type: discrete
values: [true, false]
values: [false]
img-size-start:
type: discrete
min: 512
max: 1088
step: 64
values: [896]
img-size-end:
type: discrete
min: 512
max: 1088
step: 64
values: [1344]
rect:
type: discrete
values: [false]
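The hunk above pins most of the train search space to single-element values lists instead of min/max/step grids, so the optimizer only varies the hyperparameters that still have real ranges. A minimal sketch of how such YAML entries could be mapped to GPyOpt domain entries (the helper name and the exact mapping performed by config.get_bayes_bounds() are assumptions):

# Hypothetical helper: turn one hyperparameter spec from the YAML above into
# a GPyOpt domain entry. A single-element values list effectively pins the
# parameter; a min/max/step grid enumerates every candidate value.
def to_gpyopt_entry(name, spec):
    if spec.get('type') == 'continuous':
        return {'name': name, 'type': 'continuous',
                'domain': (spec['min'], spec['max'])}
    if 'values' in spec:  # e.g. values: [4] or values: [896]
        return {'name': name, 'type': 'discrete',
                'domain': tuple(float(v) for v in spec['values'])}
    # discrete grid given as min/max/step, e.g. 512..1088 step 64
    return {'name': name, 'type': 'discrete',
            'domain': tuple(range(spec['min'], spec['max'] + 1, spec['step']))}

bounds = [
    to_gpyopt_entry('batch-size', {'type': 'discrete', 'values': [4]}),
    to_gpyopt_entry('img-size-start', {'type': 'discrete', 'values': [896]}),
]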
@@ -33,104 +27,76 @@ train:
evolve: false
bucket:
cache-images: false
weights: ./weights/yolov3-spp-ultralytics.pt
weights: ./experiments/model2/best.pt
device: 1
adam:
type: discrete
values: [true]
single-cls: false
snapshot-every:
freeze-layers: true
freeze-layers: false
other-hyps:
giou:
type: continuous
min: 0.0
max: 10.0
type: discrete
values: [3.54]
cls:
type: continuous
min: 10.0
max: 100.0
type: discrete
values: [3.74]
cls_pw:
type: continuous
min: 0.0
max: 10.0
type: discrete
values: [1.0]
obj:
type: continuous
min: 10.0
max: 100.0
type: discrete
values: [64.3]
obj_pw:
type: continuous
min: 0.0
max: 10.0
type: discrete
values: [1.0]
iou_t:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.2]
lr0:
type: continuous
min: 0.000001
max: 0.1
type: discrete
values: [0.01]
lrf:
type: continuous
min: 0.000001
max: 0.1
type: discrete
values: [0.0005]
momentum:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.937]
weight_decay:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.0005]
fl_gamma:
type: continuous
min: 0.0
max: 10.0
type: discrete
values: [0.0]
hsv_h:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.0138]
hsv_s:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.678]
hsv_v:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.36]
degrees:
type: continuous
min: 0.0
max: 30.0
type: discrete
values: [0.0]
translate:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.0]
scale:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.0]
shear:
type: continuous
min: 0.0
max: 1.0
type: discrete
values: [0.0]
experiments:
dir: ./experiments
detect:
source: ./data/widok_01_21/widok_01_21_test_labels.txt
test-img-size:
type: discrete
min: 512
max: 1088
step: 64
conf-thres:
type: continuous
min: 0.3
max: 0.6
iou-thres:
type: continuous
min: 0.3
max: 0.6
test-img-size: 1024
conf-thres: 0.45
iou-thres: 0.6
classes:
agnostic-nms:
augment:

View File

@@ -84,9 +84,9 @@ def call_detection_script(gaussian_hyps, weights_path, names_path, dir):
--output {detect_output_dir}
--names {names_path}
--weights {weights_path}
--test-img-size {gaussian_hyps['test-img-size']}
--conf-thres {gaussian_hyps['conf-thres']}
--iou-thres {gaussian_hyps['iou-thres']}
--test-img-size {getattr(config.detect, 'test-img-size')}
--conf-thres {getattr(config.detect, 'conf-thres')}
--iou-thres {getattr(config.detect, 'iou-thres')}
--save-txt"""
cmd += f" --device {config.train.device}" if config.train.device else ""
cmd = " ".join(cmd.split())
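With this change the detection thresholds are no longer sampled by the optimizer; they come from the static detect section of the config (test-img-size: 1024, conf-thres: 0.45, iou-thres: 0.6 above). Because those keys contain hyphens they cannot be reached with attribute dot access, hence the getattr calls. A minimal sketch of the pattern, using an illustrative namespace rather than the project's real config object:

from types import SimpleNamespace

# Illustrative stand-in for config.detect; the real object comes from the
# project's config loader.
detect = SimpleNamespace()
setattr(detect, 'test-img-size', 1024)  # the hyphen makes detect.test-img-size a syntax error
setattr(detect, 'conf-thres', 0.45)
setattr(detect, 'iou-thres', 0.6)

print(getattr(detect, 'test-img-size'))  # -> 1024
print(getattr(detect, 'conf-thres'))     # -> 0.45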
@@ -134,12 +134,8 @@ def yolov3(x):
'translate': float(x[:, 22]),
'scale': float(x[:, 23]),
'shear': float(x[:, 24]), # train hyps end index
'test-img-size': int(x[:, 25]),
'conf-thres': float(x[:, 26]),
'iou-thres': float(x[:, 27])
}
line = ""
try:
call_training_script(bayes_hyps)
weights_path, names_path, train_results_dir = move_training_results_to_experiments_dir()
@@ -149,22 +145,23 @@ def yolov3(x):
y_dict = get_values_from_conff_matrix(conf_matrix_path)
# the objective value is computed from this formula
y_val = 1 - (y_dict['match'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['all labels']
y_val = (1 - (y_dict['right'] * 10 - y_dict['false positives'] * 3 - y_dict['mistakes']) / y_dict['labeled']) / 30
# write to the globally declared file
line = "\t".join([bayes_hyps.__str__(), str(y_val)])
print('###### line ########')
print(line)
bayes_params_file.writelines([line, '\n'])
return y_val
except:
tb = traceback.format_exc()
y_max_val = 1
print("An error occurred while running the training-detect-confusion process \n", tb)
print("Returning 1 from the current Bayesian iteration")
line = "\t".join([bayes_hyps.__str__(), str(1)])
return 1
finally:
print(f"Returning {y_max_val} from the current Bayesian iteration")
line = "\t".join([bayes_hyps.__str__(), str(y_max_val)])
bayes_params_file.writelines([line, '\n'])
return y_max_val
# resolution at which detection is run
if __name__ == '__main__':
bounds = config.get_bayes_bounds()
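Going back to the reworked objective above: the score now uses the right and labeled keys of the parsed confusion matrix and is scaled down by 30, and the log line plus return value are handled in a finally block. A standalone sketch of the formula, with made-up counts and the assumption that y_dict holds plain numbers under these keys:

# Sketch of the new objective; key names follow the y_dict lookups above.
# GPyOpt minimizes f, so better detection results should give a lower value.
def objective(y_dict):
    score = (y_dict['right'] * 10
             - y_dict['false positives'] * 3
             - y_dict['mistakes']) / y_dict['labeled']
    return (1 - score) / 30

example = {'right': 180, 'false positives': 12, 'mistakes': 5, 'labeled': 200}
print(objective(example))  # about -0.26 for these illustrative counts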
@@ -172,7 +169,15 @@ if __name__ == '__main__':
# load the bayes_params files from previous experiments
X, Y = load_previous_bayes_experiments(config.experiments.dir)
constraints = [
{
'name':'img_size_constraint',
'constraint': '(x[:,3] - x[:,4])' # img-size-start - img-size-end <= 0
}
]
bayes_optimizer = GPyOpt.methods.BayesianOptimization(f=yolov3, domain=bounds, X=X, Y=Y, verbosity=True,
initial_design_numdata=2)
initial_design_numdata=5, constraints=constraints, )
bayes_optimizer.run_optimization(config.bayes.iterations, verbosity=True)
bayes_params_file.close()
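The optimizer is now warm-started from previous runs (X, Y) and constrained so that img-size-start cannot exceed img-size-end: GPyOpt treats a constraint expression as satisfied when it evaluates to <= 0, which is what '(x[:,3] - x[:,4])' encodes (the column indices follow the order of the domain built from the config). A self-contained sketch with a made-up two-variable domain:

import GPyOpt

# Toy objective over the two image sizes only; GPyOpt passes x as a 2D array.
def f(x):
    return (x[:, 0] - 640) ** 2 + (x[:, 1] - 1024) ** 2

domain = [
    {'name': 'img-size-start', 'type': 'discrete', 'domain': tuple(range(512, 1089, 64))},
    {'name': 'img-size-end',   'type': 'discrete', 'domain': tuple(range(512, 1089, 64))},
]
# Feasible when the expression is <= 0, i.e. img-size-start <= img-size-end.
constraints = [{'name': 'img_size_constraint', 'constraint': '(x[:,0] - x[:,1])'}]

opt = GPyOpt.methods.BayesianOptimization(f=f, domain=domain, constraints=constraints,
                                          initial_design_numdata=5)
opt.run_optimization(max_iter=10)
print(opt.x_opt, opt.fx_opt)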

View File

@@ -2,9 +2,10 @@ import ast
import io
import os
import subprocess
import numpy as np
from glob import glob
import numpy as np
def call_subprocess(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
@@ -19,7 +20,7 @@ def call_subprocess(cmd):
def get_values_from_conff_matrix(path):
lines = open(path, 'r').readlines()[:7]
lines = open(path, 'r').readlines()[:6]
d = {}
for l in lines:
key, value, *_ = l.split("\t")
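get_values_from_conff_matrix now keeps only the first six lines of the confusion-matrix TSV and turns each tab-separated key/value pair into a dict entry. A small sketch of the assumed layout (the key names mirror the y_dict lookups in the objective; the real file may carry extra columns, which the *_ unpacking above ignores):

# Illustrative excerpt of a confussion-matrix.tsv: key <TAB> value per line.
sample = "right\t180\nfalse positives\t12\nmistakes\t5\nlabeled\t200"

d = {}
for l in sample.splitlines():
    key, value, *_ = l.split("\t")  # same unpacking as in the function above
    d[key] = float(value)           # numeric conversion assumed here
print(d)  # {'right': 180.0, 'false positives': 12.0, 'mistakes': 5.0, 'labeled': 200.0}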
@@ -53,14 +54,15 @@ def get_bayes_params_as_dict(x):
'translate': float(x[:, 22]),
'scale': float(x[:, 23]),
'shear': float(x[:, 24]), # train hyps end index
'test-img-size': int(x[:, 25]),
'conf-thres': float(x[:, 26]),
'iou-thres': float(x[:, 27])
}
def load_previous_bayes_experiments(experiments_dir):
paths = list(glob(os.path.join(experiments_dir, '*bayes_params.txt')))
if len(paths) == 0:
print("No bayes files found")
return None, None
y_values = []
x_values = []
@@ -74,8 +76,14 @@ def load_previous_bayes_experiments(experiments_dir):
bayes_values = dict_to_numpy(bayes_dict)
x_values.append(bayes_values)
y_values.append(float(y_val))
print("Loaded values from previous experiments ", dict_str, y_val)
except:
raise Exception(f"Cannot parse line {line} from file {p}")
if not y_values or not x_values:
print("No bayes files found")
return None, None
return np.array(x_values), np.array(y_values).reshape((len(y_values), 1))
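load_previous_bayes_experiments stitches every *bayes_params.txt found in the experiments dir back into warm-start arrays: X with one row per past evaluation and Y reshaped to an (n, 1) column, matching the X=X, Y=Y arguments given to BayesianOptimization. A sketch of the assumed per-line format, a dict repr and its objective value separated by a tab (the exact parsing code is not shown above, so this is illustrative):

import ast

# One logged evaluation: str(hyperparameter dict) <TAB> objective value
line = "{'epochs': 400, 'batch-size': 4, 'img-size-start': 896}\t0.42"

dict_str, y_val = line.rsplit("\t", 1)
bayes_dict = ast.literal_eval(dict_str)        # back to a Python dict
print(bayes_dict['batch-size'], float(y_val))  # 4 0.42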
@@ -91,3 +99,7 @@ def dict_to_numpy(d):
else:
x.append(float(value))
return x
if __name__ == '__main__':
get_values_from_conff_matrix('/home/tomekb/yolov3/experiments/2020-08-17_02-05-43/confussion-matrix.tsv')

View File

@@ -234,7 +234,7 @@ def train(hyp):
nb = len(dataloader) # number of batches
n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations)
maps = np.zeros(nc) # mAP per class
# torch.autograd.set_detect_anomaly(True)
# torch.autograd.set_baddetect_anomaly(True)
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
t0 = time.time()
print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))

View File

@@ -44,10 +44,14 @@ def exif_size(img):
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if path.endswith("txt"):
files = [f.strip() for f in open(path, 'r').readlines()]
else:
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
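LoadImages now also accepts a plain text file listing image paths, which is how the detect source ./data/widok_01_21/widok_01_21_test_labels.txt is consumed; a directory or a single file still works as before. A minimal sketch of preparing such a list (file names are illustrative):

# One image path per line; LoadImages strips surrounding whitespace per entry.
listing = [
    "data/widok_01_21/images/frame_0001.jpg",
    "data/widok_01_21/images/frame_0002.jpg",
]
with open("test_images.txt", "w") as f:
    f.write("\n".join(listing))

# dataset = LoadImages("test_images.txt", img_size=1024)  # iterates over the listed images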
@@ -198,7 +202,7 @@ class LoadStreams: # multiple IP or RTSP cameras
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
print('%g/%g: %s... ' % (i + 1, n, s))
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))