This commit is contained in:
tomasz 2020-07-26 00:42:33 +02:00
parent 4219b9fe7d
commit eff752bba2
5 changed files with 1102 additions and 0 deletions

cfg/yolov3-spp-21cls.cfg Normal file
@ -0,0 +1,821 @@
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
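# schedule: the learning rate ramps up over the first burn_in=1000 batches,
# then is multiplied by 0.1 (scales) at iterations 400000 and 450000 (steps)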
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
### SPP ###
[maxpool]
stride=1
size=5
[route]
layers=-2
[maxpool]
stride=1
size=9
[route]
layers=-4
[maxpool]
stride=1
size=13
[route]
layers=-1,-3,-5,-6
### End SPP ###
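# the route above concatenates the 13x13, 9x9 and 5x5 max-pool outputs
# (layers -1, -3, -5) with the un-pooled 512-channel input (layer -6),
# so the next 1x1 convolution sees 4 * 512 = 2048 channels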
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
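# detection head: the 1x1 conv below must have filters = (classes + 5) * len(mask)
# = (19 + 5) * 3 = 72, matching classes=19 in the [yolo] section; mask=6,7,8 assigns
# this head the three largest anchors (the same rule applies to the two heads below)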
[convolutional]
size=1
stride=1
pad=1
filters=72
activation=linear
[yolo]
mask = 6,7,8
anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
classes=19
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
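# merge the upsampled features with the last 512-channel backbone output (layer 61, stride 16)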
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=72
activation=linear
[yolo]
mask = 3,4,5
anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
classes=19
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
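# merge the upsampled features with the last 256-channel backbone output (layer 36, stride 8)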
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=72
activation=linear
[yolo]
mask = 0,1,2
anchors=26,16, 19,45, 36,27, 54,32, 34,80, 60,49, 74,71, 96,105, 135,145
classes=19
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1

our_scripts/config.py Normal file

@ -0,0 +1,64 @@
import yaml


class Args:
    def get_args_string(self) -> str:
        string = ''
        for key, value in self.__dict__.items():
            if not isinstance(value, Configuration.Train.OtherHyps) and value is not None:
                if key == 'img-size':
                    # img-size holds two numbers in one string, e.g. "512 1280"
                    string += f' --{key} {value.split(" ")[0]} {value.split(" ")[1]}'
                elif type(value) == bool:  # checked before int: bool is a subclass of int
                    if value:
                        string += f" --{key}"
                    else:
                        continue
                elif type(value) in [int, str] and value != '':
                    string += f' --{key} {value}'
                else:
                    raise Exception(f"Cannot parse argument {key} {value}")
        return string


class Configuration:
    class Train(Args):
        class OtherHyps:
            def __init__(self, config_file) -> None:
                for key, value in config_file['train']['other-hyps'].items():
                    self.__dict__[key] = value

        def __init__(self, config_file) -> None:
            self.other_hyps = Configuration.Train.OtherHyps(config_file)
            for key, value in config_file['train'].items():
                if key != 'other-hyps':
                    self.__dict__[key] = value

    class Experiments(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['experiments'].items():
                self.__dict__[key] = value

    class Detect(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['detect'].items():
                self.__dict__[key] = value

    class ConfussionMatrix(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['confussion-matrix'].items():
                self.__dict__[key] = value

    def __init__(self, config_path='/home/tomekb/yolov3/our_scripts/config_bayes.yml') -> None:
        self.config_path = config_path
        with open(config_path, 'r') as f:  # close the handle instead of leaking it
            file = yaml.load(f, Loader=yaml.Loader)
        self.train = self.Train(file)
        self.experiments = self.Experiments(file)
        self.detect = self.Detect(file)
        self.confussion_matrix = self.ConfussionMatrix(file)


if __name__ == '__main__':
    config = Configuration()
    print(config)
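A minimal usage sketch for the classes above; the flat import path and the yaml_text contents are illustrative assumptions, not part of the repository:

# usage sketch -- the import path and yaml_text below are assumptions
import tempfile
import textwrap

from config import Configuration  # hypothetical flat import; the package itself uses a relative import

yaml_text = textwrap.dedent("""\
    train:
      epochs: 100
      device: 1
      adam: true
      other-hyps:
        lr0: 0.01
    experiments:
      dir: ./experiments
    detect:
      source: ./test_labels.txt
    confussion-matrix:
      labels-dir: ./labels
""")

with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
    f.write(yaml_text)

config = Configuration(config_path=f.name)
print(config.train.get_args_string())  # -> " --epochs 100 --device 1 --adam"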

our_scripts/config_bayes.yml Normal file

@ -0,0 +1,116 @@
train:
  epochs: 100
  batch-size:
    type: discrete
    values: 1,2,3,4
  cfg: ./cfg/yolov3-spp-19cls.cfg
  data: ./data/widok_01_19.data
  multi-scale:
    type: discrete
    values: true, false
  img-size:
    type: discrete
    values: (512, 1280),(576, 1280),(640, 1280),(704, 1280),(768, 1280),(832,1280),(960, 1280),(1024, 1280) # all combinations satisfying the constraint img-size-min < img-size_max would have to be considered, and there are a lot of them
  rect: false
  # type: discrete
  # values: true,false
  resume: false
  nosave: false
  notest: false
  evolve: false
  bucket:
  cache-images: false
  weights: /home/tomekb/yolov3/weights/yolov3-spp-ultralytics.pt
  device: 1
  adam: true
  single-cls: false
  snapshot-every: 50
  freeze-layers: true
  other-hyps:
    giou:
      type: continuous
      min: 0.0
      max: 10.0
    cls:
      type: continuous
      min: 10.0
      max: 100.0
    cls_pw:
      type: continuous
      min: 0.0
      max: 10.0
    obj:
      type: continuous
      min: 10.0
      max: 100.0
    obj_pw:
      type: continuous
      min: 0.0
      max: 10.0
    iou_t:
      type: continuous
      min: 0.0
      max: 1.0
    lr0: 0.01 # initial learning rate (SGD=5E-3, Adam=5E-4); the dependency lr0 < lrf would have to be taken into account, so I am not varying these
    lrf: 0.0005 # final learning rate (with cos scheduler)
    momentum:
      type: continuous
      min: 0.0
      max: 1.0
    weight_decay:
      type: continuous
      min: 0.0
      max: 1.0
    fl_gamma:
      type: continuous
      min: 0.0
      max: 10.0
    hsv_h:
      type: continuous
      min: 0.0
      max: 1.0
    hsv_s: 0.678
    # type: continuous
    # min: 0.0
    # max: 1.0
    hsv_v:
      type: continuous
      min: 0.0
      max: 1.0
    degrees:
      type: continuous
      min: 0.0
      max: 30.0
    translate:
      type: continuous
      min: 0.0
      max: 1.0
    scale:
      type: continuous
      min: 0.0
      max: 1.0
    shear:
      type: continuous
      min: 0.0
      max: 1.0
experiments:
  dir: ./experiments
detect:
  source: /home/tomekb/yolov3/data/widok_01_19/widok_01_19_test_labels.txt
  test-img-size:
    type: discrete
    values: 512,576,640,704,768,832,896,960,1024,1088,1152,1216,1280
  conf-thres:
    type: continuous
    min: 0.0
    max: 1.0
  iou-thres:
    type: continuous
    min: 0.0
    max: 1.0
  classes:
  agnostic-nms:
  augment:
confussion-matrix:
  labels-dir: /home/tomekb/yolov3/data/widok_01_19/widok_01_19_labels
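The type/min/max and type/values entries above describe a search space for the Bayesian optimizer rather than concrete values. A sketch of how such a spec could be sampled; this helper is illustrative only and not part of the repository:

import random

def sample(spec):
    # plain scalars (e.g. epochs: 100) are fixed values, not search dimensions
    if not isinstance(spec, dict):
        return spec
    if spec['type'] == 'continuous':
        return random.uniform(spec['min'], spec['max'])
    if spec['type'] == 'discrete':
        # discrete values arrive as one comma-separated string, e.g. "1,2,3,4"
        return random.choice([v.strip() for v in str(spec['values']).split(',')])
    raise ValueError(f"unknown spec type: {spec['type']}")

print(sample({'type': 'continuous', 'min': 0.0, 'max': 1.0}))  # e.g. 0.37...
print(sample({'type': 'discrete', 'values': '512,576,640'}))   # e.g. '576'
print(sample(0.01))                                            # 0.01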

@ -0,0 +1,101 @@
import datetime
import glob
import io
import ntpath
import os
import shutil
import subprocess

from .config import Configuration


def call_training_script(config):
    cmd = '/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/train.py '
    cmd += f"--experiment-dir {config.experiments.dir}"
    cmd += config.train.get_args_string()  # append the remaining train arguments
    print("_______ CALLING TRAINING SCRIPT _______")
    print(cmd)
    os.chdir('..')  # change to the project root directory
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of the training process to the console
        print(line)
    return cmd


def move_training_results_to_experiments_dir(config):
    # create a directory named according to the pattern, e.g. 2020-06-30_17-52-19
    training_results_dir_path = os.path.join(config.experiments.dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    print("_______ CALLING MOVING RESULTS _______")
    print(f"MOVING RESULTS TO {training_results_dir_path}")
    os.mkdir(training_results_dir_path)
    weights_path = os.path.join(training_results_dir_path, 'best.pt')
    shutil.move('/home/tomekb/yolov3/weights/best.pt', weights_path)  # move the best weights
    names_path = open(config.train.data).readlines()[3].split('=')[-1].rstrip()  # read the names path from the *.data file
    names_file_name = ntpath.basename(names_path)
    experiment_names_path = os.path.join(training_results_dir_path, names_file_name)
    shutil.copy(names_path, experiment_names_path)  # copy the names file into the experiment dir with the training results
    tensorboard_dir = './runs'
    last_modified_tensorboard_dir = max(glob.glob(os.path.join(tensorboard_dir, '*/')), key=os.path.getmtime)
    shutil.move(last_modified_tensorboard_dir, os.path.join(training_results_dir_path))  # save the related tensorboard dir
    shutil.copy2(config.config_path, training_results_dir_path)  # copy the configuration yaml
    # for test purposes only
    # shutil.copy2('/home/tomekb/yolov3/experiments/1/best.pt', training_results_dir_path)
    return weights_path, experiment_names_path, training_results_dir_path


def call_detection_script(config, weights_path, names_path, dir):
    detect_output_dir = os.path.join(dir, 'output')
    cmd = f"""/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/detect.py
          --cfg {config.train.cfg}
          --source {config.detect.source}
          --output {detect_output_dir}
          --names {names_path}
          --weights {weights_path}
          --test-img-size {getattr(config.detect, 'test-img-size')}
          --conf-thres {getattr(config.detect, 'conf-thres')}
          --iou-thres {getattr(config.detect, 'iou-thres')}
          --save-txt"""
    cmd += " --agnostic-nms" if getattr(config.detect, 'agnostic-nms') else ""
    cmd += " --augment" if getattr(config.detect, 'augment') else ""
    cmd += f" --device {config.train.device}" if config.train.device else ""
    cmd = " ".join(cmd.split())  # collapse the multi-line command into a single line
    print("_______ CALLING DETECTION SCRIPT _______")
    print(cmd)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of the process to the console
        print(line)
    return detect_output_dir


def call_generate_confussion_matrix(detect_output_dir, config, names_path, train_results_dir):
    labels_dir = getattr(config.confussion_matrix, 'labels-dir')
    cmd = f"node ./our_scripts/generate-confusion-matrix.js {detect_output_dir} {labels_dir} {names_path} > {train_results_dir}/confussion-matrix.tsv"
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    print("_______ CALLING CONFUSION MATRIX SCRIPT _______")
    print(cmd)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of the process to the console
        print(line)


if __name__ == '__main__':
    # note: the relative import above means this file must be run as a module (python -m ...)
    config = Configuration()
    train_cmd = call_training_script(config)
    weights_path, names_path, train_results_dir = move_training_results_to_experiments_dir(config)
    detect_output_dir = call_detection_script(config, weights_path, names_path, train_results_dir)
    call_generate_confussion_matrix(detect_output_dir, config, names_path, train_results_dir)
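For reference, after cmd = " ".join(cmd.split()) the detection call collapses to a single line shaped like the one below; every path and value here is illustrative:

/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/detect.py --cfg ./cfg/yolov3-spp-19cls.cfg --source ./data/widok_01_19/widok_01_19_test_labels.txt --output ./experiments/2020-06-30_17-52-19/output --names widok_01_19.names --weights ./experiments/2020-06-30_17-52-19/best.pt --test-img-size 640 --conf-thres 0.3 --iou-thres 0.6 --save-txt --device 1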