tomasz 2020-06-28 14:37:21 +02:00
parent 2ed5ace62e
commit 6ec04a4cca
10 changed files with 595 additions and 10 deletions

.gitignore vendored

@@ -1,7 +1,12 @@
 weights/
 runs/
 data/widok01-11_images
+data/widok01-11_labels
+data/widok01-11_labels.npy
 .idea/
+experiments/
 MY_INFO
 *.pyc
+*.pyx
+*.jpg

detect.py

@@ -6,9 +6,10 @@ from utils.utils import *

 def detect(save_img=False):
-    imgsz = (320, 192) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
+    imgsz = (320, 192) if ONNX_EXPORT else opt.test_img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
     out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
-    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
+    #webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
+    webcam = False

     # Initialize
     device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
@@ -171,7 +172,7 @@ if __name__ == '__main__':
     parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
     parser.add_argument('--source', type=str, default='data/samples', help='source')  # input file/folder, 0 for webcam
     parser.add_argument('--output', type=str, default='output', help='output folder')  # output folder
-    parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
+    parser.add_argument('--test-img-size', type=int, default=512, help='inference size (pixels)')
     parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
     parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
     parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
@@ -188,4 +189,4 @@ if __name__ == '__main__':
     print(opt)

     with torch.no_grad():
-        detect()
+        detect(True)
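Taken together, the three hunks repurpose detect.py for batch evaluation: the inference size now comes from --test-img-size (the rename keeps it distinct from train.py's --img-size when both are passed in one argument list, as in the runner script below), webcam mode is hard-coded off because the new image-list sources end in .txt and would have tripped the old endswith('.txt') webcam heuristic, and detect(True) forces save_img=True so annotated images are always written to --output. A representative invocation, using the values wired into config.yml below:

    python detect.py --cfg ./cfg/yolov3-spp-18cls.cfg --source /home/michall/yolov3/data/widok01-11_test_labels.txt --test-img-size 1024 --conf-thres 0.3 --iou-thres 0.6 --save-txt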

our_scripts/config.py Normal file

@@ -0,0 +1,67 @@
from typing import Any

import yaml


class Args:
    def get_args_string(self) -> str:
        """Serialize the attributes loaded from YAML into a CLI argument string."""
        string = ''
        for key, value in self.__dict__.items():
            if not isinstance(value, Configuration.Train.OtherHyps) and value is not None:
                if key == 'img-size':
                    # 'img-size' holds two sizes in one string, e.g. '64 128'
                    string += f' --{key} {value.split(" ")[0]} {value.split(" ")[1]}'
                elif type(value) == bool:
                    if value:
                        string += f" --{key}"
                    else:
                        continue
                elif type(value) in [int, str] and value != '':
                    string += f' --{key} {value}'
                else:
                    raise Exception(f"Cannot parse argument {key} {value}")
        return string


class Configuration:
    class Train(Args):
        class OtherHyps:
            def __init__(self, config_file) -> None:
                for key, value in config_file['train']['other-hyps'].items():
                    self.__dict__[key] = value

        def __init__(self, config_file) -> None:
            self.other_hyps = Configuration.Train.OtherHyps(config_file)
            for key, value in config_file['train'].items():
                if key != 'other-hyps':
                    self.__dict__[key] = value

    class Experiments(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['experiments'].items():
                self.__dict__[key] = value

    class Detect(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['detect'].items():
                self.__dict__[key] = value

    class ConfussionMatrix(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['confussion-matrix'].items():
                self.__dict__[key] = value

    class Bayes(Args):
        def __init__(self, config_file) -> None:
            for key, value in config_file['bayes'].items():
                self.__dict__[key] = value

    def __init__(self, config_path='/home/tomekb/yolov3/our_scripts/config.yml') -> None:
        self.config_path = config_path
        file = yaml.load(open(config_path, 'r'), Loader=yaml.Loader)
        self.train = self.Train(file)
        self.experiments = self.Experiments(file)
        self.detect = self.Detect(file)
        self.confussion_matrix = self.ConfussionMatrix(file)
        self.bayes = self.Bayes(file)
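A minimal usage sketch (assuming the default config.yml path exists and the script runs from our_scripts/, so `config` is importable):

    from config import Configuration

    config = Configuration()  # parses /home/tomekb/yolov3/our_scripts/config.yml by default
    print(config.train.get_args_string())
    # -> ' --epochs 2 --batch-size 400 --cfg ./cfg/yolov3-spp-18cls.cfg ... --img-size 64 128 ... --adam'
    print(config.train.other_hyps.lr0)           # 0.01
    print(getattr(config.detect, 'conf-thres'))  # 0.3 (hyphenated keys need getattr)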

our_scripts/config.yml Normal file

@@ -0,0 +1,58 @@
train:
  epochs: 2
  batch-size: 400
  cfg: ./cfg/yolov3-spp-18cls.cfg
  data: ./data/widok01-11.data
  multi-scale: false
  img-size: '64 128'
  rect: false
  resume: false
  nosave: false
  notest: false
  evolve: false
  bucket:
  cache-images: false
  weights: /home/tomekb/yolov3/weights/yolov3-spp-ultralytics.pt
  device: 1
  adam: true
  single-cls: false
  # other hyperparameters
  other-hyps:
    giou: 3.53  # giou loss gain
    cls: 37.4  # cls loss gain
    cls_pw: 1.0  # cls BCELoss positive_weight
    obj: 64.3  # obj loss gain (*=img_size/320 if img_size != 320)
    obj_pw: 1.0  # obj BCELoss positive_weight
    iou_t: 0.20  # iou training threshold
    lr0: 0.01  # initial learning rate (SGD=5E-3, Adam=5E-4)
    lrf: 0.0005  # final learning rate (with cos scheduler)
    momentum: 0.937  # SGD momentum
    weight_decay: 0.0005  # optimizer weight decay
    fl_gamma: 0.0  # focal loss gamma (EfficientDet default is gamma=1.5)
    hsv_h: 0.0138  # image HSV-Hue augmentation (fraction)
    hsv_s: 0.678  # image HSV-Saturation augmentation (fraction)
    hsv_v: 0.36  # image HSV-Value augmentation (fraction)
    degrees: 0  # 1.98 * 0 # image rotation (+/- deg)
    translate: 0  # 0.05 * 0 # image translation (+/- fraction)
    scale: 0  # 0.05 * 0 # image scale (+/- gain)
    shear: 0  # 0.641 * 0 # image shear (+/- deg)
experiments:
  dir: ./experiments
detect:
  source: /home/michall/yolov3/data/widok01-11_test_labels.txt
  test-img-size: 1024
  conf-thres: 0.3
  iou-thres: 0.6
  save-txt: true
  classes:
  agnostic-nms:
  augment:
confussion-matrix:
  labels-dir: ./data/widok01-11_labels
bayes:
  todo: todo
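Note how these values interact with Args.get_args_string() above: false booleans are dropped, true booleans become bare flags, keys left empty (bucket, classes, agnostic-nms, augment) load as YAML null and are skipped, and the quoted img-size string is split into two CLI values. The detect and confussion-matrix sections are read field-by-field by run_yolov3_process.py rather than through get_args_string().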

our_scripts/generate-confusion-matrix.js Normal file

@@ -0,0 +1,178 @@
const fs = require('fs')
const path = require('path')

const detectionsDir = process.argv[2]
const labelsDir = process.argv[3]
const namesFile = process.argv[4]
const maxDistance = +(process.argv[5] || 0.1) // coerce optional CLI args to numbers
const width = +(process.argv[6] || 1920)
const height = +(process.argv[7] || 1080)

/*
console.log("DETECTIONS DIRECTORY:", detectionsDir)
console.log("LABELS DIRECTORY:", labelsDir)
console.log("NAMES FILE:", namesFile)
console.log("WIDTH", width)
console.log("HEIGHT", height)
//*/

// Detection txts hold 'xmin ymin xmax ymax class prob' in pixels;
// convert to normalized center/size boxes so they match the label format.
function parseDetections(detectionData) {
  return detectionData
    .split('\n')
    .filter(x => !!x)
    .map(line => line.split(' ').map(x => +x))
    .map(a => ({
      x: (a[0] + a[2]) / (2 * width),
      y: (a[1] + a[3]) / (2 * height),
      w: (a[2] - a[0]) / width,
      h: (a[3] - a[1]) / height,
      c: a[4],
      p: a[5]
    }))
}

// Label txts are already YOLO-format: 'class x y w h', normalized.
function parseLabels(labelData) {
  return labelData
    .split('\n')
    .filter(x => !!x)
    .map(line => line.split(' ').map(x => +x))
    .map(a => ({
      x: a[1],
      y: a[2],
      w: a[3],
      h: a[4],
      c: a[0],
      p: 1
    }))
}

function findNearest(position, boxes) {
  if(boxes.length == 0) return { d: Infinity } // empty file: nothing is nearby
  let dx = position.x - boxes[0].x
  let dy = position.y - boxes[0].y
  let bestBox = { ...boxes[0], d: Math.sqrt(dx * dx + dy * dy) }
  for(let i = 1; i < boxes.length; i++) {
    dx = position.x - boxes[i].x
    dy = position.y - boxes[i].y
    let distance = Math.sqrt(dx * dx + dy * dy)
    if(distance < bestBox.d) {
      bestBox = { ...boxes[i], d: distance }
    }
  }
  return bestBox
}

// Build a per-image table: results[labelClass][detectedClass] = count,
// where 'n' stands for "no match within maxDistance".
function compare(labels, detections) {
  const results = {}
  for(const label of labels) {
    const detection = findNearest(label, detections)
    if(detection.d > maxDistance) {
      if(!results[label.c]) results[label.c] = {}
      results[label.c]['n'] = (results[label.c]['n'] || 0) + 1
    } else {
      if(!results[label.c]) results[label.c] = {}
      results[label.c][detection.c] = (results[label.c][detection.c] || 0) + 1
    }
  }
  for(const detection of detections) {
    const label = findNearest(detection, labels)
    if(label.d > maxDistance) {
      results['n'] = results['n'] || {}
      results['n'][detection.c] = (results['n'][detection.c] || 0) + 1
    }
  }
  return results
}

async function compareLabelsAndResults(txt) {
  const detectionPath = path.resolve(detectionsDir, txt)
  const basename = path.basename(txt.split('.')[0])
  const labelPath = path.resolve(labelsDir, basename + '.txt')
  const [detectionData, labelData] = await Promise.all([
    fs.promises.readFile(detectionPath, 'utf8'),
    fs.promises.readFile(labelPath, 'utf8')
  ])
  const detections = parseDetections(detectionData)
  const labels = parseLabels(labelData)
  return {
    basename,
    result: compare(labels, detections)
  }
}

async function main() {
  const names = (await fs.promises.readFile(namesFile, 'utf8')).split('\n').map(t => t.trim())
  names.n = '?'      // display name for the "no match" row/column
  names.sum = 'sum'
  const files = await fs.promises.readdir(detectionsDir)
  const txts = files.filter(p => path.extname(p) == '.txt')
  //console.log("OUTPUT TXT FILES", txts.length)
  const promises = txts.map(compareLabelsAndResults)
  const compareResults = await Promise.all(promises)
  await fs.promises.mkdir(path.resolve(detectionsDir, 'errors')).catch(e => {})

  // Aggregate per-image tables; copy every image with at least one
  // misclassification into errors/, together with a per-image error TSV.
  const summary = {}
  const copyPromises = []
  for(const result of compareResults) {
    let errors = []
    for(const c in result.result) {
      if(!summary[c]) summary[c] = {}
      for(const r in result.result[c]) {
        summary[c][r] = (summary[c][r] || 0) + result.result[c][r]
        if( c != r ) errors.push([c, r, result.result[c][r]])
      }
    }
    if(errors.length > 0) {
      copyPromises.push(fs.promises.copyFile(
        path.resolve(detectionsDir, result.basename + '.jpg'),
        path.resolve(detectionsDir, 'errors', result.basename + '.jpg')
      ))
      copyPromises.push(fs.promises.writeFile(
        path.resolve(detectionsDir, 'errors', result.basename + '.tsv'),
        errors.map(([c1, c2, cnt]) => [names[c1], names[c2], cnt].join('\t')).join('\n'), 'utf8'))
    }
  }
  //console.log("S", summary)

  // Row/column sums: rows of `summary` are label classes, columns are detected
  // classes; the 'n' row holds unmatched detections, the 'n' column unmatched labels.
  let rows = Object.keys(summary).filter(k => k != 'n').sort().concat(['n'])
  summary.sum = {}
  for(const row of rows) {
    if(!summary[row]) summary[row] = {}
    const rowSum = rows.map(r => summary[row][r] || 0).reduce((a, b) => a + b, 0)
    const columnSum = rows.map(r => summary[r] && summary[r][row] || 0).reduce((a, b) => a + b, 0)
    summary[row].sum = rowSum
    summary.sum[row] = columnSum
  }
  const summaryRows = rows.concat(['sum'])
  let tsvRows = []
  tsvRows.push('Count:')
  tsvRows.push([' ', ...(summaryRows.map(n => names[n]))].join('\t'))
  for(const row of summaryRows) {
    const summaryPart = summary[row] || {}
    tsvRows.push([names[row], ...(summaryRows.map(r => summaryPart[r]))].join('\t'))
  }
  summaryRows.pop()
  tsvRows.push('Fraction:')
  tsvRows.push([' ', ...(summaryRows.map(n => names[n]))].join('\t'))
  for(const row of summaryRows) {
    const summaryPart = summary[row] || {}
    const sum = row != 'sum' ? summaryPart.sum : summary.sum[row]
    tsvRows.push([names[row], ...(summaryRows.map(r => summaryPart[r] && (summaryPart[r] / sum).toFixed(2)))].join('\t'))
  }

  // Overall statistics over the real classes (everything except 'n'):
  // row sums count labels, the 'n' row counts spurious detections (false
  // positives), the 'n' column counts missed labels (false negatives).
  const allLabeled = rows.slice(0, -1).map(r => summary[r].sum).reduce((a, b) => a + b, 0)
  const allDetected = rows.slice(0, -1).map(r => summary.sum[r]).reduce((a, b) => a + b, 0)
  const falsePositives = rows.slice(0, -1).map(r => summary.n[r] || 0).reduce((a, b) => a + b, 0)
  const falseNegatives = rows.slice(0, -1).map(r => summary[r].n || 0).reduce((a, b) => a + b, 0)
  const right = rows.slice(0, -1).map(r => summary[r][r] || 0).reduce((a, b) => a + b, 0)
  const mistakes = rows.slice(0, -1).map(a => rows.slice(0, -1).map(b => (a != b && summary[a][b]) || 0).reduce((a, b) => a + b, 0)).reduce((a, b) => a + b, 0)
  console.log(`right:\t${right}\t${(right / allLabeled).toFixed(3)}`)
  console.log(`false positives:\t${falsePositives}\t${(falsePositives / allLabeled).toFixed(3)}`)
  console.log(`false negatives:\t${falseNegatives}\t${(falseNegatives / allLabeled).toFixed(3)}`)
  console.log(`mistakes:\t${mistakes}\t${(mistakes / allLabeled).toFixed(3)}`)
  console.log(`labeled:\t${allLabeled}`)
  console.log(`detected:\t${allDetected}`)

  let tsv = tsvRows.join('\n')
  console.log(tsv)
  await Promise.all(copyPromises)
}

main()
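Usage, as wired up in run_yolov3_process.py: `node our_scripts/generate-confusion-matrix.js <detections-dir> <labels-dir> <names-file> [max-distance] [width] [height]`. Boxes are matched purely by nearest center distance (default threshold 0.1 in normalized coordinates). The summary statistics and the TSV confusion matrix go to stdout (the runner redirects them to confussion-matrix.tsv), and every image with at least one misclassification is copied into an errors/ subdirectory of the detections dir.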


@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
import sys
import os
import re
import xml.etree.ElementTree as ET
from glob import glob
from os.path import join
from pathlib import Path

# This should just be a folder of xmls (a trailing slash is expected,
# since the glob below is annotations + '*.xml').
annotations = sys.argv[1]
# Then you have a folder of txts.
modified_annotations = sys.argv[2]


def convert(size, box):
    """Convert an (xmin, xmax, ymin, ymax) pixel box to a normalized YOLO (x, y, w, h) box."""
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = round(x * dw, 4)
    w = round(w * dw, 4)
    y = round(y * dh, 4)
    h = round(h * dh, 4)
    return (x, y, w, h)


# Annotation class names (in Polish) mapped to YOLO class ids; -1 marks
# classes that are dropped, and any file containing one is discarded entirely.
CLASS_NAME_TO_ID = {
    '1. rower': 0,                                                    # bicycle
    '2. motocykl': 1,                                                 # motorcycle
    '3. osobowy': 2,                                                  # passenger car
    '4. osobowy pickup': 3,                                           # passenger pickup
    '5. osobowy dostawczy': 4,                                        # car-derived delivery vehicle
    '6. osobowy van 7-9': 5,                                          # passenger van (7-9 seats)
    '7. dostawczy blaszak': 6,                                        # panel van
    '8. dostawczy zabudowany': 7,                                     # box delivery van
    '9. dostawczy pickup (w tym pomoc drog.)': 8,                     # delivery pickup (incl. roadside assistance)
    '10. dostawczy VAN (osobowy)': 9,                                 # delivery van (passenger)
    '11. autobus mały 10-24': -1,                                     # small bus (10-24 seats)
    '12. autobus miejski': 10,                                        # city bus
    '13. autobus turystyczny i inny': -1,                             # coach and other buses
    '14. ciężarowy pow. 3,5t zabudowany': 11,                         # truck over 3.5 t, box body
    '15. ciężarowy pow. 3,5t otwarty (w tym duży holownik)': 12,      # truck over 3.5 t, open (incl. large tow truck)
    '16. ciężarowy pow. 3,5t inny (wanna, gruszka, dźwig itp.)': 13,  # truck over 3.5 t, other (tipper, mixer, crane etc.)
    '17. ciężarowy z widoczną przyczepą': -1,                         # truck with visible trailer
    '18. ciągnik siodłowy z widoczną naczepą': 14,                    # semi tractor with visible semi-trailer
    '19. ciągnik siodłowy bez naczepy': 15,                           # semi tractor without semi-trailer
    '20. camper': -1,                                                 # camper
    '22. ciągnik roliczy, koparka, spychacz': -1,                     # farm tractor, excavator, bulldozer
    '23. inne pojazdy silnikowe': -1,                                 # other motor vehicles
    '24. przyczepa': 16,                                              # trailer
    '25. BUS-karetka/policja': 17,                                    # BUS - ambulance/police
}


def map_class_name_to_id(class_name, xml_document, class_distribution):
    if class_name not in CLASS_NAME_TO_ID:
        raise Exception('Unknown Class ', xml_document, class_name)
    cls_id = CLASS_NAME_TO_ID[class_name]
    if cls_id != -1:
        class_distribution[cls_id] += 1
    return cls_id


def generate_txt_from_xml():
    class_distribution = [0] * 18  # one counter per kept class
    filepaths = glob(annotations + '*.xml')
    for filepath in filepaths:
        txtpath = join(modified_annotations, re.sub(r"\.xml$", ".txt", os.path.basename(filepath)))
        in_file = open(filepath, mode='r', encoding='utf-8')
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)

        # Map every object in a single pass, so class_distribution tallies
        # each annotated object exactly once.
        objects = list(root.iter('object'))
        cls_ids = [map_class_name_to_id(obj.find('name').text, filepath, class_distribution)
                   for obj in objects]
        if -1 in cls_ids:
            print('File discarded.')
            continue

        Path(txtpath).touch()
        out_file = open(txtpath, mode='w', encoding='utf-8')
        for obj, cls_id in zip(objects, cls_ids):
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    print(class_distribution)


generate_txt_from_xml()
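A quick worked example of convert() on hypothetical numbers, for a 1920x1080 frame and a box with xmin=100, xmax=300, ymin=200, ymax=400:

    x, y, w, h = convert((1920, 1080), (100.0, 300.0, 200.0, 400.0))
    # x = ((100 + 300) / 2 - 1) / 1920 = 0.1036  (normalized box center x)
    # y = ((200 + 400) / 2 - 1) / 1080 = 0.2769  (normalized box center y)
    # w = (300 - 100) / 1920 = 0.1042            (normalized box width)
    # h = (400 - 200) / 1080 = 0.1852            (normalized box height)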

our_scripts/run_yolov3_process.py Normal file

@@ -0,0 +1,100 @@
import argparse
import datetime
import glob
import io
import ntpath
import os
import shutil
import subprocess

from config import Configuration


def call_training_script(config):
    cmd = '/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/train.py '
    cmd += config.train.get_args_string()

    print("_______ CALLING TRAINING SCRIPT _______")
    print(cmd)

    os.chdir('..')
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of training process to console
        print(line)

    return cmd


def move_training_results_to_experiments_dir(config):
    training_results_dir_path = os.path.join(config.experiments.dir,
                                             datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    print("_______ MOVING RESULTS _______")
    print(f"MOVING RESULTS TO {training_results_dir_path}")
    os.mkdir(training_results_dir_path)

    weights_path = os.path.join(training_results_dir_path, 'best.pt')
    shutil.move('/home/tomekb/yolov3/weights/best.pt', weights_path)  # move best weights

    names_path = open(config.train.data).readlines()[3].split('=')[-1].rstrip()  # read names path from the .data file
    names_file_name = ntpath.basename(names_path)
    experiment_names_path = os.path.join(training_results_dir_path, names_file_name)
    shutil.copy(names_path, experiment_names_path)  # copy names to created experiment dir with training results

    tensorboard_dir = './runs'
    last_modified_tensorboard_dir = max(glob.glob(os.path.join(tensorboard_dir, '*/')), key=os.path.getmtime)
    shutil.move(last_modified_tensorboard_dir, os.path.join(training_results_dir_path))  # save related tensorboard dir

    shutil.copy2(config.config_path, training_results_dir_path)  # copy configuration yaml

    # for test purposes only
    shutil.copy2('/home/tomekb/yolov3/experiments/1/best.pt', training_results_dir_path)

    return weights_path, experiment_names_path, training_results_dir_path


def call_detection_script(config, weights_path, names_path, dir):
    detect_output_dir = os.path.join(dir, 'output')
    cmd = f"""/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/detect.py
        --cfg {config.train.cfg}
        --source {config.detect.source}
        --output {detect_output_dir}
        --names {names_path}
        --weights {weights_path}
        --test-img-size {getattr(config.detect, 'test-img-size')}
        --conf-thres {getattr(config.detect, 'conf-thres')}
        --iou-thres {getattr(config.detect, 'iou-thres')}"""
    cmd += " --save-txt" if getattr(config.detect, 'save-txt') else ""
    cmd += " --agnostic-nms" if getattr(config.detect, 'agnostic-nms') else ""
    cmd += " --augment" if getattr(config.detect, 'augment') else ""
    cmd += f" --device {config.train.device}" if config.train.device else ""
    cmd = " ".join(cmd.split())  # collapse the multi-line f-string into a single-line command

    print("_______ CALLING DETECTION SCRIPT _______")
    print(cmd)

    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of process to console
        print(line)

    return detect_output_dir


def call_generate_confussion_matrix(detect_output_dir, config, names_path, train_results_dir):
    labels_dir = getattr(config.confussion_matrix, 'labels-dir')
    cmd = f"node ./our_scripts/generate-confusion-matrix.js {detect_output_dir} {labels_dir} {names_path} > {train_results_dir}/confussion-matrix.tsv"

    print("_______ CALLING CONFUSSION MATRIX SCRIPT _______")
    print(cmd)

    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):  # print output of process to console
        print(line)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    config = Configuration()

    train_cmd = call_training_script(config)
    weights_path, names_path, train_results_dir = move_training_results_to_experiments_dir(config)
    detect_output_dir = call_detection_script(config, weights_path, names_path, train_results_dir)
    call_generate_confussion_matrix(detect_output_dir, config, names_path, train_results_dir)
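The script chains the whole experiment: train.py with arguments generated from config.yml, archiving of the best weights, names file, and TensorBoard run into a timestamped directory under experiments/, detect.py on the held-out image list, and finally the Node confusion-matrix script. Note that os.chdir('..') in call_training_script assumes the script is launched from inside our_scripts/.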

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
/home/tomekb/miniconda3/envs/conda3.7/bin/python -u /home/tomekb/yolov3/our_scripts/run_yolov3_process.py \
  --epochs 50 \
  --batch-size 8 \
  --cfg /cfg/yolov3-spp-18cls.cfg \
  --data ./data/widok01-11.data \
  --multi-scale \
  --img-size 512 896 \
  `# --cache-images` \
  --adam \
  --device 0 \
  `# parameters below are for detect.py` \
  --source /home/michall/yolov3/data/widok01-11_test_labels.txt \
  --test-img-size 1024 \
  --conf-thres 0.3 \
  --iou-thres 0.6 \
  --save-txt \
  `# parameters below are for generate-confusion-matrix.js` \
  --labels-dir ./data/widok01-11_labels
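Note that, as committed, run_yolov3_process.py creates an argparse.ArgumentParser but never registers or parses any of these options, so the flags above are effectively ignored: the values actually used come from our_scripts/config.yml.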

train.py

@@ -9,6 +9,8 @@ import test  # import test.py to get mAP after each epoch
 from models import *
 from utils.datasets import *
 from utils.utils import *
+from our_scripts.config import Configuration
+

 mixed_precision = True
 try:  # Mixed precision training https://github.com/NVIDIA/apex
@@ -399,7 +401,7 @@ if __name__ == '__main__':
     check_git_status()
     opt.cfg = check_file(opt.cfg)  # check file
     opt.data = check_file(opt.data)  # check file
-    print(opt)
+    #print(opt)
     opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size)))  # extend to 3 sizes (min, max, test)
     device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
     if device.type == 'cpu':
@@ -408,6 +410,10 @@ if __name__ == '__main__':
     # scale hyp['obj'] by img_size (evolved at 320)
     # hyp['obj'] *= opt.img_size[0] / 320.

+    hyp = Configuration().train.other_hyps.__dict__
+
     tb_writer = None
     if not opt.evolve:  # Train normally
         print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
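With this hunk, the training hyperparameters are no longer hard-coded in train.py but come from the other-hyps section of config.yml. A minimal sketch of what train.py now sees (assuming it runs from the repo root, so our_scripts is importable):

    from our_scripts.config import Configuration

    hyp = Configuration().train.other_hyps.__dict__
    print(hyp['giou'], hyp['lr0'], hyp['momentum'])  # 3.53 0.01 0.937, per our_scripts/config.yml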

utils/datasets.py

@@ -44,11 +44,10 @@ def exif_size(img):

 class LoadImages:  # for inference
     def __init__(self, path, img_size=416):
         path = str(Path(path))  # os-agnostic
-        files = []
-        if os.path.isdir(path):
-            files = sorted(glob.glob(os.path.join(path, '*.*')))
-        elif os.path.isfile(path):
-            files = [path]
+        files = [f.strip() for f in open(path, 'r').readlines()]

         images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
         videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
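LoadImages therefore no longer accepts a directory or a single media file: path must now be a text file listing one image or video path per line, which is why detect.py's endswith('.txt') webcam heuristic had to be hard-coded off above. A source file like config.yml's widok01-11_test_labels.txt would contain lines such as (paths hypothetical):

    data/widok01-11_images/00001.jpg
    data/widok01-11_images/00002.jpg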