Glenn Jocher 2019-08-10 22:11:55 +02:00
parent 5ff6e6b3a5
commit e8a15ac1d7
2 changed files with 22 additions and 22 deletions

View File

@@ -32,9 +32,10 @@ def create_modules(module_defs):
                                                    bias=not bn))
             if bn:
                 modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))
-            if mdef['activation'] == 'leaky':
-                # modules.add_module('activation', nn.PReLU(num_parameters=filters, init=0.1))
+            if mdef['activation'] == 'leaky':  # TODO: activation study https://github.com/ultralytics/yolov3/issues/441
                 modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))
+                # modules.add_module('activation', nn.PReLU(num_parameters=1))
+                # modules.add_module('activation', Swish())

         elif mdef['type'] == 'maxpool':
             kernel_size = int(mdef['size'])
@@ -82,6 +83,14 @@ def create_modules(module_defs):
     return hyperparams, module_list


+class Swish(nn.Module):
+    def __init__(self):
+        super(Swish, self).__init__()
+
+    def forward(self, x):
+        return x * torch.sigmoid(x)
+
+
 class YOLOLayer(nn.Module):
     def __init__(self, anchors, nc, img_size, yolo_index):
         super(YOLOLayer, self).__init__()
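Note: the added Swish module is just x * torch.sigmoid(x); the commented-out lines in create_modules keep it and PReLU available as drop-in alternatives to LeakyReLU for the linked activation study. A minimal standalone sketch of how the commented Swish() option would slot into one convolutional block (channel counts and the dummy input are illustrative, not from the repo):

import torch
import torch.nn as nn

class Swish(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)  # swish(x) = x * sigmoid(x)

# one convolutional block assembled the same way create_modules does,
# but with the commented-out Swish() alternative enabled
modules = nn.Sequential()
modules.add_module('Conv2d', nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False))
modules.add_module('BatchNorm2d', nn.BatchNorm2d(16, momentum=0.1))
modules.add_module('activation', Swish())

x = torch.randn(1, 3, 32, 32)  # dummy NCHW input
print(modules(x).shape)        # torch.Size([1, 16, 32, 32]); swapping the activation never changes shapes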

View File

@@ -766,8 +766,14 @@ def plot_results(start=0, stop=0):  # from utils.utils import *; plot_results()
         n = results.shape[1]  # number of rows
         x = range(start, min(stop, n) if stop else n)
         for i in range(10):
-            ax[i].plot(x, results[i, x], marker='.', label=f.replace('.txt', ''))
+            y = results[i, x]
+            if i in [0, 1, 2, 5, 6, 7]:
+                y[y == 0] = np.nan  # dont show zero loss values
+            ax[i].plot(x, y, marker='.', label=f.replace('.txt', ''))
             ax[i].set_title(s[i])
+            if i in [5, 6, 7]:  # share train and val loss y axes
+                ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+
     fig.tight_layout()
     ax[4].legend()
     fig.savefig('results.png', dpi=200)
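The plot_results change does two things: zero loss values (entries logged before a metric existed) are replaced with NaN so matplotlib draws a gap instead of a misleading dip to zero, and each val-loss panel (indices 5-7) shares its y axis with the matching train-loss panel (indices 0-2). A self-contained sketch of the same two tricks on made-up numbers (values and the output filename are illustrative); the get_shared_y_axes().join(...) call mirrors the commit and the matplotlib releases of the time, while newer matplotlib deprecates it in favour of ax.sharey(other):

import numpy as np
import matplotlib.pyplot as plt

results = np.array([[0.0, 3.2, 2.1, 1.5],   # pretend train loss (0.0 = not yet logged)
                    [0.0, 3.5, 2.4, 1.8]])  # pretend val loss
x = range(results.shape[1])

fig, ax = plt.subplots(1, 2, figsize=(8, 3))
for i in range(2):
    y = results[i, x]
    y[y == 0] = np.nan                        # dont show zero loss values
    ax[i].plot(x, y, marker='.')
ax[1].get_shared_y_axes().join(ax[1], ax[0])  # share train and val loss y axes (deprecated on newer matplotlib)
fig.tight_layout()
fig.savefig('results_sketch.png', dpi=200)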
@@ -785,7 +791,10 @@ def plot_results_overlay(start=1, stop=0):  # from utils.utils import *; plot_results_overlay()
         ax = ax.ravel()
         for i in range(5):
             for j in [i, i + 5]:
-                ax[i].plot(x, results[j, x], marker='.', label=s[j])
+                y = results[j, x]
+                if i in [0, 1, 2]:
+                    y[y == 0] = np.nan  # dont show zero loss values
+                ax[i].plot(x, y, marker='.', label=s[j])
             ax[i].set_title(t[i])
             ax[i].legend()
             ax[i].set_ylabel(f) if i == 0 else None  # add filename
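One detail worth noting for both plotting functions: y = results[j, x] indexes with a range, which is advanced (fancy) indexing in NumPy, so y is a copy rather than a view. Setting its zeros to NaN therefore never mutates the results array loaded from the .txt file. A quick standalone check (array contents are illustrative):

import numpy as np

results = np.array([[0.0, 1.0, 2.0]])
x = range(3)
y = results[0, x]    # fancy indexing with a range returns a copy, not a view
y[y == 0] = np.nan
print(results[0])    # [0. 1. 2.]  -> loaded data is untouched
print(y)             # [nan  1.  2.]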
@@ -793,24 +802,6 @@ def plot_results_overlay(start=1, stop=0):  # from utils.utils import *; plot_results_overlay()
         fig.savefig(f.replace('.txt', '.png'), dpi=200)


-def plot_results_orig(start=0, stop=0):  # from utils.utils import *; plot_results_orig()
-    # Plot training results files 'results*.txt' in original format
-    fig, ax = plt.subplots(2, 5, figsize=(14, 7))
-    ax = ax.ravel()
-    s = ['GIoU/XY', 'Width/Height', 'Confidence', 'Classification', 'Train Loss', 'Precision', 'Recall', 'mAP', 'F1',
-         'Test Loss']
-    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Google Drive/results*.txt')):
-        results = np.loadtxt(f, usecols=[2, 3, 4, 5, 6, 9, 10, 11, 12, 13]).T
-        n = results.shape[1]  # number of rows
-        x = range(start, min(stop, n) if stop else n)
-        for i in range(10):
-            ax[i].plot(x, results[i, x], marker='.', label=f.replace('.txt', ''))
-            ax[i].set_title(s[i])
-        fig.tight_layout()
-        ax[4].legend()
-        fig.savefig('results.png', dpi=200)
-
-
 def version_to_tuple(version):
     # Used to compare versions of library
     return tuple(map(int, (version.split("."))))