updates
parent a0936a4eac
commit a701374014
detect.py | 34
@@ -34,7 +34,7 @@ def detect(cfg, weights, images, output='output', img_size=416, conf_thres=0.3,
     classes = load_classes(parse_data_cfg('cfg/coco.data')['names'])  # Extracts class labels from file
     colors = [[random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for _ in range(len(classes))]

-    for i, (path, img, img0) in enumerate(dataloader):
+    for i, (path, img, im0) in enumerate(dataloader):
         print("%g/%g '%s': " % (i + 1, len(dataloader), path), end='')
         t = time.time()

@@ -54,11 +54,10 @@ def detect(cfg, weights, images, output='output', img_size=416, conf_thres=0.3,
         if detections is not None:
             save_img_path = os.path.join(output, path.split('/')[-1])
             save_txt_path = save_img_path + '.txt'
-            img = img0

             # The amount of padding that was added
-            pad_x = max(img.shape[0] - img.shape[1], 0) * (img_size / max(img.shape))
-            pad_y = max(img.shape[1] - img.shape[0], 0) * (img_size / max(img.shape))
+            pad_x = max(im0.shape[0] - im0.shape[1], 0) * (img_size / max(im0.shape))
+            pad_y = max(im0.shape[1] - im0.shape[0], 0) * (img_size / max(im0.shape))
             # Image height and width after padding is removed
             unpad_h = img_size - pad_y
             unpad_w = img_size - pad_x

@@ -70,34 +69,29 @@ def detect(cfg, weights, images, output='output', img_size=416, conf_thres=0.3,
             for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                 # Rescale coordinates to original dimensions
-                box_h = ((y2 - y1) / unpad_h) * img.shape[0]
-                box_w = ((x2 - x1) / unpad_w) * img.shape[1]
-                y1 = (((y1 - pad_y // 2) / unpad_h) * img.shape[0]).round()
-                x1 = (((x1 - pad_x // 2) / unpad_w) * img.shape[1]).round()
+                box_h = ((y2 - y1) / unpad_h) * im0.shape[0]
+                box_w = ((x2 - x1) / unpad_w) * im0.shape[1]
+                y1 = (((y1 - pad_y // 2) / unpad_h) * im0.shape[0]).round()
+                x1 = (((x1 - pad_x // 2) / unpad_w) * im0.shape[1]).round()
                 x2 = (x1 + box_w).round()
                 y2 = (y1 + box_h).round()
                 x1, y1, x2, y2 = max(x1, 0), max(y1, 0), max(x2, 0), max(y2, 0)

-                # write to file
-                if save_txt:
+                if save_txt:  # Write to file
                     with open(save_txt_path, 'a') as file:
-                        file.write(('%g %g %g %g %g %g\n') % (x1, y1, x2, y2, cls_pred, cls_conf * conf))
+                        file.write('%g %g %g %g %g %g\n' % (x1, y1, x2, y2, cls_pred, cls_conf * conf))

-                if save_images:
-                    # Add bbox to the image
+                if save_images:  # Add bbox to the image
                     label = '%s %.2f' % (classes[int(cls_pred)], conf)
-                    plot_one_box([x1, y1, x2, y2], img, label=label, color=colors[int(cls_pred)])
+                    plot_one_box([x1, y1, x2, y2], im0, label=label, color=colors[int(cls_pred)])

-            if save_images:
-                # Save generated image with detections
-                cv2.imwrite(save_img_path, img)
+            if save_images:  # Save generated image with detections
+                cv2.imwrite(save_img_path, im0)

         print(' Done. (%.3fs)' % (time.time() - t))

         if platform == 'darwin':  # MacOS
-            os.system('open ' + output)
-            os.system('open ' + save_img_path)
+            os.system('open ' + output + '&& open ' + save_img_path)


 if __name__ == '__main__':

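For reference, the arithmetic these hunks switch from img to im0 undoes the letterbox transform: detections come out in the square img_size network-input coordinates, and pad_x / pad_y / unpad_w / unpad_h map them back onto the original frame. Below is a minimal standalone sketch of that mapping; the helper name unletterbox_box and its signature are illustrative, not part of the repo.

def unletterbox_box(box, img_size, im0_shape):
    # Map one (x1, y1, x2, y2) box from the square img_size network input back
    # to the original image of shape im0_shape = (height, width, channels).
    # Mirrors the pad_x / pad_y / unpad arithmetic in detect(); illustrative only.
    h0, w0 = im0_shape[:2]
    gain = img_size / max(h0, w0)      # letterbox scale factor
    pad_x = max(h0 - w0, 0) * gain     # total horizontal padding added
    pad_y = max(w0 - h0, 0) * gain     # total vertical padding added
    unpad_w = img_size - pad_x         # input width actually occupied by the image
    unpad_h = img_size - pad_y         # input height actually occupied by the image

    x1, y1, x2, y2 = box
    box_w = (x2 - x1) / unpad_w * w0
    box_h = (y2 - y1) / unpad_h * h0
    x1 = round((x1 - pad_x // 2) / unpad_w * w0)
    y1 = round((y1 - pad_y // 2) / unpad_h * h0)
    x2 = round(x1 + box_w)
    y2 = round(y1 + box_h)
    # detect() only clamps at zero, so coordinates that fall in the padding are clipped
    return max(x1, 0), max(y1, 0), max(x2, 0), max(y2, 0)

# Example: a box on the 416x416 input, original frame of shape 1080x1920x3
print(unletterbox_box((100, 50, 300, 200), 416, (1080, 1920, 3)))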
test.py | 3
@@ -27,8 +27,7 @@ def test(cfg, data_cfg, weights, batch_size=16, img_size=416, iou_thres=0.5, con
     model.to(device).eval()

     # Get dataloader
-    # dataset = load_images_with_labels(test_path)
-    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_cpus)
+    # dataloader = torch.utils.data.DataLoader(load_images_with_labels(test_path), batch_size=batch_size)  # pytorch
     dataloader = load_images_and_labels(test_path, batch_size=batch_size, img_size=img_size)

     mean_mAP, mean_R, mean_P = 0.0, 0.0, 0.0

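The rewritten comment points at the standard PyTorch route of wrapping a Dataset in torch.utils.data.DataLoader, as an alternative to the custom load_images_and_labels generator actually used. A minimal, hypothetical sketch of that pattern follows; ToyDetectionSet is a stand-in, not the repo's load_images_with_labels.

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDetectionSet(Dataset):
    # Stand-in dataset: yields a blank 3x416x416 image tensor and an empty
    # (n_labels, 5) label tensor per index, just to show the DataLoader wiring.
    def __init__(self, n=8, img_size=416):
        self.n, self.img_size = n, img_size

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        img = torch.zeros(3, self.img_size, self.img_size)
        labels = torch.zeros(0, 5)  # rows of (class, x, y, w, h)
        return img, labels

loader = DataLoader(ToyDetectionSet(), batch_size=4, shuffle=False, num_workers=0)
for imgs, labels in loader:
    print(imgs.shape)  # torch.Size([4, 3, 416, 416])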
train.py | 3
@@ -45,8 +45,7 @@ def train(
     model = Darknet(cfg, img_size)

     # Get dataloader
-    dataloader = load_images_and_labels(train_path, batch_size=batch_size, img_size=img_size,
-                                        multi_scale=multi_scale, augment=True)
+    dataloader = load_images_and_labels(train_path, batch_size, img_size, multi_scale=multi_scale, augment=True)

     lr0 = 0.001
     if resume:

@@ -41,7 +41,8 @@ class load_images(): # for inference
         assert img0 is not None, 'Failed to load ' + img_path

         # Padded resize
-        img, _, _, _ = letterbox(img0, height=self.height, color=(127.5, 127.5, 127.5))
+        img, ratio, padw, padh = letterbox(img0, height=self.height, color=(127.5, 127.5, 127.5))
+        print(ratio, padw, padh)

         # Normalize RGB
         img = img[:, :, ::-1].transpose(2, 0, 1)

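The letterbox call now keeps the resize ratio and padding offsets it previously discarded (the added print only logs them), which is what lets detections be mapped back to the source image exactly. Below is a minimal sketch of a letterbox-style resize that returns those values; the internals (padding split, interpolation mode) are assumptions, not the repo's implementation.

import cv2

def letterbox_sketch(img, height=416, color=(127.5, 127.5, 127.5)):
    # Resize a BGR image to a height x height square, preserving aspect ratio
    # and filling the border with a constant color. Returns the scale ratio and
    # the per-side padding so callers can invert the transform. Illustrative only.
    h0, w0 = img.shape[:2]
    ratio = height / max(h0, w0)
    new_w, new_h = round(w0 * ratio), round(h0 * ratio)
    padw = (height - new_w) / 2      # width padding per side
    padh = (height - new_h) / 2      # height padding per side
    top, bottom = round(padh - 0.1), round(padh + 0.1)
    left, right = round(padw - 0.1), round(padw + 0.1)
    img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, ratio, padw, padh

With these values a point on the padded input maps back to the source image as x0 = (x - padw) / ratio and y0 = (y - padh) / ratio.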