add *.jpeg support

Glenn Jocher 2019-05-10 15:16:02 +02:00
parent 31592c276f
commit ae03cf3eea
2 changed files with 9 additions and 8 deletions

train.py

@@ -167,7 +167,8 @@ def train(
 p.requires_grad = False if epoch == 0 else True
 # Update image weights (optional)
-dataset.image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=1 - maps)
+image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=1 - maps)
+dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # random weighted index
 mloss = torch.zeros(5).to(device)  # mean losses
 for i, (imgs, targets, _, _) in enumerate(dataloader):
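
The new train() logic computes per-image weights from the label statistics and the current per-class mAPs, then draws a full epoch's worth of weighted indices up front instead of letting the dataset sample an index inside every __getitem__ call. A minimal sketch of that sampling step, using a stand-in for labels_to_image_weights() (its real implementation is only approximated here, and the toy labels, nc and maps values are made up):

import random
import numpy as np

def image_weights_from_labels(labels, nc, class_weights):
    # Stand-in for labels_to_image_weights(): count class occurrences per image,
    # then score each image by the summed weight of the classes it contains.
    # labels[i] is an (n_i, 5) array whose first column is the class index.
    counts = np.array([np.bincount(l[:, 0].astype(int), minlength=nc) for l in labels])
    return (counts * class_weights).sum(1)

# Toy inputs: two images, two classes, and per-class mAPs so far (all made up).
labels = [np.array([[0, 0.1, 0.1, 0.2, 0.2]]), np.array([[1, 0.5, 0.5, 0.3, 0.3]])]
nc, maps = 2, np.array([0.9, 0.2])
image_weights = image_weights_from_labels(labels, nc, class_weights=1 - maps)

# Per-epoch resampling as in the hunk above: one weighted index per image.
n = len(labels)
indices = random.choices(range(n), weights=image_weights, k=n)
print(indices)  # e.g. [1, 1] -- the image holding the low-mAP class dominates

Because k=dataset.n, each epoch still visits n samples, but images containing low-mAP classes are drawn more often.
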
@@ -281,12 +282,12 @@ if __name__ == '__main__':
 parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
 parser.add_argument('--accumulate', type=int, default=1, help='accumulate gradient x batches before optimizing')
 parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
-parser.add_argument('--data-cfg', type=str, default='data/coco.data', help='coco.data file path')
+parser.add_argument('--data-cfg', type=str, default='data/coco_32img.data', help='coco.data file path')
 parser.add_argument('--multi-scale', action='store_true', help='random image sizes per batch 320 - 608')
 parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
 parser.add_argument('--resume', action='store_true', help='resume training flag')
 parser.add_argument('--transfer', action='store_true', help='transfer learning flag')
-parser.add_argument('--num-workers', type=int, default=4, help='number of Pytorch DataLoader workers')
+parser.add_argument('--num-workers', type=int, default=0, help='number of Pytorch DataLoader workers')
 parser.add_argument('--dist-url', default='tcp://127.0.0.1:9999', type=str, help='distributed training init method')
 parser.add_argument('--rank', default=0, type=int, help='distributed training node rank')
 parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training')
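
With these defaults an out-of-the-box run trains against the small data/coco_32img.data file with DataLoader workers disabled, so a full COCO run now passes both flags explicitly. A quick sketch of how argparse resolves the two changed defaults (only these two options are rebuilt here, not the full train.py parser):

import argparse

# Only the two options whose defaults changed are rebuilt here.
parser = argparse.ArgumentParser()
parser.add_argument('--data-cfg', type=str, default='data/coco_32img.data', help='coco.data file path')
parser.add_argument('--num-workers', type=int, default=0, help='number of Pytorch DataLoader workers')

print(parser.parse_args([]))  # Namespace(data_cfg='data/coco_32img.data', num_workers=0)
print(parser.parse_args(['--data-cfg', 'data/coco.data', '--num-workers', '4']))  # previous full-COCO defaults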

utils/datasets.py

@@ -130,7 +130,7 @@ class LoadWebcam: # for inference
 class LoadImagesAndLabels(Dataset): # for training/testing
-def __init__(self, path, img_size=416, batch_size=16, augment=False, rect=True, image_weighting=False):
+def __init__(self, path, img_size=416, batch_size=16, augment=False, rect=True, image_weights=False):
 with open(path, 'r') as f:
 img_files = f.read().splitlines()
 self.img_files = list(filter(lambda x: len(x) > 0, img_files))
@@ -146,8 +146,8 @@ class LoadImagesAndLabels(Dataset): # for training/testing
 replace('.bmp', '.txt').
 replace('.png', '.txt') for x in self.img_files]
-self.image_weighting = image_weighting
-self.rect = False if image_weighting else rect
+self.image_weights = image_weights
+self.rect = False if image_weights else rect
 # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
 if self.rect:
@@ -203,8 +203,8 @@ class LoadImagesAndLabels(Dataset): # for training/testing
 return len(self.img_files)
 def __getitem__(self, index):
-if self.image_weighting:
-index = random.choices(range(self.n), weights=self.image_weights, k=1)[0] # random weighted index
+if self.image_weights:
+index = self.indices[index]
 img_path = self.img_files[index]
 label_path = self.label_files[index]
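
A stripped-down Dataset showing how the renamed image_weights flag and the precomputed dataset.indices interact after this commit (a sketch with made-up sample data, not the real LoadImagesAndLabels):

import random
from torch.utils.data import Dataset

class WeightedIndexDataset(Dataset):
    """Toy stand-in for LoadImagesAndLabels, image_weights path only."""

    def __init__(self, samples, image_weights=False, rect=True):
        self.samples = samples
        self.n = len(samples)
        self.image_weights = image_weights
        self.rect = False if image_weights else rect  # weighted sampling disables rectangular batching
        self.indices = list(range(self.n))            # train() overwrites this each epoch

    def __len__(self):
        return self.n

    def __getitem__(self, index):
        if self.image_weights:
            index = self.indices[index]               # remap to the per-epoch weighted index
        return self.samples[index]

# Per-epoch update, mirroring the train() hunk above:
ds = WeightedIndexDataset(['img0', 'img1', 'img2'], image_weights=True)
image_weights = [0.1, 0.1, 0.8]                       # e.g. from labels_to_image_weights()
ds.indices = random.choices(range(ds.n), weights=image_weights, k=ds.n)
print([ds[i] for i in range(len(ds))])                # mostly 'img2'

The weighted draw now happens once per epoch in train() rather than per call in __getitem__, which keeps sampling consistent across DataLoader workers.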