This commit is contained in:
Glenn Jocher 2019-12-08 17:52:44 -08:00
parent 4942aacef9
commit e35397ee41
4 changed files with 4 additions and 5 deletions

View File

@@ -86,7 +86,7 @@ GPUs | `batch_size` | images/sec | epoch time | epoch cost
 K80 | 64 (32x2) | 11 | 175 min | $0.58
 T4 | 64 (32x2) | 40 | 49 min | $0.29
 T4 x2 | 64 (64x1) | 61 | 32 min | $0.36
-V100 | 64 (32x2) | 115 | 17 min | $0.24
+V100 | 64 (32x2) | 122 | 16 min | $0.23
 V100 x2 | 64 (64x1) | 150 | 13 min | $0.36
 2080Ti | 64 (32x2) | 81 | 24 min | -
 2080Ti x2 | 64 (64x1) | 140 | 14 min | -

View File

@@ -64,8 +64,8 @@ def test(cfg,
     loss = torch.zeros(3)
     jdict, stats, ap, ap_class = [], [], [], []
     for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
-        targets = targets.to(device)
-        imgs = imgs.to(device)
+        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
+        targets = targets.to(device)
         _, _, height, width = imgs.shape  # batch size, channels, height, width
         # Plot images with bounding boxes
# Plot images with bounding boxes # Plot images with bounding boxes

View File

@@ -251,7 +251,7 @@ def train():
     pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
     for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
         ni = i + nb * epoch  # number integrated batches (since train start)
-        imgs = imgs.to(device)
+        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
         # Multi-Scale training

View File

@@ -487,8 +487,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         # Convert
         img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
-        img = np.ascontiguousarray(img, dtype=np.float32)  # uint8 to float32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        img = np.ascontiguousarray(img)
         return torch.from_numpy(img), labels_out, img_path, ((h, w), (ratio, pad))