updates
commit e35397ee41
parent 4942aacef9
```diff
@@ -86,7 +86,7 @@ GPUs | `batch_size` | images/sec | epoch time | epoch cost
 K80       | 64 (32x2) | 11  | 175 min | $0.58
 T4        | 64 (32x2) | 40  | 49 min  | $0.29
 T4 x2     | 64 (64x1) | 61  | 32 min  | $0.36
-V100      | 64 (32x2) | 115 | 17 min  | $0.24
+V100      | 64 (32x2) | 122 | 16 min  | $0.23
 V100 x2   | 64 (64x1) | 150 | 13 min  | $0.36
 2080Ti    | 64 (32x2) | 81  | 24 min  | -
 2080Ti x2 | 64 (64x1) | 140 | 14 min  | -
```
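The epoch time and cost columns follow directly from the throughput column. Below is a minimal sketch of that arithmetic; the ~117k-image training-set size and the hourly instance rate are assumptions back-solved from the table, not values stated in this commit.

```python
# Sketch: reproduce "epoch time" and "epoch cost" from images/sec.
# Assumptions (not from the commit): ~117,264 training images and a
# $0.86/hr V100 instance rate, both inferred from the table itself.
n_images = 117_264      # assumed training-set size
imgs_per_sec = 122      # V100 throughput from the updated row
hourly_rate = 0.86      # assumed $/hr for the V100 instance

epoch_minutes = n_images / imgs_per_sec / 60
epoch_cost = epoch_minutes / 60 * hourly_rate
print(f'{epoch_minutes:.0f} min, ${epoch_cost:.2f}')  # ~16 min, ~$0.23
```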
test.py (2 changed lines)
```diff
@@ -64,8 +64,8 @@ def test(cfg,
     loss = torch.zeros(3)
     jdict, stats, ap, ap_class = [], [], [], []
     for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
+        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
-        imgs = imgs.to(device)
         _, _, height, width = imgs.shape  # batch size, channels, height, width

         # Plot images with bounding boxes
```
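The change keeps images as uint8 until after the host-to-device copy and only then converts to float and normalizes, so the copy moves a quarter of the bytes. A minimal standalone sketch of the pattern (dummy tensor and runtime device selection are mine, not code from this repo):

```python
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Dataloader-style batch: uint8 images, NCHW, values 0-255 (dummy data here)
imgs = torch.randint(0, 256, (64, 3, 416, 416), dtype=torch.uint8)

# Copy the small uint8 tensor first, then convert and normalize on the device
imgs = imgs.to(device).float() / 255.0  # uint8 -> float32, 0-255 -> 0.0-1.0
assert imgs.dtype == torch.float32 and imgs.max() <= 1.0
```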
train.py (2 changed lines)
```diff
@@ -251,7 +251,7 @@ def train():
         pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
-            imgs = imgs.to(device)
+            imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
             targets = targets.to(device)

             # Multi-Scale training
```
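The training loop gets the same swap, and the benefit is easy to quantify. A rough sketch of the per-batch transfer size, assuming the 64-image batch from the table above and the 416x416 resolution mentioned in the dataset code below (actual shapes vary with the configured image size):

```python
# Per-batch host-to-device transfer size, uint8 vs float32
# (64 x 3 x 416 x 416 batch assumed)
n = 64 * 3 * 416 * 416
print(f'uint8:   {n / 1e6:.0f} MB')      # ~33 MB
print(f'float32: {n * 4 / 1e6:.0f} MB')  # ~133 MB, 4x more PCIe traffic
```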
```diff
@@ -487,8 +487,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing

         # Convert
         img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
-        img = np.ascontiguousarray(img, dtype=np.float32)  # uint8 to float32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        img = np.ascontiguousarray(img)

         return torch.from_numpy(img), labels_out, img_path, ((h, w), (ratio, pad))
```
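After this hunk the dataset class (LoadImagesAndLabels) returns uint8 tensors and leaves normalization to the train/test loops; np.ascontiguousarray is still needed because the `::-1` BGR-to-RGB slice leaves a negative-stride view that torch.from_numpy rejects, and the copy also gives the transposed array a clean contiguous layout. A small self-contained sketch with a dummy image (the shapes are illustrative):

```python
import numpy as np
import torch

# Dummy HWC BGR image standing in for the loader's output (values 0-255)
img = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)

img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (a view)
# torch.from_numpy(img) would fail here: the ::-1 slice leaves negative strides
img = np.ascontiguousarray(img)           # copy into contiguous uint8 memory

t = torch.from_numpy(img)                 # still uint8; normalized later on the GPU
assert t.dtype == torch.uint8 and t.shape == (3, 416, 416)
```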