diff --git a/test.py b/test.py
index 431515ba..224ef57a 100644
--- a/test.py
+++ b/test.py
@@ -87,11 +87,12 @@ for batch_i, (imgs, targets) in enumerate(dataloader):
             correct.extend([0 for _ in range(len(detections))])
         else:
             # Extract target boxes as (x1, y1, x2, y2)
-            target_boxes = torch.FloatTensor(annotations[:, 1:].shape)
-            target_boxes[:, 0] = (annotations[:, 1] - annotations[:, 3] / 2)
-            target_boxes[:, 1] = (annotations[:, 2] - annotations[:, 4] / 2)
-            target_boxes[:, 2] = (annotations[:, 1] + annotations[:, 3] / 2)
-            target_boxes[:, 3] = (annotations[:, 2] + annotations[:, 4] / 2)
+            # target_boxes = torch.FloatTensor(annotations[:, 1:].shape)
+            # target_boxes[:, 0] = (annotations[:, 1] - annotations[:, 3] / 2)
+            # target_boxes[:, 1] = (annotations[:, 2] - annotations[:, 4] / 2)
+            # target_boxes[:, 2] = (annotations[:, 1] + annotations[:, 3] / 2)
+            # target_boxes[:, 3] = (annotations[:, 2] + annotations[:, 4] / 2)
+            target_boxes = xywh2xyxy(annotations[:,1:5])
             target_boxes *= opt.img_size

             detected = []
diff --git a/utils/datasets.py b/utils/datasets.py
index 81371c8f..c6034ab0 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -23,6 +23,7 @@ class ImageFolder():  # for eval-only
         self.nB = math.ceil(self.nF / batch_size)  # number of batches
         self.batch_size = batch_size
         self.height = img_size
+        assert self.nF > 0, 'No images found in path %s' % path

         # RGB normalization values

@@ -65,7 +66,7 @@ class ListDataset():  # for training
         with open(path, 'r') as file:
             self.img_files = file.readlines()

-        if platform == 'darwin':  # macos
+        if platform == 'darwin':  # MacOS (local)
             self.img_files = [path.replace('\n', '').replace('/images', '/Users/glennjocher/Downloads/DATA/coco/images')
                               for path in self.img_files]
         else:  # linux (gcp cloud)
@@ -77,10 +78,10 @@ class ListDataset():  # for training
         self.nF = len(self.img_files)  # number of image files
         self.nB = math.ceil(self.nF / batch_size)  # number of batches
         self.batch_size = batch_size
-
-        # assert self.nB > 0, 'No images found in path %s' % path
         self.height = img_size

+        assert self.nB > 0, 'No images found in path %s' % path
+
         # RGB normalization values
         # self.rgb_mean = np.array([60.134, 49.697, 40.746], dtype=np.float32).reshape((1, 3, 1, 1))
         # self.rgb_std = np.array([29.99, 24.498, 22.046], dtype=np.float32).reshape((1, 3, 1, 1))
diff --git a/utils/utils.py b/utils/utils.py
index 54b49df3..b3c09d3e 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -62,7 +62,7 @@ def weights_init_normal(m):


 def xyxy2xywh(x):  # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
-    y = np.zeros(x.shape)
+    y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
     y[:, 0] = (x[:, 0] + x[:, 2]) / 2
     y[:, 1] = (x[:, 1] + x[:, 3]) / 2
     y[:, 2] = x[:, 2] - x[:, 0]
@@ -71,11 +71,11 @@ def xyxy2xywh(x):  # Convert bounding box format from [x1, y1, x2, y2] to [x, y,


 def xywh2xyxy(x):  # Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
-    y = np.zeros(x.shape)
-    y[:, 0] = (x[:, 1] - x[:, 3] / 2)
-    y[:, 1] = (x[:, 2] - x[:, 4] / 2)
-    y[:, 2] = (x[:, 1] + x[:, 3] / 2)
-    y[:, 3] = (x[:, 2] + x[:, 4] / 2)
+    y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
+    y[:, 0] = (x[:, 0] - x[:, 2] / 2)
+    y[:, 1] = (x[:, 1] - x[:, 3] / 2)
+    y[:, 2] = (x[:, 0] + x[:, 2] / 2)
+    y[:, 3] = (x[:, 1] + x[:, 3] / 2)
     return y
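
Note on the `xywh2xyxy` fix: the old body read box values from columns 1-4, which only makes sense while column 0 still holds the class id; once the class column is sliced off with `annotations[:, 1:5]`, the box sits in columns 0-3. Below is a minimal round-trip sanity check for the patched converters, assuming it is run from the repo root so that `utils/utils.py` is importable; the sample box values are made up for illustration.

```python
import numpy as np
import torch

from utils.utils import xywh2xyxy, xyxy2xywh

# Two boxes in normalized [x_center, y_center, w, h] format, as stored in the
# labels after the class-id column has been sliced off (annotations[:, 1:5]).
xywh = torch.tensor([[0.5, 0.5, 0.2, 0.4],
                     [0.3, 0.6, 0.1, 0.2]], dtype=torch.float32)

xyxy = xywh2xyxy(xywh)        # [[0.40, 0.30, 0.60, 0.70], [0.25, 0.50, 0.35, 0.70]]
assert torch.is_tensor(xyxy)  # float32 torch input now comes back as a torch tensor

# Converting back should reproduce the original boxes (up to float error).
assert torch.allclose(xyxy2xywh(xyxy), xywh)

# NumPy input still takes the np.zeros() branch and returns an ndarray.
assert isinstance(xywh2xyxy(xywh.numpy()), np.ndarray)
```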