This commit is contained in:
Glenn Jocher 2019-07-20 17:05:09 +02:00
parent bc262aca2a
commit a39ee4d252
3 changed files with 7 additions and 7 deletions

View File

@@ -253,7 +253,7 @@ def load_darknet_weights(self, weights, cutoff=-1):
if not os.path.isfile(weights): if not os.path.isfile(weights):
try: try:
url = 'https://pjreddie.com/media/files/' + weights_file url = 'https://pjreddie.com/media/files/' + weights_file
print('Downloading ' + url + ' to ' + weights) print('Downloading ' + url)
os.system('curl ' + url + ' -o ' + weights) os.system('curl ' + url + ' -o ' + weights)
except IOError: except IOError:
print(weights + ' not found.\nTry https://drive.google.com/drive/folders/1uxgUBemJVw9wZsdpboYbzUN4bcRhsuAI') print(weights + ' not found.\nTry https://drive.google.com/drive/folders/1uxgUBemJVw9wZsdpboYbzUN4bcRhsuAI')

View File

@@ -87,9 +87,6 @@ def test(cfg,
stats.append(([], torch.Tensor(), torch.Tensor(), tcls)) stats.append(([], torch.Tensor(), torch.Tensor(), tcls))
continue continue
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Append to text file # Append to text file
# with open('test.txt', 'a') as file: # with open('test.txt', 'a') as file:
# [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred] # [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
@@ -99,7 +96,7 @@ def test(cfg,
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(Path(paths[si]).stem.split('_')[-1]) image_id = int(Path(paths[si]).stem.split('_')[-1])
box = pred[:, :4].clone() # xyxy box = pred[:, :4].clone() # xyxy
box = scale_coords(imgs[si].shape[1:], box, shapes[si]) # to original shape scale_coords(imgs[si].shape[1:], box, shapes[si]) # to original shape
box = xyxy2xywh(box) # xywh box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for di, d in enumerate(pred): for di, d in enumerate(pred):
@@ -108,6 +105,9 @@ def test(cfg,
'bbox': [float3(x) for x in box[di]], 'bbox': [float3(x) for x in box[di]],
'score': float(d[4])}) 'score': float(d[4])})
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Assign all predictions as incorrect # Assign all predictions as incorrect
correct = [0] * len(pred) correct = [0] * len(pred)
if nl: if nl:

View File

@@ -152,7 +152,7 @@ class LoadWebcam:  # for inference
class LoadImagesAndLabels(Dataset): # for training/testing class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, image_weights=False): def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False):
with open(path, 'r') as f: with open(path, 'r') as f:
img_files = f.read().splitlines() img_files = f.read().splitlines()
self.img_files = [x for x in img_files if os.path.splitext(x)[-1].lower() in img_formats] self.img_files = [x for x in img_files if os.path.splitext(x)[-1].lower() in img_formats]
@@ -280,7 +280,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
img = cv2.imread(img_path) # BGR img = cv2.imread(img_path) # BGR
assert img is not None, 'File Not Found ' + img_path assert img is not None, 'File Not Found ' + img_path
r = self.img_size / max(img.shape) # size ratio r = self.img_size / max(img.shape) # size ratio
if r < 1: # downsize if target shape is smaller if self.augment and r < 1: # if training (NOT testing), downsize to inference shape
h, w, _ = img.shape h, w, _ = img.shape
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)