convert_ethz_to_coco.py

import os
import json

import numpy as np
from PIL import Image

DATA_PATH = 'datasets/ETHZ/'
DATA_FILE_PATH = 'datasets/data_path/eth.train'
OUT_PATH = DATA_PATH + 'annotations/'


def load_paths(data_path):
    # Read the list of image paths and derive the matching label-file paths.
    with open(data_path, 'r') as file:
        img_files = file.readlines()
        img_files = [x.replace('\n', '') for x in img_files]
        img_files = list(filter(lambda x: len(x) > 0, img_files))
    label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                   for x in img_files]
    return img_files, label_files


if __name__ == '__main__':
    if not os.path.exists(OUT_PATH):
        os.mkdir(OUT_PATH)
    out_path = OUT_PATH + 'train.json'
    out = {'images': [], 'annotations': [],
           'categories': [{'id': 1, 'name': 'person'}]}
    img_paths, label_paths = load_paths(DATA_FILE_PATH)
    image_cnt = 0
    ann_cnt = 0
    video_cnt = 0  # unused in this script
    for img_path, label_path in zip(img_paths, label_paths):
        image_cnt += 1
        # Image size is needed to convert the normalized boxes to pixels.
        im = Image.open(os.path.join("datasets", img_path))
        image_info = {'file_name': img_path,
                      'id': image_cnt,
                      'height': im.size[1],
                      'width': im.size[0]}
        out['images'].append(image_info)
        # Load labels: one row per box, columns 2-5 hold normalized [cx, cy, w, h].
        if os.path.isfile(os.path.join("datasets", label_path)):
            labels0 = np.loadtxt(os.path.join("datasets", label_path), dtype=np.float32).reshape(-1, 6)
            # Normalized center-based (cx, cy, w, h) to pixel top-left (x, y, w, h), as COCO expects.
            labels = labels0.copy()
            labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2)
            labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2)
            labels[:, 4] = image_info['width'] * labels0[:, 4]
            labels[:, 5] = image_info['height'] * labels0[:, 5]
        else:
            labels = np.array([])
        for i in range(len(labels)):
            ann_cnt += 1
            fbox = labels[i, 2:6].tolist()  # [x, y, w, h] in pixels
            ann = {'id': ann_cnt,
                   'category_id': 1,
                   'image_id': image_cnt,
                   'track_id': -1,
                   'bbox': fbox,
                   'area': fbox[2] * fbox[3],
                   'iscrowd': 0}
            out['annotations'].append(ann)
    print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations'])))
    with open(out_path, 'w') as f:
        json.dump(out, f)
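
# Optional sanity check, shown as a comment so the script's behavior is unchanged.
# A minimal sketch, not part of the original conversion script: it assumes
# pycocotools is installed and re-opens the file that was just written.
#
#     from pycocotools.coco import COCO
#     coco = COCO('datasets/ETHZ/annotations/train.json')
#     print(len(coco.getImgIds()), 'images /', len(coco.getAnnIds()), 'annotations')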