123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447 |
- #!/usr/bin/env python
- # coding: utf-8
- # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import argparse
- import glob
- import json
- import os
- import os.path as osp
- import shutil
- import xml.etree.ElementTree as ET
- from tqdm import tqdm
- import numpy as np
- import PIL.ImageDraw
# Shared across deal_json() calls so category ids stay consistent between the
# train/val/test splits: label name -> 1-based category id.
label_to_num = {}
# Accumulated COCO 'categories' records, one per label in first-seen order.
categories_list = []
# Label names in first-seen order; a label's index + 1 is its category id.
labels_list = []
class MyEncoder(json.JSONEncoder):
    """json.JSONEncoder that also serializes numpy scalar and array types."""

    # (numpy abstract type, converter to a builtin JSON-serializable value)
    _CASTERS = ((np.integer, int), (np.floating, float),
                (np.ndarray, np.ndarray.tolist))

    def default(self, obj):
        """Convert numpy values to builtins; defer the rest to the base class."""
        for np_type, cast in self._CASTERS:
            if isinstance(obj, np_type):
                return cast(obj)
        return super().default(obj)
def images_labelme(data, num):
    """Build a COCO image record from a labelme annotation dict.

    ``num`` is the 0-based image index; the stored COCO id is ``num + 1``.
    """
    path = data['imagePath']
    # labelme may record Windows-style paths; keep only the last component.
    sep = '\\' if '\\' in path else '/'
    return {
        'height': data['imageHeight'],
        'width': data['imageWidth'],
        'id': num + 1,
        'file_name': path.split(sep)[-1],
    }
def images_cityscape(data, num, img_file):
    """Build a COCO image record from a cityscape annotation dict.

    Unlike labelme, the image file name comes from the caller, not the json.
    """
    return {
        'height': data['imgHeight'],
        'width': data['imgWidth'],
        'id': num + 1,
        'file_name': img_file,
    }
def categories(label, labels_list):
    """Build a COCO category record for a newly seen *label*.

    The id is the position the label will occupy once appended to
    ``labels_list`` (1-based), matching how deal_json registers labels.
    """
    return {
        'supercategory': 'component',
        'id': len(labels_list) + 1,
        'name': label,
    }
def annotations_rectangle(points, label, image_num, object_num, label_to_num):
    """Build a COCO annotation for an axis-aligned rectangle.

    ``points`` is ``[[x1, y1], [x2, y2], [x1, y2], [x2, y1]]`` (top-left,
    bottom-right, then the two remaining corners). Rows 1 and 2 are swapped
    so the flattened segmentation traces the rectangle's perimeter instead
    of crossing it diagonally.
    """
    corners = np.asarray(points)
    # Reorder rows 0,2,1,3 == swap rows 1 and 2 of the input.
    perimeter = corners[[0, 2, 1, 3], :]
    (x0, y0), (x1, y1) = points[0], points[1]
    bbox = [float(x0), float(y0), float(x1 - x0), float(y1 - y0)]
    return {
        'segmentation': [list(perimeter.flatten())],
        'iscrowd': 0,
        'image_id': image_num + 1,
        'bbox': bbox,
        'area': bbox[2] * bbox[3],
        'category_id': label_to_num[label],
        'id': object_num + 1,
    }
def annotations_polygon(height, width, points, label, image_num, object_num,
                        label_to_num):
    """Build a COCO annotation for a free-form polygon.

    The bounding box is rasterized from the polygon via get_bbox(), and the
    area is the box area (w * h), not the true polygon area.
    """
    x, y, w, h = map(float, get_bbox(height, width, points))
    return {
        'segmentation': [list(np.asarray(points).flatten())],
        'iscrowd': 0,
        'image_id': image_num + 1,
        'bbox': [x, y, w, h],
        'area': w * h,
        'category_id': label_to_num[label],
        'id': object_num + 1,
    }
def get_bbox(height, width, points):
    """Rasterize the polygon *points* onto a ``height`` x ``width`` mask and
    return its bounding box as ``[x, y, w, h]``.

    The box is derived from the filled pixels of the drawn polygon, so the
    returned values are integer pixel extents (numpy integer scalars).

    Raises:
        ValueError: if the polygon fills no pixels (np.min/np.max on an
            empty index array).
    """
    # Explicit import: the original relied on PIL.Image being reachable only
    # as a side effect of the file-level 'import PIL.ImageDraw'.
    import PIL.Image
    mask = PIL.Image.fromarray(np.zeros([height, width], dtype=np.uint8))
    PIL.ImageDraw.Draw(mask).polygon(
        xy=list(map(tuple, points)), outline=1, fill=1)
    filled = np.argwhere(np.array(mask, dtype=bool))
    rows = filled[:, 0]
    cols = filled[:, 1]  # renamed from the original's misspelled 'clos'
    top, left = np.min(rows), np.min(cols)
    bottom, right = np.max(rows), np.max(cols)
    # COCO-style [x, y, w, h]: columns are x, rows are y.
    return [left, top, right - left, bottom - top]
def deal_json(ds_type, img_path, json_path):
    """Convert one split's labelme/cityscape annotations into a COCO dict.

    Args:
        ds_type: 'labelme' or 'cityscape' — selects the json schema read.
        img_path: directory holding this split's image files.
        json_path: directory holding the per-image json annotation files
            (one json per image, sharing the image's basename).

    Returns:
        dict with COCO 'images', 'categories' and 'annotations' lists.

    NOTE(review): mutates the module-level labels_list / categories_list /
    label_to_num, so category ids stay consistent across the successive
    train/val/test calls made from main().
    """
    data_coco = {}
    images_list = []
    annotations_list = []
    # Counters become 0-based after the first increment; stored COCO ids
    # are counter + 1.
    image_num = -1
    object_num = -1
    for img_file in os.listdir(img_path):
        img_label = os.path.splitext(img_file)[0]
        # Skip anything that is not a recognized image file.
        if img_file.split('.')[
                -1] not in ['bmp', 'jpg', 'jpeg', 'png', 'JPEG', 'JPG', 'PNG']:
            continue
        label_file = osp.join(json_path, img_label + '.json')
        print('Generating dataset from:', label_file)
        image_num = image_num + 1
        with open(label_file) as f:
            data = json.load(f)
            if ds_type == 'labelme':
                images_list.append(images_labelme(data, image_num))
            elif ds_type == 'cityscape':
                images_list.append(images_cityscape(data, image_num, img_file))
            if ds_type == 'labelme':
                for shapes in data['shapes']:
                    object_num = object_num + 1
                    label = shapes['label']
                    # First occurrence of this label: register a category.
                    if label not in labels_list:
                        categories_list.append(categories(label, labels_list))
                        labels_list.append(label)
                        label_to_num[label] = len(labels_list)
                    p_type = shapes['shape_type']
                    if p_type == 'polygon':
                        points = shapes['points']
                        annotations_list.append(
                            annotations_polygon(data['imageHeight'], data[
                                'imageWidth'], points, label, image_num,
                                                object_num, label_to_num))
                    if p_type == 'rectangle':
                        # Normalize the two stored corners so (x1, y1) is the
                        # top-left and (x2, y2) the bottom-right, then expand
                        # to the four-corner layout annotations_rectangle
                        # expects.
                        (x1, y1), (x2, y2) = shapes['points']
                        x1, x2 = sorted([x1, x2])
                        y1, y2 = sorted([y1, y2])
                        points = [[x1, y1], [x2, y2], [x1, y2], [x2, y1]]
                        annotations_list.append(
                            annotations_rectangle(points, label, image_num,
                                                  object_num, label_to_num))
            elif ds_type == 'cityscape':
                for shapes in data['objects']:
                    object_num = object_num + 1
                    label = shapes['label']
                    if label not in labels_list:
                        categories_list.append(categories(label, labels_list))
                        labels_list.append(label)
                        label_to_num[label] = len(labels_list)
                    points = shapes['polygon']
                    annotations_list.append(
                        annotations_polygon(data['imgHeight'], data[
                            'imgWidth'], points, label, image_num, object_num,
                                            label_to_num))
    data_coco['images'] = images_list
    data_coco['categories'] = categories_list
    data_coco['annotations'] = annotations_list
    return data_coco
def voc_get_label_anno(ann_dir_path, ann_ids_path, labels_path):
    """Read VOC label names and annotation ids from their list files.

    Returns:
        tuple of (label name -> 1-based id dict, list of xml file paths).
    """
    with open(labels_path, 'r') as f:
        names = f.read().split()
    label2id = {name: idx for idx, name in enumerate(names, start=1)}
    with open(ann_ids_path, 'r') as f:
        ids = f.read().split()
    # Ids may already carry the xml suffix; append it only when missing.
    ann_paths = [
        os.path.join(ann_dir_path, i if i.endswith('xml') else i + '.xml')
        for i in ids
    ]
    return label2id, ann_paths
def voc_get_image_info(annotation_root, im_id):
    """Extract a COCO image record from a parsed VOC <annotation> element.

    Args:
        annotation_root: xml Element for the <annotation> root node.
        im_id: integer id to assign to this image.

    Returns:
        dict with 'file_name', 'height', 'width' (floats, as the original
        emitted) and 'id'.

    Raises:
        AssertionError: if the <filename> tag is missing.
    """
    filename = annotation_root.findtext('filename')
    assert filename is not None
    # Removed dead code: the original computed os.path.basename(filename)
    # into an unused local; the record stores the raw <filename> text.
    size = annotation_root.find('size')
    # Keep the float conversion so emitted JSON matches previous output.
    width = float(size.findtext('width'))
    height = float(size.findtext('height'))
    image_info = {
        'file_name': filename,
        'height': height,
        'width': width,
        'id': im_id
    }
    return image_info
def voc_get_coco_annotation(obj, label2id):
    """Convert one VOC <object> element into a partial COCO annotation.

    The caller is expected to add 'image_id' and 'id' afterwards.

    Raises:
        AssertionError: if the label is unknown or the box is degenerate.
    """
    label = obj.findtext('name')
    assert label in label2id, "label is not in label2id."
    box = obj.find('bndbox')
    xmin, ymin, xmax, ymax = (float(box.findtext(tag))
                              for tag in ('xmin', 'ymin', 'xmax', 'ymax'))
    assert xmax > xmin and ymax > ymin, "Box size error."
    w = xmax - xmin
    h = ymax - ymin
    return {
        'area': w * h,
        'iscrowd': 0,
        'bbox': [xmin, ymin, w, h],
        'category_id': label2id[label],
        'ignore': 0,
    }
def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):
    """Merge VOC xml annotations into one COCO json file.

    Writes the result to ``output_dir/output_file``. Image ids start at 0,
    bounding-box ids at 1.
    """
    coco = {
        "images": [],
        "type": "instances",
        "annotations": [],
        "categories": []
    }
    bnd_id = 1  # bounding box start id
    im_id = 0
    print('Start converting !')
    for xml_path in tqdm(annotation_paths):
        # Parse one annotation xml and collect its image + object records.
        root = ET.parse(xml_path).getroot()
        coco['images'].append(voc_get_image_info(root, im_id))
        for obj in root.findall('object'):
            ann = voc_get_coco_annotation(obj=obj, label2id=label2id)
            ann.update({'image_id': im_id, 'id': bnd_id})
            coco['annotations'].append(ann)
            bnd_id += 1
        im_id += 1
    coco['categories'] = [{
        'supercategory': 'none',
        'id': label_id,
        'name': label
    } for label, label_id in label2id.items()]
    with open(os.path.join(output_dir, output_file), 'w') as f:
        f.write(json.dumps(coco))
def main():
    """Command-line entry: convert a voc / labelme / cityscape dataset to COCO.

    For 'voc', the xml annotations listed in --voc_anno_list are merged into
    a single json. For 'labelme' / 'cityscape', images are first copied into
    train/val/test subdirectories of --output_dir according to the given
    proportions, then one COCO json per non-empty split is written under
    --output_dir/annotations.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--dataset_type',
        help='the type of dataset, can be `voc`, `labelme` or `cityscape`')
    parser.add_argument('--json_input_dir', help='input annotated directory')
    parser.add_argument('--image_input_dir', help='image directory')
    parser.add_argument(
        '--output_dir', help='output dataset directory', default='./')
    parser.add_argument(
        '--train_proportion',
        help='the proportion of train dataset',
        type=float,
        default=1.0)
    parser.add_argument(
        '--val_proportion',
        help='the proportion of validation dataset',
        type=float,
        default=0.0)
    parser.add_argument(
        '--test_proportion',
        help='the proportion of test dataset',
        type=float,
        default=0.0)
    parser.add_argument(
        '--voc_anno_dir',
        help='In Voc format dataset, path to annotation files directory.',
        type=str,
        default=None)
    parser.add_argument(
        '--voc_anno_list',
        help='In Voc format dataset, path to annotation files ids list.',
        type=str,
        default=None)
    parser.add_argument(
        '--voc_label_list',
        help='In Voc format dataset, path to label list. The content of each line is a category.',
        type=str,
        default=None)
    parser.add_argument(
        '--voc_out_name',
        type=str,
        default='voc.json',
        help='In Voc format dataset, path to output json file')
    args = parser.parse_args()
    try:
        assert args.dataset_type in ['voc', 'labelme', 'cityscape']
    except AssertionError:
        print(
            'Now only support the voc, cityscape dataset and labelme dataset!!')
        os._exit(0)
    if args.dataset_type == 'voc':
        assert args.voc_anno_dir and args.voc_anno_list and args.voc_label_list
        label2id, ann_paths = voc_get_label_anno(
            args.voc_anno_dir, args.voc_anno_list, args.voc_label_list)
        voc_xmls_to_cocojson(
            annotation_paths=ann_paths,
            label2id=label2id,
            output_dir=args.output_dir,
            output_file=args.voc_out_name)
    else:
        try:
            assert os.path.exists(args.json_input_dir)
        except AssertionError:
            print('The json folder does not exist!')
            os._exit(0)
        try:
            assert os.path.exists(args.image_input_dir)
        except AssertionError:
            print('The image folder does not exist!')
            os._exit(0)
        try:
            # The three split proportions must sum to 1 within float noise.
            assert abs(args.train_proportion + args.val_proportion +
                       args.test_proportion - 1.0) < 1e-5
        except AssertionError:
            # Message typos fixed ('pqoportion' -> 'proportion',
            # 'datase' -> 'dataset').
            print(
                'The sum of proportion of training, validation and test dataset must be 1!'
            )
            os._exit(0)
        # Allocate the dataset: split sizes come from the number of json
        # annotation files, not from the number of images.
        total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))
        if args.train_proportion != 0:
            train_num = int(total_num * args.train_proportion)
            out_dir = args.output_dir + '/train'
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
        else:
            train_num = 0
        if args.val_proportion == 0.0:
            val_num = 0
            test_num = total_num - train_num
            out_dir = args.output_dir + '/test'
            if args.test_proportion != 0.0 and not os.path.exists(out_dir):
                os.makedirs(out_dir)
        else:
            val_num = int(total_num * args.val_proportion)
            test_num = total_num - train_num - val_num
            val_out_dir = args.output_dir + '/val'
            if not os.path.exists(val_out_dir):
                os.makedirs(val_out_dir)
            test_out_dir = args.output_dir + '/test'
            if args.test_proportion != 0.0 and not os.path.exists(test_out_dir):
                os.makedirs(test_out_dir)
        # Copy images into their split directory. NOTE(review): os.listdir
        # order is arbitrary, so which images land in which split is not
        # deterministic across runs — confirm whether that matters upstream.
        count = 1
        for img_name in os.listdir(args.image_input_dir):
            if count <= train_num:
                if osp.exists(args.output_dir + '/train/'):
                    shutil.copyfile(
                        osp.join(args.image_input_dir, img_name),
                        osp.join(args.output_dir + '/train/', img_name))
            else:
                if count <= train_num + val_num:
                    if osp.exists(args.output_dir + '/val/'):
                        shutil.copyfile(
                            osp.join(args.image_input_dir, img_name),
                            osp.join(args.output_dir + '/val/', img_name))
                else:
                    if osp.exists(args.output_dir + '/test/'):
                        shutil.copyfile(
                            osp.join(args.image_input_dir, img_name),
                            osp.join(args.output_dir + '/test/', img_name))
            count = count + 1
        # Deal with the json files: write one COCO json per non-empty split.
        # Context managers close the output files (the old code leaked the
        # handle returned by open() inside json.dump()).
        if not os.path.exists(args.output_dir + '/annotations'):
            os.makedirs(args.output_dir + '/annotations')
        if args.train_proportion != 0:
            train_data_coco = deal_json(args.dataset_type,
                                        args.output_dir + '/train',
                                        args.json_input_dir)
            train_json_path = osp.join(args.output_dir + '/annotations',
                                       'instance_train.json')
            with open(train_json_path, 'w') as f:
                json.dump(train_data_coco, f, indent=4, cls=MyEncoder)
        if args.val_proportion != 0:
            val_data_coco = deal_json(args.dataset_type,
                                      args.output_dir + '/val',
                                      args.json_input_dir)
            val_json_path = osp.join(args.output_dir + '/annotations',
                                     'instance_val.json')
            with open(val_json_path, 'w') as f:
                json.dump(val_data_coco, f, indent=4, cls=MyEncoder)
        if args.test_proportion != 0:
            test_data_coco = deal_json(args.dataset_type,
                                       args.output_dir + '/test',
                                       args.json_input_dir)
            test_json_path = osp.join(args.output_dir + '/annotations',
                                      'instance_test.json')
            with open(test_json_path, 'w') as f:
                json.dump(test_data_coco, f, indent=4, cls=MyEncoder)


if __name__ == '__main__':
    main()
|