eval_utils.py 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292
  1. # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from __future__ import absolute_import
  15. from __future__ import division
  16. from __future__ import print_function
  17. import logging
  18. import numpy as np
  19. import os
  20. import time
  21. import paddle.fluid as fluid
  22. from .voc_eval import bbox_eval as voc_bbox_eval
  23. from .post_process import mstest_box_post_process, mstest_mask_post_process
  24. __all__ = ['parse_fetches', 'eval_run', 'eval_results', 'json_eval_results']
  25. logger = logging.getLogger(__name__)
  26. def parse_fetches(fetches, prog=None, extra_keys=None):
  27. """
  28. Parse fetch variable infos from model fetches,
  29. values for fetch_list and keys for stat
  30. """
  31. keys, values = [], []
  32. cls = []
  33. for k, v in fetches.items():
  34. if hasattr(v, 'name'):
  35. keys.append(k)
  36. #v.persistable = True
  37. values.append(v.name)
  38. else:
  39. cls.append(v)
  40. if prog is not None and extra_keys is not None:
  41. for k in extra_keys:
  42. try:
  43. v = fluid.framework._get_var(k, prog)
  44. keys.append(k)
  45. values.append(v.name)
  46. except Exception:
  47. pass
  48. return keys, values, cls
  49. def length2lod(length_lod):
  50. offset_lod = [0]
  51. for i in length_lod:
  52. offset_lod.append(offset_lod[-1] + i)
  53. return [offset_lod]
  54. def get_sub_feed(input, place):
  55. new_dict = {}
  56. res_feed = {}
  57. key_name = ['bbox', 'im_info', 'im_id', 'im_shape', 'bbox_flip']
  58. for k in key_name:
  59. if k in input.keys():
  60. new_dict[k] = input[k]
  61. for k in input.keys():
  62. if 'image' in k:
  63. new_dict[k] = input[k]
  64. for k, v in new_dict.items():
  65. data_t = fluid.LoDTensor()
  66. data_t.set(v[0], place)
  67. if 'bbox' in k:
  68. lod = length2lod(v[1][0])
  69. data_t.set_lod(lod)
  70. res_feed[k] = data_t
  71. return res_feed
  72. def clean_res(result, keep_name_list):
  73. clean_result = {}
  74. for k in result.keys():
  75. if k in keep_name_list:
  76. clean_result[k] = result[k]
  77. result.clear()
  78. return clean_result
  79. def get_masks(result):
  80. import pycocotools.mask as mask_util
  81. if result is None:
  82. return {}
  83. seg_pred = result['segm'][0].astype(np.uint8)
  84. cate_label = result['cate_label'][0].astype(np.int)
  85. cate_score = result['cate_score'][0].astype(np.float)
  86. num_ins = seg_pred.shape[0]
  87. masks = []
  88. for idx in range(num_ins - 1):
  89. cur_mask = seg_pred[idx, ...]
  90. rle = mask_util.encode(
  91. np.array(
  92. cur_mask[:, :, np.newaxis], order='F'))[0]
  93. rst = (rle, cate_score[idx])
  94. masks.append([cate_label[idx], rst])
  95. return masks
def eval_run(exe,
             compile_program,
             loader,
             keys,
             values,
             cls,
             cfg=None,
             sub_prog=None,
             sub_keys=None,
             sub_values=None,
             resolution=None):
    """
    Run evaluation program, return program outputs.

    Iterates `loader` until exhaustion, running `compile_program` on `exe`
    each step and collecting the fetched outputs as a list of per-batch
    dicts `{key: (ndarray, recursive_sequence_lengths)}`.  Optional
    multi-scale test / mask post-processing is applied per batch when
    configured in `cfg`.  `sub_prog`/`sub_keys`/`sub_values` are only used
    for the multi-scale mask sub-program path.
    """
    iter_id = 0
    results = []
    # When metric evaluator objects were collected (e.g. VOC mAP), fetch
    # their accumulated-map variables instead of the model outputs, and
    # reset their state before the run.
    if len(cls) != 0:
        values = []
        for i in range(len(cls)):
            _, accum_map = cls[i].get_map_var()
            cls[i].reset(exe)
            values.append(accum_map)
    images_num = 0
    start_time = time.time()
    has_bbox = 'bbox' in keys
    try:
        loader.start()
        while True:
            outs = exe.run(compile_program,
                           fetch_list=values,
                           return_numpy=False)
            # Keep both the data and the LoD so variable-length outputs
            # (boxes per image) can be split downstream.
            res = {
                k: (np.array(v), v.recursive_sequence_lengths())
                for k, v in zip(keys, outs)
            }
            multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
            mask_multi_scale_test = multi_scale_test and 'Mask' in cfg.architecture
            if multi_scale_test:
                post_res = mstest_box_post_process(res, multi_scale_test,
                                                   cfg.num_classes)
                res.update(post_res)
            if mask_multi_scale_test:
                # Mask multi-scale testing re-runs a sub-program fed with
                # the merged boxes from the box post-process above.
                place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
                sub_feed = get_sub_feed(res, place)
                sub_prog_outs = exe.run(sub_prog,
                                        feed=sub_feed,
                                        fetch_list=sub_values,
                                        return_numpy=False)
                sub_prog_res = {
                    k: (np.array(v), v.recursive_sequence_lengths())
                    for k, v in zip(sub_keys, sub_prog_outs)
                }
                post_res = mstest_mask_post_process(sub_prog_res, cfg)
                res.update(post_res)
            if multi_scale_test:
                # Drop the per-scale intermediates; keep only merged outputs.
                res = clean_res(
                    res, ['im_info', 'bbox', 'im_id', 'im_shape', 'mask'])
            if 'mask' in res:
                from ppdet.utils.post_process import mask_encode
                res['mask'] = mask_encode(res, resolution)
            post_config = getattr(cfg, 'PostProcess', None)
            if 'Corner' in cfg.architecture and post_config is not None:
                from ppdet.utils.post_process import corner_post_process
                corner_post_process(res, post_config, cfg.num_classes)
            if 'TTFNet' in cfg.architecture:
                # TTFNet emits no LoD; synthesize one covering all boxes.
                res['bbox'][1].append([len(res['bbox'][0])])
            if 'segm' in res:
                res['segm'] = get_masks(res)
            results.append(res)
            if iter_id % 100 == 0:
                logger.info('Test iter {}'.format(iter_id))
            iter_id += 1
            if 'bbox' not in res or len(res['bbox'][1]) == 0:
                has_bbox = False
            # With a bbox LoD the first level's length is the image count
            # of the batch; otherwise count one batch per iteration.
            images_num += len(res['bbox'][1][0]) if has_bbox else 1
    except (StopIteration, fluid.core.EOFException):
        # Normal end of the data loader, not an error.
        loader.reset()
    logger.info('Test finish iter {}'.format(iter_id))
    end_time = time.time()
    fps = images_num / (end_time - start_time)
    if has_bbox:
        logger.info('Total number of images: {}, inference time: {} fps.'.
                    format(images_num, fps))
    else:
        logger.info('Total iteration: {}, inference time: {} batch/s.'.format(
            images_num, fps))
    return results
def eval_results(results,
                 metric,
                 num_classes,
                 resolution=None,
                 is_bbox_normalized=False,
                 output_directory=None,
                 map_type='11point',
                 dataset=None,
                 save_only=False):
    """Evaluation for evaluation program results.

    Dispatches on `metric`: 'COCO' runs the pycocotools-based evaluators
    for whichever of proposal/bbox/mask/segm keys are present in
    `results[0]`, writing the corresponding JSON files (into
    `output_directory` when given); any other metric falls through to
    VOC-style bbox mAP.  Returns the box AP stats list (mask/segm AP when
    no box results exist).
    """
    box_ap_stats = []
    if metric == 'COCO':
        from ppdet.utils.coco_eval import proposal_eval, bbox_eval, mask_eval, segm_eval
        anno_file = dataset.get_anno()
        with_background = dataset.with_background
        if 'proposal' in results[0]:
            output = 'proposal.json'
            if output_directory:
                output = os.path.join(output_directory, 'proposal.json')
            proposal_eval(results, anno_file, output)
        if 'bbox' in results[0]:
            output = 'bbox.json'
            if output_directory:
                output = os.path.join(output_directory, 'bbox.json')
            box_ap_stats = bbox_eval(
                results,
                anno_file,
                output,
                with_background,
                is_bbox_normalized=is_bbox_normalized,
                save_only=save_only)
        if 'mask' in results[0]:
            output = 'mask.json'
            if output_directory:
                output = os.path.join(output_directory, 'mask.json')
            mask_eval(
                results, anno_file, output, resolution, save_only=save_only)
        if 'segm' in results[0]:
            output = 'segm.json'
            if output_directory:
                output = os.path.join(output_directory, output)
            mask_ap_stats = segm_eval(
                results, anno_file, output, save_only=save_only)
            # Report segm AP when there was no bbox evaluation to report.
            if len(box_ap_stats) == 0:
                box_ap_stats = mask_ap_stats
    else:
        # VOC path: prefer the evaluator's accumulated mAP if present
        # (computed in-graph), otherwise compute mAP from raw bbox results.
        if 'accum_map' in results[-1]:
            res = np.mean(results[-1]['accum_map'][0])
            logger.info('mAP: {:.2f}'.format(res * 100.))
            box_ap_stats.append(res * 100.)
        elif 'bbox' in results[0]:
            box_ap = voc_bbox_eval(
                results,
                num_classes,
                is_bbox_normalized=is_bbox_normalized,
                map_type=map_type)
            box_ap_stats.append(box_ap)
    return box_ap_stats
  241. def json_eval_results(metric, json_directory=None, dataset=None):
  242. """
  243. cocoapi eval with already exists proposal.json, bbox.json or mask.json
  244. """
  245. assert metric == 'COCO'
  246. from ppdet.utils.coco_eval import cocoapi_eval
  247. anno_file = dataset.get_anno()
  248. json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
  249. if json_directory:
  250. assert os.path.exists(
  251. json_directory), "The json directory:{} does not exist".format(
  252. json_directory)
  253. for k, v in enumerate(json_file_list):
  254. json_file_list[k] = os.path.join(str(json_directory), v)
  255. coco_eval_style = ['proposal', 'bbox', 'segm']
  256. for i, v_json in enumerate(json_file_list):
  257. if os.path.exists(v_json):
  258. cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
  259. else:
  260. logger.info("{} not exists!".format(v_json))