# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

import paddle.fluid as fluid
import numpy as np
import cv2
from collections import OrderedDict

import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)

try:
    import ppdet.utils.checkpoint as checkpoint
    from ppdet.utils.cli import ArgsParser
    from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
    from ppdet.utils.widerface_eval_utils import get_shrink, bbox_vote, \
        save_widerface_bboxes, save_fddb_bboxes, to_chw_bgr
    from ppdet.core.workspace import load_config, merge_config, create
except ImportError as e:
    if sys.argv[0].find('static') >= 0:
        logger.error("Importing ppdet failed when running static model "
                     "with error: {}\n"
                     "please try:\n"
                     "\t1. run static model under PaddleDetection/static "
                     "directory\n"
                     "\t2. run 'pip uninstall ppdet' to uninstall ppdet "
                     "dynamic version firstly.".format(e))
        sys.exit(-1)
    else:
        raise e


def face_img_process(image,
                     mean=[104., 117., 123.],
                     std=[127.502231, 127.502231, 127.502231]):
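    """
    Convert an RGB image to the network input: CHW BGR float32 with mean
    subtracted and divided by std, wrapped in a batch dimension of 1.
    """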
    img = np.array(image)
    img = to_chw_bgr(img)
    img = img.astype('float32')
    img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
    img /= np.array(std)[:, np.newaxis, np.newaxis].astype('float32')
    img = [img]
    img = np.array(img)
    return img


def face_eval_run(exe,
                  compile_program,
                  fetches,
                  image_dir,
                  gt_file,
                  pred_dir='output/pred',
                  eval_mode='widerface',
                  multi_scale=False):
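    """
    Run face detection on every image listed in the ground truth file and
    save the predictions to `pred_dir`, using the WIDER FACE or FDDB output
    helpers depending on `eval_mode`. With `multi_scale` enabled, detections
    from several shrink/enlarge scales and a flipped pass are merged by
    `bbox_vote`.
    """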
    # load ground truth files
    with open(gt_file, 'r') as f:
        gt_lines = f.readlines()
    imid2path = []
    pos_gt = 0
    while pos_gt < len(gt_lines):
        name_gt = gt_lines[pos_gt].strip('\n\t').split()[0]
        imid2path.append(name_gt)
        pos_gt += 1
        n_gt = int(gt_lines[pos_gt].strip('\n\t').split()[0])
        pos_gt += 1 + n_gt
    logger.info('The ground truth file contains {} images'.format(
        len(imid2path)))

    dets_dist = OrderedDict()
    for iter_id, im_path in enumerate(imid2path):
        image_path = os.path.join(image_dir, im_path)
        if eval_mode == 'fddb':
            image_path += '.jpg'
        assert os.path.exists(image_path)
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if multi_scale:
            shrink, max_shrink = get_shrink(image.shape[0], image.shape[1])
            det0 = detect_face(exe, compile_program, fetches, image, shrink)
            det1 = flip_test(exe, compile_program, fetches, image, shrink)
            [det2, det3] = multi_scale_test(exe, compile_program, fetches,
                                            image, max_shrink)
            det4 = multi_scale_test_pyramid(exe, compile_program, fetches,
                                            image, max_shrink)
            det = np.row_stack((det0, det1, det2, det3, det4))
            dets = bbox_vote(det)
        else:
            dets = detect_face(exe, compile_program, fetches, image, 1)
        if eval_mode == 'widerface':
            save_widerface_bboxes(image_path, dets, pred_dir)
        else:
            dets_dist[im_path] = dets
        if iter_id % 100 == 0:
            logger.info('Test iter {}'.format(iter_id))
    if eval_mode == 'fddb':
        save_fddb_bboxes(dets_dist, pred_dir)
    logger.info("Finish evaluation.")


def detect_face(exe, compile_program, fetches, image, shrink):
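    """
    Run the detector on one image at the given shrink factor and return an
    (N, 5) array of [xmin, ymin, xmax, ymax, score] in the original image
    scale.
    """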
    image_shape = [3, image.shape[0], image.shape[1]]
    if shrink != 1:
        h, w = int(image_shape[1] * shrink), int(image_shape[2] * shrink)
        image = cv2.resize(image, (w, h))
        image_shape = [3, h, w]

    img = face_img_process(image)
    detection, = exe.run(compile_program,
                         feed={'image': img},
                         fetch_list=[fetches['bbox']],
                         return_numpy=False)
    detection = np.array(detection)
    # layout: xmin, ymin, xmax, ymax, score
    if np.prod(detection.shape) == 1:
        logger.info("No face detected")
        return np.array([[0, 0, 0, 0, 0]])
    det_conf = detection[:, 1]
    det_xmin = image_shape[2] * detection[:, 2] / shrink
    det_ymin = image_shape[1] * detection[:, 3] / shrink
    det_xmax = image_shape[2] * detection[:, 4] / shrink
    det_ymax = image_shape[1] * detection[:, 5] / shrink

    det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
    return det


def flip_test(exe, compile_program, fetches, image, shrink):
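    """
    Horizontal-flip test augmentation: detect on the mirrored image, then
    mirror the resulting boxes back into the original image coordinates.
    """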
    img = cv2.flip(image, 1)
    det_f = detect_face(exe, compile_program, fetches, img, shrink)
    det_t = np.zeros(det_f.shape)
    img_width = image.shape[1]
    det_t[:, 0] = img_width - det_f[:, 2]
    det_t[:, 1] = det_f[:, 1]
    det_t[:, 2] = img_width - det_f[:, 0]
    det_t[:, 3] = det_f[:, 3]
    det_t[:, 4] = det_f[:, 4]
    return det_t


def multi_scale_test(exe, compile_program, fetches, image, max_shrink):
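    """
    Detect at one shrunken scale (for large faces) and at one or more
    enlarged scales (for small faces), filtering each result by box size.
    Returns the shrunken-scale and enlarged-scale detections separately.
    """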
    # Shrink detecting is only used to detect big faces
    st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
    det_s = detect_face(exe, compile_program, fetches, image, st)
    index = np.where(
        np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
        > 30)[0]
    det_s = det_s[index, :]

    # Enlarge one step
    bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
    det_b = detect_face(exe, compile_program, fetches, image, bt)

    # Enlarge the small image several times for small faces
    if max_shrink > 2:
        bt *= 2
        while bt < max_shrink:
            det_b = np.row_stack((det_b, detect_face(exe, compile_program,
                                                     fetches, image, bt)))
            bt *= 2
        det_b = np.row_stack((det_b, detect_face(exe, compile_program, fetches,
                                                 image, max_shrink)))

    # Enlarged images are only used to detect small faces.
    if bt > 1:
        index = np.where(
            np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
                       det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
        det_b = det_b[index, :]
    # Shrunken images are only used to detect big faces.
    else:
        index = np.where(
            np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
                       det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
        det_b = det_b[index, :]
    return det_s, det_b


def multi_scale_test_pyramid(exe, compile_program, fetches, image, max_shrink):
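    """
    Detect over an image pyramid (scales 0.25, then 0.75, 1.25, 1.5, 1.75 up
    to `max_shrink`), keeping large boxes from shrunken scales and small
    boxes from enlarged scales, and stack all detections together.
    """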
    # Use image pyramids to detect faces
    det_b = detect_face(exe, compile_program, fetches, image, 0.25)
    index = np.where(
        np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
        > 30)[0]
    det_b = det_b[index, :]

    st = [0.75, 1.25, 1.5, 1.75]
    for i in range(len(st)):
        if st[i] <= max_shrink:
            det_temp = detect_face(exe, compile_program, fetches, image, st[i])
            # Enlarged images are only used to detect small faces.
            if st[i] > 1:
                index = np.where(
                    np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
                               det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
                det_temp = det_temp[index, :]
            # Shrunken images are only used to detect big faces.
            else:
                index = np.where(
                    np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
                               det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
                det_temp = det_temp[index, :]
            det_b = np.row_stack((det_b, det_temp))
    return det_b


def main():
    """
    Main evaluation function.
    """
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check whether use_gpu=True is set with the CPU version of PaddlePaddle
    check_gpu(cfg.use_gpu)
    check_version()

    main_arch = cfg.architecture

    # define executor
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # build program
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['EvalReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            fetches = model.eval(feed_vars)
    eval_prog = eval_prog.clone(True)

    # load model
    exe.run(startup_prog)
    if 'weights' in cfg:
        checkpoint.load_params(exe, eval_prog, cfg.weights)

    assert cfg.metric in ['WIDERFACE'], \
        "unknown metric type {}".format(cfg.metric)

    dataset = cfg['EvalReader']['dataset']
    annotation_file = dataset.get_anno()
    dataset_dir = dataset.dataset_dir
    image_dir = os.path.join(
        dataset_dir,
        dataset.image_dir) if FLAGS.eval_mode == 'widerface' else dataset_dir
    pred_dir = FLAGS.output_eval if FLAGS.output_eval else 'output/pred'
    face_eval_run(
        exe,
        eval_prog,
        fetches,
        image_dir,
        annotation_file,
        pred_dir=pred_dir,
        eval_mode=FLAGS.eval_mode,
        multi_scale=FLAGS.multi_scale)


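# A typical invocation (a sketch only: it assumes ppdet's ArgsParser exposes
# the usual -c/--config option, that the script is run from the
# PaddleDetection/static directory as the import error message above
# suggests, and the config path is purely illustrative):
#
#   python tools/face_eval.py \
#       -c configs/face_detection/blazeface.yml \
#       -f output/pred \
#       -e widerface \
#       --multi_scale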
if __name__ == '__main__':
    enable_static_mode()
    parser = ArgsParser()
    parser.add_argument(
        "-f",
        "--output_eval",
        default=None,
        type=str,
        help="Directory to save evaluation files, default is `output/pred`.")
    parser.add_argument(
        "-e",
        "--eval_mode",
        default="widerface",
        type=str,
        help="Evaluation mode, one of `widerface` or `fddb`, default is `widerface`."
    )
    parser.add_argument(
        "--multi_scale",
        action='store_true',
        default=False,
        help="If set, use multi-scale evaluation; otherwise single-scale "
        "evaluation is used (the default)."
    )
    FLAGS = parser.parse_args()
    main()