infer.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
    sys.path.append(parent_path)
import numpy as np
from PIL import Image
from paddle import fluid

import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
try:
    from ppdet.core.workspace import load_config, merge_config, create
    from ppdet.utils.eval_utils import parse_fetches
    from ppdet.utils.cli import ArgsParser
    from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
    from ppdet.utils.visualizer import visualize_results
    import ppdet.utils.checkpoint as checkpoint
    from ppdet.data.reader import create_reader
    from tools.infer import get_test_images, get_save_image_name
except ImportError as e:
    if sys.argv[0].find('static') >= 0:
        logger.error("Importing ppdet failed when running the static model "
                     "with error: {}\n"
                     "please try:\n"
                     "\t1. run the static model under the PaddleDetection/static "
                     "directory\n"
                     "\t2. run 'pip uninstall ppdet' to uninstall the dynamic "
                     "ppdet version first.".format(e))
        sys.exit(-1)
    else:
        raise e

from paddleslim.quant import quant_aware, convert


def main():
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    # check whether use_gpu=True is set on a CPU-only PaddlePaddle build
    check_gpu(cfg.use_gpu)
    # check whether the installed PaddlePaddle version satisfies the requirement
    check_version()

    main_arch = cfg.architecture

    dataset = cfg.TestReader['dataset']

    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)

    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)
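
    # build the startup and inference programs from the model definition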
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            feed_vars, loader = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    reader = create_reader(cfg.TestReader)
    # in iterable mode, use loader.set_sample_list_generator(reader, place) instead
    loader.set_sample_list_generator(reader)
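
    # quantization config: channel-wise abs-max for weights, moving-average
    # abs-max for activations; conv2d, depthwise_conv2d and mul ops are
    # quantized unless their name_scope matches --not_quant_pattern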
    not_quant_pattern = []
    if FLAGS.not_quant_pattern:
        not_quant_pattern = FLAGS.not_quant_pattern
    config = {
        'weight_quantize_type': 'channel_wise_abs_max',
        'activation_quantize_type': 'moving_average_abs_max',
        'quantize_op_types': ['depthwise_conv2d', 'mul', 'conv2d'],
        'not_quant_pattern': not_quant_pattern
    }
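
    # insert fake quantize/dequantize ops into the program; for_test=True
    # builds the evaluation graph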
    infer_prog = quant_aware(infer_prog, place, config, for_test=True)

    exe.run(startup_prog)
    if cfg.weights:
        checkpoint.load_params(exe, infer_prog, cfg.weights)
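
    # fold the fake quantization ops into a deployable inference program now
    # that the quant-aware trained weights are loaded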
    infer_prog = convert(infer_prog, place, config, save_int8=False)

    # parse infer fetches
    assert cfg.metric in ['COCO', 'VOC', 'OID', 'WIDERFACE'], \
        "unknown metric type {}".format(cfg.metric)
    extra_keys = []
    if cfg['metric'] in ['COCO', 'OID']:
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg['metric'] == 'VOC' or cfg['metric'] == 'WIDERFACE':
        extra_keys = ['im_id', 'im_shape']
    keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)
    # parse dataset category
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info
    if cfg.metric == 'OID':
        from ppdet.utils.oid_eval import bbox2out, get_category_info
    if cfg.metric == "VOC":
        from ppdet.utils.voc_eval import bbox2out, get_category_info
    if cfg.metric == "WIDERFACE":
        from ppdet.utils.widerface_eval_utils import bbox2out, get_category_info

    anno_file = dataset.get_anno()
    with_background = dataset.with_background
    use_default_label = dataset.use_default_label
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)
    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    imid2path = dataset.get_imid2path()
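
    # run inference batch by batch, decode bbox/mask outputs, then draw and
    # save the visualized results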
    iter_id = 0
    try:
        loader.start()
        while True:
            outs = exe.run(infer_prog, fetch_list=values, return_numpy=False)
            res = {
                k: (np.array(v), v.recursive_sequence_lengths())
                for k, v in zip(keys, outs)
            }
            logger.info('Infer iter {}'.format(iter_id))
            iter_id += 1

            bbox_results = None
            mask_results = None
            if 'bbox' in res:
                bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)
            if 'mask' in res:
                mask_results = mask2out([res], clsid2catid,
                                        model.mask_head.resolution)

            # visualize result
            im_ids = res['im_id'][0]
            for im_id in im_ids:
                image_path = imid2path[int(im_id)]
                image = Image.open(image_path).convert('RGB')
                image = visualize_results(image,
                                          int(im_id), catid2name,
                                          FLAGS.draw_threshold, bbox_results,
                                          mask_results)
                save_name = get_save_image_name(FLAGS.output_dir, image_path)
                logger.info("Detection bbox results saved in {}".format(
                    save_name))
                image.save(save_name, quality=95)
    except (StopIteration, fluid.core.EOFException):
        loader.reset()


if __name__ == '__main__':
    enable_static_mode()
    parser = ArgsParser()
    parser.add_argument(
        "--infer_dir",
        type=str,
        default=None,
        help="Directory of images to run inference on.")
    parser.add_argument(
        "--infer_img",
        type=str,
        default=None,
        help="Image path; takes priority over --infer_dir.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output visualization files.")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=0.5,
        help="Minimum score threshold for a detection to be visualized.")
    parser.add_argument(
        "--not_quant_pattern",
        nargs='+',
        type=str,
        help="Layers whose name_scope contains any string in not_quant_pattern "
        "will not be quantized.")
    FLAGS = parser.parse_args()
    main()