export_model.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
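
# Export a detection model trained with quantization-aware training (QAT) to
# static inference models: PaddleSlim's quant_aware/convert transforms are
# applied to the test program, the trained weights are loaded, and both a
# float and an int8 inference model are saved.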

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)

from paddle import fluid

try:
    from ppdet.core.workspace import load_config, merge_config, create
    from ppdet.utils.cli import ArgsParser
    import ppdet.utils.checkpoint as checkpoint
    from ppdet.utils.export_utils import save_infer_model, dump_infer_config
    from ppdet.utils.check import check_config, check_version, enable_static_mode
except ImportError as e:
    if sys.argv[0].find('static') >= 0:
        logger.error("Importing ppdet failed when running static model "
                     "with error: {}\n"
                     "please try:\n"
                     "\t1. run static model under PaddleDetection/static "
                     "directory\n"
                     "\t2. run 'pip uninstall ppdet' to uninstall the ppdet "
                     "dynamic version first.".format(e))
        sys.exit(-1)
    else:
        raise e

from paddleslim.quant import quant_aware, convert


def main():
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_version()

    main_arch = cfg.architecture

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
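
    # Build the test-mode graph under program_guard so all ops/vars land in
    # infer_prog. use_dataloader=False makes build_inputs return plain feed
    # variables (no DataLoader), which is the input interface the exported
    # inference model expects.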
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    not_quant_pattern = []
    if FLAGS.not_quant_pattern:
        not_quant_pattern = FLAGS.not_quant_pattern
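
    # PaddleSlim quantization config: per-channel abs-max quantization for
    # weights, moving-average abs-max scales for activations, quantization
    # applied only to conv2d/depthwise_conv2d/mul ops, and ops whose name_scope
    # matches an entry in not_quant_pattern left unquantized.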
    config = {
        'weight_quantize_type': 'channel_wise_abs_max',
        'activation_quantize_type': 'moving_average_abs_max',
        'quantize_op_types': ['depthwise_conv2d', 'mul', 'conv2d'],
        'not_quant_pattern': not_quant_pattern
    }
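
    # quant_aware(..., for_test=True) inserts fake quantize/dequantize ops into
    # the test program so that weights saved from quantization-aware training
    # can be loaded into it below.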
    infer_prog = quant_aware(infer_prog, place, config, for_test=True)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)
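
    # convert() freezes the fake-quant ops into an inference-ready program;
    # with save_int8=True it also returns a program whose quantized weights
    # are stored as int8.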
    infer_prog, int8_program = convert(
        infer_prog, place, config, save_int8=True)

    # Save the float and the int8 inference models into sibling subdirectories
    # of the requested output directory.
    base_output_dir = FLAGS.output_dir
    FLAGS.output_dir = os.path.join(base_output_dir, 'float')
    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
    FLAGS.output_dir = os.path.join(base_output_dir, 'int')
    save_infer_model(FLAGS, exe, feed_vars, test_fetches, int8_program)


if __name__ == '__main__':
    enable_static_mode()
    parser = ArgsParser()
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output model files.")
    parser.add_argument(
        "--not_quant_pattern",
        nargs='+',
        type=str,
        help="Layers whose name_scope contains one of these strings "
        "will not be quantized.")

    FLAGS = parser.parse_args()
    main()
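
# Example invocation, run from the PaddleDetection/static directory
# (the config and weights paths are illustrative; point them at your own QAT
# config and the checkpoint produced by quantization-aware training):
#
#   python slim/quantization/export_model.py \
#       -c configs/yolov3_mobilenet_v1_voc.yml \
#       -o weights=output/yolov3_mobilenet_v1_voc/best_model \
#       --output_dir=./inference_model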