benchmark_utils.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import logging
from pathlib import Path

import paddle
import paddle.inference as paddle_infer

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_PATH_ROOT = f"{CUR_DIR}/../../output"


class PaddleInferBenchmark(object):
    def __init__(self,
                 config,
                 model_info: dict = {},
                 data_info: dict = {},
                 perf_info: dict = {},
                 resource_info: dict = {},
                 **kwargs):
  29. """
  30. Construct PaddleInferBenchmark Class to format logs.
  31. args:
  32. config(paddle.inference.Config): paddle inference config
  33. model_info(dict): basic model info
  34. {'model_name': 'resnet50'
  35. 'precision': 'fp32'}
  36. data_info(dict): input data info
  37. {'batch_size': 1
  38. 'shape': '3,224,224'
  39. 'data_num': 1000}
  40. perf_info(dict): performance result
  41. {'preprocess_time_s': 1.0
  42. 'inference_time_s': 2.0
  43. 'postprocess_time_s': 1.0
  44. 'total_time_s': 4.0}
  45. resource_info(dict):
  46. cpu and gpu resources
  47. {'cpu_rss': 100
  48. 'gpu_rss': 100
  49. 'gpu_util': 60}
  50. """
        # PaddleInferBenchmark log version
        self.log_version = "1.0.3"

        # Paddle version
        self.paddle_version = paddle.__version__
        self.paddle_commit = paddle.__git_commit__
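        # paddle_infer.get_version() returns a "key: value" style version
        # string; keep whatever follows the final ': ' as the source branch.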
        paddle_infer_info = paddle_infer.get_version()
        self.paddle_branch = paddle_infer_info.strip().split(': ')[-1]

        # model info
        self.model_info = model_info

        # data info
        self.data_info = data_info

        # perf info
        self.perf_info = perf_info
        try:
            # required values
            self.model_name = model_info['model_name']
            self.precision = model_info['precision']

            self.batch_size = data_info['batch_size']
            self.shape = data_info['shape']
            self.data_num = data_info['data_num']

            self.inference_time_s = round(perf_info['inference_time_s'], 4)
        except (KeyError, TypeError):
            self.print_help()
            raise ValueError(
                "Wrong argument set, please check the input arguments and their types"
            )
        self.preprocess_time_s = perf_info.get('preprocess_time_s', 0)
        self.postprocess_time_s = perf_info.get('postprocess_time_s', 0)
        self.with_tracker = 'tracking_time_s' in perf_info
        self.tracking_time_s = perf_info.get('tracking_time_s', 0)
        self.total_time_s = perf_info.get('total_time_s', 0)

        self.inference_time_s_90 = perf_info.get("inference_time_s_90", "")
        self.inference_time_s_99 = perf_info.get("inference_time_s_99", "")
        self.succ_rate = perf_info.get("succ_rate", "")
        self.qps = perf_info.get("qps", "")

        # conf info
        self.config_status = self.parse_config(config)
        # mem info
        if isinstance(resource_info, dict):
            self.cpu_rss_mb = int(resource_info.get('cpu_rss_mb', 0))
            self.cpu_vms_mb = int(resource_info.get('cpu_vms_mb', 0))
            self.cpu_shared_mb = int(resource_info.get('cpu_shared_mb', 0))
            self.cpu_dirty_mb = int(resource_info.get('cpu_dirty_mb', 0))
            self.cpu_util = round(resource_info.get('cpu_util', 0), 2)

            self.gpu_rss_mb = int(resource_info.get('gpu_rss_mb', 0))
            self.gpu_util = round(resource_info.get('gpu_util', 0), 2)
            self.gpu_mem_util = round(resource_info.get('gpu_mem_util', 0), 2)
        else:
            self.cpu_rss_mb = 0
            self.cpu_vms_mb = 0
            self.cpu_shared_mb = 0
            self.cpu_dirty_mb = 0
            self.cpu_util = 0

            self.gpu_rss_mb = 0
            self.gpu_util = 0
            self.gpu_mem_util = 0

        # init benchmark logger
        self.benchmark_logger()

    def benchmark_logger(self):
        """
        benchmark logger
        """
        # remove any handlers already attached to the root logger;
        # otherwise logging.basicConfig below would be a no-op
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        # init logger
        FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        log_output = f"{LOG_PATH_ROOT}/{self.model_name}.log"
        Path(f"{LOG_PATH_ROOT}").mkdir(parents=True, exist_ok=True)
        logging.basicConfig(
            level=logging.INFO,
            format=FORMAT,
            handlers=[
                logging.FileHandler(
                    filename=log_output, mode='w'),
                logging.StreamHandler(),
            ])
        self.logger = logging.getLogger(__name__)
        self.logger.info(
            f"Paddle Inference benchmark log will be saved to {log_output}")

    def parse_config(self, config) -> dict:
        """
        parse paddle predictor config
        args:
            config(paddle.inference.Config): paddle inference config
        return:
            config_status(dict): dict style config info
        """
        # shared by both the Config and the dict branches
        config_status = {}
        if isinstance(config, paddle_infer.Config):
            config_status['runtime_device'] = "gpu" if config.use_gpu() else "cpu"
            config_status['ir_optim'] = config.ir_optim()
            config_status['enable_tensorrt'] = config.tensorrt_engine_enabled()
            config_status['precision'] = self.precision
            config_status['enable_mkldnn'] = config.mkldnn_enabled()
            config_status['cpu_math_library_num_threads'] = \
                config.cpu_math_library_num_threads()
        elif isinstance(config, dict):
            config_status['runtime_device'] = config.get('runtime_device', "")
            config_status['ir_optim'] = config.get('ir_optim', "")
            config_status['enable_tensorrt'] = config.get('enable_tensorrt', "")
            config_status['precision'] = config.get('precision', "")
            config_status['enable_mkldnn'] = config.get('enable_mkldnn', "")
            config_status['cpu_math_library_num_threads'] = config.get(
                'cpu_math_library_num_threads', "")
        else:
            self.print_help()
            raise ValueError(
                "Wrong config argument, please pass a paddle.inference.Config or a dict"
            )
        return config_status

    def report(self, identifier=None):
        """
        print log report
        args:
            identifier(string): identify log
        """
        if identifier:
            identifier = f"[{identifier}]"
        else:
            identifier = ""

        self.logger.info("\n")
        self.logger.info(
            "---------------------- Paddle info ----------------------")
        self.logger.info(f"{identifier} paddle_version: {self.paddle_version}")
        self.logger.info(f"{identifier} paddle_commit: {self.paddle_commit}")
        self.logger.info(f"{identifier} paddle_branch: {self.paddle_branch}")
        self.logger.info(f"{identifier} log_api_version: {self.log_version}")
        self.logger.info(
            "----------------------- Conf info -----------------------")
        self.logger.info(
            f"{identifier} runtime_device: {self.config_status['runtime_device']}")
        self.logger.info(
            f"{identifier} ir_optim: {self.config_status['ir_optim']}")
        self.logger.info(f"{identifier} enable_memory_optim: {True}")
        self.logger.info(
            f"{identifier} enable_tensorrt: {self.config_status['enable_tensorrt']}")
        self.logger.info(
            f"{identifier} enable_mkldnn: {self.config_status['enable_mkldnn']}")
        self.logger.info(
            f"{identifier} cpu_math_library_num_threads: "
            f"{self.config_status['cpu_math_library_num_threads']}")
        self.logger.info(
            "----------------------- Model info ----------------------")
        self.logger.info(f"{identifier} model_name: {self.model_name}")
        self.logger.info(f"{identifier} precision: {self.precision}")
        self.logger.info(
            "----------------------- Data info -----------------------")
        self.logger.info(f"{identifier} batch_size: {self.batch_size}")
        self.logger.info(f"{identifier} input_shape: {self.shape}")
        self.logger.info(f"{identifier} data_num: {self.data_num}")
        self.logger.info(
            "----------------------- Perf info -----------------------")
        self.logger.info(
            f"{identifier} cpu_rss(MB): {self.cpu_rss_mb}, "
            f"cpu_vms: {self.cpu_vms_mb}, cpu_shared_mb: {self.cpu_shared_mb}, "
            f"cpu_dirty_mb: {self.cpu_dirty_mb}, cpu_util: {self.cpu_util}%")
        self.logger.info(
            f"{identifier} gpu_rss(MB): {self.gpu_rss_mb}, "
            f"gpu_util: {self.gpu_util}%, gpu_mem_util: {self.gpu_mem_util}%")
        self.logger.info(
            f"{identifier} total time spent(s): {self.total_time_s}")

        if self.with_tracker:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s * 1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s * 1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s * 1000, 1)}, "
                f"tracking_time(ms): {round(self.tracking_time_s * 1000, 1)}")
        else:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s * 1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s * 1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s * 1000, 1)}")
        if self.inference_time_s_90:
            self.logger.info(
                f"{identifier} 90%_cost: {self.inference_time_s_90}, "
                f"99%_cost: {self.inference_time_s_99}, succ_rate: {self.succ_rate}")
        if self.qps:
            self.logger.info(f"{identifier} QPS: {self.qps}")

    def print_help(self):
        """
        print function help
        """
        print("""Usage:
            ==== Print inference benchmark logs. ====
            config = paddle.inference.Config()
            model_info = {'model_name': 'resnet50',
                          'precision': 'fp32'}
            data_info = {'batch_size': 1,
                         'shape': '3,224,224',
                         'data_num': 1000}
            perf_info = {'preprocess_time_s': 1.0,
                         'inference_time_s': 2.0,
                         'postprocess_time_s': 1.0,
                         'total_time_s': 4.0}
            resource_info = {'cpu_rss_mb': 100,
                             'gpu_rss_mb': 100,
                             'gpu_util': 60}
            log = PaddleInferBenchmark(config, model_info, data_info, perf_info, resource_info)
            log('Test')
            """)

    def __call__(self, identifier=None):
        """
        print the log report, same as calling report() directly
        args:
            identifier(string): identify log
        """
        self.report(identifier)
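

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module),
    # mirroring the example shown by print_help(). A plain dict stands in for
    # paddle.inference.Config so the demo runs without building a predictor;
    # every value below is a placeholder.
    config = {
        'runtime_device': 'cpu',
        'ir_optim': True,
        'enable_tensorrt': False,
        'precision': 'fp32',
        'enable_mkldnn': False,
        'cpu_math_library_num_threads': 1,
    }
    model_info = {'model_name': 'resnet50', 'precision': 'fp32'}
    data_info = {'batch_size': 1, 'shape': '3,224,224', 'data_num': 1000}
    perf_info = {
        'preprocess_time_s': 1.0,
        'inference_time_s': 2.0,
        'postprocess_time_s': 1.0,
        'total_time_s': 4.0,
    }
    resource_info = {'cpu_rss_mb': 100, 'gpu_rss_mb': 100, 'gpu_util': 60}

    # writes the formatted report to stdout and to LOG_PATH_ROOT/resnet50.log
    log = PaddleInferBenchmark(config, model_info, data_info, perf_info,
                               resource_info)
    log('Test')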