# infer.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

import argparse
import time
import yaml
import ast
from functools import reduce

import cv2
import numpy as np
import paddle
import paddle.fluid as fluid

from preprocess import preprocess, Resize, Normalize, Permute, PadStride
from visualize import visualize_box_mask, lmk2out

# Global dictionary
SUPPORT_MODELS = {
    'YOLO',
    'SSD',
    'RetinaNet',
    'EfficientDet',
    'RCNN',
    'Face',
    'TTF',
    'FCOS',
    'SOLOv2',
}


class Detector(object):
    """
    Args:
        config (object): config of model, defined by `Config(model_dir)`
        model_dir (str): root path of __model__, __params__ and infer_cfg.yml
        device (str): device to run on, one of CPU/GPU/XPU, default is CPU
        run_mode (str): mode of running (fluid/trt_fp32/trt_fp16)
        threshold (float): score threshold used to filter output boxes
        trt_calib_mode (bool): whether the model was produced by TRT offline
            quantitative calibration
        enable_mkldnn (bool): whether to use MKL-DNN on CPU
        enable_mkldnn_bfloat16 (bool): whether to use MKL-DNN bfloat16 on CPU
    """

    def __init__(self,
                 config,
                 model_dir,
                 device='CPU',
                 run_mode='fluid',
                 threshold=0.5,
                 trt_calib_mode=False,
                 enable_mkldnn=False,
                 enable_mkldnn_bfloat16=False):
        self.config = config
        if self.config.use_python_inference:
            self.executor, self.program, self.fetch_targets = load_executor(
                model_dir, device=device)
        else:
            self.predictor = load_predictor(
                model_dir,
                run_mode=run_mode,
                min_subgraph_size=self.config.min_subgraph_size,
                device=device,
                trt_calib_mode=trt_calib_mode,
                enable_mkldnn=enable_mkldnn,
                enable_mkldnn_bfloat16=enable_mkldnn_bfloat16)

    def preprocess(self, im):
        preprocess_ops = []
        for op_info in self.config.preprocess_infos:
            new_op_info = op_info.copy()
            op_type = new_op_info.pop('type')
            if op_type == 'Resize':
                new_op_info['arch'] = self.config.arch
            # instantiate the preprocess op class (Resize, Normalize, ...) by name
            preprocess_ops.append(eval(op_type)(**new_op_info))
        im, im_info = preprocess(im, preprocess_ops)
        inputs = create_inputs(im, im_info, self.config.arch)
        return inputs, im_info

    def postprocess(self, np_boxes, np_masks, np_lmk, im_info, threshold=0.5):
        # postprocess output of predictor
        results = {}
        if np_lmk is not None:
            results['landmark'] = lmk2out(np_boxes, np_lmk, im_info, threshold)

        if self.config.arch in ['SSD', 'Face']:
            # boxes are normalized; scale them back to the original image size.
            # origin_shape is (height, width), as produced by im.shape[:2]
            h, w = im_info['origin_shape']
            np_boxes[:, 2] *= w
            np_boxes[:, 3] *= h
            np_boxes[:, 4] *= w
            np_boxes[:, 5] *= h
        expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
        np_boxes = np_boxes[expect_boxes, :]
        for box in np_boxes:
            print('class_id:{:d}, confidence:{:.4f}, '
                  'left_top:[{:.2f},{:.2f}], '
                  'right_bottom:[{:.2f},{:.2f}]'.format(
                      int(box[0]), box[1], box[2], box[3], box[4], box[5]))
        results['boxes'] = np_boxes
        if np_masks is not None:
            np_masks = np_masks[expect_boxes, :, :, :]
            results['masks'] = np_masks
        return results

    def predict(self,
                image,
                threshold=0.5,
                warmup=0,
                repeats=1,
                run_benchmark=False):
        '''
        Args:
            image (str/np.ndarray): path of image / np.ndarray read by cv2
            threshold (float): threshold of predicted box's score
        Returns:
            results (dict): 'boxes' is a np.ndarray of shape [N, 6], where N is
                the number of boxes and each row is
                [class, score, x_min, y_min, x_max, y_max];
                MaskRCNN's results also include 'masks', a np.ndarray of shape
                [N, class_num, mask_resolution, mask_resolution]
        '''
        inputs, im_info = self.preprocess(image)
        np_boxes, np_masks, np_lmk = None, None, None
        if self.config.use_python_inference:
            for i in range(warmup):
                outs = self.executor.run(self.program,
                                         feed=inputs,
                                         fetch_list=self.fetch_targets,
                                         return_numpy=False)
            t1 = time.time()
            for i in range(repeats):
                outs = self.executor.run(self.program,
                                         feed=inputs,
                                         fetch_list=self.fetch_targets,
                                         return_numpy=False)
            t2 = time.time()
            ms = (t2 - t1) * 1000.0 / repeats
            print("Inference: {} ms per batch image".format(ms))

            np_boxes = np.array(outs[0])
            if self.config.mask_resolution is not None:
                np_masks = np.array(outs[1])
        else:
            input_names = self.predictor.get_input_names()
            for i in range(len(input_names)):
                input_tensor = self.predictor.get_input_tensor(input_names[i])
                input_tensor.copy_from_cpu(inputs[input_names[i]])

            for i in range(warmup):
                self.predictor.zero_copy_run()
                output_names = self.predictor.get_output_names()
                boxes_tensor = self.predictor.get_output_tensor(output_names[0])
                np_boxes = boxes_tensor.copy_to_cpu()
                if self.config.mask_resolution is not None:
                    masks_tensor = self.predictor.get_output_tensor(
                        output_names[1])
                    np_masks = masks_tensor.copy_to_cpu()
                if self.config.with_lmk:
                    face_index = self.predictor.get_output_tensor(
                        output_names[1])
                    landmark = self.predictor.get_output_tensor(output_names[2])
                    prior_boxes = self.predictor.get_output_tensor(
                        output_names[3])
                    np_face_index = face_index.copy_to_cpu()
                    np_prior_boxes = prior_boxes.copy_to_cpu()
                    np_landmark = landmark.copy_to_cpu()
                    np_lmk = [np_face_index, np_landmark, np_prior_boxes]

            t1 = time.time()
            for i in range(repeats):
                self.predictor.zero_copy_run()
                output_names = self.predictor.get_output_names()
                boxes_tensor = self.predictor.get_output_tensor(output_names[0])
                np_boxes = boxes_tensor.copy_to_cpu()
                if self.config.mask_resolution is not None:
                    masks_tensor = self.predictor.get_output_tensor(
                        output_names[1])
                    np_masks = masks_tensor.copy_to_cpu()
                if self.config.with_lmk:
                    face_index = self.predictor.get_output_tensor(
                        output_names[1])
                    landmark = self.predictor.get_output_tensor(output_names[2])
                    prior_boxes = self.predictor.get_output_tensor(
                        output_names[3])
                    np_face_index = face_index.copy_to_cpu()
                    np_prior_boxes = prior_boxes.copy_to_cpu()
                    np_landmark = landmark.copy_to_cpu()
                    np_lmk = [np_face_index, np_landmark, np_prior_boxes]
            t2 = time.time()
            ms = (t2 - t1) * 1000.0 / repeats
            print("Inference: {} ms per batch image".format(ms))

        # do not perform postprocess in benchmark mode
        results = []
        if not run_benchmark:
            if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
                print('[WARNING] No object detected.')
                results = {'boxes': np.array([])}
            else:
                results = self.postprocess(
                    np_boxes, np_masks, np_lmk, im_info, threshold=threshold)

        return results
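

# A minimal usage sketch for the Detector class above, mirroring what main()
# does below (the model directory and image path are placeholders):
#
#     config = Config('inference_model/yolov3_darknet')
#     detector = Detector(config, 'inference_model/yolov3_darknet',
#                         device='GPU', run_mode='fluid')
#     results = detector.predict('demo.jpg', threshold=0.5)
#     # results['boxes'] has shape [N, 6]:
#     # [class, score, x_min, y_min, x_max, y_max] per row
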

class DetectorSOLOv2(Detector):
    def __init__(self,
                 config,
                 model_dir,
                 device='CPU',
                 run_mode='fluid',
                 threshold=0.5,
                 trt_calib_mode=False,
                 enable_mkldnn=False,
                 enable_mkldnn_bfloat16=False):
        super(DetectorSOLOv2, self).__init__(
            config=config,
            model_dir=model_dir,
            device=device,
            run_mode=run_mode,
            threshold=threshold,
            trt_calib_mode=trt_calib_mode,
            enable_mkldnn=enable_mkldnn,
            enable_mkldnn_bfloat16=enable_mkldnn_bfloat16)

    def predict(self,
                image,
                threshold=0.5,
                warmup=0,
                repeats=1,
                run_benchmark=False):
        inputs, im_info = self.preprocess(image)
        np_label, np_score, np_segms = None, None, None
        if self.config.use_python_inference:
            for i in range(warmup):
                outs = self.executor.run(self.program,
                                         feed=inputs,
                                         fetch_list=self.fetch_targets,
                                         return_numpy=False)
            t1 = time.time()
            for i in range(repeats):
                outs = self.executor.run(self.program,
                                         feed=inputs,
                                         fetch_list=self.fetch_targets,
                                         return_numpy=False)
            t2 = time.time()
            ms = (t2 - t1) * 1000.0 / repeats
            print("Inference: {} ms per batch image".format(ms))

            np_label, np_score, np_segms = np.array(outs[0]), np.array(
                outs[1]), np.array(outs[2])
        else:
            input_names = self.predictor.get_input_names()
            for i in range(len(input_names)):
                input_tensor = self.predictor.get_input_tensor(input_names[i])
                input_tensor.copy_from_cpu(inputs[input_names[i]])
            for i in range(warmup):
                self.predictor.zero_copy_run()
                output_names = self.predictor.get_output_names()
                np_label = self.predictor.get_output_tensor(
                    output_names[0]).copy_to_cpu()
                np_score = self.predictor.get_output_tensor(
                    output_names[1]).copy_to_cpu()
                np_segms = self.predictor.get_output_tensor(
                    output_names[2]).copy_to_cpu()

            t1 = time.time()
            for i in range(repeats):
                self.predictor.zero_copy_run()
                output_names = self.predictor.get_output_names()
                np_label = self.predictor.get_output_tensor(
                    output_names[0]).copy_to_cpu()
                np_score = self.predictor.get_output_tensor(
                    output_names[1]).copy_to_cpu()
                np_segms = self.predictor.get_output_tensor(
                    output_names[2]).copy_to_cpu()
            t2 = time.time()
            ms = (t2 - t1) * 1000.0 / repeats
            print("Inference: {} ms per batch image".format(ms))

        # do not perform postprocess in benchmark mode
        results = []
        if not run_benchmark:
            return dict(segm=np_segms, label=np_label, score=np_score)
        return results
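

# Note: unlike Detector.predict, DetectorSOLOv2.predict (above) returns a dict
# keyed 'segm', 'label' and 'score'. A hedged sketch with a placeholder model
# directory:
#
#     config = Config('inference_model/solov2_r50_fpn')
#     detector = DetectorSOLOv2(config, 'inference_model/solov2_r50_fpn')
#     out = detector.predict('demo.jpg')
#     masks, labels, scores = out['segm'], out['label'], out['score']
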

def create_inputs(im, im_info, model_arch='YOLO'):
    """generate input for different model type
    Args:
        im (np.ndarray): image (np.ndarray)
        im_info (dict): info of image
        model_arch (str): model type
    Returns:
        inputs (dict): input of model
    """
    inputs = {}
    inputs['image'] = im
    origin_shape = list(im_info['origin_shape'])
    resize_shape = list(im_info['resize_shape'])
    pad_shape = list(im_info['pad_shape']) if im_info[
        'pad_shape'] is not None else list(im_info['resize_shape'])
    scale_x, scale_y = im_info['scale']
    if 'YOLO' in model_arch:
        im_size = np.array([origin_shape]).astype('int32')
        inputs['im_size'] = im_size
    elif 'RetinaNet' in model_arch or 'EfficientDet' in model_arch:
        scale = scale_x
        im_info = np.array([pad_shape + [scale]]).astype('float32')
        inputs['im_info'] = im_info
    elif ('RCNN' in model_arch) or ('FCOS' in model_arch):
        scale = scale_x
        im_info = np.array([pad_shape + [scale]]).astype('float32')
        im_shape = np.array([origin_shape + [1.]]).astype('float32')
        inputs['im_info'] = im_info
        inputs['im_shape'] = im_shape
    elif 'TTF' in model_arch:
        scale_factor = np.array([scale_x, scale_y] * 2).astype('float32')
        inputs['scale_factor'] = scale_factor
    elif 'SOLOv2' in model_arch:
        scale = scale_x
        im_info = np.array([resize_shape + [scale]]).astype('float32')
        inputs['im_info'] = im_info
    return inputs
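

# An illustrative sketch of create_inputs (above) for a YOLO-style model; the
# dummy array and shapes are made up, and in practice im and im_info come from
# preprocess():
#
#     im = np.zeros((1, 3, 608, 608), dtype='float32')  # preprocessed NCHW batch
#     im_info = {'origin_shape': (480, 640), 'resize_shape': (608, 608),
#                'pad_shape': None, 'scale': (0.95, 1.2667)}
#     inputs = create_inputs(im, im_info, model_arch='YOLO')
#     # inputs has keys 'image' and 'im_size' (the original (h, w) as int32)
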

class Config():
    """set config of preprocess, postprocess and visualize
    Args:
        model_dir (str): root path of model.yml
    """

    def __init__(self, model_dir):
        # parsing Yaml config for Preprocess
        deploy_file = os.path.join(model_dir, 'infer_cfg.yml')
        with open(deploy_file) as f:
            yml_conf = yaml.safe_load(f)
        self.check_model(yml_conf)
        self.arch = yml_conf['arch']
        self.preprocess_infos = yml_conf['Preprocess']
        self.use_python_inference = yml_conf['use_python_inference']
        self.min_subgraph_size = yml_conf['min_subgraph_size']
        self.labels = yml_conf['label_list']
        self.mask_resolution = None
        if 'mask_resolution' in yml_conf:
            self.mask_resolution = yml_conf['mask_resolution']
        self.with_lmk = None
        if 'with_lmk' in yml_conf:
            self.with_lmk = yml_conf['with_lmk']
        self.print_config()

    def check_model(self, yml_conf):
        """
        Raises:
            ValueError: loaded model not in supported model type
        """
        for support_model in SUPPORT_MODELS:
            if support_model in yml_conf['arch']:
                return True
        raise ValueError("Unsupported arch: {}, expect {}".format(
            yml_conf['arch'], SUPPORT_MODELS))

    def print_config(self):
        print('----------- Model Configuration -----------')
        print('%s: %s' % ('Model Arch', self.arch))
        print('%s: %s' % ('Use Paddle Executor', self.use_python_inference))
        print('%s: ' % ('Transform Order'))
        for op_info in self.preprocess_infos:
            print('--%s: %s' % ('transform op', op_info['type']))
        print('--------------------------------------------')
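

# For reference, a minimal infer_cfg.yml that the Config class above could
# parse might look like the following. The keys match what Config reads; the
# values and the Resize op parameters are illustrative assumptions, not taken
# from a real export:
#
#     arch: YOLO
#     use_python_inference: false
#     min_subgraph_size: 3
#     label_list: [person, car]
#     Preprocess:
#     - type: Resize
#       target_size: 608
#     - type: Normalize
#       mean: [0.485, 0.456, 0.406]
#       std: [0.229, 0.224, 0.225]
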

def load_predictor(model_dir,
                   run_mode='fluid',
                   batch_size=1,
                   device='CPU',
                   min_subgraph_size=3,
                   trt_calib_mode=False,
                   enable_mkldnn=False,
                   enable_mkldnn_bfloat16=False):
    """set AnalysisConfig, generate AnalysisPredictor
    Args:
        model_dir (str): root path of __model__ and __params__
        device (str): device to run on, one of CPU/GPU/XPU, default is CPU
        trt_calib_mode (bool): If the model is produced by TRT offline
            quantitative calibration, trt_calib_mode needs to be True
        enable_mkldnn (bool): whether to use MKL-DNN on CPU, default is False
        enable_mkldnn_bfloat16 (bool): whether to use MKL-DNN bfloat16 on CPU,
            default is False
    Returns:
        predictor (PaddlePredictor): AnalysisPredictor
    Raises:
        ValueError: predicting with TensorRT requires device == GPU
    """
    if device != 'GPU' and run_mode != 'fluid':
        raise ValueError(
            "Predict by TensorRT mode: {}, expect device == GPU, but device == {}"
            .format(run_mode, device))
    precision_map = {
        'trt_int8': fluid.core.AnalysisConfig.Precision.Int8,
        'trt_fp32': fluid.core.AnalysisConfig.Precision.Float32,
        'trt_fp16': fluid.core.AnalysisConfig.Precision.Half
    }
    config = fluid.core.AnalysisConfig(
        os.path.join(model_dir, '__model__'),
        os.path.join(model_dir, '__params__'))
    if device == 'GPU':
        # initial GPU memory(M), device ID
        config.enable_use_gpu(100, 0)
        # optimize graph and fuse op
        config.switch_ir_optim(True)
    elif device == 'XPU':
        config.enable_lite_engine()
        config.enable_xpu(10 * 1024 * 1024)
    else:
        config.disable_gpu()
        if enable_mkldnn:
            config.set_mkldnn_cache_capacity(0)
            config.enable_mkldnn()
            config.pass_builder().append_pass("interpolate_mkldnn_pass")
            if enable_mkldnn_bfloat16:
                config.enable_mkldnn_bfloat16()

    if run_mode in precision_map.keys():
        config.enable_tensorrt_engine(
            workspace_size=1 << 10,
            max_batch_size=batch_size,
            min_subgraph_size=min_subgraph_size,
            precision_mode=precision_map[run_mode],
            use_static=False,
            use_calib_mode=trt_calib_mode)

    # disable print log when predict
    config.disable_glog_info()
    # enable shared memory
    if not enable_mkldnn:
        config.enable_memory_optim()
    # disable feed, fetch OP, needed by zero_copy_run
    config.switch_use_feed_fetch_ops(False)
    predictor = fluid.core.create_paddle_predictor(config)
    return predictor
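

# A hedged usage sketch for load_predictor (above); the model directory is a
# placeholder. Running an exported model with TensorRT FP16 on GPU 0 would look
# roughly like:
#
#     predictor = load_predictor('inference_model/yolov3_darknet',
#                                run_mode='trt_fp16', device='GPU',
#                                min_subgraph_size=3)
#     # feed inputs via predictor.get_input_tensor(name).copy_from_cpu(arr),
#     # call predictor.zero_copy_run(), then read outputs back with
#     # predictor.get_output_tensor(name).copy_to_cpu()
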

def load_executor(model_dir, device='CPU'):
    if device == 'GPU':
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    program, feed_names, fetch_targets = fluid.io.load_inference_model(
        dirname=model_dir,
        executor=exe,
        model_filename='__model__',
        params_filename='__params__')
    return exe, program, fetch_targets


def visualize(image_file,
              results,
              labels,
              mask_resolution=14,
              output_dir='output/',
              threshold=0.5):
    # visualize the predict result
    im = visualize_box_mask(
        image_file,
        results,
        labels,
        mask_resolution=mask_resolution,
        threshold=threshold)
    img_name = os.path.split(image_file)[-1]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    out_path = os.path.join(output_dir, img_name)
    im.save(out_path, quality=95)
    print("save result to: " + out_path)


def print_arguments(args):
    print('----------- Running Arguments -----------')
    for arg, value in sorted(vars(args).items()):
        print('%s: %s' % (arg, value))
    print('------------------------------------------')


def predict_image(detector):
    if FLAGS.run_benchmark:
        detector.predict(
            FLAGS.image_file,
            FLAGS.threshold,
            warmup=100,
            repeats=100,
            run_benchmark=True)
    else:
        results = detector.predict(FLAGS.image_file, FLAGS.threshold)
        visualize(
            FLAGS.image_file,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution,
            output_dir=FLAGS.output_dir,
            threshold=FLAGS.threshold)


def predict_video(detector, camera_id):
    if camera_id != -1:
        capture = cv2.VideoCapture(camera_id)
        video_name = 'output.mp4'
    else:
        capture = cv2.VideoCapture(FLAGS.video_file)
        video_name = os.path.split(FLAGS.video_file)[-1]
    fps = 30
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    out_path = os.path.join(FLAGS.output_dir, video_name)
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    index = 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        print('detect frame:%d' % (index))
        index += 1
        results = detector.predict(frame, FLAGS.threshold)
        im = visualize_box_mask(
            frame,
            results,
            detector.config.labels,
            mask_resolution=detector.config.mask_resolution,
            threshold=FLAGS.threshold)
        im = np.array(im)
        writer.write(im)
        if camera_id != -1:
            cv2.imshow('Mask Detection', im)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    writer.release()


def main():
    config = Config(FLAGS.model_dir)
    detector = Detector(
        config,
        FLAGS.model_dir,
        device=FLAGS.device,
        run_mode=FLAGS.run_mode,
        trt_calib_mode=FLAGS.trt_calib_mode,
        enable_mkldnn=FLAGS.enable_mkldnn,
        enable_mkldnn_bfloat16=FLAGS.enable_mkldnn_bfloat16)
    if config.arch == 'SOLOv2':
        detector = DetectorSOLOv2(
            config,
            FLAGS.model_dir,
            device=FLAGS.device,
            run_mode=FLAGS.run_mode,
            trt_calib_mode=FLAGS.trt_calib_mode,
            enable_mkldnn=FLAGS.enable_mkldnn,
            enable_mkldnn_bfloat16=FLAGS.enable_mkldnn_bfloat16)
    # predict from image
    if FLAGS.image_file != '':
        predict_image(detector)
    # predict from video file or camera video stream
    if FLAGS.video_file != '' or FLAGS.camera_id != -1:
        predict_video(detector, FLAGS.camera_id)


if __name__ == '__main__':
    try:
        paddle.enable_static()
    except Exception:
        pass
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--model_dir",
        type=str,
        default=None,
        help=("Directory containing '__model__', '__params__' and "
              "'infer_cfg.yml', created by tools/export_model.py."),
        required=True)
    parser.add_argument(
        "--image_file", type=str, default='', help="Path of image file.")
    parser.add_argument(
        "--video_file", type=str, default='', help="Path of video file.")
    parser.add_argument(
        "--camera_id",
        type=int,
        default=-1,
        help="device id of camera to predict.")
    parser.add_argument(
        "--run_mode",
        type=str,
        default='fluid',
        help="mode of running (fluid/trt_fp32/trt_fp16/trt_int8)")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Device to run on, one of CPU/GPU/XPU, default is CPU.")
    parser.add_argument(
        "--use_gpu",
        type=ast.literal_eval,
        default=False,
        help="Deprecated, please use `--device` to set the device you want to run."
    )
    parser.add_argument(
        "--run_benchmark",
        type=ast.literal_eval,
        default=False,
        help="Whether to predict an image_file repeatedly for benchmark")
    parser.add_argument(
        "--threshold", type=float, default=0.5, help="Threshold of score.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory of output visualization files.")
    parser.add_argument(
        "--trt_calib_mode",
        type=ast.literal_eval,
        default=False,
        help="If the model is produced by TRT offline quantitative "
        "calibration, trt_calib_mode needs to be set True.")
    parser.add_argument(
        "--enable_mkldnn",
        type=ast.literal_eval,
        default=False,
        help="Whether to use MKL-DNN with CPU.")
    parser.add_argument(
        "--enable_mkldnn_bfloat16",
        type=ast.literal_eval,
        default=False,
        help="Whether to use MKL-DNN bfloat16 with CPU.")
    FLAGS = parser.parse_args()
    print_arguments(FLAGS)
    assert not (FLAGS.image_file != '' and FLAGS.video_file != ''), \
        "Cannot predict image and video at the same time"
    FLAGS.device = FLAGS.device.upper()
    assert FLAGS.device in ['CPU', 'GPU', 'XPU'], \
        "device should be CPU, GPU or XPU"
    assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"
    assert not (not FLAGS.enable_mkldnn and FLAGS.enable_mkldnn_bfloat16), \
        "To turn on mkldnn_bfloat16, please set both enable_mkldnn and enable_mkldnn_bfloat16 True"
    main()
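

# Example invocations, assuming this file lives at deploy/python/infer.py (per
# the sys.path setup at the top); model and media paths are placeholders:
#
#     # image inference on GPU
#     python deploy/python/infer.py --model_dir=inference_model/yolov3_darknet \
#         --image_file=demo.jpg --device=GPU
#
#     # video inference with TensorRT FP16
#     python deploy/python/infer.py --model_dir=inference_model/yolov3_darknet \
#         --video_file=test.mp4 --device=GPU --run_mode=trt_fp16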