pipeline.py

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import copy
import math
from collections import defaultdict

import cv2
import numpy as np
import paddle

from reid import ReID
from datacollector import DataCollector, Result
from mtmct import mtmct_process

# add deploy path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)

from python.infer import Detector, DetectorPicoDet
from python.attr_infer import AttrDetector
from python.keypoint_infer import KeyPointDetector
from python.keypoint_postprocess import translate_to_ori_images
from python.action_infer import ActionRecognizer
from python.action_utils import KeyPointBuff, ActionVisualHelper
from pipe_utils import argsparser, print_arguments, merge_cfg, PipeTimer
from pipe_utils import get_test_images, crop_image_with_det, crop_image_with_mot, parse_mot_res, parse_mot_keypoint
from python.preprocess import decode_image
from python.visualize import visualize_box_mask, visualize_attr, visualize_pose, visualize_action
from pptracking.python.mot_sde_infer import SDE_Detector
from pptracking.python.mot.visualize import plot_tracking_dict
from pptracking.python.mot.utils import flow_statistic


class Pipeline(object):
    """
    Pipeline

    Args:
        cfg (dict): config of models in pipeline
        image_file (string|None): the path of image file, default as None
        image_dir (string|None): the path of image directory, if not None,
            then all the images in directory will be predicted, default as None
        video_file (string|None): the path of video file, default as None
        video_dir (string|None): the path of video directory, if not None,
            then all the videos in directory will be predicted, default as None
        camera_id (int): the device id of camera to predict, default as -1
        enable_attr (bool): whether to use attribute recognition, default as False
        enable_action (bool): whether to use action recognition, default as True
        device (string): the device to predict, options are: CPU/GPU/XPU,
            default as CPU
        run_mode (string): the mode of prediction, options are:
            paddle/trt_fp32/trt_fp16, default as paddle
        trt_min_shape (int): min shape for dynamic shape in trt, default as 1
        trt_max_shape (int): max shape for dynamic shape in trt, default as 1280
        trt_opt_shape (int): opt shape for dynamic shape in trt, default as 640
        trt_calib_mode (bool): if the model is produced by TRT offline quantitative
            calibration, trt_calib_mode needs to be set to True, default as False
        cpu_threads (int): cpu threads, default as 1
        enable_mkldnn (bool): whether to enable MKLDNN, default as False
        output_dir (string): the path of output, default as 'output'
        draw_center_traj (bool): whether to draw the trajectory of box centers, default as False
        secs_interval (int): the seconds interval to count after tracking, default as 10
        do_entrance_counting (bool): whether to count the number of identifiers entering
            or leaving through the entrance, default as False; only single-class
            counting is supported in MOT
    """
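
    # A minimal usage sketch, assuming cfg is the dict produced by merge_cfg
    # from the deploy config (e.g. infer_cfg.yml) and carries the keys this
    # pipeline reads ('visual', 'warmup_frame' and the model sections):
    #
    #     cfg = merge_cfg(FLAGS)
    #     pipeline = Pipeline(cfg, video_file='demo.mp4', device='GPU')
    #     pipeline.run()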

    def __init__(self,
                 cfg,
                 image_file=None,
                 image_dir=None,
                 video_file=None,
                 video_dir=None,
                 camera_id=-1,
                 enable_attr=False,
                 enable_action=True,
                 device='CPU',
                 run_mode='paddle',
                 trt_min_shape=1,
                 trt_max_shape=1280,
                 trt_opt_shape=640,
                 trt_calib_mode=False,
                 cpu_threads=1,
                 enable_mkldnn=False,
                 output_dir='output',
                 draw_center_traj=False,
                 secs_interval=10,
                 do_entrance_counting=False):
        self.multi_camera = False
        self.is_video = False
        self.output_dir = output_dir
        self.vis_result = cfg['visual']
        self.input = self._parse_input(image_file, image_dir, video_file,
                                       video_dir, camera_id)
        if self.multi_camera:
            self.predictor = []
            for name in self.input:
                predictor_item = PipePredictor(
                    cfg,
                    is_video=True,
                    multi_camera=True,
                    enable_attr=enable_attr,
                    enable_action=enable_action,
                    device=device,
                    run_mode=run_mode,
                    trt_min_shape=trt_min_shape,
                    trt_max_shape=trt_max_shape,
                    trt_opt_shape=trt_opt_shape,
                    trt_calib_mode=trt_calib_mode,
                    cpu_threads=cpu_threads,
                    enable_mkldnn=enable_mkldnn,
                    output_dir=output_dir)
                predictor_item.set_file_name(name)
                self.predictor.append(predictor_item)
        else:
            self.predictor = PipePredictor(
                cfg,
                self.is_video,
                enable_attr=enable_attr,
                enable_action=enable_action,
                device=device,
                run_mode=run_mode,
                trt_min_shape=trt_min_shape,
                trt_max_shape=trt_max_shape,
                trt_opt_shape=trt_opt_shape,
                trt_calib_mode=trt_calib_mode,
                cpu_threads=cpu_threads,
                enable_mkldnn=enable_mkldnn,
                output_dir=output_dir,
                draw_center_traj=draw_center_traj,
                secs_interval=secs_interval,
                do_entrance_counting=do_entrance_counting)
            if self.is_video:
                self.predictor.set_file_name(video_file)

        self.output_dir = output_dir
        self.draw_center_traj = draw_center_traj
        self.secs_interval = secs_interval
        self.do_entrance_counting = do_entrance_counting

    def _parse_input(self, image_file, image_dir, video_file, video_dir,
                     camera_id):
        # parse input as is_video and multi_camera
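        # Input-to-mode mapping (for reference):
        #   image_file / image_dir  -> is_video=False, multi_camera=False
        #   video_file              -> is_video=True,  multi_camera=False
        #   video_dir with >1 file  -> is_video=True,  multi_camera=True
        #                              (a single file behaves like video_file)
        #   camera_id != -1         -> is_video=True,  multi_camera=False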
        if image_file is not None or image_dir is not None:
            input = get_test_images(image_dir, image_file)
            self.is_video = False
            self.multi_camera = False

        elif video_file is not None:
            assert os.path.exists(video_file), "video_file does not exist."
            self.multi_camera = False
            input = video_file
            self.is_video = True

        elif video_dir is not None:
            videof = [os.path.join(video_dir, x) for x in os.listdir(video_dir)]
            if len(videof) > 1:
                self.multi_camera = True
                videof.sort()
                input = videof
            else:
                input = videof[0]
            self.is_video = True

        elif camera_id != -1:
            self.multi_camera = False
            input = camera_id
            self.is_video = True

        else:
            raise ValueError(
                "Illegal input, please set one of ['image_file', 'image_dir', 'video_file', 'video_dir', 'camera_id']"
            )

        return input

    def run(self):
        if self.multi_camera:
            multi_res = []
            for predictor, input in zip(self.predictor, self.input):
                predictor.run(input)
                collector_data = predictor.get_result()
                multi_res.append(collector_data)
            mtmct_process(
                multi_res,
                self.input,
                mtmct_vis=self.vis_result,
                output_dir=self.output_dir)
        else:
            self.predictor.run(self.input)


class PipePredictor(object):
    """
    Predictor in single camera

    The pipeline for image input:
        1. Detection
        2. Detection -> Attribute

    The pipeline for video input:
        1. Tracking
        2. Tracking -> Attribute
        3. Tracking -> KeyPoint -> Action Recognition

    Args:
        cfg (dict): config of models in pipeline
        is_video (bool): whether the input is video, default as True
        multi_camera (bool): whether to use multi camera in pipeline,
            default as False
        enable_attr (bool): whether to use attribute recognition, default as False
        enable_action (bool): whether to use action recognition, default as False
        device (string): the device to predict, options are: CPU/GPU/XPU,
            default as CPU
        run_mode (string): the mode of prediction, options are:
            paddle/trt_fp32/trt_fp16, default as paddle
        trt_min_shape (int): min shape for dynamic shape in trt, default as 1
        trt_max_shape (int): max shape for dynamic shape in trt, default as 1280
        trt_opt_shape (int): opt shape for dynamic shape in trt, default as 640
        trt_calib_mode (bool): if the model is produced by TRT offline quantitative
            calibration, trt_calib_mode needs to be set to True, default as False
        cpu_threads (int): cpu threads, default as 1
        enable_mkldnn (bool): whether to enable MKLDNN, default as False
        output_dir (string): the path of output, default as 'output'
        draw_center_traj (bool): whether to draw the trajectory of box centers, default as False
        secs_interval (int): the seconds interval to count after tracking, default as 10
        do_entrance_counting (bool): whether to count the number of identifiers entering
            or leaving through the entrance, default as False; only single-class
            counting is supported in MOT
    """
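
    # PipePredictor is normally constructed by Pipeline; a minimal standalone
    # sketch for a single video, assuming cfg carries a 'MOT' section plus
    # 'visual' and 'warmup_frame':
    #
    #     predictor = PipePredictor(cfg, is_video=True)
    #     predictor.set_file_name('demo.mp4')
    #     predictor.run('demo.mp4')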

    def __init__(self,
                 cfg,
                 is_video=True,
                 multi_camera=False,
                 enable_attr=False,
                 enable_action=False,
                 device='CPU',
                 run_mode='paddle',
                 trt_min_shape=1,
                 trt_max_shape=1280,
                 trt_opt_shape=640,
                 trt_calib_mode=False,
                 cpu_threads=1,
                 enable_mkldnn=False,
                 output_dir='output',
                 draw_center_traj=False,
                 secs_interval=10,
                 do_entrance_counting=False):
        if enable_attr and not cfg.get('ATTR', False):
            raise ValueError(
                'enable_attr is set to True, please set ATTR in config file')
        if enable_action and (not cfg.get('ACTION', False) or
                              not cfg.get('KPT', False)):
            raise ValueError(
                'enable_action is set to True, please set KPT and ACTION in config file'
            )

        self.with_attr = cfg.get('ATTR', False) and enable_attr
        self.with_action = cfg.get('ACTION', False) and enable_action
        self.with_mtmct = cfg.get('REID', False) and multi_camera
        if self.with_attr:
            print('Attribute Recognition enabled')
        if self.with_action:
            print('Action Recognition enabled')
        if multi_camera:
            if not self.with_mtmct:
                print(
                    'Warning!!! Multi-camera input is given but no REID config is found in [infer_cfg.yml], so MTMCT is disabled, please check!'
                )
            else:
                print("MTMCT enabled")

        self.is_video = is_video
        self.multi_camera = multi_camera
        self.cfg = cfg
        self.output_dir = output_dir
        self.draw_center_traj = draw_center_traj
        self.secs_interval = secs_interval
        self.do_entrance_counting = do_entrance_counting

        self.warmup_frame = self.cfg['warmup_frame']
        self.pipeline_res = Result()
        self.pipe_timer = PipeTimer()
        self.file_name = None
        self.collector = DataCollector()

        if not is_video:
            det_cfg = self.cfg['DET']
            model_dir = det_cfg['model_dir']
            batch_size = det_cfg['batch_size']
            self.det_predictor = Detector(
                model_dir, device, run_mode, batch_size, trt_min_shape,
                trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,
                enable_mkldnn)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(
                    model_dir, device, run_mode, batch_size, trt_min_shape,
                    trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,
                    enable_mkldnn)
        else:
            mot_cfg = self.cfg['MOT']
            model_dir = mot_cfg['model_dir']
            tracker_config = mot_cfg['tracker_config']
            batch_size = mot_cfg['batch_size']
            self.mot_predictor = SDE_Detector(
                model_dir,
                tracker_config,
                device,
                run_mode,
                batch_size,
                trt_min_shape,
                trt_max_shape,
                trt_opt_shape,
                trt_calib_mode,
                cpu_threads,
                enable_mkldnn,
                draw_center_traj=draw_center_traj,
                secs_interval=secs_interval,
                do_entrance_counting=do_entrance_counting)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(
                    model_dir, device, run_mode, batch_size, trt_min_shape,
                    trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,
                    enable_mkldnn)
            if self.with_action:
                kpt_cfg = self.cfg['KPT']
                kpt_model_dir = kpt_cfg['model_dir']
                kpt_batch_size = kpt_cfg['batch_size']
                action_cfg = self.cfg['ACTION']
                action_model_dir = action_cfg['model_dir']
                action_batch_size = action_cfg['batch_size']
                action_frames = action_cfg['max_frames']
                display_frames = action_cfg['display_frames']
                self.coord_size = action_cfg['coord_size']

                self.kpt_predictor = KeyPointDetector(
                    kpt_model_dir,
                    device,
                    run_mode,
                    kpt_batch_size,
                    trt_min_shape,
                    trt_max_shape,
                    trt_opt_shape,
                    trt_calib_mode,
                    cpu_threads,
                    enable_mkldnn,
                    use_dark=False)
                self.kpt_buff = KeyPointBuff(action_frames)

                self.action_predictor = ActionRecognizer(
                    action_model_dir,
                    device,
                    run_mode,
                    action_batch_size,
                    trt_min_shape,
                    trt_max_shape,
                    trt_opt_shape,
                    trt_calib_mode,
                    cpu_threads,
                    enable_mkldnn,
                    window_size=action_frames)

                self.action_visual_helper = ActionVisualHelper(display_frames)

        if self.with_mtmct:
            reid_cfg = self.cfg['REID']
            model_dir = reid_cfg['model_dir']
            batch_size = reid_cfg['batch_size']
            self.reid_predictor = ReID(model_dir, device, run_mode, batch_size,
                                       trt_min_shape, trt_max_shape,
                                       trt_opt_shape, trt_calib_mode,
                                       cpu_threads, enable_mkldnn)

    def set_file_name(self, path):
        if path is not None:
            self.file_name = os.path.split(path)[-1]
        else:
            # use camera id
            self.file_name = None

    def get_result(self):
        return self.collector.get_res()

    def run(self, input):
        if self.is_video:
            self.predict_video(input)
        else:
            self.predict_image(input)
        self.pipe_timer.info()

    def predict_image(self, input):
        # det
        # det -> attr
        batch_loop_cnt = math.ceil(
            float(len(input)) / self.det_predictor.batch_size)
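        # e.g. 10 images with batch_size 4 -> ceil(10 / 4) = 3 loops; the last
        # batch holds the remaining 2 images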
        for i in range(batch_loop_cnt):
            start_index = i * self.det_predictor.batch_size
            end_index = min((i + 1) * self.det_predictor.batch_size, len(input))
            batch_file = input[start_index:end_index]
            batch_input = [decode_image(f, {})[0] for f in batch_file]

            if i > self.warmup_frame:
                self.pipe_timer.total_time.start()
                self.pipe_timer.module_time['det'].start()
            # det output format: class, score, xmin, ymin, xmax, ymax
            det_res = self.det_predictor.predict_image(
                batch_input, visual=False)
            det_res = self.det_predictor.filter_box(det_res,
                                                    self.cfg['crop_thresh'])
            if i > self.warmup_frame:
                self.pipe_timer.module_time['det'].end()
            self.pipeline_res.update(det_res, 'det')

            if self.with_attr:
                crop_inputs = crop_image_with_det(batch_input, det_res)
                attr_res_list = []

                if i > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].start()
                for crop_input in crop_inputs:
                    attr_res = self.attr_predictor.predict_image(
                        crop_input, visual=False)
                    attr_res_list.extend(attr_res['output'])
                if i > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].end()

                attr_res = {'output': attr_res_list}
                self.pipeline_res.update(attr_res, 'attr')

            self.pipe_timer.img_num += len(batch_input)
            if i > self.warmup_frame:
                self.pipe_timer.total_time.end()

            if self.cfg['visual']:
                self.visualize_image(batch_file, batch_input, self.pipeline_res)

    def predict_video(self, video_file):
        # mot
        # mot -> attr
        # mot -> pose -> action
        capture = cv2.VideoCapture(video_file)
        video_out_name = 'output.mp4' if self.file_name is None else self.file_name

        # get video info: resolution, fps, frame count
        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(capture.get(cv2.CAP_PROP_FPS))
        frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        print("video fps: %d, frame_count: %d" % (fps, frame_count))

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        out_path = os.path.join(self.output_dir, video_out_name)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
        frame_id = 0

        entrance, records, center_traj = None, None, None
        if self.draw_center_traj:
            center_traj = [{}]
        id_set = set()
        interval_id_set = set()
        in_id_list = list()
        out_id_list = list()
        prev_center = dict()
        records = list()
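        # the default entrance used for in/out counting is a horizontal line
        # across the middle of the frame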
        entrance = [0, height / 2., width, height / 2.]
        video_fps = fps

        while True:
            if frame_id % 10 == 0:
                print('frame id: ', frame_id)

            ret, frame = capture.read()
            if not ret:
                break

            if frame_id > self.warmup_frame:
                self.pipe_timer.total_time.start()
                self.pipe_timer.module_time['mot'].start()
            res = self.mot_predictor.predict_image(
                [copy.deepcopy(frame)], visual=False)
            if frame_id > self.warmup_frame:
                self.pipe_timer.module_time['mot'].end()

            # mot output format: id, class, score, xmin, ymin, xmax, ymax
            mot_res = parse_mot_res(res)

            # flow_statistic only supports single-class MOT
            boxes, scores, ids = res[0]  # batch size = 1 in MOT
            mot_result = (frame_id + 1, boxes[0], scores[0],
                          ids[0])  # single class
            statistic = flow_statistic(
                mot_result, self.secs_interval, self.do_entrance_counting,
                video_fps, entrance, id_set, interval_id_set, in_id_list,
                out_id_list, prev_center, records)
            records = statistic['records']

            # nothing detected
            if len(mot_res['boxes']) == 0:
                frame_id += 1
                if frame_id > self.warmup_frame:
                    self.pipe_timer.img_num += 1
                    self.pipe_timer.total_time.end()
                if self.cfg['visual']:
                    _, _, fps = self.pipe_timer.get_total_time()
                    im = self.visualize_video(frame, mot_res, frame_id, fps,
                                              entrance, records,
                                              center_traj)  # visualize
                    writer.write(im)
                    if self.file_name is None:  # use camera_id
                        cv2.imshow('PPHuman', im)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
                continue

            self.pipeline_res.update(mot_res, 'mot')
            if self.with_attr or self.with_action:
                crop_input, new_bboxes, ori_bboxes = crop_image_with_mot(
                    frame, mot_res)

            if self.with_attr:
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].start()
                attr_res = self.attr_predictor.predict_image(
                    crop_input, visual=False)
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].end()
                self.pipeline_res.update(attr_res, 'attr')

            if self.with_action:
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['kpt'].start()
                kpt_pred = self.kpt_predictor.predict_image(
                    crop_input, visual=False)
                keypoint_vector, score_vector = translate_to_ori_images(
                    kpt_pred, np.array(new_bboxes))
                kpt_res = {}
                kpt_res['keypoint'] = [
                    keypoint_vector.tolist(), score_vector.tolist()
                ] if len(keypoint_vector) > 0 else [[], []]
                kpt_res['bbox'] = ori_bboxes
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['kpt'].end()

                self.pipeline_res.update(kpt_res, 'kpt')

                self.kpt_buff.update(kpt_res, mot_res)  # collect kpt output
                state = self.kpt_buff.get_state(
                )  # whether the frame count is enough or the tracker is lost

                action_res = {}
                if state:
                    if frame_id > self.warmup_frame:
                        self.pipe_timer.module_time['action'].start()
                    collected_keypoint = self.kpt_buff.get_collected_keypoint(
                    )  # reorganize kpt output by tracking id
                    action_input = parse_mot_keypoint(collected_keypoint,
                                                      self.coord_size)
                    action_res = self.action_predictor.predict_skeleton_with_mot(
                        action_input)
                    if frame_id > self.warmup_frame:
                        self.pipe_timer.module_time['action'].end()
                self.pipeline_res.update(action_res, 'action')

                if self.cfg['visual']:
                    self.action_visual_helper.update(action_res)
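            # MTMCT: ReID features are extracted only on every 10th frame;
            # other frames clear the stale 'reid' result instead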
            if self.with_mtmct and frame_id % 10 == 0:
                crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(
                    frame, mot_res)
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['reid'].start()
                reid_res = self.reid_predictor.predict_batch(crop_input)

                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['reid'].end()

                reid_res_dict = {
                    'features': reid_res,
                    "qualities": img_qualities,
                    "rects": rects
                }
                self.pipeline_res.update(reid_res_dict, 'reid')
            else:
                self.pipeline_res.clear('reid')

            self.collector.append(frame_id, self.pipeline_res)

            if frame_id > self.warmup_frame:
                self.pipe_timer.img_num += 1
                self.pipe_timer.total_time.end()
            frame_id += 1

            if self.cfg['visual']:
                _, _, fps = self.pipe_timer.get_total_time()
                im = self.visualize_video(frame, self.pipeline_res, frame_id,
                                          fps, entrance, records,
                                          center_traj)  # visualize
                writer.write(im)
                if self.file_name is None:  # use camera_id
                    cv2.imshow('PPHuman', im)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

        writer.release()
        print('save result to {}'.format(out_path))

    def visualize_video(self,
                        image,
                        result,
                        frame_id,
                        fps,
                        entrance=None,
                        records=None,
                        center_traj=None):
        mot_res = copy.deepcopy(result.get('mot'))
        if mot_res is not None:
            ids = mot_res['boxes'][:, 0]
            scores = mot_res['boxes'][:, 2]
            boxes = mot_res['boxes'][:, 3:]
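            # convert [xmin, ymin, xmax, ymax] to [x, y, w, h] (tlwh), the
            # format plot_tracking_dict expects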
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        else:
            boxes = np.zeros([0, 4])
            ids = np.zeros([0])
            scores = np.zeros([0])

        # single class; still needs to be of defaultdict type for plotting
        num_classes = 1
        online_tlwhs = defaultdict(list)
        online_scores = defaultdict(list)
        online_ids = defaultdict(list)
        online_tlwhs[0] = boxes
        online_scores[0] = scores
        online_ids[0] = ids
        image = plot_tracking_dict(
            image,
            num_classes,
            online_tlwhs,
            online_ids,
            online_scores,
            frame_id=frame_id,
            fps=fps,
            do_entrance_counting=self.do_entrance_counting,
            entrance=entrance,
            records=records,
            center_traj=center_traj)

        attr_res = result.get('attr')
        if attr_res is not None:
            boxes = mot_res['boxes'][:, 1:]
            attr_res = attr_res['output']
            image = visualize_attr(image, attr_res, boxes)
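            # visualize_attr may return a PIL Image; convert back to an
            # ndarray so the visualizers below can draw on it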
            image = np.array(image)

        kpt_res = result.get('kpt')
        if kpt_res is not None:
            image = visualize_pose(
                image,
                kpt_res,
                visual_thresh=self.cfg['kpt_thresh'],
                returnimg=True)

        action_res = result.get('action')
        if action_res is not None:
            image = visualize_action(image, mot_res['boxes'],
                                     self.action_visual_helper, "Falling")

        return image

    def visualize_image(self, im_files, images, result):
        start_idx, boxes_num_i = 0, 0
        det_res = result.get('det')
        attr_res = result.get('attr')

        for i, (im_file, im) in enumerate(zip(im_files, images)):
            if det_res is not None:
                det_res_i = {}
                boxes_num_i = det_res['boxes_num'][i]
                det_res_i['boxes'] = det_res['boxes'][start_idx:start_idx +
                                                      boxes_num_i, :]
                im = visualize_box_mask(
                    im,
                    det_res_i,
                    labels=['person'],
                    threshold=self.cfg['crop_thresh'])
                im = np.ascontiguousarray(np.copy(im))
                im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
            if attr_res is not None:
                attr_res_i = attr_res['output'][start_idx:start_idx +
                                                boxes_num_i]
                im = visualize_attr(im, attr_res_i, det_res_i['boxes'])

            img_name = os.path.split(im_file)[-1]
            if not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            out_path = os.path.join(self.output_dir, img_name)
            cv2.imwrite(out_path, im)
            print("save result to: " + out_path)
            start_idx += boxes_num_i


def main():
    cfg = merge_cfg(FLAGS)
    print_arguments(cfg)
    pipeline = Pipeline(
        cfg, FLAGS.image_file, FLAGS.image_dir, FLAGS.video_file,
        FLAGS.video_dir, FLAGS.camera_id, FLAGS.enable_attr,
        FLAGS.enable_action, FLAGS.device, FLAGS.run_mode, FLAGS.trt_min_shape,
        FLAGS.trt_max_shape, FLAGS.trt_opt_shape, FLAGS.trt_calib_mode,
        FLAGS.cpu_threads, FLAGS.enable_mkldnn, FLAGS.output_dir,
        FLAGS.draw_center_traj, FLAGS.secs_interval, FLAGS.do_entrance_counting)

    pipeline.run()


if __name__ == '__main__':
    paddle.enable_static()
    parser = argsparser()
    FLAGS = parser.parse_args()
    FLAGS.device = FLAGS.device.upper()
    assert FLAGS.device in ['CPU', 'GPU', 'XPU'
                            ], "device should be CPU, GPU or XPU"

    main()