# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import cv2
import numpy as np

# add deploy path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)

from python.infer import PredictConfig
from pptracking.python.det_infer import load_predictor
from python.utils import Timer


class ReID(object):
    """
    ReID of SDE methods

    Args:
        pred_config (object): config of model, defined by `Config(model_dir)`
        model_dir (str): root path of model.pdiparams, model.pdmodel and
            infer_cfg.yml
        device (str): the device to run on, one of CPU/GPU/XPU; default is CPU
        run_mode (str): running mode (paddle/trt_fp32/trt_fp16)
        batch_size (int): inference batch size; the default of 50 means at most
            50 cropped sub-images are batched into one ReID forward pass
        trt_min_shape (int): min shape for dynamic shape in trt
        trt_max_shape (int): max shape for dynamic shape in trt
        trt_opt_shape (int): opt shape for dynamic shape in trt
        trt_calib_mode (bool): if the model is produced by TRT offline
            quantization calibration, trt_calib_mode should be set to True
        cpu_threads (int): number of cpu threads
        enable_mkldnn (bool): whether to enable MKLDNN
    """

    def __init__(self,
                 model_dir,
                 device='CPU',
                 run_mode='paddle',
                 batch_size=50,
                 trt_min_shape=1,
                 trt_max_shape=1088,
                 trt_opt_shape=608,
                 trt_calib_mode=False,
                 cpu_threads=4,
                 enable_mkldnn=False):
        self.pred_config = self.set_config(model_dir)
        self.predictor, self.config = load_predictor(
            model_dir,
            run_mode=run_mode,
            batch_size=batch_size,
            min_subgraph_size=self.pred_config.min_subgraph_size,
            device=device,
            use_dynamic_shape=self.pred_config.use_dynamic_shape,
            trt_min_shape=trt_min_shape,
            trt_max_shape=trt_max_shape,
            trt_opt_shape=trt_opt_shape,
            trt_calib_mode=trt_calib_mode,
            cpu_threads=cpu_threads,
            enable_mkldnn=enable_mkldnn)
        self.det_times = Timer()
        self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0
        self.batch_size = batch_size
        self.input_wh = (128, 256)

    def set_config(self, model_dir):
        return PredictConfig(model_dir)

    def check_img_quality(self, crop, bbox, xyxy):
        if crop is None:
            return None
        # occlusion ("eclipse") quality: penalize crops that are heavily
        # overlapped by other detection boxes
        eclipse_quality = 1.0
        inner_rect = np.zeros(xyxy.shape)
        inner_rect[:, :2] = np.maximum(xyxy[:, :2], bbox[None, :2])
        inner_rect[:, 2:] = np.minimum(xyxy[:, 2:], bbox[None, 2:])
        wh_array = inner_rect[:, 2:] - inner_rect[:, :2]
        filt = np.logical_and(wh_array[:, 0] > 0, wh_array[:, 1] > 0)
        wh_array = wh_array[filt]
        if wh_array.shape[0] > 1:
            eclipse_ratio = wh_array / (bbox[2:] - bbox[:2])
            eclipse_area_ratio = eclipse_ratio[:, 0] * eclipse_ratio[:, 1]
            ear_lst = eclipse_area_ratio.tolist()
            ear_lst.sort(reverse=True)
            # the largest overlap ratio is the box with itself,
            # so score against the second largest
            eclipse_quality = 1.0 - ear_lst[1]
        bbox_wh = bbox[2:] - bbox[:2]
        height_quality = bbox_wh[1] / (bbox_wh[0] * 2)
        eclipse_quality = min(eclipse_quality, height_quality)

        # definition (sharpness) and brightness quality
        cropgray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
        # note: definition is computed but currently unused in the final score
        definition = int(cv2.Laplacian(cropgray, cv2.CV_64F, ksize=3).var())
        brightness = int(cropgray.mean())
        bd_quality = min(1., brightness / 50.)

        eclipse_weight = 0.7
        return eclipse_quality * eclipse_weight + bd_quality * (
            1 - eclipse_weight)
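
    # Worked example (illustrative values, not from the pipeline): with
    # eclipse_quality = 0.8 and bd_quality = 1.0, the returned score is
    # 0.8 * 0.7 + 1.0 * 0.3 = 0.86.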

    def normal_crop(self, image, rect):
        imgh, imgw, c = image.shape
        label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]
        xmin = max(0, xmin)
        ymin = max(0, ymin)
        xmax = min(imgw, xmax)
        ymax = min(imgh, ymax)
        if label != 0 or xmax <= xmin or ymax <= ymin:
            print("Warning! label missed!!")
            # return a single None so callers' `is not None` checks work
            return None
        return image[ymin:ymax, xmin:xmax, :]

    def crop_image_with_mot(self, image, mot_res):
        # each row of mot_res['boxes']: [mot_id, label, conf, xmin, ymin, xmax, ymax]
        res = mot_res['boxes']
        crop_res = []
        img_quality = []
        rects = []
        for box in res:
            crop_image = self.normal_crop(image, box[1:])
            quality_item = self.check_img_quality(crop_image, box[3:],
                                                  res[:, 3:])
            if crop_image is not None:
                crop_res.append(crop_image)
                img_quality.append(quality_item)
                rects.append(box)
        return crop_res, img_quality, rects

    def preprocess(self,
                   imgs,
                   mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225]):
        im_batch = []
        for img in imgs:
            img = cv2.resize(img, self.input_wh)
            # normalize with ImageNet mean/std after scaling to [0, 1]
            img = img.astype('float32') / 255.
            img -= np.array(mean)
            img /= np.array(std)
            im_batch.append(img.transpose((2, 0, 1)))  # HWC -> CHW
        inputs = {}
        inputs['x'] = np.array(im_batch).astype('float32')
        return inputs
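
    # Shape note (illustrative): with input_wh = (128, 256), N crops yield
    # inputs['x'] of shape (N, 3, 256, 128), since cv2.resize takes a
    # (width, height) tuple and the transpose converts HWC to CHW.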

    def predict(self, crops, repeats=1, add_timer=True, seq_name=''):
        # preprocess
        if add_timer:
            self.det_times.preprocess_time_s.start()
        inputs = self.preprocess(crops)
        input_names = self.predictor.get_input_names()
        for i in range(len(input_names)):
            input_tensor = self.predictor.get_input_handle(input_names[i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])
        if add_timer:
            self.det_times.preprocess_time_s.end()
            self.det_times.inference_time_s.start()

        # model prediction
        for i in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            feature_tensor = self.predictor.get_output_handle(output_names[0])
            pred_embs = feature_tensor.copy_to_cpu()
        if add_timer:
            self.det_times.inference_time_s.end(repeats=repeats)
            self.det_times.postprocess_time_s.start()

        if add_timer:
            self.det_times.postprocess_time_s.end()
            self.det_times.img_num += 1
        return pred_embs

    def predict_batch(self, imgs, batch_size=4):
        batch_feat = []
        for b in range(0, len(imgs), batch_size):
            b_end = min(len(imgs), b + batch_size)
            batch_imgs = imgs[b:b_end]
            feat = self.predict(batch_imgs)
            batch_feat.extend(feat.tolist())
        return batch_feat
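

# Minimal usage sketch (an illustrative assumption, not part of the original
# file): the model directory, image path, and MOT result below are
# placeholders. A real run needs an exported ReID model (model.pdmodel,
# model.pdiparams, infer_cfg.yml) and tracker output in the
# {'boxes': np.ndarray} format consumed by crop_image_with_mot.
if __name__ == '__main__':
    reid = ReID(model_dir='output_inference/reid_model', device='CPU')
    image = cv2.imread('demo_frame.jpg')
    # each row: [mot_id, label, conf, xmin, ymin, xmax, ymax]
    mot_res = {
        'boxes': np.array(
            [[1, 0, 0.9, 10, 20, 110, 220]], dtype='float32')
    }
    crops, img_quality, rects = reid.crop_image_with_mot(image, mot_res)
    if crops:
        emb_list = reid.predict_batch(crops, batch_size=reid.batch_size)
        print('extracted %d embeddings' % len(emb_list))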