// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sstream>
// for setprecision
#include <chrono>
#include <iomanip>

#include "include/jde_detector.h"

using namespace paddle_infer;

namespace PaddleDetection {
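
// Summary of the inference configuration chosen in LoadModel() below
// (descriptive only, derived from the branches in the function):
//   - device_ == "GPU": GPU memory pool + IR optimization; for run_mode other
//     than "paddle", a TensorRT subgraph engine is added (FP32/FP16/INT8),
//     optionally with dynamic shapes for the "image" input.
//   - device_ == "XPU": XPU enabled with a ~10 MB workspace.
//   - otherwise: CPU, optionally with MKLDNN and a small shape cache.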
// Load model and create the model predictor
void JDEDetector::LoadModel(const std::string& model_dir,
                            const int batch_size,
                            const std::string& run_mode) {
  paddle_infer::Config config;
  std::string prog_file = model_dir + OS_PATH_SEP + "model.pdmodel";
  std::string params_file = model_dir + OS_PATH_SEP + "model.pdiparams";
  config.SetModel(prog_file, params_file);
  if (this->device_ == "GPU") {
    config.EnableUseGpu(200, this->gpu_id_);
    config.SwitchIrOptim(true);
    // use tensorrt
    if (run_mode != "paddle") {
      auto precision = paddle_infer::Config::Precision::kFloat32;
      if (run_mode == "trt_fp32") {
        precision = paddle_infer::Config::Precision::kFloat32;
      } else if (run_mode == "trt_fp16") {
        precision = paddle_infer::Config::Precision::kHalf;
      } else if (run_mode == "trt_int8") {
        precision = paddle_infer::Config::Precision::kInt8;
      } else {
        printf(
            "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or "
            "'trt_int8'");
      }
      // set tensorrt
      config.EnableTensorRtEngine(1 << 30,
                                  batch_size,
                                  this->min_subgraph_size_,
                                  precision,
                                  false,
                                  this->trt_calib_mode_);
      // set use dynamic shape
      if (this->use_dynamic_shape_) {
        // set dynamic shape for the image tensor
        const std::vector<int> min_input_shape = {
            1, 3, this->trt_min_shape_, this->trt_min_shape_};
        const std::vector<int> max_input_shape = {
            1, 3, this->trt_max_shape_, this->trt_max_shape_};
        const std::vector<int> opt_input_shape = {
            1, 3, this->trt_opt_shape_, this->trt_opt_shape_};
        const std::map<std::string, std::vector<int>> map_min_input_shape = {
            {"image", min_input_shape}};
        const std::map<std::string, std::vector<int>> map_max_input_shape = {
            {"image", max_input_shape}};
        const std::map<std::string, std::vector<int>> map_opt_input_shape = {
            {"image", opt_input_shape}};

        config.SetTRTDynamicShapeInfo(
            map_min_input_shape, map_max_input_shape, map_opt_input_shape);
        std::cout << "TensorRT dynamic shape enabled" << std::endl;
      }
    }
  } else if (this->device_ == "XPU") {
    config.EnableXpu(10 * 1024 * 1024);
  } else {
    config.DisableGpu();
    if (this->use_mkldnn_) {
      config.EnableMKLDNN();
      // cache 10 different shapes for mkldnn to avoid memory leak
      config.SetMkldnnCacheCapacity(10);
    }
    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
  }
  config.SwitchUseFeedFetchOps(false);
  config.SwitchIrOptim(true);
  config.DisableGlogInfo();
  // Memory optimization
  config.EnableMemoryOptim();
  predictor_ = std::move(CreatePredictor(config));
}
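
// Rough usage sketch, kept as a comment. How the detector is constructed and
// which members are publicly accessible is defined in include/jde_detector.h
// and is assumed here; only functions visible in this file are used.
//
//   PaddleDetection::JDEDetector detector(/* ctor args from the header */);
//   PaddleDetection::MOT_Result result;
//   std::vector<double> times(3, 0.0);
//   detector.Predict({frame}, 0.5, /*warmup=*/0, /*repeats=*/1, &result, &times);
//   cv::Mat vis =
//       PaddleDetection::VisualizeTrackResult(frame, result, fps, frame_id);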
// Visualization of tracking results
cv::Mat VisualizeTrackResult(const cv::Mat& img,
                             const MOT_Result& results,
                             const float fps,
                             const int frame_id) {
  cv::Mat vis_img = img.clone();
  int im_h = img.rows;
  int im_w = img.cols;
  float text_scale = std::max(1, int(im_w / 1600.));
  float text_thickness = 2.;
  float line_thickness = std::max(1, int(im_w / 500.));

  std::ostringstream oss;
  oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
  oss << "frame: " << frame_id << " ";
  oss << "fps: " << fps << " ";
  oss << "num: " << results.size();
  std::string text = oss.str();

  cv::Point origin;
  origin.x = 0;
  origin.y = int(15 * text_scale);
  cv::putText(vis_img,
              text,
              origin,
              cv::FONT_HERSHEY_PLAIN,
              text_scale,
              cv::Scalar(0, 0, 255),
              2);

  for (int i = 0; i < results.size(); ++i) {
    const int obj_id = results[i].ids;
    const float score = results[i].score;
    cv::Scalar color = GetColor(obj_id);

    cv::Point pt1 = cv::Point(results[i].rects.left, results[i].rects.top);
    cv::Point pt2 = cv::Point(results[i].rects.right, results[i].rects.bottom);
    cv::Point id_pt =
        cv::Point(results[i].rects.left, results[i].rects.top + 10);
    cv::Point score_pt =
        cv::Point(results[i].rects.left, results[i].rects.top - 10);
    cv::rectangle(vis_img, pt1, pt2, color, line_thickness);

    std::ostringstream idoss;
    idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
    idoss << obj_id;
    std::string id_text = idoss.str();
    cv::putText(vis_img,
                id_text,
                id_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                cv::Scalar(0, 255, 255),
                text_thickness);

    std::ostringstream soss;
    soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
    soss << score;
    std::string score_text = soss.str();
    cv::putText(vis_img,
                score_text,
                score_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                cv::Scalar(0, 255, 255),
                text_thickness);
  }
  return vis_img;
}
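
// Note on the detection layout assumed by FilterDets and Postprocess: each
// row of `dets` is one detection, columns 0-3 hold the box coordinates and
// column 4 holds the confidence score.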
void FilterDets(const float conf_thresh,
                const cv::Mat dets,
                std::vector<int>* index) {
  for (int i = 0; i < dets.rows; ++i) {
    float score = *dets.ptr<float>(i, 4);
    if (score > conf_thresh) {
      index->push_back(i);
    }
  }
}
void JDEDetector::Preprocess(const cv::Mat& ori_im) {
  // Clone the image: keep the original mat for postprocess
  cv::Mat im = ori_im.clone();
  preprocessor_.Run(&im, &inputs_);
}
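
// Postprocess: keep detections above conf_thresh_, update the JDE tracker
// with the filtered boxes and embeddings, then emit tracks whose score is at
// least threshold_, whose box area exceeds min_box_area_, and whose width /
// height ratio does not exceed 1.6. If the tracker returns no tracks, the
// first raw detection is emitted with id 1 as a fallback.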
void JDEDetector::Postprocess(const cv::Mat dets,
                              const cv::Mat emb,
                              MOT_Result* result) {
  result->clear();
  std::vector<Track> tracks;
  std::vector<int> valid;
  FilterDets(conf_thresh_, dets, &valid);
  cv::Mat new_dets, new_emb;
  for (int i = 0; i < valid.size(); ++i) {
    new_dets.push_back(dets.row(valid[i]));
    new_emb.push_back(emb.row(valid[i]));
  }
  JDETracker::instance()->update(new_dets, new_emb, tracks);
  if (tracks.size() == 0) {
    MOT_Track mot_track;
    MOT_Rect ret = {*dets.ptr<float>(0, 0),
                    *dets.ptr<float>(0, 1),
                    *dets.ptr<float>(0, 2),
                    *dets.ptr<float>(0, 3)};
    mot_track.ids = 1;
    mot_track.score = *dets.ptr<float>(0, 4);
    mot_track.rects = ret;
    result->push_back(mot_track);
  } else {
    std::vector<Track>::iterator titer;
    for (titer = tracks.begin(); titer != tracks.end(); ++titer) {
      if (titer->score < threshold_) {
        continue;
      } else {
        float w = titer->ltrb[2] - titer->ltrb[0];
        float h = titer->ltrb[3] - titer->ltrb[1];
        bool vertical = w / h > 1.6;
        float area = w * h;
        if (area > min_box_area_ && !vertical) {
          MOT_Track mot_track;
          MOT_Rect ret = {
              titer->ltrb[0], titer->ltrb[1], titer->ltrb[2], titer->ltrb[3]};
          mot_track.rects = ret;
          mot_track.score = titer->score;
          mot_track.ids = titer->id;
          result->push_back(mot_track);
        }
      }
    }
  }
}
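
// Predict: preprocess the batch and copy the "image", "im_shape" and
// "scale_factor" inputs to the predictor, run `warmup` untimed passes and
// `repeats` timed passes, wrap the bbox/embedding outputs as cv::Mat and hand
// them to Postprocess. Elapsed preprocess / inference / postprocess times are
// accumulated into (*times)[0..2] in milliseconds.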
void JDEDetector::Predict(const std::vector<cv::Mat> imgs,
                          const double threshold,
                          const int warmup,
                          const int repeats,
                          MOT_Result* result,
                          std::vector<double>* times) {
  auto preprocess_start = std::chrono::steady_clock::now();
  int batch_size = imgs.size();

  // in_data_batch
  std::vector<float> in_data_all;
  std::vector<float> im_shape_all(batch_size * 2);
  std::vector<float> scale_factor_all(batch_size * 2);

  // Preprocess image
  for (int bs_idx = 0; bs_idx < batch_size; bs_idx++) {
    cv::Mat im = imgs.at(bs_idx);
    Preprocess(im);
    im_shape_all[bs_idx * 2] = inputs_.im_shape_[0];
    im_shape_all[bs_idx * 2 + 1] = inputs_.im_shape_[1];

    scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
    scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];

    // TODO: reduce cost time
    in_data_all.insert(
        in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());
  }

  // Prepare input tensor
  auto input_names = predictor_->GetInputNames();
  for (const auto& tensor_name : input_names) {
    auto in_tensor = predictor_->GetInputHandle(tensor_name);
    if (tensor_name == "image") {
      int rh = inputs_.in_net_shape_[0];
      int rw = inputs_.in_net_shape_[1];
      in_tensor->Reshape({batch_size, 3, rh, rw});
      in_tensor->CopyFromCpu(in_data_all.data());
    } else if (tensor_name == "im_shape") {
      in_tensor->Reshape({batch_size, 2});
      in_tensor->CopyFromCpu(im_shape_all.data());
    } else if (tensor_name == "scale_factor") {
      in_tensor->Reshape({batch_size, 2});
      in_tensor->CopyFromCpu(scale_factor_all.data());
    }
  }
  auto preprocess_end = std::chrono::steady_clock::now();

  std::vector<int> bbox_shape;
  std::vector<int> emb_shape;

  // Run predictor
  // warmup
  for (int i = 0; i < warmup; i++) {
    predictor_->Run();
    // Get output tensor
    auto output_names = predictor_->GetOutputNames();
    auto bbox_tensor = predictor_->GetOutputHandle(output_names[0]);
    bbox_shape = bbox_tensor->shape();
    auto emb_tensor = predictor_->GetOutputHandle(output_names[1]);
    emb_shape = emb_tensor->shape();
    // Calculate bbox length
    int bbox_size = 1;
    for (int j = 0; j < bbox_shape.size(); ++j) {
      bbox_size *= bbox_shape[j];
    }
    // Calculate emb length
    int emb_size = 1;
    for (int j = 0; j < emb_shape.size(); ++j) {
      emb_size *= emb_shape[j];
    }

    bbox_data_.resize(bbox_size);
    bbox_tensor->CopyToCpu(bbox_data_.data());

    emb_data_.resize(emb_size);
    emb_tensor->CopyToCpu(emb_data_.data());
  }

  auto inference_start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeats; i++) {
    predictor_->Run();
    // Get output tensor
    auto output_names = predictor_->GetOutputNames();
    auto bbox_tensor = predictor_->GetOutputHandle(output_names[0]);
    bbox_shape = bbox_tensor->shape();
    auto emb_tensor = predictor_->GetOutputHandle(output_names[1]);
    emb_shape = emb_tensor->shape();
    // Calculate bbox length
    int bbox_size = 1;
    for (int j = 0; j < bbox_shape.size(); ++j) {
      bbox_size *= bbox_shape[j];
    }
    // Calculate emb length
    int emb_size = 1;
    for (int j = 0; j < emb_shape.size(); ++j) {
      emb_size *= emb_shape[j];
    }

    bbox_data_.resize(bbox_size);
    bbox_tensor->CopyToCpu(bbox_data_.data());

    emb_data_.resize(emb_size);
    emb_tensor->CopyToCpu(emb_data_.data());
  }
  auto inference_end = std::chrono::steady_clock::now();
  auto postprocess_start = std::chrono::steady_clock::now();

  // Postprocessing result
  result->clear();
  cv::Mat dets(bbox_shape[0], 6, CV_32FC1, bbox_data_.data());
  cv::Mat emb(bbox_shape[0], emb_shape[1], CV_32FC1, emb_data_.data());
  Postprocess(dets, emb, result);
  auto postprocess_end = std::chrono::steady_clock::now();

  std::chrono::duration<float> preprocess_diff =
      preprocess_end - preprocess_start;
  (*times)[0] += double(preprocess_diff.count() * 1000);
  std::chrono::duration<float> inference_diff = inference_end - inference_start;
  (*times)[1] += double(inference_diff.count() * 1000);
  std::chrono::duration<float> postprocess_diff =
      postprocess_end - postprocess_start;
  (*times)[2] += double(postprocess_diff.count() * 1000);
}
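
// GetColor maps a track id to a stable pseudo-random color via modular
// hashing, so the same id is drawn with the same color across frames.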
cv::Scalar GetColor(int idx) {
  idx = idx * 3;
  cv::Scalar color =
      cv::Scalar((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255);
  return color;
}

}  // namespace PaddleDetection