inferencer.hpp

/*************************************************************************
 * Copyright (C) [2019] by Cambricon, Inc. All rights reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *************************************************************************/
#ifndef MODULES_INFERENCE_INCLUDE_INFERENCER_HPP_
#define MODULES_INFERENCE_INCLUDE_INFERENCER_HPP_

/**
 * @file inferencer.hpp
 *
 * This file contains the declaration of the Inferencer class and its helper types.
 */

#include <memory>
#include <string>
#include <thread>
#include <vector>

#include "cnstream_module.hpp"
#include "exception.hpp"
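
// The two macros below implement a Qt-style d-pointer (pimpl) idiom:
// DECLARE_PRIVATE embeds a pointer to the private implementation class and
// befriends it; DECLARE_PUBLIC gives the implementation class a back-pointer
// to its public counterpart.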
#define DECLARE_PRIVATE(d_ptr, Class) \
  friend class Class##Private;        \
  Class##Private* d_ptr = nullptr;

#define DECLARE_PUBLIC(q_ptr, Class) \
  friend class Class;                \
  Class* q_ptr = nullptr;

namespace cnstream {

CNSTREAM_REGISTER_EXCEPTION(Inferencer);

class InferencerPrivate;
class InferParamManager;

/**
 * @class Inferencer
 *
 * @brief Inferencer is a module that runs an offline model to perform inference.
 * The input data can come from the Decoder or other plugins, in MLU memory or
 * CPU memory. If the ``preproc_name`` parameter is set to ``PreprocCpu`` in the
 * Open function or the configuration file, the CPU is used for image
 * preprocessing. Otherwise, if the ``preproc_name`` parameter is not set, the
 * MLU is used for image preprocessing. Image preprocessing includes data shape
 * resizing and color space conversion.
 * Afterwards, inference is performed with the offline model loaded from the
 * model path.
 *
 * @attention
 * An error log is reported in either of the following two cases when the MLU
 * is used for preprocessing:
 * case 1: The scale-up factor is greater than 100.
 * case 2: The image width before resizing is greater than 7680.
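 *
 * A minimal usage sketch (the model path and postprocessing class name below
 * are hypothetical placeholders; in practice the pipeline usually constructs
 * and drives this module):
 * @code
 *   cnstream::Inferencer infer("infer");
 *   cnstream::ModuleParamSet params;
 *   params["model_path"] = "resnet50.cambricon";          // hypothetical path
 *   params["func_name"] = "subnet0";
 *   params["postproc_name"] = "PostprocClassification";   // hypothetical class
 *   if (infer.Open(params)) {
 *     // feed frames: infer.Process(data) with data of type CNFrameInfoPtr
 *     infer.Close();
 *   }
 * @endcode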
 */
class Inferencer : public Module, public ModuleCreator<Inferencer> {
 public:
  /**
   * @brief Creates the Inferencer module.
   *
   * @param[in] name The name of the Inferencer module.
   *
   * @return None.
   */
  explicit Inferencer(const std::string& name);

  /**
   * @brief Destructor. Destroys the inferencer instance.
   *
   * @param None.
   *
   * @return None.
   */
  virtual ~Inferencer();

  /**
   * @brief Called by the pipeline when the pipeline starts.
   *
   * @param[in] paramSet:
   * @verbatim
   *   model_path: Required. The path of the offline model.
   *   func_name: Required. The name of the function defined in the offline model.
   *              It can be found in the Cambricon twins file. In most cases, it is "subnet0".
   *   postproc_name: Required. The class name for postprocessing. The class specified by this name must
   *                  inherit from cnstream::Postproc when ``object_infer`` is false; otherwise, it must
   *                  inherit from cnstream::ObjPostproc.
   *   preproc_name: Optional. The class name for preprocessing on the CPU. The class specified by this
   *                 name must inherit from cnstream::Preproc when ``object_infer`` is false; otherwise,
   *                 it must inherit from cnstream::ObjPreproc. Preprocessing is done on the MLU by
   *                 ResizeYuv2Rgb (a Cambricon Bang operator) when this parameter is not set.
   *   use_scaler: Optional. Whether to use the scaler to preprocess the input. The scaler is not used
   *               by default.
   *   device_id: Optional. The MLU device ordinal number. The default value is 0.
   *   batching_timeout: Optional. The batching timeout in milliseconds. Type: float. The default value
   *                     is 3000.0.
   *   data_order: Optional. The data format. The default format is NHWC.
   *   threshold: Optional. The confidence threshold. The default value is 0.
   *   infer_interval: Optional. Process one frame for every ``infer_interval`` frames.
   *   object_infer: Optional. If set to true, the detected object is used as the input to inference;
   *                 if set to false, the video frame is used as the input. The default value is false.
   *   obj_filter_name: Optional. The class name for the object filter. See cnstream::ObjFilter. This
   *                    parameter is valid when ``object_infer`` is true. When it is not set, no objects
   *                    are filtered.
   *   keep_aspect_ratio: Optional. Valid when the MLU is used for preprocessing; keeps the image aspect
   *                      ratio constant during resizing.
   *   model_input_pixel_format: Optional. Valid when the MLU is used for preprocessing; sets the pixel
   *                             format of the model input image. The default value is RGBA32.
   *   mem_on_mlu_for_postproc: Optional. Passes a batched MLU pointer directly to the postprocessing
   *                            function without making device-to-host copies. See ``Postproc`` for details.
   *   saving_infer_input: Optional. Saves the data that is fed to inference.
   *   pad_method: Optional. Valid when the MLU is used for preprocessing; sets the padding method.
   *               With pad_method = "center", the image is placed in the center; with
   *               pad_method = "origin", the image is placed in the top-left corner.
   *               The default value is "center".
   * @endverbatim
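   *
   * A minimal sketch of these parameters as they might appear in a pipeline
   * configuration file (the model path and class names are hypothetical
   * placeholders):
   * @code
   *   "custom_params" : {
   *     "model_path" : "resnet50.cambricon",
   *     "func_name" : "subnet0",
   *     "postproc_name" : "PostprocClassification",
   *     "device_id" : 0
   *   }
   * @endcode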
   *
   * @return Returns true if the inferencer has been opened successfully.
   */
  bool Open(ModuleParamSet paramSet) override;

  /**
   * @brief Called by the pipeline when the pipeline stops.
   *
   * @param None.
   *
   * @return Void.
   */
  void Close() override;

  /**
   * @brief Performs inference for each frame.
   *
   * @param[in] data The frame's information and data.
   *
   * @retval 1: The process has run successfully.
   * @retval -1: The process failed.
   */
  int Process(CNFrameInfoPtr data) final;

  /**
   * @brief Checks the parameter set for the Inferencer module.
   *
   * @param[in] param_set The parameters for this module.
   *
   * @return Returns true if this API runs successfully. Otherwise, returns false.
   */
  bool CheckParamSet(const ModuleParamSet &param_set) const override;

 private:
  InferParamManager *param_manager_ = nullptr;
  DECLARE_PRIVATE(d_ptr_, Inferencer);
};  // class Inferencer

}  // namespace cnstream

#endif  // MODULES_INFERENCE_INCLUDE_INFERENCER_HPP_