#ifndef INFERTOOLS_HPP_
#define INFERTOOLS_HPP_

#include <memory>
#include <mutex>
#include <string>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>

#include "Util/logger.h"
#include "Util/SqlPool.h"
#include "cuda_utils.h"
#include "yololayer.h"
#include "logging.h"
#include "calibrator.h"
#include "preprocess.h"
#include "common.hpp"
#include "CNStreamInferData.h"
#include "InfineFilter.hpp"
#include "config.hpp"

using namespace toolkit;
using namespace std;
using namespace nvinfer1; // IRuntime / ICudaEngine / IExecutionContext

namespace gsd {

class InferTools {
private:
    InferTools() {}

public:
    using Ptr = std::shared_ptr<InferTools>;

    /**
     * @description: Get the process-wide singleton instance.
     * @return {*}
     */
    static std::shared_ptr<InferTools> getPtr();

    /**
     * @description: Deserialize the TensorRT engine and allocate I/O buffers.
     * @param {string} enginefile
     * @return {*}
     */
    bool Init(std::string enginefile);

    /**
     * @description: Release the execution context, engine, runtime and CUDA resources.
     * @return {*}
     */
    void Destroy();

    /**
     * @description: Run inference on a single frame.
     * @return {*}
     */
    // Assumption: the frame type is cv::Mat (the template argument was lost upstream).
    bool Inference(std::shared_ptr<cv::Mat> img, CNStreamInferData::Ptr result);

    /**
     * @description: doInference
     * @param {IExecutionContext&} context
     * @param {cudaStream_t&} stream
     * @param {void**} buffers
     * @param {float*} output
     * @param {int} batchSize
     * @return {*}
     */
    void doInference(IExecutionContext& context, cudaStream_t& stream, void** buffers, float* output, int batchSize);

    /**
     * @description: Destructor.
     * @return {*}
     */
    ~InferTools() {}

protected:
    float nms_Thresh = 0.4;   // NMS IoU threshold (must be float; an int would truncate to 0)
    float conf_Thresh = 0.5;  // confidence threshold (must be float; an int would truncate to 0)
    int device = 0;           // CUDA device id
    std::string engine_name = "../data/modules/best6_fp16.engine";
    char* trtModelStream = nullptr;
    IRuntime* runtime = nullptr;
    ICudaEngine* engine = nullptr;
    IExecutionContext* context = nullptr;
    const char* INPUT_BLOB_NAME = "data";
    const char* OUTPUT_BLOB_NAME = "prob";
    cudaStream_t stream;
    uint8_t* img_host = nullptr;    // pinned host staging buffer for preprocessing
    uint8_t* img_device = nullptr;  // device staging buffer for preprocessing
    float* buffers[2];              // device bindings: [inputIndex], [outputIndex]
    int inputIndex;
    int outputIndex;
    mutex m_mutex;                  // guards the execution context across threads
};

} // namespace gsd

#endif // INFERTOOLS_HPP_
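
// ---------------------------------------------------------------------------
// Usage sketch for the singleton above. Illustrative only: it assumes the
// engine path is valid, that CNStreamInferData is default-constructible, and
// that frames arrive as cv::Mat (the Inference() parameter type is itself an
// assumption, see the declaration above).
//
//   auto infer = gsd::InferTools::getPtr();              // process-wide instance
//   if (infer->Init("../data/modules/best6_fp16.engine")) {
//       auto frame  = std::make_shared<cv::Mat>(cv::imread("test.jpg"));
//       auto result = std::make_shared<CNStreamInferData>();  // namespace per CNStreamInferData.h
//       if (infer->Inference(frame, result)) {
//           // consume detections carried by `result`
//       }
//       infer->Destroy();                                // free TensorRT/CUDA resources
//   }
// ---------------------------------------------------------------------------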