
Test 30 video streams with inference

lishengyin 3 years ago
parent
commit c771b060a4

BIN
lib/libmodules.so


+ 16 - 6
modules/inference/src/inference.cpp

@@ -22,10 +22,10 @@
 
 /* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
  * based on the fastest source's framerate. */
-#define MUXER_BATCH_TIMEOUT_USEC 40000
+#define MUXER_BATCH_TIMEOUT_USEC 500
 
-#define TILED_OUTPUT_WIDTH 1280
-#define TILED_OUTPUT_HEIGHT 720
+#define TILED_OUTPUT_WIDTH 1920
+#define TILED_OUTPUT_HEIGHT 1080
 
 /* NVIDIA Decoder source pad memory feature. This feature signifies that source
  * pads having this capability will push GstBuffers containing cuda buffers. */
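For reference, these macros are normally handed to the nvstreammux and nvmultistreamtiler elements via g_object_set; the sketch below is not part of this diff and assumes the standard DeepStream property names, element handles named streammux/tiler, and an illustrative batch size of 30.

    /* Minimal sketch, not part of this commit: how the macros above are
     * typically applied, assuming standard DeepStream properties on
     * nvstreammux ("streammux") and nvmultistreamtiler ("tiler"). */
    g_object_set (G_OBJECT (streammux),
        "batch-size", 30,                                 /* e.g. 30 sources */
        "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, /* now 500 us */
        NULL);
    g_object_set (G_OBJECT (tiler),
        "width",  TILED_OUTPUT_WIDTH,                     /* now 1920 */
        "height", TILED_OUTPUT_HEIGHT,                    /* now 1080 */
        NULL);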
@@ -175,8 +175,18 @@ namespace MIVA{
 
         gst_bin_add_many (GST_BIN (this->pipeline), this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
             this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL);
-            /* we link the elements together
-            * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
+
+        // gst_bin_add_many (GST_BIN (this->pipeline), this->queue2, this->tiler, this->queue3,
+        //     this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL);
+
+        //     /* we link the elements together
+        //     * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
+
+        // if (!gst_element_link_many (this->streammux, this->queue2, this->tiler, this->queue3,
+        //         this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL)) {
+        //     ErrorL << "Elements could not be linked. Exiting.";
+        //     return -1;
+        // }
         if (!gst_element_link_many (this->streammux, this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
                 this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL)) {
             ErrorL << "Elements could not be linked. Exiting.";
@@ -296,7 +306,7 @@ namespace MIVA{
                 gst_message_parse_warning (msg, &error, &debug);
                 WarnL << "WARNING from element " << GST_OBJECT_NAME (msg->src) << ": " << error->message;
                 g_free (debug);
-                ErrorL << "Warning: %s" << error->message;
+                ErrorL << "Warning: " << error->message;
                 g_error_free (error);
                 break;
             }
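The warning branch fixed above normally sits beside an error branch in the same bus-message switch; a minimal sketch of that companion case is shown below, assuming a GMainLoop handle named loop (the error branch itself is not part of this diff).

    case GST_MESSAGE_ERROR: {
        gchar *debug = NULL;
        GError *error = NULL;
        gst_message_parse_error (msg, &error, &debug);
        ErrorL << "ERROR from element " << GST_OBJECT_NAME (msg->src) << ": " << error->message;
        if (debug)
            ErrorL << "Error details: " << debug;
        g_free (debug);
        g_error_free (error);
        g_main_loop_quit (loop);  /* 'loop' is assumed; quit so the pipeline can be torn down */
        break;
    }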

+ 3 - 2
modules/userApp/src/user_app.cpp

@@ -2,7 +2,7 @@
 
 namespace MIVA
 {  
-    ThreadPool poolInfer(4,ThreadPool::PRIORITY_HIGHEST, false);
+    ThreadPool poolInfer(6,ThreadPool::PRIORITY_HIGHEST, false);
 
     std::shared_ptr<UserApp> UserApp::CreateNew(){
         return std::make_shared<UserApp>();
@@ -165,7 +165,8 @@ namespace MIVA
     void UserApp::ListenInfer(int Source_id, int num)
     {   
         if(this->play == true){
-            this->m_timer2 = std::make_shared<Timer>(3.0f,[&](){
+            this->m_timer2 = std::make_shared<Timer>(20.0f,[&](){
+                
                 // Pause the task
                 this->m_Infer->PauseTask();
                 return false;
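The timer above now waits 20 s instead of 3 s before pausing the inference task, and its callback returns false so it fires only once. The project's Timer class is not shown in this diff; the one-shot semantics can be sketched in plain C++ as a rough, hypothetical stand-in (names and signature are illustrative, not the MIVA/ZLToolKit API):

    #include <chrono>
    #include <functional>
    #include <thread>

    // Hypothetical stand-in, not the project's Timer: runs cb after 'seconds',
    // re-arming only while cb() returns true. Returning false, as the callback
    // above does, makes it fire exactly once.
    void runTimer(float seconds, std::function<bool()> cb) {
        std::thread([seconds, cb] {
            do {
                std::this_thread::sleep_for(std::chrono::duration<float>(seconds));
            } while (cb());
        }).detach();
    }

    // Usage mirroring the change above (illustrative only):
    // runTimer(20.0f, [&]{ m_Infer->PauseTask(); return false; });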

+ 1 - 0
source/bin/config_infer_primary_yoloV5.txt

@@ -55,6 +55,7 @@ network-type=0
 output-blob-names=prob
 ## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering)
 #cluster-mode=2
+interval=0
 maintain-aspect-ratio=1
 parse-bbox-func-name=NvDsInferParseCustomYoloV5
 custom-lib-path=/home/nvidia/work/MIVA/lib/libnvdsinfer_custom_impl_Yolo.so
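The new interval key tells nvinfer how many consecutive batches to skip between inference runs: interval=0, as added here, infers on every batch, which is the heaviest setting when 30 streams are muxed together. A small illustration of the trade-off (the alternative value is not part of this commit):

    [property]
    # interval=0  -> run inference on every batch (maximum load, lowest latency)
    # interval=2  -> skip 2 batches between runs, i.e. infer on every 3rd batch
    interval=0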