#include "inference.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* By default, OSD process-mode is set to CPU_MODE. To change mode, set as:
 * 1: GPU mode (for Tesla only)
 * 2: HW mode (For Jetson only)
 */
#define OSD_PROCESS_MODE 0

/* OSD text display: set to 1 to draw text overlays (the current setting), or 0 to disable them. */
#define OSD_DISPLAY_TEXT 1

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080

/* Muxer batch formation timeout, in microseconds. Should ideally be set based
 * on the fastest source's framerate (e.g. 40000 usec for a 25 fps source). */
#define MUXER_BATCH_TIMEOUT_USEC 500

#define TILED_OUTPUT_WIDTH 1920
#define TILED_OUTPUT_HEIGHT 1080

/* NVIDIA Decoder source pad memory feature. This feature signifies that source
 * pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"

gint frame_number = 0;


namespace MIVA{
    std::shared_ptr<Inference> infer = NULL;
    std::shared_ptr<Inference> Inference::CreateNew()
    {
        if(infer == NULL) infer = std::make_shared<Inference>();
        return infer;
    }
    // Construction performs no work; all pipeline setup happens in Init().
    Inference::Inference() = default;
    // Destructor tears down the pipeline and GLib resources via Destory().
    Inference::~Inference()
    {
        Destory();
    }
    /**
     * Initialize: build the DeepStream pipeline
     *   sources -> nvstreammux -> nvinfer -> nvmultistreamtiler ->
     *   nvvideoconvert -> nvdsosd [-> nvegltransform on Tegra] -> nveglglessink
     * for the given list of input streams.
     * Returns OK on success, ERR on failure.
     */
    int32_t Inference::Init(vector<DataSource> DataList)
    {
        // GLib main loop that services this pipeline's bus messages.
        this->loop = g_main_loop_new (NULL, FALSE);

        // Top-level pipeline container.
        this->pipeline = gst_pipeline_new ("dstest3-pipeline");

        // Batches buffers from all sources into a single batched frame.
        this->streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

        if (this->pipeline == NULL || this->streammux == NULL) {
            ErrorL << "One element could not be created. Exiting.";
            return ERR;
        }
        gst_bin_add (GST_BIN (this->pipeline), this->streammux);

        // One source bin per input URI, each linked to a requested
        // streammux sink pad ("sink_0", "sink_1", ...).
        int i = 0;
        for (std::vector<DataSource>::iterator iter = DataList.begin(); iter != DataList.end(); ++iter) {
            GstPad *sinkpad, *srcpad;
            gchar pad_name[16] = { };

            GstElement *source_bin = create_source_bin ((*iter).Id, (gchar *)((*iter).uri).c_str());
            if (!source_bin) {
                ErrorL << "Failed to create source bin. Exiting.";
                return ERR;
            }
            gst_bin_add (GST_BIN (this->pipeline), source_bin);

            g_snprintf (pad_name, 15, "sink_%u", i);
            sinkpad = gst_element_get_request_pad (this->streammux, pad_name);
            if (!sinkpad) {
                ErrorL << "Streammux request sink pad failed. Exiting.";
                return ERR;
            }

            srcpad = gst_element_get_static_pad (source_bin, "src");
            if (!srcpad) {
                ErrorL << "Failed to get src pad of source bin. Exiting.";
                gst_object_unref (sinkpad);  // fix: requested pad was leaked on this path
                return ERR;
            }
            if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
                ErrorL << "Failed to link source bin to stream muxer. Exiting.";
                gst_object_unref (srcpad);   // fix: pad refs were leaked on this path
                gst_object_unref (sinkpad);
                return ERR;
            }
            gst_object_unref (srcpad);
            gst_object_unref (sinkpad);

            i++;
        }

        /* Primary inference engine running on the batched frames. */
        this->pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

        /* Queues decouple neighbouring elements. */
        this->queue1 = gst_element_factory_make ("queue", "queue1");
        this->queue2 = gst_element_factory_make ("queue", "queue2");
        this->queue3 = gst_element_factory_make ("queue", "queue3");
        this->queue4 = gst_element_factory_make ("queue", "queue4");
        this->queue5 = gst_element_factory_make ("queue", "queue5");

        /* Composites the batched frames into a 2D tiled grid by source. */
        this->tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

        /* Converts NV12 to RGBA as required by nvdsosd. */
        this->nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

        this->nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

    #ifdef PLATFORM_TEGRA
        this->transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
    #endif

        this->sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

        if (!this->pgie || !this->tiler || !this->nvvidconv || !this->nvosd || !this->sink) {
            ErrorL << "One element could not be created. Exiting.";
            return ERR;  // fix: was -1, inconsistent with the ERR convention used above
        }

    #ifdef PLATFORM_TEGRA
        if (!this->transform) {
            ErrorL << "One tegra element could not be created. Exiting.";
            return ERR;  // fix: was -1
        }
    #endif

        // Muxer geometry and batching; batch-size matches the source count.
        g_object_set (G_OBJECT (this->streammux), "batch-size", i, NULL);
        g_object_set (G_OBJECT (this->streammux), "width", MUXER_OUTPUT_WIDTH, "height", MUXER_OUTPUT_HEIGHT,
            "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

        /* Configure the nvinfer element using the nvinfer config file. */
        g_object_set (G_OBJECT (this->pgie),
            "config-file-path", "config_infer_primary_yoloV5.txt", NULL);

        /* Override the batch-size set in the config file with the number of sources. */
        g_object_get (G_OBJECT (this->pgie), "batch-size", &(this->pgie_batch_size), NULL);
        if (this->pgie_batch_size != i) {
            WarnL << "WARNING: Overriding infer-config batch-size:" << this->pgie_batch_size << "with number of sources (" << i << ")";
            g_object_set (G_OBJECT (this->pgie), "batch-size", i, NULL);
        }

        // Tile layout: near-square grid large enough for all sources.
        this->tiler_rows = (guint) sqrt (i);
        this->tiler_columns = (guint) ceil (1.0 * i / this->tiler_rows);
        g_object_set (G_OBJECT (this->tiler), "rows", this->tiler_rows, "columns", this->tiler_columns,
            "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

        g_object_set (G_OBJECT (this->nvosd), "process-mode", OSD_PROCESS_MODE,
            "display-text", OSD_DISPLAY_TEXT, NULL);

        g_object_set (G_OBJECT (this->sink), "qos", 0, NULL);

        // Watch bus messages (EOS / warnings / errors) on the main loop.
        this->bus = gst_pipeline_get_bus (GST_PIPELINE (this->pipeline));
        this->bus_watch_id = gst_bus_add_watch (this->bus, bus_call, this->loop);
        gst_object_unref (this->bus);

        /* Assemble and link:
         * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd [-> transform] -> sink.
         * fix: this->transform is created only on Tegra, but it was previously
         * added/linked unconditionally — on dGPU builds that passed a NULL (or
         * uninitialized) element into the NULL-terminated varargs list, truncating
         * it before the sink. */
    #ifdef PLATFORM_TEGRA
        gst_bin_add_many (GST_BIN (this->pipeline), this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
            this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL);
        if (!gst_element_link_many (this->streammux, this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
                this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->transform, this->sink, NULL)) {
            ErrorL << "Elements could not be linked. Exiting.";
            return ERR;
        }
    #else
        gst_bin_add_many (GST_BIN (this->pipeline), this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
            this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->sink, NULL);
        if (!gst_element_link_many (this->streammux, this->queue1, this->pgie, this->queue2, this->tiler, this->queue3,
                this->nvvidconv, this->queue4, this->nvosd, this->queue5, this->sink, NULL)) {
            ErrorL << "Elements could not be linked. Exiting.";
            return ERR;
        }
    #endif

        // Per-buffer probe on nvinfer's src pad to read inference metadata.
        this->tiler_src_pad = gst_element_get_static_pad (this->pgie, "src");
        if (!this->tiler_src_pad) {
            InfoL << "Unable to get src pad";
        } else {
            gst_pad_add_probe (this->tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
                tiler_src_pad_buffer_probe, NULL, NULL);
            gst_object_unref (this->tiler_src_pad);  // fix: was unreffed even when NULL
        }

        return OK;
    }
    // Move the pipeline to the READY state and enter the GLib main loop.
    // NOTE: g_main_loop_run() blocks the calling thread until the loop is
    // quit (bus_call quits it on EOS or ERROR).
    void Inference::ReadyTask()
    {
         InfoL << "Now ReadyTask";
         gst_element_set_state(this->pipeline, GST_STATE_READY);
         g_main_loop_run(this->loop);
    }
    // Start the task: switch the pipeline to the PLAYING state.
    void Inference::StartTask()
    {
        InfoL << "Now playing";  // fix: log message typo "palying"
        gst_element_set_state(this->pipeline, GST_STATE_PLAYING);
    }
    // 暂停任务
    void Inference::PauseTask()
    {
        InfoL << "Now Pause";
        gst_element_set_state(this->pipeline, GST_STATE_PAUSED);
    }
    // Destroy: stop playback, release the pipeline and GLib resources, and
    // drop the singleton reference.
    // NOTE(review): the name "Destory" (sic) matches the header declaration
    // and is kept unchanged for API compatibility.
    void Inference::Destory()
    {
        InfoL << "Returned, stopping playback";
        // fix: guard each resource — if Init() failed part-way (or was never
        // called) these handles may be NULL, and the old code would emit
        // GLib criticals / crash.
        if (this->pipeline) {
            gst_element_set_state(this->pipeline, GST_STATE_NULL);
            InfoL << "Deleting pipeline";
            gst_object_unref(GST_OBJECT(this->pipeline));
            this->pipeline = NULL;
        }
        if (this->bus_watch_id) {
            g_source_remove(this->bus_watch_id);
            this->bus_watch_id = 0;
        }
        if (this->loop) {
            g_main_loop_unref(this->loop);
            this->loop = NULL;
        }
        infer = NULL;
    }

    /**
     * Buffer probe installed on the nvinfer src pad (see Init()).
     * For each frame in the batch it counts detected objects with
     * class_id == 0, broadcasts that count via NoticeCenter, and fills in an
     * OSD text overlay with the total. `pad` and `u_data` are unused
     * (registered with NULL user data in Init()).
     */
    GstPadProbeReturn 
    Inference::tiler_src_pad_buffer_probe(GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
    {
        // Fetch the inference results carried in the buffer's batch metadata.
        GstBuffer *buf = (GstBuffer *) info->data;
        NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
        // Working variables for walking the metadata lists.
        NvDsObjectMeta *obj_meta = NULL; // object-detection metadata for one detection
        NvDsMetaList * l_frame = NULL;
        NvDsMetaList * l_obj = NULL;
        NvDsDisplayMeta *display_meta = NULL; 
        
        for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;l_frame = l_frame->next) // one entry per frame in the batch
        {
            NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
            int num = 0;
            for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;l_obj = l_obj->next) 
            {
                obj_meta = (NvDsObjectMeta *) (l_obj->data);
                // NOTE(review): class 0 is counted as "Person" here, but the
                // macro PGIE_CLASS_ID_PERSON above is 2 — confirm against the
                // model's label map.
                if (obj_meta->class_id == 0) // Person
                {
                    num++;
                }
            }
            // Build the statistics overlay for the top-left corner.
            display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
            NvOSD_TextParams *txt_params  = &display_meta->text_params[0];
            display_meta->num_labels = 1;
            txt_params->display_text = (char *)g_malloc0 (MAX_DISPLAY_LEN);

            snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Number of people: %d \n", num);

            // Broadcast the per-frame inference result (source id + person count).
            NoticeCenter::Instance().emitEvent(NOTICE_INFER,frame_meta->source_id, num);

            txt_params->x_offset = 30;
            txt_params->y_offset = 30;

            /* Font , font-color and font-size */
            txt_params->font_params.font_name = (char *)"Serif";
            txt_params->font_params.font_size = 10;
            txt_params->font_params.font_color.red = 1.0;
            txt_params->font_params.font_color.green = 1.0;
            txt_params->font_params.font_color.blue = 1.0;
            txt_params->font_params.font_color.alpha = 1.0;

            /* Text background color */
            txt_params->set_bg_clr = 1;
            txt_params->text_bg_clr.red = 0.0;
            txt_params->text_bg_clr.green = 0.0;
            txt_params->text_bg_clr.blue = 0.0;
            txt_params->text_bg_clr.alpha = 1.0;

            // NOTE(review): the display meta is prepared but never attached to
            // the frame (the call below is commented out), so the text is not
            // rendered and the g_malloc0'd display_text may be leaked —
            // confirm whether this is intentional.
            // nvds_add_display_meta_to_frame(frame_meta, display_meta);
        }
        return GST_PAD_PROBE_OK;
    }

    /**
     * GStreamer bus watch callback (installed in Init()).
     * Handles EOS, warnings, errors and per-stream EOS element messages.
     * @param data  the GMainLoop to quit on EOS/ERROR.
     * @return TRUE so the watch stays installed.
     */
    gboolean Inference::bus_call (GstBus * bus, GstMessage * msg, gpointer data)
    {
        GMainLoop *loop = (GMainLoop *) data;
        switch (GST_MESSAGE_TYPE (msg)) {
            case GST_MESSAGE_EOS:
                InfoL << "End of stream";
                g_main_loop_quit (loop);
                break;
            case GST_MESSAGE_WARNING:
            {
                gchar *debug;
                GError *error;
                gst_message_parse_warning (msg, &error, &debug);
                WarnL << "WARNING from element " << GST_OBJECT_NAME (msg->src) << ": " << error->message;
                // fix: log the debug details (previously discarded) instead of
                // re-logging the same message at error level.
                if (debug)
                    WarnL << "Warning details: " << debug;
                g_free (debug);
                g_error_free (error);
                break;
            }
            case GST_MESSAGE_ERROR:
            {
                gchar *debug;
                GError *error;
                gst_message_parse_error (msg, &error, &debug);
                ErrorL << "ERROR from element" << GST_OBJECT_NAME (msg->src) << ":" << error->message;
                if (debug)
                    ErrorL << "Error details:" << debug;
                g_free (debug);
                g_error_free (error);
                // A pipeline error is fatal: stop the main loop.
                g_main_loop_quit (loop);
                break;
            }
        #ifndef PLATFORM_TEGRA
            // On non-Tegra builds, check element messages for per-stream EOS
            // notifications posted by the DeepStream plugins.
            case GST_MESSAGE_ELEMENT:
            {
                if (gst_nvmessage_is_stream_eos (msg)) {
                    guint stream_id;
                    if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
                        InfoL << "Got EOS from stream " << stream_id;
                    }
                }
                break;
            }
        #endif
            default:
                break;
        }
        return TRUE;
    }

    /**
     * "pad-added" callback from uridecodebin (connected in create_source_bin).
     * When a video pad with NVMM (CUDA) memory caps appears, point the source
     * bin's ghost "src" pad at it. `data` is the enclosing source bin.
     */
    void Inference::cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
    {
        InfoL << "In cb_newpad";
        GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
        if (!caps) {
            // fix: the pad may not have negotiated caps yet; avoid a NULL deref.
            ErrorL << "Failed to get caps from decoder src pad";
            return;
        }
        const GstStructure *str = gst_caps_get_structure (caps, 0);
        const gchar *name = gst_structure_get_name (str);
        GstElement *source_bin = (GstElement *) data;
        GstCapsFeatures *features = gst_caps_get_features (caps, 0);

        /* Only link pads carrying video (not audio). */
        if (!strncmp (name, "video", 5)) {
            /* Link only if decodebin picked an NVIDIA decoder — detected by the
             * NVMM memory feature on the pad caps. */
            if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
                /* Retarget the source bin's ghost pad at the decoder src pad. */
                GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
                if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
                        decoder_src_pad)) {
                    ErrorL << "Failed to link decoder src pad to source bin ghost pad";
                }
                gst_object_unref (bin_ghost_pad);
            } else {
                ErrorL << "Error: Decodebin did not pick nvidia decoder plugin.";
            }
        }
        // fix: gst_pad_get_current_caps() returns a new reference; it was leaked.
        gst_caps_unref (caps);
    }

    // "child-added" callback: when uridecodebin creates a nested decodebin,
    // hook this same handler onto it so its children are monitored too.
    void Inference::decodebin_child_added (GstChildProxy * child_proxy, GObject * object,
        gchar * name, gpointer user_data)
    {
        InfoL << "Decodebin child added: " << name;
        // True when the child's name begins with "decodebin".
        const gboolean is_decodebin = (g_strrstr (name, "decodebin") == name);
        if (is_decodebin) {
            g_signal_connect (G_OBJECT (object), "child-added",
                G_CALLBACK (decodebin_child_added), user_data);
        }
    }

    /**
     * Build a source GstBin named "source-bin-<index>" wrapping a uridecodebin
     * for `uri`. The bin exposes a targetless ghost "src" pad whose target is
     * set later in cb_newpad, once the decoder src pad exists.
     * @return the new bin, or NULL on failure.
     */
    GstElement* Inference::create_source_bin(guint index, gchar * uri)
    {
        GstElement *bin = NULL, *uri_decode_bin = NULL;
        gchar bin_name[16] = { };

        g_snprintf (bin_name, 15, "source-bin-%02u", index);  // fix: %u for guint
        /* Source GstBin abstracting this source from the rest of the pipeline. */
        bin = gst_bin_new (bin_name);

        /* uridecodebin figures out the container format / codec and plugs the
         * appropriate demux and decode elements. */
        uri_decode_bin = gst_element_factory_make ("uridecodebin", "uri-decode-bin");

        if (!bin || !uri_decode_bin) {
            ErrorL << "One element in source bin could not be created.";
            // fix: release whichever element was created; both were leaked before.
            if (bin)
                gst_object_unref (bin);
            if (uri_decode_bin)
                gst_object_unref (uri_decode_bin);
            return NULL;
        }

        /* Set the input uri on the source element. */
        g_object_set (G_OBJECT (uri_decode_bin), "uri", uri, NULL);

        /* cb_newpad fires once a raw-data pad has been created;
         * decodebin_child_added recursively watches nested decodebins. */
        g_signal_connect (G_OBJECT (uri_decode_bin), "pad-added",
            G_CALLBACK (cb_newpad), bin);
        g_signal_connect (G_OBJECT (uri_decode_bin), "child-added",
            G_CALLBACK (decodebin_child_added), bin);

        gst_bin_add (GST_BIN (bin), uri_decode_bin);

        /* Targetless ghost pad acting as a proxy for the decoder src pad; the
         * target is assigned in cb_newpad. */
        if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
                GST_PAD_SRC))) {
            ErrorL << "Failed to add ghost pad in source bin";
            gst_object_unref (bin);  // fix: bin (and its children) was leaked here
            return NULL;
        }
        return bin;
    }
}