#include "PL_SensetimeFaceTrack.h"

#include "PL_DlibFaceTrack.h"
#include "PL_OpenCV_HOG_SVM_Detector.h"

#include "logger.h"
#include <iostream>

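// Register element creators so the pipeline can construct elements by name via push_elem().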
PipeLine::register_global_elem_creator("PL_Scale", create_PL_Scale);
PipeLine::register_global_elem_creator("PL_Fork", create_PL_Fork);

pipeLine.register_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
//pipeLineDecoderDetector.register_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);

pipeLine.register_elem_creator("PL_DlibFaceTrack", create_PL_DlibFaceTrack);
//pipeLineDecoderDetector.register_elem_creator("PL_DlibFaceTrack", create_PL_DlibFaceTrack);
pipeLine.register_elem_creator("PL_OpenCV_HOG_SVM_Detector", create_PL_OpenCV_HOG_SVM_Detector);

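// Source: an RTSP client that feeds the pipeline (its init/config call is not shown in this fragment).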
{
    PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");
}

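// Convert decoded I420 frames to RGBA8888, then run the OpenCV HOG+SVM detector on them.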
{
    PL_AVFrameBGRA_Config config;
    config.convertTo = PL_AVFrameBGRA_Config::I420_TO_RGBA8888;
    PL_AVFrameBGRA* ple = (PL_AVFrameBGRA*)pipeLine.push_elem("PL_AVFrameBGRA");
    bool ret = ple->init(&config);
    if (!ret)
    {
        LOG_ERROR << "PL_AVFrameBGRA.init error" << std::endl;
        exit(EXIT_FAILURE);
    }
}

{
    PL_OpenCV_HOG_SVM_Detector* openCV_HOG_SVM_Detector = (PL_OpenCV_HOG_SVM_Detector*)pipeLine.push_elem("PL_OpenCV_HOG_SVM_Detector");
    bool ret = openCV_HOG_SVM_Detector->init(nullptr);
    if (!ret)
    {
        LOG_ERROR << "PL_OpenCV_HOG_SVM_Detector.init error" << std::endl;
        exit(EXIT_FAILURE);
    }
}

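// Disabled stages kept for reference: pipeLineDecoderDetector variants, Sensetime/dlib face tracking, fork and queue elements.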
//{
//    PL_AVFrameBGRA_Config config;
//    config.convertTo = PL_AVFrameBGRA_Config::I420_TO_RGBA8888;
//    PL_AVFrameBGRA* ple = (PL_AVFrameBGRA*)pipeLineDecoderDetector.push_elem("PL_AVFrameBGRA");
//    bool ret = ple->init(&config);
//    if (!ret)
//    {
//        LOG_ERROR << "PL_AVFrameBGRA.init error" << std::endl;
//        exit(EXIT_FAILURE);
//    }
//}

//{
//    SensetimeFaceTrackConfig config;
//    //config.generate_face_feature = true;
//    PL_SensetimeFaceTrack* ple = (PL_SensetimeFaceTrack*)pipeLine.push_elem("PL_SensetimeFaceTrack");
//    PL_SensetimeFaceTrack* ple = (PL_SensetimeFaceTrack*)pipeLineDecoderDetector.push_elem("PL_SensetimeFaceTrack");
//    ple->init(&config);
//}

//PipeLine pipeLine2;
//PipeLine pipeLineAnalizor;
//{
//    PL_Fork_Config config;
//    config.forkBy = PL_Fork::FB_MB_TYPE;
//    config.forkSync = PL_Fork::FS_PARALLEL;
//    PL_Fork* ple = (PL_Fork*)pipeLine.push_elem("PL_Fork");
//    PL_Fork* ple = (PL_Fork*)pipeLineDecoderDetector.push_elem("PL_Fork");
//    ple->init(&config);
//    ple->attach_pipe_line(&pipeLine2);
//    ple->attach_pipe_line(&pipeLineAnalizor);
//
//    {
//        //pipeLine2.push_elem();
//        //pipeLineAnalizor.push_elem();
//    }
//}

//{
//    PL_DlibFaceTrack_Config config;
//    config.pyramid_down_layers = 2;
//    config.pyramid_down_n = 1;
//    PL_DlibFaceTrack* ple = (PL_DlibFaceTrack*)pipeLine.push_elem("PL_DlibFaceTrack");
//    PL_DlibFaceTrack* ple = (PL_DlibFaceTrack*)pipeLineDecoderDetector.push_elem("PL_DlibFaceTrack");
//    ple->init(&config);
//}

//{//#todo queue should support deep copy
//    PL_Queue_Config config;
//    PL_Queue* queue1 = (PL_Queue*)pipeLine.push_elem("PL_Queue");
//    PL_Queue* queue1 = (PL_Queue*)pipeLineDecoderDetector.push_elem("PL_Queue");
//    bool ret = queue1->init(&config);
//    if (!ret)
//    {
//    }
//}

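// Encode processed frames back to H.264 with low-latency settings.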
{
    PL_H264Encoder_Config config;
    config.av_opt_preset = "superfast";
    config.av_opt_tune = "zerolatency";
    config.avc_profile_str = "baseline";
    PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
    bool ret = h264Encoder->init(&config);
    if (!ret)
    {
        LOG_ERROR << "PL_H264Encoder.init error" << std::endl;
        exit(EXIT_FAILURE);
    }
}

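// Serve the encoded stream over RTSP.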
{
    PL_RTSPServer* rtspServer = (PL_RTSPServer*)pipeLine.push_elem("PL_RTSPServer");
    bool ret = rtspServer->init(nullptr);
    if (!ret)
    {
        LOG_ERROR << "rtspServer.init error" << std::endl;
        exit(EXIT_FAILURE);
    }
}

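// Main processing loop (body continues beyond this fragment).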
while(true)
{