#include "PipeLine.h"
#include "PL_RTSPClient.h"
#include "PL_RTSPServer.h"
#include "PL_H264Decoder.h"
#include "PL_H264Encoder.h"
#include "PL_AVFrameYUV420.h"
#include "PL_AVFrameBGRA.h"
#include "PL_Queue.h"
#include "PL_Scale.h"
#include "PL_Fork.h"

#include "PL_SensetimeFaceTrack.h"

#include "PL_DlibFaceTrack.h"
#include "PL_OpenCV_HOG_SVM_Detector.h"

#include "logger.h"
#include <iostream>

Logger g_logger(std::cout);

int main(int argc, char** argv)
{
	g_logger.set_level(VERBOSE);
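
	// The RTSP URL is read from argv[1] below; fail fast with a usage message
	// if it is missing instead of handing a null pointer to the client config.
	if (argc < 2)
	{
		LOG_ERROR << "usage: " << argv[0] << " <rtspURL>" << std::endl;
		exit(EXIT_FAILURE);
	}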

	PipeLine pipeLine;
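
	// Register the element creators so PipeLine::push_elem() can construct
	// each stage by name.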
	PipeLine::register_global_elem_creator("PL_RTSPClient", create_PL_RTSPClient);
	PipeLine::register_global_elem_creator("PL_RTSPServer", create_PL_RTSPServer);
	PipeLine::register_global_elem_creator("PL_H264Decoder", create_PL_H264Decoder);
	PipeLine::register_global_elem_creator("PL_AVFrameYUV420", create_PL_AVFrameYUV420);
	PipeLine::register_global_elem_creator("PL_AVFrameBGRA", create_PL_AVFrameBGRA);
	PipeLine::register_global_elem_creator("PL_H264Encoder", create_PL_H264Encoder);
	PipeLine::register_global_elem_creator("PL_Queue", create_PL_Queue);
	PipeLine::register_global_elem_creator("PL_Scale", create_PL_Scale);
	PipeLine::register_global_elem_creator("PL_Fork", create_PL_Fork);

	//pipeLine.register_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
	//pipeLine.register_elem_creator("PL_DlibFaceTrack", create_PL_DlibFaceTrack);
	pipeLine.register_elem_creator("PL_OpenCV_HOG_SVM_Detector", create_PL_OpenCV_HOG_SVM_Detector);
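
	// RTSP source: pull the H.264 stream from the URL given on the command line.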
	{
		PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");
		PL_RTSPClient_Config rtspConfig;
		rtspConfig.progName = argv[0];
		rtspConfig.rtspURL = argv[1];
		rtspConfig.aux = true; // ffmpeg needs the aux data, but live555 does not
		rtspConfig.verbosityLevel = 1;
		rtspConfig.tunnelOverHTTPPortNum = 0;
		rtspConfig.args = nullptr;
		bool ret = rtspClient->init(&rtspConfig);
		if (!ret)
		{
			LOG_ERROR << "rtspClient.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
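
	// Decode the incoming H.264 stream into raw frames.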
	{
		PL_H264Decoder* h264Decoder = (PL_H264Decoder*)pipeLine.push_elem("PL_H264Decoder");
		bool ret = h264Decoder->init(nullptr);
		if (!ret)
		{
			LOG_ERROR << "PL_H264Decoder.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
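
	// Expose the decoded frame as YUV420 image data for the stages below.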
	{
		PL_AVFrameYUV420* avFrameYUV420 = (PL_AVFrameYUV420*)pipeLine.push_elem("PL_AVFrameYUV420");
		bool ret = avFrameYUV420->init(nullptr);
		if (!ret)
		{
			LOG_ERROR << "PL_AVFrameYUV420.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
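
	// Scale frames down to 480x360 before detection.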
	{
		PL_Scale_Config config;
		config.toWidth = 480;
		config.toHeight = 360;
		PL_Scale* ple = (PL_Scale*)pipeLine.push_elem("PL_Scale");
		bool ret = ple->init(&config);
		if (!ret)
		{
			LOG_ERROR << "PL_Scale.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
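
	// Run the OpenCV HOG+SVM detector on the scaled frames.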
	{
		PL_OpenCV_HOG_SVM_Detector* openCV_HOG_SVM_Detector = (PL_OpenCV_HOG_SVM_Detector*)pipeLine.push_elem("PL_OpenCV_HOG_SVM_Detector");
		bool ret = openCV_HOG_SVM_Detector->init(nullptr);
		if (!ret)
		{
			LOG_ERROR << "PL_OpenCV_HOG_SVM_Detector.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}

	//{
	//	PL_AVFrameBGRA_Config config;
	//	config.convertTo = PL_AVFrameBGRA_Config::I420_TO_RGBA8888;
	//	PL_AVFrameBGRA* ple = (PL_AVFrameBGRA*)pipeLine.push_elem("PL_AVFrameBGRA");
	//	bool ret = ple->init(&config);
	//	if (!ret)
	//	{
	//		LOG_ERROR << "PL_AVFrameBGRA.init error" << std::endl;
	//		exit(EXIT_FAILURE);
	//	}
	//}

	//{
	//	SensetimeFaceTrackConfig config;
	//	//config.generate_face_feature = true;
	//	PL_SensetimeFaceTrack* ple = (PL_SensetimeFaceTrack*)pipeLine.push_elem("PL_SensetimeFaceTrack");
	//	ple->init(&config);
	//}

	//PipeLine pipeLineAnalizor;
	//{
	//	PL_Fork_Config config;
	//	config.forkBy = PL_Fork::FB_MB_TYPE;
	//	config.forkSync = PL_Fork::FS_PARALLEL;
	//	PL_Fork* ple = (PL_Fork*)pipeLine.push_elem("PL_Fork");
	//	ple->init(&config);
	//	ple->attach_pipe_line(&pipeLineAnalizor);
	//
	//	{
	//		//pipeLineAnalizor.push_elem();
	//	}
	//}

	//{
	//	PL_DlibFaceTrack_Config config;
	//	config.pyramid_down_layers = 2;
	//	config.pyramid_down_n = 1;
	//	PL_DlibFaceTrack* ple = (PL_DlibFaceTrack*)pipeLine.push_elem("PL_DlibFaceTrack");
	//	ple->init(&config);
	//}

	//{ // #todo: queue should support deep copy
	//	PL_Queue_Config config;
	//	PL_Queue* queue1 = (PL_Queue*)pipeLine.push_elem("PL_Queue");
	//	bool ret = queue1->init(&config);
	//	if (!ret)
	//	{
	//		LOG_ERROR << "queue1.init error" << std::endl;
	//		exit(EXIT_FAILURE);
	//	}
	//}
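
	// Re-encode the processed frames as H.264 (baseline profile, low-latency encoder settings).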
	{
		PL_H264Encoder_Config config;
		config.av_opt_preset = "superfast";
		config.av_opt_tune = "zerolatency";
		config.avc_profile_str = "baseline";
		PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
		bool ret = h264Encoder->init(&config);
		if (!ret)
		{
			LOG_ERROR << "PL_H264Encoder.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
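
	// Serve the re-encoded stream to clients over RTSP.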
	{
		PL_RTSPServer* rtspServer = (PL_RTSPServer*)pipeLine.push_elem("PL_RTSPServer");
		bool ret = rtspServer->init(nullptr);
		if (!ret)
		{
			LOG_ERROR << "rtspServer.init error" << std::endl;
			exit(EXIT_FAILURE);
		}
	}
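
	// Main loop: run the pipeline forever.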
	while(true)
	{
		//LOG_ERROR << "begin pipe" << std::endl;
		pipeLine.pipe();
		//LOG_ERROR << "end pipe" << std::endl;
	}
}