unify log
git-svn-id: http://192.168.1.226/svn/proxy@53 454eff88-639b-444f-9e54-f578c98de674
#ifndef _MATERIAL_BUFFER_H_
#define _MATERIAL_BUFFER_H_

// timeval
#if defined(WIN32) || defined(_MSC_VER)
struct timeval {
	time_t tv_sec; /* seconds */
	suseconds_t tv_usec; /* microseconds */
};
#include <Winsock2.h>
#else
#include <sys/time.h>
#endif

enum MBFType
{
	MBFT__FIRST,
-	MBFT_JPEG,
-	MBFT_YUV420,
-	MBFT_BGRA,
-	MBFT_H264_NALU,
-	MBFT_H264_NALU_WITH_AUX,
-	MBFT_PTR_AVFRAME,
+	MBFT_SDP, // buffer = char[N], buffSize = N
+	MBFT_FMTP, // buffer = char[N], buffSize = N
+
+	MBFT_JPEG, // buffer = uint8_t[N], buffSize = N
+	MBFT_YUV420, // buffer = uint8_t[N], buffSize = N
+	MBFT_BGRA, // buffer = uint8_t[N], buffSize = N
+
+	MBFT_H264_NALU, // buffer = uint8_t[N], buffSize = N
+	MBFT_H264_NALU_AUX, // buffer = uint8_t[N], buffSize = N //#todo support!
+
+	MBFT_PTR_AVFRAME, // buffer = AVFrame*, buffSize = 0
+
	MBFT__LAST
};

	MBFType type;
-	uint8_t* buffer;//#todo void*
+	void* buffer;
	size_t buffSize;
	int width;
	int height;
	timeval pts;

-	MB_Frame() : type(MBFT__FIRST), buffer(nullptr), buffSize(0), pts() { }
+	MB_Frame() :
+		type(MBFT__FIRST), buffer(nullptr), buffSize(0),
+		width(0), height(0), pts()
+	{ }
};

#endif
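
For orientation (not part of the changeset): a minimal sketch of how a producing element would fill an MB_Frame under the buffer conventions noted above, taking MBFT_PTR_AVFRAME as the example; pAVFrame here stands for whatever decoded AVFrame* the element already holds.

	// illustrative only
	MB_Frame frame;
	frame.type = MB_Frame::MBFT_PTR_AVFRAME;
	frame.buffer = pAVFrame;          // pointer payload, per "buffer = AVFrame*"
	frame.buffSize = 0;               // pointer payloads carry no byte count
	frame.width = pAVFrame->width;
	frame.height = pAVFrame->height;
	gettimeofday(&frame.pts, NULL);   // pts is a plain struct timeval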

#include "PL_AVFrameBGRA.h"
#include "MaterialBuffer.h"
#include "logger.h"

extern "C"
{

struct PL_AVFrameBGRA_Internal
{
-	uint8_t buffer[1920*1080*4];
+	uint8_t buffer[1920*1080*4];//#todo
	size_t buffSize;
	size_t buffSizeMax;
	MB_Frame lastFrame;

	bool payError;

	PL_AVFrameBGRA_Internal() :
-		buffSize(0), buffSizeMax(sizeof(buffer)),
+		buffSize(0), buffSizeMax(sizeof(buffer)), lastFrame(),
		payError(true)
	{
	}

	{
		buffSize = 0;
		payError = true;

		MB_Frame _lastFrame;
		lastFrame = _lastFrame;
	}
};

{
	PL_AVFrameBGRA_Internal* in = (PL_AVFrameBGRA_Internal*)internal;

-	AVFrame* pAVFrame = (AVFrame*)pm.buffer;
	if (pm.type != PipeMaterial::PMT_FRAME)
	{
		LOG(ERROR) << "PL_AVFrameBGRA::pay only support PMT_FRAME";
		return false;
	}

	if (pm.buffer == nullptr)
		return false;

	MB_Frame* frame = (MB_Frame*)pm.buffer;
	if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
	{
		LOG(ERROR) << "PL_AVFrameBGRA::pay only support MBFT_PTR_AVFRAME";
		return false;
	}

+	AVFrame* pAVFrame = (AVFrame*)frame->buffer;
	if (pAVFrame == nullptr)
		return false;

-	int height = pAVFrame->height;
-	int width = pAVFrame->width;
+	const int height = pAVFrame->height;
+	const int width = pAVFrame->width;

	//int I420ToBGRA(const uint8* src_y, int src_stride_y,
	//               const uint8* src_u, int src_stride_u,

	in->buffSize = in->buffSizeMax;
	//in->buffer ready

	in->lastFrame.type = MB_Frame::MBFT_BGRA;
	in->lastFrame.buffer = in->buffer;
	in->lastFrame.buffSize = in->buffSize;
	in->lastFrame.width = width;
	in->lastFrame.height = height;
	in->lastFrame.pts = frame->pts;

-	static size_t f=0;
-	char fname[50];
-	sprintf(fname, "%u.bgra", ++f);
-	FILE * pFile = fopen (fname,"wb");
-	fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
-	fclose(pFile);
+	//#test
+	//static size_t f=0;
+	//char fname[50];
+	//sprintf(fname, "%u.bgra", ++f);
+	//FILE * pFile = fopen (fname,"wb");
+	//fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
+	//fclose(pFile);

	return true;
}

{
	PL_AVFrameBGRA_Internal* in = (PL_AVFrameBGRA_Internal*)internal;

-	pm.buffer = in->buffer;
-	pm.buffSize = in->buffSize;
+	pm.type = PipeMaterial::PMT_FRAME;
+	pm.buffer = &(in->lastFrame);
+	pm.buffSize = 0;
	pm.former = this;
	return true;
}

#include "PL_AVFrameYUV420.h"
#include "MaterialBuffer.h"
#include "logger.h"

extern "C"
{

	if (pm.type != PipeMaterial::PMT_FRAME)
	{
-		printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+		LOG(ERROR) << "PL_AVFrameYUV420::pay only support PMT_FRAME";
		return false;
	}

	MB_Frame* frame = (MB_Frame*)pm.buffer;
	if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
	{
-		printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME\n");
+		LOG(ERROR) << "PL_AVFrameYUV420::pay only support MBFT_PTR_AVFRAME";
		return false;
	}

	int picSize = pAVFrame->height * pAVFrame->width;
	in->buffSize = picSize * 1.5;

-	int height = pAVFrame->height;
-	int width = pAVFrame->width;
+	const int height = pAVFrame->height;
+	const int width = pAVFrame->width;

	uint8_t* pBuff = in->buffer;

	in->lastFrame.type = MB_Frame::MBFT_YUV420;
	in->lastFrame.buffer = in->buffer;
	in->lastFrame.buffSize = in->buffSize;
	in->lastFrame.width = width;
	in->lastFrame.height = height;
	in->lastFrame.pts = frame->pts;

	//#test

	AVFrameYUV420_Internal* in = (AVFrameYUV420_Internal*)internal;

	pm.type = PipeMaterial::PMT_FRAME;
-	pm.buffer = (uint8_t*)(&(in->lastFrame));
-	pm.buffSize = sizeof(in->lastFrame);
+	pm.buffer = &(in->lastFrame);
+	pm.buffSize = 0;
	pm.former = this;
	return true;
}

#include "PL_H264Decoder.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <H264VideoRTPSource.hh> // for SPropRecord
#include <libbase64.h>

	if (!avCodec)
	{
-		printf("codec not found!\n");
+		LOG(WARN) << "codec not found!";
		return false;
	}

	if (av_packet_from_data(&packet, buffer, buffSize) != 0)
	{
-		printf("av_packet_from_data error\n");
+		LOG(WARN) << "av_packet_from_data error";
		return false;
	}

	}
	else
	{
-		printf("incomplete frame\n");
+		LOG(WARN) << "incomplete frame";
		return false;
	}
}

		bool ret = initH264DecoderEnv(in, sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength);
		if (!ret)
		{
+			LOG(ERROR) << "PL_H264Decoder::pay initH264DecoderEnv error";
			return false; // #todo log
		}
		else
			in->fmtp_set_to_context = true;
	}

-	if (pm.buffer == nullptr || pm.buffSize <= 0)
+	if (pm.buffer == nullptr)
		return false;

	bool ret = false;
	if (pm.type == PipeMaterial::PMT_BYTES)
	{
+		if (pm.buffSize <= 0)
+			return false;

		timeval pts = {0};
-		ret = decodeH264(in, pm.buffer, pm.buffSize, pts);
+		ret = decodeH264(in, (uint8_t*)pm.buffer, pm.buffSize, pts);
	}
	else if (pm.type == PipeMaterial::PMT_FRAME)
	{
		MB_Frame* frame = (MB_Frame*)pm.buffer;

-		ret = decodeH264(in, frame->buffer, frame->buffSize, frame->pts);
+		if (frame->buffSize <= 0)
+			return false;
+
+		ret = decodeH264(in, (uint8_t*)frame->buffer, frame->buffSize, frame->pts);
		if (ret)
		{
			in->lastFrame.type = MB_Frame::MBFT_PTR_AVFRAME;
			in->lastFrame.buffer = (uint8_t*)(in->pAVFrame);
			in->lastFrame.buffSize = sizeof(in->pAVFrame);
			in->lastFrame.width = in->pAVFrame->width;
			in->lastFrame.height = in->pAVFrame->height;
			//in->lastFrame.pts = frame->pts;//#todo
			gettimeofday(&(in->lastFrame.pts),NULL);
		}

	if (!in->payError)
	{
		pm.type = PipeMaterial::PMT_FRAME;
-		pm.buffer = (uint8_t*)(&(in->lastFrame));
-		pm.buffSize = sizeof(uint8_t*);
+		pm.buffer = &(in->lastFrame);
+		pm.buffSize = 0;
	}
	pm.former = this;
	return !in->payError;

	in->pAVCodecContext->height = 600;//#todo from config
	in->pAVCodecContext->time_base.num=1;
	in->pAVCodecContext->time_base.den=25;
-	in->pAVCodecContext->gop_size = 2;
+	in->pAVCodecContext->gop_size = 25;
	in->pAVCodecContext->max_b_frames = 0;
	//in->pAVCodecContext->profile = FF_PROFILE_H264_MAIN;
	in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

	av_opt_set(in->pAVCodecContext->priv_data, "preset", "superfast", 0);
-	//av_opt_set(c->priv_data, "tune", "zerolatency", 0);
+	//av_opt_set(in->pAVCodecContext->priv_data, "tune", "zerolatency", 0);

	if(avcodec_open2(in->pAVCodecContext, avCodec, NULL) >= 0)
	{

		in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
		in->lastFrame.buffer = in->buffer;
		in->lastFrame.buffSize = in->buffSize;
		in->lastFrame.width = frame->width;
		in->lastFrame.height = frame->height;
		in->lastFrame.pts = frame->pts;
	}

	if (!in->payError)
	{
		pm.type = PipeMaterial::PMT_FRAME;
-		pm.buffer = (uint8_t*)(&(in->lastFrame));
-		pm.buffSize = sizeof(in->lastFrame);
-		pm.former = this;
+		pm.buffer = &(in->lastFrame);
+		pm.buffSize = 0;
	}
+	pm.former = this;
	return !in->payError;

#include "PL_RTSPClient.h"
#include "MaterialBuffer.h"
#include "logger.h"
#include <pthread.h>

void rtsp_client_sdp_callback(void* arg, const char* val);

}

	pm.type = PipeMaterial::PMT_FRAME;
-	pm.buffer = (uint8_t*)(&(in->lastFrame));
-	pm.buffSize = sizeof(in->lastFrame);
+	pm.buffer = &(in->lastFrame);
+	pm.buffSize = 0;
	pm.former = this;

	return true;

	in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
	in->lastFrame.buffer = buffer;
	in->lastFrame.buffSize = buffSize;
	in->lastFrame.width = 0;
	in->lastFrame.height = 0;
	in->lastFrame.pts = presentationTime;

	int ret = pthread_mutex_unlock(in->frame_mutex);

#include "PL_RTSPServer.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>

		*FrameBuffer = pBuffer;
		*FrameSize = newBufferSize;

-		printf("send frame size=%u\n", in.buffSize);
+		LOG(DEBUG) << "send frame size=" << in.buffSize;
	}

	virtual char ReleaseFrame()

		int ret = pthread_mutex_unlock(in.frame_mutex);
		if(ret != 0)
		{
-			printf("pthread_mutex_unlock frame_mutex: %s/n", strerror(ret));
+			LOG(WARN) << "pthread_mutex_unlock frame_mutex: " << strerror(ret);
			return 0;
		}
	}

		int ret = pthread_mutex_lock(in.frame_mutex);
		if(ret != 0)
		{
-			printf("pthread_mutex_lock frame_mutex: %s/n", strerror(ret));
+			LOG(WARN) << "pthread_mutex_lock frame_mutex: " << strerror(ret);
			return;
		}
	}

	int ret = pthread_create(&(in->live_daemon_thid), NULL, live_daemon_thd, in);
	if(ret != 0)
	{
-		printf("pthread_create: %s/n", strerror(ret));
+		LOG(ERROR) << "pthread_create: " << strerror(ret);
		return false;
	}

{
	RTSPServer_Internal* in = (RTSPServer_Internal*)internal;

-	if (pm.buffer == nullptr || pm.buffSize <= 0)
+	if (pm.buffer == nullptr)
		return false;

	if (pm.type != PipeMaterial::PMT_FRAME)
	{
-		printf("PL_RTSPServer::pay only support PMT_FRAME\n");
+		LOG(ERROR) << "PL_RTSPServer::pay only support PMT_FRAME";
		return false;
	}

	if (in->buffSize > 0)
-		printf("PL_RTSPServer::pay may lost data size=%u\n", in->buffSize);
+		LOG(WARN) << "PL_RTSPServer::pay may lost data size=" << in->buffSize;

	MB_Frame* frame = (MB_Frame*)pm.buffer;
	if (frame->buffer == nullptr)
		return false;
	memcpy(in->buffer, frame->buffer, frame->buffSize);
	in->buffSize = frame->buffSize;

{
	RTSPServer_Internal* in = (RTSPServer_Internal*)internal;

	pm.type = PipeMaterial::PMT_NONE;
	pm.buffer = nullptr;
	pm.buffSize = 0;
	pm.former = this;

#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <opencv2/opencv.hpp>
#include <cv_face.h>

		in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
	else
	{
-		printf("alignment point size must be 21 or 106\n");
+		LOG(ERROR) << "alignment point size must be 21 or 106";
		return false;
	}

		in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
	if (cv_result != CV_OK)
	{
-		printf("cv_face_create_tracker failed, error code %d\n", cv_result);
+		LOG(ERROR) << "cv_face_create_tracker failed, error code" << cv_result;
		return false;
	}

	cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
	if (cv_result != CV_OK)
	{
-		printf("cv_face_track_set_detect_face_cnt_limit failed, error : %d\n", cv_result);
+		LOG(ERROR) << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
		return false;
	}
	else
-		printf("detect face count limit : %d\n", val);
+		LOG(ERROR) << "detect face count limit : " << val;

	return true;
}

		CV_FACE_UP, &p_face, &face_count);
	if (cv_result != CV_OK)
	{
-		printf("cv_face_track failed, error : %d\n", cv_result);
+		LOG(ERROR) << "cv_face_track failed, error : " << cv_result;
		cv_face_release_tracker_result(p_face, face_count);
		return -1;
	}

	// draw the video
-	cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);
+	cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);//#todo
	cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
	for (int i = 0; i < face_count; i++)
	{
-		printf("face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
+		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
			p_face[i].rect.left, p_face[i].rect.top,
			p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);

-		printf("face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
+		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
			p_face[i].yaw,
			p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);

	if (pm.type != PipeMaterial::PMT_FRAME)
	{
-		printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+		LOG(ERROR) << "PL_H264Encoder::pay only support PMT_FRAME";
		return false;
	}

	MB_Frame* frame = (MB_Frame*)pm.buffer;
	if (frame->type != MB_Frame::MBFT_YUV420)
	{
-		printf("PL_H264Encoder::pay only support MBFT_YUV420\n");
+		LOG(ERROR) << "PL_H264Encoder::pay only support MBFT_YUV420";
		return false;
	}

-	int face_count = doFaceDetect(in, frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);
+	int face_count = doFaceDetect(in, (uint8_t*)frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);//#todo
	if (face_count < 0)
	{
		in->payError = true;

	in->lastFrame.type = MB_Frame::MBFT_YUV420;
	in->lastFrame.buffer = frame->buffer;//#todo should copy
	in->lastFrame.buffSize = frame->buffSize;
	in->lastFrame.width = frame->width;
	in->lastFrame.height = frame->height;
	in->lastFrame.pts = frame->pts;

	return true;

	if (!in->payError)
	{
		pm.type = PipeMaterial::PMT_FRAME;
-		pm.buffer = (uint8_t*)(&(in->lastFrame));
-		pm.buffSize = sizeof(in->lastFrame);
+		pm.buffer = &(in->lastFrame);
+		pm.buffSize = 0;
-		pm.former = this;
	}
+	pm.former = this;

#include "PipeLine.h"
#include "logger.h"

PipeMaterial::PipeMaterial() :
	type(PMT__FIRST), buffer(nullptr), buffSize(0),

	return elem;
}

class PipeDebugger
{
private:
	PipeLine* pipeLine;

public:
	PipeLineElem* retElem;
	PipeMaterial* pm;

	PipeDebugger(PipeLine* _pipeLine) :
		pipeLine(_pipeLine), retElem(nullptr), pm(nullptr)
	{
		LOG(DEBUG) << "pipe line begin";
	}

	~PipeDebugger()
	{
		bool retOK = (*(pipeLine->elems).rbegin() == retElem);
		if (retOK)
			LOG(DEBUG) << "pipe line end, ret OK";
		else
			LOG(WARN) << "pipe line end, ret ERROR";
	}
};

PipeLineElem* PipeLine::pipe(PipeMaterial* pm /*= nullptr*/)
{
	PipeDebugger debugger(this);

	PipeLineElem* elem_begin = *elems.begin();
	PipeLineElem* elem_last = *elems.rbegin();

	uint8_t pmPlacement[sizeof(PipeMaterial)];
	if (pm == nullptr)
		pm = new (pmPlacement) PipeMaterial;

	debugger.pm = pm;

	if (elems.size() == 1)
	{
		elem_begin->gain(*pm);
		pm->exec_deleter();
-		return elem_begin;
+		return debugger.retElem = elem_begin;
	}
	else if (elems.size() == 2)
	{

		pm->exec_deleter();
		}
		else
-			return elem_begin;
-		return elem_last;
+			return debugger.retElem = elem_begin;
+		return debugger.retElem = elem_last;
	}
	else
	{
		if (!elem_begin->gain(*pm))
-			return elem_begin;
+			return debugger.retElem = elem_begin;

		bool lastRet = true;
		elem_vec_t::iterator iter = elems.begin();

			lastRet = elem_begin->gain(*pm);
		}
		else
-			return elem_begin;//#todo this may memory leakage in pm
+			return debugger.retElem = elem_begin;

		++iter;
		elem_begin = *iter;

		elem_last->pay(*pm);
		pm->exec_deleter();
	}
-	return elem_last;
+	return debugger.retElem = elem_last;
	}

	return nullptr;

enum PipeMaterialBufferType
{
	PMT__FIRST,
-	PMT_BYTES, // uint8_t[]
-	PMT_FRAME, // MB_Frame*
-	PMT_PM_LIST,
+	PMT_NONE, // buffer = nullptr, buffSize = 0
+	PMT_BYTES, // buffer = uint8_t[N], buffSize = N
+	PMT_FRAME, // buffer = MB_Frame*, buffSize = 0
+	PMT_PM_LIST, // buffer = PipeMaterial*[N], buffSize = N
+	PMT_FRAME_LIST, // buffer = MB_Frame*[N], buffSize = N
	PMT__LAST
};

-	PipeMaterialBufferType type; // #todo MaterialBuffer merge into there
-	uint8_t* buffer;//#todo void*
+	PipeMaterialBufferType type;
+	void* buffer;
	size_t buffSize;
	PipeLineElem* former;
	pm_deleter_func deleter;

// gain --> [pay --> pm.deleter --> gain -->] [pay --> pm.deleter --> gain -->] ... --> pay --> pm.deleter
class PipeLine
{
	friend class PipeDebugger;

public:
	PipeLine();

	PipeLineElem* push_elem(const std::string& type);

	// do pipe sync. returns the element who returns false, or the last one.
	// if false return, the element should deal with pm, clean up.
	PipeLineElem* pipe(PipeMaterial* pm = nullptr);

	// do pipe async
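
For orientation (not part of the changeset): a hedged sketch of how a caller drives this interface, following the gain/pay comment above and mirroring the usage in main.cpp further down; obtaining the element via push_elem and the exact config variable are assumptions here.

	// illustrative wiring only
	PipeLine pipeLine;
	pipeLine.register_elem_creator("PL_RTSPClient", create_PL_RTSPClient); // factory for a type name
	PipeLineElem* rtspClient = pipeLine.push_elem("PL_RTSPClient");        // appended in pipe order
	rtspClient->init(&rtspConfig);                                         // element-specific config struct

	while (true)
	{
		// pipe(): the first element gain()s a PipeMaterial, each later element pay()s it,
		// the material's deleter runs, then that element gain()s its own output for the next;
		// the element that returned false (or the last element) is returned.
		pipeLine.pipe();
	}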
New file: logger.h

/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** logger.h
**
** -------------------------------------------------------------------------*/

#ifndef LOGGER_H
#define LOGGER_H

#include <unistd.h>

#include "log4cpp/Category.hh"
#include "log4cpp/FileAppender.hh"
#include "log4cpp/PatternLayout.hh"

#define LOG(__level) log4cpp::Category::getRoot() << log4cpp::Priority::__level << __FILE__ << ":" << __LINE__ << "\t"
#define LOGP(__level, __format, arg...) log4cpp::Category::getRoot().log(log4cpp::Priority::__level, "%s:%d\t" __format, __FILE__, __LINE__, ##arg);

inline void initLogger(int verbose)
{
	// initialize log4cpp
	log4cpp::Category &log = log4cpp::Category::getRoot();
	log4cpp::Appender *app = new log4cpp::FileAppender("root", fileno(stdout));
	if (app)
	{
		log4cpp::PatternLayout *plt = new log4cpp::PatternLayout();
		if (plt)
		{
			plt->setConversionPattern("%d [%-6p] - %m%n");
			app->setLayout(plt);
		}
		log.addAppender(app);
	}
	switch (verbose)
	{
		case 2: log.setPriority(log4cpp::Priority::DEBUG); break;
		case 1: log.setPriority(log4cpp::Priority::INFO); break;
		default: log.setPriority(log4cpp::Priority::NOTICE); break;
	}
	LOG(INFO) << "level:" << log4cpp::Priority::getPriorityName(log.getPriority());
}

#endif
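
Usage sketch for the macros above (not part of the changeset); the messages are illustrative, but the forms match how the pipeline elements use LOG and LOGP after this change.

	initLogger(2);                                     // verbose = 2 -> DEBUG priority
	LOG(INFO) << "rtsp_face starting";                 // stream style, prefixed with __FILE__:__LINE__
	LOG(ERROR) << "rtspClient.init error";             // same form that replaces cout/printf here
	LOGP(DEBUG, "face: %d rect=[%d, %d]", 0, 10, 20);  // printf-style variant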

#include "PL_SensetimeFaceDetect.h"

#include <iostream>
using namespace std;
#include "logger.h"

int main(int argc, char** argv)
{
	initLogger(2);

	PipeLine pipeLine;

	pipeLine.register_elem_creator("PL_RTSPClient", create_PL_RTSPClient);

		bool ret = rtspClient->init(&rtspConfig);
		if (!ret)
		{
-			cout << "rtspClient.init error" << endl;
+			LOG(ERROR) << "rtspClient.init error";
			exit(EXIT_FAILURE);
		}
	}

//	bool ret = queue1->init(&config);
//	if (!ret)
//	{
-//		cout << "queue1.init error" << endl;
+//		LOG(ERROR) << "queue1.init error";
//		exit(EXIT_FAILURE);
//	}
//}

		bool ret = rtspServer->init(&config);
		if (!ret)
		{
-			cout << "rtspServer.init error" << endl;
+			LOG(ERROR) << "rtspServer.init error";
			exit(EXIT_FAILURE);
		}
	}

	while(true)
	{
-		//cout << "begin pipe" << endl;
+		//LOG(ERROR) << "begin pipe";
		pipeLine.pipe();
-		//cout << "end pipe" << endl;
+		//LOG(ERROR) << "end pipe";
	}
}

OPENCV_INC=
OPENCV_LIB="-lopencv_core"

-CPPFLAGS+="-pthread $LIVEMEDIA_INC $FFMPEG_INC $LIBBASE64_INC $LIBYUV_INC $SENSETIMEFACESDK_INC"
-LDFLAGS+="-pthread $LIVEMEDIA_LIB $FFMPEG_LIB $LIBBASE64_LIB $LIBYUV_LIB $LIBX264_LIB $SENSETIMEFACESDK_LIB $OPENCV_LIB"
+LIBLOG4CPP_BASE=/opt/log4cpp/inst
+LIBLOG4CPP_INC="-I$LIBLOG4CPP_BASE/include"
+LIBLOG4CPP_LIB="-L$LIBLOG4CPP_BASE/lib -llog4cpp"
+
+CPPFLAGS+="-pthread $LIVEMEDIA_INC $FFMPEG_INC $LIBBASE64_INC $LIBYUV_INC $SENSETIMEFACESDK_INC $LIBLOG4CPP_INC"
+LDFLAGS+="-pthread $LIVEMEDIA_LIB $FFMPEG_LIB $LIBBASE64_LIB $LIBYUV_LIB $LIBX264_LIB $SENSETIMEFACESDK_LIB $OPENCV_LIB $LIBLOG4CPP_LIB"

CFLAGS+="-D__STDC_CONSTANT_MACROS"

	$FFMPEGRTSPSERVER_OBJ PL_RTSPServer.o \
	$LDFLAGS -o rtsp_face

-#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIBX264_BASE/lib:$FFMPEG_BASE/lib:$SENSETIMEFACESDK_BASE/libs/linux-x86_64
+#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIBX264_BASE/lib:$FFMPEG_BASE/lib:$SENSETIMEFACESDK_BASE/libs/linux-x86_64:$LIBLOG4CPP_BASE/lib
#./rtsp_face rtsp://admin:admin12345@192.168.1.64:554/h264/ch1/main/av_stream