face detect ok
git-svn-id: http://192.168.1.226/svn/proxy@49 454eff88-639b-444f-9e54-f578c98de674
    in->lastFrame.pts = frame->pts;

    //#test dump of the raw decoded frame, disabled by default
    //static size_t f=0;
    //char fname[50];
    //sprintf(fname, "%zu.yuv420", ++f);
    //FILE * pFile = fopen (fname,"wb");
    //fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
    //fclose(pFile);
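    // A dump written by the test code above can be inspected with ffplay;
    // a sketch, assuming the 1920x1080 frame size used elsewhere in this commit:
    //   ffplay -f rawvideo -pixel_format yuv420p -video_size 1920x1080 1.yuv420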

    return true;
}

bool decodeH264(H264Decoder_Internal* in, uint8_t* buffer, size_t buffSize, timeval pts)
{
    AVPacket packet = {0};
    int gotPicture = buffSize; // frameFinished

    if (av_packet_from_data(&packet, buffer, buffSize) != 0)
    {
        printf("av_packet_from_data error\n");
        return false;
    }

    packet.pts = packet.dts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000;

    // decode
    avcodec_decode_video2(in->pAVCodecContext, in->pAVFrame, &gotPicture, &packet);
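    // Note on the pts line above: dividing total microseconds by 90000 yields
    // units of 90 ms. If the intent is the usual 90 kHz H.264/RTP clock, the
    // conversion would multiply instead; a sketch, an assumption rather than
    // part of this commit:
    //   int64_t us = (int64_t)pts.tv_sec * 1000000 + pts.tv_usec;
    //   packet.pts = packet.dts = us * 9 / 100; // == us * 90000 / 1000000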

    bool ret = false;
    if (pm.type == PipeMaterial::PMT_BYTES)
    {
        timeval pts = {0};
        ret = decodeH264(in, pm.buffer, pm.buffSize, pts);
    }
    else if (pm.type == PipeMaterial::PMT_FRAME)
    {
        MB_Frame* frame = (MB_Frame*)pm.buffer;
        ret = decodeH264(in, frame->buffer, frame->buffSize, frame->pts);
        if (ret)
        {
            in->lastFrame.type = MB_Frame::MBFT_PTR_AVFRAME;
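            // MBFT_PTR_AVFRAME publishes a pointer to the decoder-owned frame
            // (in->pAVFrame) instead of copying pixels; downstream elements
            // must consume it before the next decode call overwrites it.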

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>

#include <libyuv.h>
}

    in->pAVCodecContext->height = 600;//#todo from config
    in->pAVCodecContext->time_base.num=1;
    in->pAVCodecContext->time_base.den=25;
    in->pAVCodecContext->gop_size = 2;
    in->pAVCodecContext->max_b_frames = 0;
    //in->pAVCodecContext->profile = FF_PROFILE_H264_MAIN;
    in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    av_opt_set(in->pAVCodecContext->priv_data, "preset", "superfast", 0);
    //av_opt_set(in->pAVCodecContext->priv_data, "tune", "zerolatency", 0);

    if(avcodec_open2(in->pAVCodecContext, avCodec, NULL) >= 0)

    return !(in->payError);
}
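// gop_size = 2 (down from 25) forces a keyframe every other frame: the bitrate
// goes up, but late-joining RTSP clients can lock onto the stream almost
// immediately, and with max_b_frames = 0 the encoder adds no reordering delay.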

bool encodeH264(H264Encoder_Internal* in, uint8_t* buffer, timeval pts)
{
    AVFrame avFrame = {}; // zero-init so unset fields are not garbage
    avFrame.width = 1920;//#todo
    avFrame.height = 1080;
    avFrame.format = AV_PIX_FMT_YUV420P;
    avFrame.data[0] = buffer;
    avFrame.data[1] = buffer + 1920*1080;
    avFrame.data[2] = buffer + 1920*1080 + 1920*1080/4;
    // the encoder also needs per-plane strides
    avFrame.linesize[0] = 1920;
    avFrame.linesize[1] = 1920 / 2;
    avFrame.linesize[2] = 1920 / 2;

    return encodeH264(in, &avFrame, pts);
}
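// Plane layout assumed by the data[] offsets above for a 1920x1080 YUV420P buffer:
//   data[0] (Y): 1920*1080 bytes at offset 0
//   data[1] (U): 1920*1080/4 bytes at offset 1920*1080
//   data[2] (V): 1920*1080/4 bytes at offset 1920*1080 + 1920*1080/4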

bool PL_H264Encoder::pay(const PipeMaterial& pm)
{
    H264Encoder_Internal* in = (H264Encoder_Internal*)internal;

    if (pm.type != PipeMaterial::PMT_FRAME)
        return false;

    MB_Frame* frame = (MB_Frame*)pm.buffer;

    bool ret;
    if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
        ret = encodeH264(in, (AVFrame*)(frame->buffer), frame->pts);
    else if (frame->type == MB_Frame::MBFT_YUV420)
        ret = encodeH264(in, (uint8_t*)(frame->buffer), frame->pts);
    else
    {
        printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME / MBFT_YUV420\n");
        in->payError = true;
        return false;
    }

    in->payError = !ret;

    if (ret)

#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"

#include <opencv2/opencv.hpp>
#include <cv_face.h>

struct PL_SensetimeFaceDetect_Internal
{
    //uint8_t buffer[1920*1080*4];
    //size_t buffSize;
    //size_t buffSizeMax;
    MB_Frame lastFrame;
    SensetimeFaceDetectConfig config;

    bool payError;

    cv_handle_t handle_track;

    PL_SensetimeFaceDetect_Internal() :
        //buffSize(0), buffSizeMax(sizeof(buffer)),
        lastFrame(), config(), payError(true),
        handle_track(nullptr)
    {
    }

    void reset()
    {
        //buffSize = 0;
        payError = true;

        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
        SensetimeFaceDetectConfig _config;
        config = _config;

        handle_track = nullptr;
    }
};

bool PL_SensetimeFaceDetect::init(void* args)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    in->reset();

    SensetimeFaceDetectConfig* config = (SensetimeFaceDetectConfig*)args;
    in->config = *config;
    if (in->config.point_size == 21)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
    else if (in->config.point_size == 106)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
    else
    {
        printf("alignment point size must be 21 or 106\n");
        return false;
    }

    // init the tracker handle
    cv_result_t cv_result = cv_face_create_tracker(&(in->handle_track), nullptr,
                    in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
    if (cv_result != CV_OK)
    {
        printf("cv_face_create_tracker failed, error code %d\n", cv_result);
        return false;
    }

    int val = 0;
    cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track,
                    in->config.detect_face_cnt_limit, &val);
    if (cv_result != CV_OK)
    {
        printf("cv_face_track_set_detect_face_cnt_limit failed, error : %d\n", cv_result);
        return false;
    }
    else
        printf("detect face count limit : %d\n", val);

    return true;
}

{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;

    // destroy track handle
    cv_face_destroy_tracker(in->handle_track);
    in->handle_track = nullptr;
}

int doFaceDetect(PL_SensetimeFaceDetect_Internal* in,
                uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
{
    //resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);

    int face_count = 0;
    cv_result_t cv_result = CV_OK;
    cv_face_t* p_face = nullptr;

    // realtime track
    cv_result = cv_face_track(in->handle_track, buffer, cvPixFmt,
                            width, height, stride,
                            CV_FACE_UP, &p_face, &face_count);
    if (cv_result != CV_OK)
    {
        printf("cv_face_track failed, error : %d\n", cv_result);
        cv_face_release_tracker_result(p_face, face_count);
        return -1;
    }

    // draw the results into the video frame
    cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);
    cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
    for (int i = 0; i < face_count; i++)
    {
        printf("face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
                p_face[i].rect.left, p_face[i].rect.top,
                p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
        printf("face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
                p_face[i].yaw,
                p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);

        cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
                p_face[i].ID * 93 % 256,
                p_face[i].ID * 143 % 256);

        //cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
        //cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);

        cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
                static_cast<float>(p_face[i].rect.top)),
                cv::Point2f(static_cast<float>(p_face[i].rect.right),
                static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);

        for (int j = 0; j < p_face[i].points_count; j++)
        {
            cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
                    p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
        }
    }

    //if (face_count > 0)
    //{
    //	static size_t f=0;
    //	char fname[50];
    //	sprintf(fname, "face-%zu.yuv420", ++f);
    //	FILE * pFile = fopen (fname,"wb");
    //	fwrite (yuvMat.data , sizeof(char), 1920*1080*1.5, pFile);
    //	printf("write face file %s\n", fname);
    //	fclose(pFile);
    //}

    // release the memory of face results
    cv_face_release_tracker_result(p_face, face_count);

    return face_count;
}
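// Note: yMat wraps only the luma (Y) plane of the YUV420 buffer, so the
// rectangles and landmark dots above are drawn into luminance only (they show
// up gray in the re-encoded stream); the chroma planes are left untouched.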

bool PL_SensetimeFaceDetect::pay(const PipeMaterial& pm)

        return false;
    }

    int face_count = doFaceDetect(in, frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);
    if (face_count < 0)
    {
        in->payError = true;
        return false;
    }
    else
        in->payError = false;

    //in->buffer ready

    //static size_t f=0;
    //char fname[50];
    //sprintf(fname, "%zu.bgra", ++f);
    //FILE * pFile = fopen (fname,"wb");
    //fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
    //fclose(pFile);

    in->lastFrame.type = MB_Frame::MBFT_YUV420;
    in->lastFrame.buffer = frame->buffer;//#todo should copy
    in->lastFrame.buffSize = frame->buffSize;
    in->lastFrame.pts = frame->pts;

    return true;
}
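// As the #todo above notes, lastFrame.buffer only aliases the upstream frame
// buffer, and gain() hands that pointer downstream. This stays safe only while
// the pipeline runs its elements synchronously; an asynchronous stage such as
// the commented-out PL_Queue would require a deep copy here.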

{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;

    if (!in->payError)
    {
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = (uint8_t*)(&(in->lastFrame));
        pm.buffSize = sizeof(in->lastFrame);
        pm.former = this;
    }
    return !in->payError;
}

#include "PipeLine.h"

struct SensetimeFaceDetectConfig
{
    int point_size; // 21 / 106
    int point_size_config; // CV_DETECT_ENABLE_ALIGN_21 / CV_DETECT_ENABLE_ALIGN_106
    int detect_face_cnt_limit; // -1 by default

    SensetimeFaceDetectConfig() :
        point_size(21), point_size_config(-1), detect_face_cnt_limit(-1)
    { }
};

class PL_SensetimeFaceDetect : public PipeLineElem
{
public:

#include "PL_AVFrameBGRA.h"
#include "PL_Queue.h"

#include "PL_SensetimeFaceDetect.h"

#include <iostream>
using namespace std;

    pipeLine.register_elem_creator("PL_AVFrameYUV420", create_PL_AVFrameYUV420);
    pipeLine.register_elem_creator("PL_H264Encoder", create_PL_H264Encoder);
    pipeLine.register_elem_creator("PL_Queue", create_PL_Queue);
    pipeLine.register_elem_creator("PL_SensetimeFaceDetect", create_PL_SensetimeFaceDetect);
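    // The blocks below each push one element onto the pipeline; the resulting
    // chain is, presumably:
    //   PL_RTSPClient -> PL_H264Decoder -> PL_AVFrameYUV420
    //     -> PL_SensetimeFaceDetect -> PL_H264Encoder -> PL_RTSPServer
    // i.e. decode the camera stream, draw detections into the YUV frames,
    // re-encode, and serve the annotated stream over RTSP.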

    {
        PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");

        avFrameYUV420->init(nullptr);
    }

    {
        SensetimeFaceDetectConfig config;
        PL_SensetimeFaceDetect* stFaceDetect = (PL_SensetimeFaceDetect*)pipeLine.push_elem("PL_SensetimeFaceDetect");
        stFaceDetect->init(&config);
    }

    //{//#todo queue should support deep copy
    //	PL_Queue_Config config;
    //	PL_Queue* queue1 = (PL_Queue*)pipeLine.push_elem("PL_Queue");
    //	}
    //}

    {
        PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
        h264Encoder->init(nullptr);
    }

    {
        RTSPServerConfig config;
        PL_RTSPServer* rtspServer = (PL_RTSPServer*)pipeLine.push_elem("PL_RTSPServer");
        bool ret = rtspServer->init(&config);
        if (!ret)
        {
            cout << "rtspServer.init error" << endl;
            exit(EXIT_FAILURE);
        }
    }

    while(true)
    {

FFMPEGRTSPSERVER_BASE=./FFmpegRTSPServer
FFMPEGRTSPSERVER_OBJ="FFmpegH264Source.o LiveRTSPServer.o LiveServerMediaSubsession.o"

SENSETIMEFACESDK_BASE=/opt/SensetimeFaceSDK
SENSETIMEFACESDK_INC="-I$SENSETIMEFACESDK_BASE/include"
SENSETIMEFACESDK_LIB="-L$SENSETIMEFACESDK_BASE/libs/linux-x86_64 -lcvface_api"

OPENCV_BASE=
OPENCV_INC=
OPENCV_LIB="-lopencv_core"

CPPFLAGS+="-pthread $LIVEMEDIA_INC $FFMPEG_INC $LIBBASE64_INC $LIBYUV_INC $SENSETIMEFACESDK_INC"
LDFLAGS+="-pthread $LIVEMEDIA_LIB $FFMPEG_LIB $LIBBASE64_LIB $LIBYUV_LIB $LIBX264_LIB $SENSETIMEFACESDK_LIB $OPENCV_LIB"

CFLAGS+="-D__STDC_CONSTANT_MACROS"

rm *.o

g++ -g -c -std=c++11 main.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PipeLine.cpp $CFLAGS $CPPFLAGS

g++ -g -c -std=c++11 PL_RTSPClient.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_RTSPServer.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_H264Decoder.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_H264Encoder.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_AVFrameYUV420.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_AVFrameBGRA.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_Queue.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_SensetimeFaceDetect.cpp $CFLAGS $CPPFLAGS

g++ -g -c -std=c++11 $FFMPEGRTSPSERVER_BASE/FFmpegH264Source.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 $FFMPEGRTSPSERVER_BASE/LiveRTSPServer.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 $FFMPEGRTSPSERVER_BASE/LiveServerMediaSubsession.cpp $CFLAGS $CPPFLAGS

g++ -g -std=c++11 \
    main.o PipeLine.o \
    PL_RTSPClient.o PL_H264Decoder.o PL_H264Encoder.o PL_AVFrameYUV420.o PL_AVFrameBGRA.o PL_Queue.o \
    PL_SensetimeFaceDetect.o \
    $FFMPEGRTSPSERVER_OBJ PL_RTSPServer.o \
    $LDFLAGS -o rtsp_face

#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIBX264_BASE/lib:$FFMPEG_BASE/lib:$SENSETIMEFACESDK_BASE/libs/linux-x86_64
#./rtsp_face rtsp://admin:admin12345@192.168.1.64:554/h264/ch1/main/av_stream
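# To eyeball the annotated output, point any RTSP player at the local server;
# a guess (the actual port/path depend on the RTSPServerConfig defaults):
#   ffplay rtsp://127.0.0.1:8554/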