houxiao
2016-12-30 a88698ced8bcd58f0f1918b10380bc66b0bfcbbc
add pl_scale pl_fork

git-svn-id: http://192.168.1.226/svn/proxy@67 454eff88-639b-444f-9e54-f578c98de674
8个文件已添加
7个文件已修改
1031 ■■■■ 已修改文件
RtspFace/PL_Fork.cpp 49 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_Fork.h 24 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_H264Encoder.cpp 21 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_Paint.h 24 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_Scale.cpp 240 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_Scale.h 41 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_SensetimeFaceDetect.cpp 230 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_SensetimeFaceDetect.h 31 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_SensetimeFaceFeatureEmit.h 6 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_SensetimeFaceTrack.cpp 241 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_SensetimeFaceTrack.h 39 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PipeLine.cpp 12 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/logger.h 17 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/main.cpp 48 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/make.sh 8 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
RtspFace/PL_Fork.cpp
New file
@@ -0,0 +1,49 @@
#ifndef _PL_FORK_H_
#define _PL_FORK_H_
#include "PipeLine.h"
struct PL_Fork_Config
{
};
class PL_Fork : public PipeLineElem
{
public:
    enum ForkBy
    {
        FB_NONE,
        FB_TURNS,
        FB_RANDOM,
        FB_PM_TYPE,
        FB_MB_TYPE,
        FB_BREAK_LIST
    };
    enum ForkSync
    {
        FS_NONE,
        FS_SEQUENCE,
        FS_PARALLEL,
        FS_MAIN_PL_FIRST,
        FS_MAIN_PL_LAST,
    };
public:
    PL_Fork();
    virtual ~PL_Fork();
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal;
};
PipeLineElem* create_PL_Paint();
#endif
RtspFace/PL_Fork.h
New file
@@ -0,0 +1,24 @@
#ifndef _PL_FORK_H_
#define _PL_FORK_H_
#include "PipeLine.h"
class PL_Fork : public PipeLineElem
{
public:
    PL_Fork();
    virtual ~PL_Fork();
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal;
};
PipeLineElem* create_PL_Paint();
#endif
RtspFace/PL_H264Encoder.cpp
@@ -9,9 +9,9 @@
    #include <libavutil/imgutils.h>
    #include <libavutil/opt.h>
    #include <libavformat/avformat.h>
    #include <libyuv.h>
}
#include <libyuv.h>
PL_H264Encoder_Config::PL_H264Encoder_Config() : 
    inBufferSize(2*1024*1024), // 2MByte
@@ -47,6 +47,8 @@
    
    ~H264Encoder_Internal()
    {
        delete[] buffer;
        buffer = nullptr;
    }
    
    void reset()
@@ -67,8 +69,10 @@
        pAVFormatContext = nullptr;
        
        if (buffer != nullptr)
        {
            delete[] buffer;
        buffer = new uint8_t[config.inBufferSize];
            buffer = nullptr;
        }
    }
};
@@ -97,6 +101,8 @@
        PL_H264Encoder_Config* config = (PL_H264Encoder_Config*)args;
        in->config = *config;
    }
    in->buffer = new uint8_t[in->config.inBufferSize];
    
    return true;
}
@@ -235,7 +241,7 @@
    {
        in->frameCount++;
        LOGP(DEBUG, "Succeed to encode (1) frame=%d, size=%d", in->frameCount, pAVPacket.size);
        memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
        memcpy(in->buffer, pAVPacket.data, pAVPacket.size);//#todo check inBufferSize
        in->buffSize = pAVPacket.size;
        av_free_packet(&pAVPacket);
    }
@@ -361,8 +367,11 @@
        in->lastFrame.buffSize = in->buffSize;
        in->lastFrame.width = frame->width;
        in->lastFrame.height = frame->height;
        in->lastFrame.pts = frame->pts;
        //#todo resetPts
        if (in->config.resetPTS)
            gettimeofday(&(in->lastFrame.pts),NULL);
        else
            in->lastFrame.pts = frame->pts;
    }
    
    return ret;
RtspFace/PL_Paint.h
New file
@@ -0,0 +1,24 @@
#ifndef _PL_PAINT_H_
#define _PL_PAINT_H_
#include "PipeLine.h"
// Pipeline element for painting/overlaying onto frames.
// NOTE(review): implementation not visible here; purpose inferred from the
// name -- confirm against PL_Paint.cpp.
class PL_Paint : public PipeLineElem
{
public:
    PL_Paint();
    virtual ~PL_Paint();
    // PipeLineElem interface
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal; // opaque internal state
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_Paint();
#endif
RtspFace/PL_Scale.cpp
New file
@@ -0,0 +1,240 @@
#include "PL_Scale.h"
#include "MaterialBuffer.h"
#include "logger.h"
#include <libyuv.h>
// Internal state for PL_Scale: owns the output buffer that scaled frames
// are written into, plus bookkeeping for the last material processed.
struct PL_Scale_Internal
{
    uint8_t* buffer;     // scaled output pixels, owned; freed in dtor/reset()
    size_t buffSize;     // bytes of valid data currently in buffer
    size_t buffSizeMax;  // allocated capacity of buffer
    bool payError;       // true when the last pay() failed (gain() reports it)
    PipeMaterial::PipeMaterialBufferType lastPmType; // type of last material paid in
    MB_Frame lastFrame;  // copy of the last input frame's metadata
    PL_Scale_Config config;
    PL_Scale_Internal() :
        buffer(nullptr), buffSize(0), buffSizeMax(0), payError(true),
        lastPmType(PipeMaterial::PMT_NONE), lastFrame(), config()
    {
    }
    ~PL_Scale_Internal()
    {
        delete[] buffer;
        buffer = nullptr;
    }
    // Returns all fields to their default-constructed values and releases
    // the output buffer (image_scale reallocates it on demand).
    void reset()
    {
        buffSize = 0;
        payError = true;
        lastPmType = PipeMaterial::PMT_NONE;
        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
        PL_Scale_Config _config;
        config = _config;
        if (buffer != nullptr)
        {
            delete[] buffer;
            buffer = nullptr;
            buffSizeMax = 0;
        }
    }
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_Scale()
{
    PipeLineElem* elem = new PL_Scale;
    return elem;
}
// Allocates the element's opaque internal state.
PL_Scale::PL_Scale() : internal(new PL_Scale_Internal)
{
}
// Frees the internal state (which in turn frees the scale buffer).
PL_Scale::~PL_Scale()
{
    delete (PL_Scale_Internal*)internal;
    internal= nullptr;
}
bool PL_Scale::init(void* args)
{
    PL_Scale_Internal* in = (PL_Scale_Internal*)internal;
    in->reset();
    if (args != nullptr)
    {
        PL_Scale_Config* config = (PL_Scale_Config*)args;
        in->config = *config;
    }
    if (in->config.toWidth <= 0 || in->config.toHeight <= 0)
    {
        LOG_ERROR << "Config toWidth and toHeight should > 0";
        return false;
    }
    return true;
}
void PL_Scale::finit()
{
    PL_Scale_Internal* in = (PL_Scale_Internal*)internal;
}
bool image_scale(PL_Scale_Internal* in,
    uint8_t* srcBuffer, int srcBuffSize, MB_Frame::MBFType srcType, uint16_t srcWidth, uint16_t srcHeight)
{
#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
    const int dst_width = in->config.toWidth;
    const int dst_height = in->config.toHeight;
    size_t dstSizeMax = 0;
    if (srcType == MB_Frame::MBFT_YUV420)
        dstSizeMax = in->config.toWidth * in->config.toHeight * 1.5;
    else if (srcType == MB_Frame::MBFT_BGRA)
        dstSizeMax = in->config.toWidth * in->config.toHeight * 4;
    else
    {
        LOG_ERROR << "srcType only support MBFT_YUV420 and MBFT_BGRA";
        return false;
    }
    if (in->buffer == nullptr || in->buffSizeMax < dstSizeMax)
    {
        if (in->buffer != nullptr)
            delete[] in->buffer;
        in->buffer = new uint8_t[dstSizeMax];
        in->buffSizeMax = dstSizeMax;
        LOG_INFO << "image_scale alloc buffer size=" << dstSizeMax;
    }
    if (srcType == MB_Frame::MBFT_YUV420)
    {
        uint8_t* src_y = srcBuffer;
        uint8_t* src_u = src_y + srcWidth * srcHeight;
        uint8_t* src_v = src_u + srcWidth * srcHeight / 4;
        uint8_t* dst_y = in->buffer;
        uint8_t* dst_u = dst_y + dst_width * dst_height;
        uint8_t* dst_v = dst_u + dst_width * dst_height / 4;
        libyuv::I420Scale(
            src_y, srcWidth,
            src_u, SUBSAMPLE(srcWidth, 2),
            src_v, SUBSAMPLE(srcWidth, 2),
            srcWidth, srcHeight,
            dst_y, dst_width,
            dst_u, SUBSAMPLE(dst_width, 2),
            dst_v, SUBSAMPLE(dst_width, 2),
            dst_width, dst_height,
            (libyuv::FilterMode)(in->config.filterMode));
        in->buffSize = dstSizeMax;
    }
    else if (srcType == MB_Frame::MBFT_BGRA)
    {
        //#todo
        LOG_ERROR << "srcType only support MBFT_YUV420 and MBFT_BGRA";
        return false;
    }
}
bool PL_Scale::pay(const PipeMaterial& pm)
{
    PL_Scale_Internal* in = (PL_Scale_Internal*)internal;
    in->payError = true;
    if (pm.buffer == nullptr)
        return false;
    bool ret = false;
    in->lastPmType = pm.type;
    switch(pm.type)
    {
    case PipeMaterial::PMT_BYTES:
    {
        if (in->config.defaultBytesType <= 0 ||
            in->config.defaultBytesWidth <= 0 || in->config.defaultBytesHeight <= 0)
        {
            LOG_ERROR << "defaultBytesType/defaultBytesWidth/defaultBytesHeight not set";
            return false;
        }
        ret = image_scale(in, (uint8_t*)pm.buffer, pm.buffSize, (MB_Frame::MBFType)(in->config.defaultBytesType),
            in->config.defaultBytesWidth, in->config.defaultBytesHeight);
    }
    break;
    case PipeMaterial::PMT_FRAME:
    {
        MB_Frame* frame = (MB_Frame*)pm.buffer;
        switch(frame->type)
        {
        case MB_Frame::MBFT_YUV420:
        case MB_Frame::MBFT_BGRA:
            in->lastFrame = *frame;
            ret = image_scale(in, (uint8_t*)frame->buffer, frame->buffSize, frame->type,
                frame->width, frame->height);
            break;
        default:
            LOG_ERROR << "Only support MBFT_YUV420 / MBFT_BGRA";
            return false;
        }
    }
    break;
    default:
        LOG_ERROR << "Only support PMT_BYTES / PMT_FRAME";
        return false;
    }
    in->payError = !ret;
    return ret;
}
bool PL_Scale::gain(PipeMaterial& pm)
{
    PL_Scale_Internal* in = (PL_Scale_Internal*)internal;
    PipeMaterial newPm;
    newPm.type = PipeMaterial::PMT_NONE;
    newPm.former = this;
    switch(in->lastPmType)
    {
    case PipeMaterial::PMT_BYTES:
    {
        newPm.type = PipeMaterial::PMT_BYTES;
        newPm.buffer = in->buffer;
        newPm.buffSize = in->buffSize;
    }
    break;
    case PipeMaterial::PMT_FRAME:
    {
        newPm.type = PipeMaterial::PMT_FRAME;
        newPm.buffer = &(in->lastFrame);
        newPm.buffSize = 0;
        in->lastFrame.buffer = in->buffer;
        in->lastFrame.buffSize = in->buffSize;
        in->lastFrame.width = in->config.toWidth;
        in->lastFrame.height = in->config.toHeight;
    }
    break;
    default:
        LOG_ERROR << "Only support PMT_BYTES / PMT_FRAME";
    }
    pm = newPm;
    return !in->payError;
}
RtspFace/PL_Scale.h
New file
@@ -0,0 +1,41 @@
#ifndef _PL_SCALE_H_
#define _PL_SCALE_H_
#include "PipeLine.h"
// Configuration for PL_Scale. toWidth/toHeight must be set (> 0) before
// init(); the defaultBytes* fields describe raw PMT_BYTES input.
struct PL_Scale_Config
{
    uint16_t toWidth;  // target width, must be > 0
    uint16_t toHeight; // target height, must be > 0
    int filterMode; // libyuv/scale.h/FilterMode
    // Used only pm.type==PMT_BYTES
    int defaultBytesType; // MBFT_YUV420 / MBFT_BGRA
    uint16_t defaultBytesWidth;
    uint16_t defaultBytesHeight;
    PL_Scale_Config() :
        toWidth(0), toHeight(0), filterMode(0),
        defaultBytesType(0), defaultBytesWidth(0), defaultBytesHeight(0)
    { }
};
// Pipeline element that rescales YUV420 (and, eventually, BGRA) materials
// to the size given in PL_Scale_Config, using libyuv.
class PL_Scale : public PipeLineElem
{
public:
    PL_Scale();
    virtual ~PL_Scale();
    // PipeLineElem interface; init expects a PL_Scale_Config* (may be null).
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal; // opaque pointer to PL_Scale_Internal
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_Scale();
#endif
RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1,231 +1 @@
#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"
#include "logger.h"
#include <opencv2/opencv.hpp>
#include <cv_face.h>
// Internal state for PL_SensetimeFaceDetect: SenseTime tracker handle plus
// metadata of the last frame processed.
struct PL_SensetimeFaceDetect_Internal
{
    //uint8_t buffer[1920*1080*4];
    //size_t buffSize;
    //size_t buffSizeMax;
    MB_Frame lastFrame; // metadata of last paid frame (buffer not copied)
    SensetimeFaceDetectConfig config;
    bool payError; // true when the last pay() failed
    cv_handle_t handle_track; // SenseTime tracker, created in init()
    PL_SensetimeFaceDetect_Internal() :
        //buffSize(0), buffSizeMax(sizeof(buffer)),
        lastFrame(), config(), payError(true),
        handle_track(nullptr)
    {
    }
    ~PL_SensetimeFaceDetect_Internal()
    {
    }
    // Returns fields to defaults.
    // NOTE(review): handle_track is nulled without cv_face_destroy_tracker;
    // calling init() twice without finit() would leak the handle -- confirm.
    void reset()
    {
        //buffSize = 0;
        payError = true;
        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
        SensetimeFaceDetectConfig _config;
        config = _config;
        handle_track = nullptr;
    }
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_SensetimeFaceDetect()
{
    PipeLineElem* elem = new PL_SensetimeFaceDetect;
    return elem;
}
// Allocates the element's opaque internal state.
PL_SensetimeFaceDetect::PL_SensetimeFaceDetect() : internal(new PL_SensetimeFaceDetect_Internal)
{
}
// Frees the internal state. Note: does not destroy the tracker handle;
// that is finit()'s job.
PL_SensetimeFaceDetect::~PL_SensetimeFaceDetect()
{
    delete (PL_SensetimeFaceDetect_Internal*)internal;
    internal= nullptr;
}
bool PL_SensetimeFaceDetect::init(void* args)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    in->reset();
    SensetimeFaceDetectConfig* config = (SensetimeFaceDetectConfig*)args;
    in->config = *config;
    if (in->config.point_size == 21)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
    else if (in->config.point_size == 106)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
    else
    {
        LOG_ERROR << "alignment point size must be 21 or 106";
        return false;
    }
    // init handle
    cv_result_t cv_result = cv_face_create_tracker(&(in->handle_track), nullptr,
                                in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
        return false;
    }
    int val = 0;
    cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
        return false;
    }
    else
        LOG_ERROR << "detect face count limit : " << val;
    return true;
}
// Releases the SenseTime tracker handle created in init().
void PL_SensetimeFaceDetect::finit()
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    // destroy track handle
    cv_face_destroy_tracker(in->handle_track);
    in->handle_track = nullptr;
}
int doFaceDetect(PL_SensetimeFaceDetect_Internal* in,
                uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
{
    //resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
    int face_count = 0;
    cv_result_t cv_result = CV_OK;
    cv_face_t* p_face = nullptr;
    // realtime track
    cv_result = cv_face_track(in->handle_track, buffer, cvPixFmt,
                            width, height, stride,
                            CV_FACE_UP, &p_face, &face_count);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track failed, error : " << cv_result;
        cv_face_release_tracker_result(p_face, face_count);
        return -1;
    }
    // draw the video
    cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);//#todo
    cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
    for (int i = 0; i < face_count; i++)
    {
        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
            p_face[i].rect.left, p_face[i].rect.top,
            p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
            p_face[i].yaw,
            p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);
        cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
            p_face[i].ID * 93 % 256,
            p_face[i].ID * 143 % 256);
        //cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
        //cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
        cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
            static_cast<float>(p_face[i].rect.top)),
            cv::Point2f(static_cast<float>(p_face[i].rect.right),
            static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
        for (int j = 0; j < p_face[i].points_count; j++)
        {
            cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
                p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
        }
    }
    //if (face_count > 0)
    //{
    //    static size_t f=0;
    //    char fname[50];
    //    sprintf(fname, "face-%u.yuv420", ++f);
    //    FILE * pFile = fopen (fname,"wb");
    //    fwrite (yuvMat.data , sizeof(char), 1920*1080*1.5, pFile);
    //    printf("write face file %s\n", fname);
    //    fclose(pFile);
    //}
    // release the memory of face
    cv_face_release_tracker_result(p_face, face_count);
    return face_count;
}
bool PL_SensetimeFaceDetect::pay(const PipeMaterial& pm)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    if (pm.type != PipeMaterial::PMT_FRAME)
    {
        LOG_ERROR << "PL_H264Encoder::pay only support PMT_FRAME";
        return false;
    }
    if (pm.buffer == nullptr)
        return false;
    MB_Frame* frame = (MB_Frame*)pm.buffer;
    if (frame->type != MB_Frame::MBFT_YUV420)
    {
        LOG_ERROR << "PL_H264Encoder::pay only support MBFT_YUV420";
        return false;
    }
    int face_count = doFaceDetect(in, (uint8_t*)frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);//#todo
    if (face_count < 0)
    {
        in->payError = true;
        return false;
    }
    else
        in->payError = false;
    //in->buffer readly
    in->lastFrame.type = MB_Frame::MBFT_YUV420;
    in->lastFrame.buffer = frame->buffer;//#todo should copy
    in->lastFrame.buffSize = frame->buffSize;
    in->lastFrame.width = frame->width;
    in->lastFrame.height = frame->height;
    in->lastFrame.pts = frame->pts;
    return true;
}
bool PL_SensetimeFaceDetect::gain(PipeMaterial& pm)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    if (!in->payError)
    {
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = &(in->lastFrame);
        pm.buffSize = 0;
        pm.former = this;
    }
    pm.former = this;
    return !in->payError;
}
RtspFace/PL_SensetimeFaceDetect.h
@@ -1,35 +1,4 @@
#ifndef _PL_PL_SENSETIMEFACEDETECT_H_
#define _PL_PL_SENSETIMEFACEDETECT_H_
#include "PipeLine.h"
// Configuration for PL_SensetimeFaceDetect.
struct SensetimeFaceDetectConfig
{
    int point_size; // 21 / 106
    int point_size_config; // CV_DETECT_ENABLE_ALIGN_21 / CV_DETECT_ENABLE_ALIGN_106
    int detect_face_cnt_limit; // -1
    SensetimeFaceDetectConfig() :
        point_size(21), point_size_config(-1), detect_face_cnt_limit(-1)
    { }
};
// Pipeline element wrapping the SenseTime face detector/tracker.
class PL_SensetimeFaceDetect : public PipeLineElem
{
public:
    PL_SensetimeFaceDetect();
    virtual ~PL_SensetimeFaceDetect();
    // PipeLineElem interface; init expects a SensetimeFaceDetectConfig*.
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal; // opaque pointer to PL_SensetimeFaceDetect_Internal
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_SensetimeFaceDetect();
#endif
RtspFace/PL_SensetimeFaceFeatureEmit.h
New file
@@ -0,0 +1,6 @@
#ifndef _PL_SENSETIMEFACEFEATUREEMIT_H_
#define _PL_SENSETIMEFACEFEATUREEMIT_H_
// Placeholder header for the face-feature-emit pipeline element; no
// declarations yet (see SensetimeFaceTrackConfig::generate_face_feature).
#include "PipeLine.h"
#endif
RtspFace/PL_SensetimeFaceTrack.cpp
New file
@@ -0,0 +1,241 @@
#include "PL_SensetimeFaceTrack.h"
#include "MaterialBuffer.h"
#include "logger.h"
#include <opencv2/opencv.hpp>
#include <cv_face.h>
// Internal state for PL_SensetimeFaceTrack: SenseTime tracker handle plus
// metadata of the last frame processed.
struct PL_SensetimeFaceTrack_Internal
{
    //uint8_t buffer[1920*1080*4];
    //size_t buffSize;
    //size_t buffSizeMax;
    MB_Frame lastFrame; // metadata of last paid frame (buffer not copied)
    SensetimeFaceTrackConfig config;
    bool payError; // true when the last pay() failed
    cv_handle_t handle_track; // SenseTime tracker, created in init()
    PL_SensetimeFaceTrack_Internal() :
        //buffSize(0), buffSizeMax(sizeof(buffer)),
        lastFrame(), config(), payError(true),
        handle_track(nullptr)
    {
    }
    ~PL_SensetimeFaceTrack_Internal()
    {
    }
    // Returns fields to defaults.
    // NOTE(review): handle_track is nulled without cv_face_destroy_tracker;
    // calling init() twice without finit() would leak the handle -- confirm.
    void reset()
    {
        //buffSize = 0;
        payError = true;
        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
        SensetimeFaceTrackConfig _config;
        config = _config;
        handle_track = nullptr;
    }
};
// Factory used with PipeLine::register_elem_creator.
PipeLineElem* create_PL_SensetimeFaceTrack()
{
    PipeLineElem* elem = new PL_SensetimeFaceTrack;
    return elem;
}
// Allocates the element's opaque internal state.
PL_SensetimeFaceTrack::PL_SensetimeFaceTrack() : internal(new PL_SensetimeFaceTrack_Internal)
{
}
// Frees the internal state. Note: does not destroy the tracker handle;
// that is finit()'s job.
PL_SensetimeFaceTrack::~PL_SensetimeFaceTrack()
{
    delete (PL_SensetimeFaceTrack_Internal*)internal;
    internal= nullptr;
}
bool PL_SensetimeFaceTrack::init(void* args)
{
    PL_SensetimeFaceTrack_Internal* in = (PL_SensetimeFaceTrack_Internal*)internal;
    in->reset();
    SensetimeFaceTrackConfig* config = (SensetimeFaceTrackConfig*)args;
    in->config = *config;
    if (in->config.point_size == 21)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
    else if (in->config.point_size == 106)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
    else
    {
        LOG_ERROR << "alignment point size must be 21 or 106";
        return false;
    }
    // init handle
    cv_result_t cv_result = cv_face_create_tracker(&(in->handle_track), nullptr,
                                in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
        return false;
    }
    int val = 0;
    cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
        return false;
    }
    else
        LOG_ERROR << "detect face count limit : " << val;
    return true;
}
// Releases the SenseTime tracker handle created in init().
void PL_SensetimeFaceTrack::finit()
{
    PL_SensetimeFaceTrack_Internal* in = (PL_SensetimeFaceTrack_Internal*)internal;
    // destroy track handle
    cv_face_destroy_tracker(in->handle_track);
    in->handle_track = nullptr;
}
int doFaceTrack(PL_SensetimeFaceTrack_Internal* in,
                uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
{
    //resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
    int face_count = 0;
    cv_result_t cv_result = CV_OK;
    cv_face_t* p_face = nullptr;
    // realtime track
    cv_result = cv_face_track(in->handle_track, buffer, cvPixFmt,
                            width, height, stride,
                            CV_FACE_UP, &p_face, &face_count);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track failed, error : " << cv_result;
        cv_face_release_tracker_result(p_face, face_count);
        return -1;
    }
    // draw the video
    //cv::Mat yuvMat(cv::Size(width,height), CV_8UC3, buffer);
    cv::Mat yMat(cv::Size(width,height), CV_8UC1, buffer);
    for (int i = 0; i < face_count; i++)
    {
        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
            p_face[i].rect.left, p_face[i].rect.top,
            p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
            p_face[i].yaw,
            p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);
        if (in->config.draw_face_rect)
        {
            cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
                p_face[i].ID * 93 % 256,
                p_face[i].ID * 143 % 256);
            //cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
            //cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
            cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
                static_cast<float>(p_face[i].rect.top)),
                cv::Point2f(static_cast<float>(p_face[i].rect.right),
                static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
        }
        for (int j = 0; j < p_face[i].points_count; j++)
        {
            if (in->config.draw_face_feature_point)
            {
                cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
                    p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
            }
        }
    }
    //if (face_count > 0)
    //{
    //    static size_t f=0;
    //    char fname[50];
    //    sprintf(fname, "face-%u.yuv420", ++f);
    //    FILE * pFile = fopen (fname,"wb");
    //    fwrite (yuvMat.data , sizeof(char), 1920*1080*1.5, pFile);
    //    printf("write face file %s\n", fname);
    //    fclose(pFile);
    //}
    // release the memory of face
    cv_face_release_tracker_result(p_face, face_count);
    return face_count;
}
bool PL_SensetimeFaceTrack::pay(const PipeMaterial& pm)
{
    PL_SensetimeFaceTrack_Internal* in = (PL_SensetimeFaceTrack_Internal*)internal;
    if (pm.type != PipeMaterial::PMT_FRAME)
    {
        LOG_ERROR << "PL_H264Encoder::pay only support PMT_FRAME";
        return false;
    }
    if (pm.buffer == nullptr)
        return false;
    MB_Frame* frame = (MB_Frame*)pm.buffer;
    if (frame->type != MB_Frame::MBFT_YUV420)
    {
        LOG_ERROR << "PL_H264Encoder::pay only support MBFT_YUV420";
        return false;
    }
    int face_count = doFaceTrack(
                        in, (uint8_t*)frame->buffer, frame->width, frame->height, frame->width, CV_PIX_FMT_YUV420P);
    if (face_count < 0)
    {
        in->payError = true;
        return false;
    }
    else
        in->payError = false;
    //in->buffer readly
    in->lastFrame.type = MB_Frame::MBFT_YUV420;
    in->lastFrame.buffer = frame->buffer;//#todo should copy
    in->lastFrame.buffSize = frame->buffSize;
    in->lastFrame.width = frame->width;
    in->lastFrame.height = frame->height;
    in->lastFrame.pts = frame->pts;
    return true;
}
bool PL_SensetimeFaceTrack::gain(PipeMaterial& pm)
{
    PL_SensetimeFaceTrack_Internal* in = (PL_SensetimeFaceTrack_Internal*)internal;
    if (!in->payError)
    {
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = &(in->lastFrame);
        pm.buffSize = 0;
        pm.former = this;
    }
    pm.former = this;
    return !in->payError;
}
RtspFace/PL_SensetimeFaceTrack.h
New file
@@ -0,0 +1,39 @@
#ifndef _PL_PL_SENSETIMEFACETRACK_H_
#define _PL_PL_SENSETIMEFACETRACK_H_
#include "PipeLine.h"
struct SensetimeFaceTrackConfig
{
    int point_size; // 21 / 106
    int point_size_config; // CV_DETECT_ENABLE_ALIGN_21 / CV_DETECT_ENABLE_ALIGN_106
    int detect_face_cnt_limit; // -1
    bool draw_face_rect;
    bool draw_face_feature_point;
    bool generate_face_feature; // for PL_SensetimeFaceFeatureEmit
    SensetimeFaceTrackConfig() :
        point_size(21), point_size_config(-1), detect_face_cnt_limit(-1),
        draw_face_rect(true), draw_face_feature_point(true), generate_face_feature(false)
    { }
};
class PL_SensetimeFaceTrack : public PipeLineElem
{
public:
    PL_SensetimeFaceTrack();
    virtual ~PL_SensetimeFaceTrack();
    virtual bool init(void* args);
    virtual void finit();
    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);
private:
    void* internal;
};
PipeLineElem* create_PL_SensetimeFaceDetect();
#endif
RtspFace/PipeLine.cpp
@@ -90,16 +90,16 @@
    PipeDebugger(PipeLine* _pipeLine) : 
        pipeLine(_pipeLine), retElem(nullptr), pm(nullptr)
    {
        LOG_DEBUG << "pipe line begin";
        //LOG_DEBUG << "pipe line begin";
    }
    
    ~PipeDebugger()
    {
        bool retOK = (*(pipeLine->elems).rbegin() == retElem);
        if (retOK)
            LOG_DEBUG << "pipe line end, ret OK";
        else
            LOG_WARN << "pipe line end, ret ERROR";
        //bool retOK = (*(pipeLine->elems).rbegin() == retElem);
        //if (retOK)
        //    LOG_DEBUG << "pipe line end, ret OK";
        //else
        //    LOG_WARN << "pipe line end, ret ERROR";
    }
};
RtspFace/logger.h
@@ -16,6 +16,14 @@
#include "log4cpp/FileAppender.hh"
#include "log4cpp/PatternLayout.hh"
enum LoggerVerbose
{
    LV_ERROR,
    LV_WARN,
    LV_NOTICE,
    LV_INFO,
    LV_DEBUG
};
#define LOG(__level)  log4cpp::Category::getRoot() << log4cpp::Priority::__level << __FILE__ << ":" << __LINE__ << "\t" 
#define LOGP(__level, __format, arg...) log4cpp::Category::getRoot().log(log4cpp::Priority::__level, "%s:%d\t" __format, __FILE__, __LINE__, ##arg);
@@ -26,7 +34,7 @@
#define LOG_WARN     LOG(WARN) // Important event or input which will lead to errors
#define LOG_ERROR    LOG(ERROR) // Error message means program running in an abnormal (not expected) way
inline void initLogger(int verbose)
inline void initLogger(LoggerVerbose verbose)
{
    // initialize log4cpp
    log4cpp::Category &log = log4cpp::Category::getRoot();
@@ -43,8 +51,11 @@
    }
    switch (verbose)
    {
        case 2: log.setPriority(log4cpp::Priority::DEBUG); break;
        case 1: log.setPriority(log4cpp::Priority::INFO); break;
        case LV_DEBUG: log.setPriority(log4cpp::Priority::DEBUG); break;
        case LV_INFO: log.setPriority(log4cpp::Priority::INFO); break;
        case LV_NOTICE: log.setPriority(log4cpp::Priority::NOTICE); break;
        case LV_WARN: log.setPriority(log4cpp::Priority::WARN); break;
        case LV_ERROR: log.setPriority(log4cpp::Priority::ERROR); break;
        default: log.setPriority(log4cpp::Priority::NOTICE); break;
        
    }
RtspFace/main.cpp
@@ -6,14 +6,15 @@
#include "PL_AVFrameYUV420.h"
#include "PL_AVFrameBGRA.h"
#include "PL_Queue.h"
#include "PL_Scale.h"
#include "PL_SensetimeFaceDetect.h"
#include "PL_SensetimeFaceTrack.h"
#include "logger.h"
int main(int argc, char** argv)
{
    initLogger(2);
    initLogger(LV_DEBUG);
    PipeLine pipeLine;
    
@@ -23,8 +24,9 @@
    pipeLine.register_elem_creator("PL_AVFrameYUV420", create_PL_AVFrameYUV420);
    pipeLine.register_elem_creator("PL_H264Encoder", create_PL_H264Encoder);
    pipeLine.register_elem_creator("PL_Queue", create_PL_Queue);
    pipeLine.register_elem_creator("PL_Scale", create_PL_Scale);
    
    pipeLine.register_elem_creator("PL_SensetimeFaceDetect", create_PL_SensetimeFaceDetect);
    pipeLine.register_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
    
    {
        PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");
@@ -45,18 +47,41 @@
    {
        PL_H264Decoder* h264Decoder = (PL_H264Decoder*)pipeLine.push_elem("PL_H264Decoder");
        h264Decoder->init(nullptr);
        bool ret = h264Decoder->init(nullptr);
        if (!ret)
        {
            LOG_ERROR << "PL_H264Decoder.init error";
            exit(EXIT_FAILURE);
        }
    }
    {
        PL_AVFrameYUV420* avFrameYUV420 = (PL_AVFrameYUV420*)pipeLine.push_elem("PL_AVFrameYUV420");
        avFrameYUV420->init(nullptr);
        bool ret = avFrameYUV420->init(nullptr);
        if (!ret)
        {
            LOG_ERROR << "PL_AVFrameYUV420.init error";
            exit(EXIT_FAILURE);
        }
    }
    {
        PL_Scale_Config config;
        config.toWidth = 800;
        config.toHeight = 600;
        PL_Scale* pl = (PL_Scale*)pipeLine.push_elem("PL_Scale");
        bool ret = pl->init(&config);
        if (!ret)
        {
            LOG_ERROR << "PL_Scale.init error";
            exit(EXIT_FAILURE);
        }
    }
    {
        SensetimeFaceDetectConfig config;
        PL_SensetimeFaceDetect* stFaceDetect = (PL_SensetimeFaceDetect*)pipeLine.push_elem("PL_SensetimeFaceDetect");
        stFaceDetect->init(&config);
        SensetimeFaceTrackConfig config;
        PL_SensetimeFaceTrack* pl = (PL_SensetimeFaceTrack*)pipeLine.push_elem("PL_SensetimeFaceTrack");
        pl->init(&config);
    }
    //{//#todo queue should support deep copy
@@ -72,7 +97,12 @@
    {
        PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
        h264Encoder->init(nullptr);
        bool ret = h264Encoder->init(nullptr);
        if (!ret)
        {
            LOG_ERROR << "PL_H264Encoder.init error";
            exit(EXIT_FAILURE);
        }
    }
    
    {
RtspFace/make.sh
@@ -51,7 +51,9 @@
g++ -g -c -std=c++11 PL_AVFrameYUV420.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_AVFrameBGRA.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_Queue.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_SensetimeFaceDetect.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_Scale.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 PL_SensetimeFaceTrack.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 $FFMPEGRTSPSERVER_BASE/FFmpegH264Source.cpp $CFLAGS $CPPFLAGS
g++ -g -c -std=c++11 $FFMPEGRTSPSERVER_BASE/LiveRTSPServer.cpp $CFLAGS $CPPFLAGS
@@ -59,8 +61,8 @@
g++ -g -std=c++11 \
  main.o PipeLine.o \
  PL_RTSPClient.o PL_H264Decoder.o PL_H264Encoder.o PL_AVFrameYUV420.o PL_AVFrameBGRA.o PL_Queue.o \
  PL_SensetimeFaceDetect.o \
  PL_RTSPClient.o PL_H264Decoder.o PL_H264Encoder.o PL_AVFrameYUV420.o PL_AVFrameBGRA.o PL_Queue.o PL_Scale.o \
  PL_SensetimeFaceTrack.o \
  $FFMPEGRTSPSERVER_OBJ PL_RTSPServer.o \
  $LDFLAGS -o rtsp_face