Changed files:
    RtspFace/PL_Gainer.cpp (new)
    RtspFace/PL_Gainer.h (new)
    RtspFace/PL_Payer.cpp (new)
    RtspFace/PL_Payer.h (new)
    RtspFace/PL_SensetimeFaceDetect.cpp
    RtspFace/PL_SensetimeFaceDetect.h
    RtspFace/ev_proto.h
    RtspFace/ev_server.cpp
    RtspFace/ev_server.h
    RtspFace/face_daemon_proto.h (new)
    RtspFace/main_face_daemon.cpp
    RtspFace/make.sh
RtspFace/PL_Gainer.cpp
New file @@ -0,0 +1,153 @@
#include "PL_Gainer.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <string.h> // for memcpy

struct PL_Gainer_Internal
{
    PL_Gainer_Config config;
    PipeMaterial lastPm;
    MB_Frame lastFrame;

    PL_Gainer_Internal() : config(), lastPm(), lastFrame()
    {
    }

    ~PL_Gainer_Internal()
    {
    }

    void reset()
    {
        PL_Gainer_Config _config;
        config = _config;

        PipeMaterial _lastPm;
        lastPm = _lastPm;

        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
    }
};

PipeLineElem* create_PL_Gainer()
{
    return new PL_Gainer;
}

PL_Gainer::PL_Gainer() : internal(new PL_Gainer_Internal)
{
}

PL_Gainer::~PL_Gainer()
{
    delete (PL_Gainer_Internal*)internal;
    internal = nullptr;
}

bool PL_Gainer::init(void* args)
{
    PL_Gainer_Internal* in = (PL_Gainer_Internal*)internal;
    in->reset();

    if (args != nullptr)
    {
        PL_Gainer_Config* config = (PL_Gainer_Config*)args;
        in->config = *config;
    }

    return true;
}

void PL_Gainer::finit()
{
    PL_Gainer_Internal* in = (PL_Gainer_Internal*)internal;
}

void pl_gainer_deleter_func(PipeMaterial* pm)
{
    PL_Gainer_Internal* in = (PL_Gainer_Internal*)(pm->args);

    if (in->config.copyData)
    {
        switch(pm->type)
        {
        case PipeMaterial::PMT_BYTES:
        {
            delete[] (uint8_t*)pm->buffer;
        }
        break;
        case PipeMaterial::PMT_FRAME:
        {
            MB_Frame* pmFrame = (MB_Frame*)(pm->buffer);
            delete[] (uint8_t*)pmFrame->buffer;
        }
        break;
        default:
            //#todo support list or pm::copier operator
            LOG_ERROR << "Only support PMT_BYTES / PMT_FRAME";
            return;
        }
    }

    PipeMaterial _pm;
    *pm = _pm;
}

bool PL_Gainer::pay(const PipeMaterial& pm)
{
    return true;
}

bool PL_Gainer::gain(PipeMaterial& pm)
{
    PL_Gainer_Internal* in = (PL_Gainer_Internal*)internal;

    if (in->config.copyData)
    {
        PipeMaterial newPm = pm;

        switch(pm.type)
        {
        case PipeMaterial::PMT_BYTES:
        {
            newPm.buffer = new uint8_t[pm.buffSize];
            memcpy(newPm.buffer, pm.buffer, pm.buffSize);
        }
        break;
        case PipeMaterial::PMT_FRAME:
        {
            MB_Frame* pmFrame = (MB_Frame*)(pm.buffer);
            in->lastFrame = *pmFrame;
            in->lastFrame.buffer = new uint8_t[pmFrame->buffSize];
            memcpy(in->lastFrame.buffer, pmFrame->buffer, pmFrame->buffSize);
            newPm.buffer = &(in->lastFrame);
        }
        break;
        default:
            //#todo support list or pm::copier operator
            LOG_ERROR << "Only support PMT_BYTES / PMT_FRAME";
            return false;
        }

        newPm.args = in;
        newPm.deleter = pl_gainer_deleter_func;
        newPm.former = this;

        if (pm.deleter != nullptr)
            (pm.deleter)(&pm);

        pm = newPm;
        return true;
    }
    else
    {
        pm.former = this;
        return true;
    }
}
RtspFace/PL_Gainer.h
New file @@ -0,0 +1,33 @@
#ifndef _PL_Gainer_H_
#define _PL_Gainer_H_

#include "PipeLine.h"

struct PL_Gainer_Config
{
    bool copyData;

    PL_Gainer_Config() : copyData(false)
    {
    }
};

class PL_Gainer : public PipeLineElem
{
public:
    PL_Gainer();
    virtual ~PL_Gainer();

    virtual bool init(void* args);
    virtual void finit();

    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);

private:
    void* internal;
};

PipeLineElem* create_PL_Gainer();

#endif
RtspFace/PL_Payer.cpp
New file @@ -0,0 +1,119 @@
#include "PL_Payer.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <string.h> // for memcpy

struct PL_Payer_Internal
{
    PL_Payer_Config config;
    PipeMaterial lastPm;
    MB_Frame lastFrame;

    PL_Payer_Internal() : config(), lastPm(), lastFrame()
    {
    }

    ~PL_Payer_Internal()
    {
    }

    void reset()
    {
        PL_Payer_Config _config;
        config = _config;

        PipeMaterial _lastPm;
        lastPm = _lastPm;

        MB_Frame _lastFrame;
        lastFrame = _lastFrame;
    }
};

PipeLineElem* create_PL_Payer()
{
    return new PL_Payer;
}

PL_Payer::PL_Payer() : internal(new PL_Payer_Internal)
{
}

PL_Payer::~PL_Payer()
{
    delete (PL_Payer_Internal*)internal;
    internal = nullptr;
}

bool PL_Payer::init(void* args)
{
    PL_Payer_Internal* in = (PL_Payer_Internal*)internal;
    in->reset();

    if (args != nullptr)
    {
        PL_Payer_Config* config = (PL_Payer_Config*)args;
        in->config = *config;
    }

    return true;
}

void PL_Payer::finit()
{
    PL_Payer_Internal* in = (PL_Payer_Internal*)internal;
}

void pl_payer_deleter_func(PipeMaterial* pm)
{
    //#todo
}

bool PL_Payer::pay(const PipeMaterial& pm)
{
    PL_Payer_Internal* in = (PL_Payer_Internal*)internal;

    in->lastPm = pm;

    if (in->config.copyData)
    {
        switch(pm.type)
        {
        case PipeMaterial::PMT_BYTES:
        {
            in->lastPm.buffer = new uint8_t[pm.buffSize];
            memcpy(in->lastPm.buffer, pm.buffer, pm.buffSize);
            in->lastPm.args = in;
            in->lastPm.deleter = pl_payer_deleter_func;
        }
        break;
        case PipeMaterial::PMT_FRAME:
        {
            MB_Frame* pmFrame = (MB_Frame*)(pm.buffer);
            in->lastFrame = *pmFrame;
            in->lastFrame.buffer = new uint8_t[pmFrame->buffSize];
            memcpy(in->lastFrame.buffer, pmFrame->buffer, pmFrame->buffSize);
            in->lastPm.buffer = &(in->lastFrame);
            in->lastPm.args = in;
            in->lastPm.deleter = pl_payer_deleter_func;
        }
        break;
        default:
            //#todo support list or pm::copier operator
            LOG_ERROR << "Only support PMT_BYTES / PMT_FRAME";
        }
    }

    in->lastPm.former = this;
    return true;
}

bool PL_Payer::gain(PipeMaterial& pm)
{
    PL_Payer_Internal* in = (PL_Payer_Internal*)internal;
    pm = in->lastPm;
    return true;
}
RtspFace/PL_Payer.h
New file @@ -0,0 +1,33 @@
#ifndef _PL_Payer_H_
#define _PL_Payer_H_

#include "PipeLine.h"

struct PL_Payer_Config
{
    bool copyData;

    PL_Payer_Config() : copyData(false)
    {
    }
};

class PL_Payer : public PipeLineElem
{
public:
    PL_Payer();
    virtual ~PL_Payer();

    virtual bool init(void* args);
    virtual void finit();

    virtual bool pay(const PipeMaterial& pm);
    virtual bool gain(PipeMaterial& pm);

private:
    void* internal;
};

PipeLineElem* create_PL_Payer();

#endif
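Usage note (not part of the patch): a minimal sketch of the copy semantics that PL_Payer/PL_Gainer add. It assumes only the MB_Frame and PipeMaterial fields already used elsewhere in this patch; the 640x480 frame and the standalone use of PL_Payer outside a PipeLine are illustrative placeholders.

    #include <vector>
    #include <cstdint>
    #include "PL_Payer.h"
    #include "MaterialBuffer.h"

    void payer_copy_sketch()
    {
        PL_Payer* payer = (PL_Payer*)create_PL_Payer();
        PL_Payer_Config cfg;
        cfg.copyData = true;                     // ask PL_Payer to deep-copy whatever is paid in
        payer->init(&cfg);

        std::vector<uint8_t> yuv(640 * 480 * 3 / 2); // caller-owned YUV420 buffer (sizes are placeholders)

        MB_Frame frame;
        frame.type = MB_Frame::MBFT_YUV420;
        frame.buffer = yuv.data();
        frame.buffSize = yuv.size();
        frame.width = 640;
        frame.height = 480;

        PipeMaterial pm;
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = &frame;
        pm.buffSize = 0;

        payer->pay(pm);    // PL_Payer keeps its own copy; `yuv` can be reused from here on
        PipeMaterial out;
        payer->gain(out);  // returns the copied frame held inside PL_Payer

        payer->finit();
        delete payer;
    }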
RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1 +1,288 @@
#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"
#include "logger.h"

#include <opencv2/opencv.hpp>
#include <cv_face.h>

struct PL_SensetimeFaceDetect_Internal
{
    //uint8_t buffer[1920*1080*4];
    //size_t buffSize;
    //size_t buffSizeMax;
    MB_Frame lastFrame;
    PipeMaterial pmList[2];
    PL_SensetimeFaceDetectConfig config;
    st_ff_vect_t faceFeatures;

    bool payError;

    cv_handle_t handle_track;

    PL_SensetimeFaceDetect_Internal() :
        //buffSize(0), buffSizeMax(sizeof(buffer)),
        lastFrame(), pmList(), config(), faceFeatures(),
        payError(true), handle_track(nullptr)
    {
    }

    ~PL_SensetimeFaceDetect_Internal()
    {
    }

    void reset()
    {
        //buffSize = 0;
        payError = true;

        MB_Frame _lastFrame;
        lastFrame = _lastFrame;

        PipeMaterial _pm;
        pmList[0] = _pm;
        pmList[1] = _pm;

        PL_SensetimeFaceDetectConfig _config;
        config = _config;

        handle_track = nullptr;
    }
};

PipeLineElem* create_PL_SensetimeFaceDetect()
{
    return new PL_SensetimeFaceDetect;
}

PL_SensetimeFaceDetect::PL_SensetimeFaceDetect() : internal(new PL_SensetimeFaceDetect_Internal)
{
}

PL_SensetimeFaceDetect::~PL_SensetimeFaceDetect()
{
    delete (PL_SensetimeFaceDetect_Internal*)internal;
    internal = nullptr;
}

bool PL_SensetimeFaceDetect::init(void* args)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
    in->reset();

    PL_SensetimeFaceDetectConfig* config = (PL_SensetimeFaceDetectConfig*)args;
    in->config = *config;

    if (in->config.point_size == 21)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
    else if (in->config.point_size == 106)
        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
    else
    {
        LOG_ERROR << "alignment point size must be 21 or 106";
        return false;
    }

    // init handle
    cv_result_t cv_result = cv_face_create_tracker(&(in->handle_track), nullptr,
        in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
        return false;
    }

    int val = 0;
    cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
        return false;
    }
    else
        LOG_ERROR << "detect face count limit : " << val;

    return true;
}

void PL_SensetimeFaceDetect::finit()
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;

    // destroy track handle
    cv_face_destroy_tracker(in->handle_track);
    in->handle_track = nullptr;
}

int doFaceTrack(PL_SensetimeFaceDetect_Internal* in,
    uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
{
    //resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);

    int face_count = 0;
    cv_result_t cv_result = CV_OK;
    cv_face_t* p_face = nullptr;

    // realtime track
    cv_result = cv_face_track(in->handle_track, buffer, cvPixFmt,
        width, height, stride, CV_FACE_UP, &p_face, &face_count);
    if (cv_result != CV_OK)
    {
        LOG_ERROR << "cv_face_track failed, error : " << cv_result;
        cv_face_release_tracker_result(p_face, face_count);
        return -1;
    }

    // draw the video
    //cv::Mat yuvMat(cv::Size(width,height), CV_8UC3, buffer);
    cv::Mat yMat(cv::Size(width,height), CV_8UC1, buffer);
    for (int i = 0; i < face_count; i++)
    {
        SensetimeFaceFeature faceFeature;
        faceFeature.rect.leftTop.x = p_face[i].rect.left;
        faceFeature.rect.leftTop.y = p_face[i].rect.top;
        faceFeature.rect.rightBottom.x = p_face[i].rect.right;
        faceFeature.rect.rightBottom.y = p_face[i].rect.bottom;
        faceFeature.id = p_face[i].ID;
        faceFeature.yaw = p_face[i].yaw;
        faceFeature.pitch = p_face[i].pitch;
        faceFeature.roll = p_face[i].roll;
        faceFeature.eyeDistance = p_face[i].eye_dist;

        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
            p_face[i].rect.left, p_face[i].rect.top,
            p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
            p_face[i].yaw, p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);

        if (in->config.draw_face_rect)
        {
            cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
                p_face[i].ID * 93 % 256, p_face[i].ID * 143 % 256);
            //cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
            //cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
            cv::rectangle(yMat,
                cv::Point2f(static_cast<float>(p_face[i].rect.left), static_cast<float>(p_face[i].rect.top)),
                cv::Point2f(static_cast<float>(p_face[i].rect.right), static_cast<float>(p_face[i].rect.bottom)),
                scalar_color, 2);
        }

        for (int j = 0; j < p_face[i].points_count; j++)
        {
            FacePoint featurePoint;
            featurePoint.x = p_face[i].points_array[j].x;
            featurePoint.y = p_face[i].points_array[j].y;
            faceFeature.featurePoints.push_back(featurePoint);

            if (in->config.draw_face_feature_point)
            {
                cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
                    p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
            }
        }

        if (in->config.generate_face_feature)
            in->faceFeatures.push_back(faceFeature);
    }

    //if (face_count > 0)
    //{
    //    static size_t f=0;
    //    char fname[50];
    //    sprintf(fname, "face-%u.yuv420", ++f);
    //    FILE * pFile = fopen (fname,"wb");
    //    fwrite (yuvMat.data , sizeof(char), 1920*1080*1.5, pFile);
    //    printf("write face file %s\n", fname);
    //    fclose(pFile);
    //}

    // release the memory of face
    cv_face_release_tracker_result(p_face, face_count);

    return face_count;
}

bool PL_SensetimeFaceDetect::pay(const PipeMaterial& pm)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;

    if (pm.type != PipeMaterial::PMT_FRAME)
    {
        LOG_ERROR << "Only support PMT_FRAME";
        return false;
    }

    if (pm.buffer == nullptr)
        return false;

    MB_Frame* frame = (MB_Frame*)pm.buffer;
    if (frame->type != MB_Frame::MBFT_YUV420)
    {
        LOG_ERROR << "Only support MBFT_YUV420";
        return false;
    }

    in->faceFeatures.clear();
    int face_count = doFaceTrack(in, (uint8_t*)frame->buffer,
        frame->width, frame->height, frame->width, CV_PIX_FMT_YUV420P);
    if (face_count < 0)
    {
        in->payError = true;
        return false;
    }
    else
        in->payError = false;

    //in->buffer ready
    in->lastFrame.type = MB_Frame::MBFT_YUV420;
    in->lastFrame.buffer = frame->buffer;//#todo should copy
    in->lastFrame.buffSize = frame->buffSize;
    in->lastFrame.width = frame->width;
    in->lastFrame.height = frame->height;
    in->lastFrame.pts = frame->pts;

    return true;
}

bool PL_SensetimeFaceDetect::gain(PipeMaterial& pm)
{
    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;

    if (in->payError)
    {
        pm.former = this;
        return false;
    }

    if (!in->config.generate_face_feature)
    {
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = &(in->lastFrame);
        pm.buffSize = 0;
    }
    else
    {
        in->pmList[0].type = PipeMaterial::PMT_FRAME;
        in->pmList[0].buffer = &(in->lastFrame);
        in->pmList[0].buffSize = 0;
        in->pmList[0].former = this;

        in->pmList[1].type = PipeMaterial::PMT_BYTES;
        in->pmList[1].buffer = &(in->faceFeatures);
        in->pmList[1].buffSize = 0;
        in->pmList[1].former = this;

        pm.type = PipeMaterial::PMT_PM_LIST;
        pm.buffer = in->pmList;
        pm.buffSize = sizeof(in->pmList) / sizeof(PipeMaterial);
    }

    pm.former = this;

    return true;
}
RtspFace/PL_SensetimeFaceDetect.h
@@ -1,4 +1,40 @@
-#ifndef _PL_PL_SENSETIMEFACEDETECT_H_
-#define _PL_PL_SENSETIMEFACEDETECT_H_
+#ifndef _PL_SENSETIMEFACEDETECT_H_
+#define _PL_SENSETIMEFACEDETECT_H_
+
+#include "PipeLine.h"
+
+#include <vector>
+
+struct PL_SensetimeFaceDetectConfig
+{
+    int point_size; // 21 / 106
+    int point_size_config; // CV_DETECT_ENABLE_ALIGN_21 / CV_DETECT_ENABLE_ALIGN_106
+    int detect_face_cnt_limit; // -1
+    bool draw_face_rect;
+    bool draw_face_feature_point;
+    bool generate_face_feature; // for PL_SensetimeFaceFeatureEmit
+
+    PL_SensetimeFaceDetectConfig() :
+        point_size(21), point_size_config(-1), detect_face_cnt_limit(-1),
+        draw_face_rect(true), draw_face_feature_point(true),
+        generate_face_feature(false)
+    {
+    }
+};
+
+class PL_SensetimeFaceDetect : public PipeLineElem
+{
+public:
+    PL_SensetimeFaceDetect();
+    virtual ~PL_SensetimeFaceDetect();
+
+    virtual bool init(void* args);
+    virtual void finit();
+
+    virtual bool pay(const PipeMaterial& pm);
+    virtual bool gain(PipeMaterial& pm);
+
+private:
+    void* internal;
+};
+
+PipeLineElem* create_PL_SensetimeFaceDetect();
+
 #endif
RtspFace/ev_proto.h
@@ -11,8 +11,22 @@
     enum EVPC
     {
         EVPC__FIRST,
-        EVPC_SENSETIMEFACEDETECT,
-        EVPC__LAST,
+        EVPC_STATUS = 1,
+        EVPC_USER_DEFINE = 128,
+        EVPC__LAST
     };
 };
+
+struct EVPStatus
+{
+    enum EVPS
+    {
+        EVPS__FIRST,
+        EVPS_OK = 1,
+        EVPS_ERROR = 128,
+        EVPS_INTERNAL_ERROR,
+        EVPS_PARAMETER_ERROR,
+        EVPS__LAST
+    };
+};
@@ -22,12 +36,21 @@
     uint32_t size; // sizeof(EVPHeader)+sizeof(subcmd)
 };
 
+struct EVP_Status
+{
+    int16_t status;
+};
+
 struct EVP_VariableBuffer
 {
-    int16_t mb_type; // MB_Frame::MBFType
+    int16_t type;
     uint8_t buff[0];
 };
 
+//#todo template<typename TPacket> void endian_convert(TPacket& packet);
+
 #pragma pack()
 
 #endif
RtspFace/ev_server.cpp
@@ -157,16 +157,15 @@
     bool closeClient = true;
     if (client->proc != nullptr)
     {
-        EVClientStub cs;
+        EVClientStub cs(client->recvbuff, client->recvbuff_end);
         cs.id = client->fd;
-        cs.recvBuff = client->recvbuff;
-        cs.recvBuffSize = client->recvbuff_end;
 
         closeClient = !(client->proc(cs));
 
         if (cs.sendBuff != nullptr && cs.sendBuffSize > 0)
         {
-            //#todo bufferevent_write
+            size_t writeSize = bufferevent_write(bufev, cs.sendBuff, cs.sendBuffSize);
+            if (writeSize != cs.sendBuffSize)
+                LOG_WARN << "server send truncate " << (cs.sendBuffSize - writeSize) << " bytes";
 
             if (cs.deleteSendBuff)
                 delete[] cs.sendBuff;
@@ -283,3 +282,17 @@
     return EXIT_SUCCESS;
 }
+
+void ev_send_status_packet(EVClientStub& client, EVPStatus::EVPS status)
+{
+    client.sendBuffSize = sizeof(EVPHeader) + sizeof(EVP_Status);
+    client.sendBuff = new uint8_t[client.sendBuffSize];
+    client.deleteSendBuff = true;
+
+    EVPHeader* evpHeader = new (client.sendBuff) EVPHeader;
+    evpHeader->cmd = EVPCommand::EVPC_STATUS;
+    evpHeader->size = client.sendBuffSize;
+
+    EVP_Status* evpStatus = new (client.sendBuff + sizeof(EVPHeader)) EVP_Status;
+    evpStatus->status = status;
+}
RtspFace/ev_server.h
@@ -3,6 +3,7 @@
 #include <stddef.h>
 #include <stdint.h>
+#include "ev_proto.h"
 
 #define SERVER_PORT 5432
 #define REUSEADDR_ON 1
@@ -25,6 +26,13 @@
         sendBuff(nullptr), sendBuffSize(0), deleteSendBuff(false)
     {
     }
+
+    EVClientStub(const uint8_t* _recvBuff, size_t _recvBuffSize) :
+        id(-1), recvBuff(_recvBuff), recvBuffSize(_recvBuffSize),
+        sendBuff(nullptr), sendBuffSize(0), deleteSendBuff(false)
+    {
+    }
 };
 
 typedef bool (*evclient_proc_t)(EVClientStub& client);
@@ -32,4 +40,7 @@
 int server_main(int argc, char **argv);
 
 void ev_send_packet(EVClientStub& client);
+void ev_send_status_packet(EVClientStub& client, EVPStatus::EVPS status);
+
 #endif
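Usage note (not part of the patch): the new EVClientStub constructor and ev_send_status_packet() allow a handler shaped like the hypothetical sketch below, which follows the same validate-then-reply pattern as the ev_proc() added to main_face_daemon.cpp further down. Returning false asks ev_server to close the connection.

    #include "ev_server.h"

    // hypothetical handler; registered via the evclient_proc pointer, as main_face_daemon.cpp does
    bool my_proc(EVClientStub& client)
    {
        if (client.recvBuffSize < sizeof(EVPHeader))
        {
            ev_send_status_packet(client, EVPStatus::EVPS_PARAMETER_ERROR);
            return false; // disconnect
        }

        EVPHeader* hdr = (EVPHeader*)client.recvBuff;
        if (hdr->size != client.recvBuffSize)
        {
            ev_send_status_packet(client, EVPStatus::EVPS_PARAMETER_ERROR);
            return false; // disconnect
        }

        ev_send_status_packet(client, EVPStatus::EVPS_OK);
        return true; // keep the connection open
    }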
RtspFace/face_daemon_proto.h
New file @@ -0,0 +1,37 @@
#ifndef _FACE_DAEMON_PROTO_H_
#define _FACE_DAEMON_PROTO_H_

#include <stddef.h>
#include <stdint.h>
#include "ev_proto.h"

#pragma pack(1)

struct FaceDaemonCommand
{
    enum FDC
    {
        FDC__FIRST,
        FDC_SENSETIMEFACEDETECT = EVPCommand::EVPC_USER_DEFINE + 1,
        FDC_SENSETIMEFACEDETECT_RESULT,
        FDC__LAST,
    };
};

struct FDP_Image
{
    int32_t school_id;
    int16_t mb_type; // MB_Frame::MBFType
    int16_t width;
    int16_t height;
    uint8_t buff[0];
};

struct FDP_FaceDetectResult
{
    int32_t stid; // sensetime id
};

#pragma pack()

#endif
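Usage note (not part of the patch): a sketch of how a client might lay out the FDC_SENSETIMEFACEDETECT request that ev_proc_SensetimeFaceDetect() below expects: an EVPHeader, then an FDP_Image, then the raw image bytes, with EVPHeader::size equal to the total length. The school_id and image dimensions are placeholders, and no endian conversion is applied (the //#todo endian_convert in ev_proto.h is still open).

    #include <vector>
    #include <cstring>
    #include "face_daemon_proto.h"
    #include "MaterialBuffer.h" // for MB_Frame::MBFT_YUV420

    std::vector<uint8_t> build_detect_request(const uint8_t* yuv, size_t yuvSize,
                                              int16_t width, int16_t height)
    {
        std::vector<uint8_t> buf(sizeof(EVPHeader) + sizeof(FDP_Image) + yuvSize);

        EVPHeader* hdr = (EVPHeader*)buf.data();
        hdr->cmd = FaceDaemonCommand::FDC_SENSETIMEFACEDETECT; // == EVPC_USER_DEFINE + 1
        hdr->size = (uint32_t)buf.size();       // the server rejects mismatched sizes

        FDP_Image* img = (FDP_Image*)(buf.data() + sizeof(EVPHeader));
        img->school_id = 0;                     // placeholder
        img->mb_type = (int16_t)MB_Frame::MBFT_YUV420;
        img->width = width;
        img->height = height;
        memcpy(img->buff, yuv, yuvSize);        // payload follows the two packed headers

        return buf;
    }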
RtspFace/main_face_daemon.cpp
@@ -1,4 +1,5 @@
 #include "PipeLine.h"
+#include "MaterialBuffer.h"
 #include "PL_RTSPClient.h"
 #include "PL_RTSPServer.h"
 #include "PL_H264Decoder.h"
@@ -8,6 +9,8 @@
 #include "PL_Queue.h"
 #include "PL_Scale.h"
 #include "PL_Fork.h"
+#include "PL_Payer.h"
+#include "PL_Gainer.h"
 
 #include "PL_SensetimeFaceTrack.h"
 #include "PL_DlibFaceTrack.h"
@@ -15,6 +18,7 @@
 #include "ev_server.h"
 #include "ev_proto.h"
+#include "face_daemon_proto.h"
 
 #include "logger.h"
@@ -22,18 +26,24 @@
 evclient_proc_t evclient_proc;
 
-bool ev_proc_SensetimeFaceDetect(EVClientStub& client)
+void send_SensetimeFaceDetectResult(PipeMaterial& lastPm)
 {
+    if (lastPm.type == PipeMaterial::PMT_PM_LIST)
+    {
+        PipeMaterial& facePM = ((PipeMaterial*)(lastPm.buffer))[1];
+        st_ff_vect_t& faceFeatures = *((st_ff_vect_t*)facePM.buffer);
+        LOG_NOTICE << "faceFeatures " << faceFeatures.size();
+        //#todo send result packet
+    }
 }
 
-bool ev_proc(EVClientStub& client)
+bool ev_proc_SensetimeFaceDetect(EVClientStub& client)
 {
     EVPHeader* evpHeader = (EVPHeader*)client.recvBuff;
-    //#todo check cmd and size
-
-    //#test send 01000B0000004142434445
-    //LOG_DEBUG << "cmd=" << evpHeader->cmd << ", size=" << evpHeader->size << ", \t" << (char*)(evpHeader + sizeof(EVPHeader));
-    //return true;
+    FDP_Image* fdpImage = (FDP_Image*)(client.recvBuff + sizeof(EVPHeader));
 
     PipeLine* pipeLine = nullptr;
     if (g_PipeLinePool.wait_free())
@@ -41,37 +51,69 @@
 
     if (pipeLine == nullptr)
     {
-        LOG_WARN << "can't get free pipeline";//#todo send err packet
+        LOG_WARN << "can't get free pipeline";
+        ev_send_status_packet(client, EVPStatus::EVPS_INTERNAL_ERROR);
         return false;
     }
 
-    PipeMaterial pm; // fill
+    MB_Frame frame;
+    frame.type = (MB_Frame::MBFType)(fdpImage->mb_type);
+    frame.buffer = fdpImage->buff;
+    frame.buffSize = client.recvBuffSize - sizeof(EVPHeader) - sizeof(FDP_Image);
+    frame.width = fdpImage->width;
+    frame.height = fdpImage->height;
 
-    PipeLineElem* plElem = pipeLine.pipe(&pm);
-    if (! pipeLine.check_pipe_complete(plElem))
+    PipeMaterial pm;
+    pm.type = PipeMaterial::PMT_FRAME;
+    pm.buffer = &frame;
+    pm.buffSize = 0;
+
+    PipeLineElem* plElem = pipeLine->pipe(&pm);
+    if (! pipeLine->check_pipe_complete(plElem))
     {
         LOG_WARN << "pipeline not complete";
-        g_PipeLinePool.release(pipeLine);//#todo send err packet
+        g_PipeLinePool.release(pipeLine);
+        ev_send_status_packet(client, EVPStatus::EVPS_INTERNAL_ERROR);
        return false;
     }
 
     if (!plElem->gain(pm))
     {
         LOG_WARN << "pipeline gain error";
-        g_PipeLinePool.release(pipeLine);//#todo send err packet
+        g_PipeLinePool.release(pipeLine);
+        ev_send_status_packet(client, EVPStatus::EVPS_INTERNAL_ERROR);
         return false;
     }
 
+    send_SensetimeFaceDetectResult(pm);
+
+    g_PipeLinePool.release(pipeLine);
+    return false;
+}
+
+bool ev_proc(EVClientStub& client)
+{
+    EVPHeader* evpHeader = (EVPHeader*)client.recvBuff;
+    if (evpHeader->size != client.recvBuffSize)
+    {
+        LOG_WARN << "Truncated buffer " << (evpHeader->size - client.recvBuffSize) << " bytes";
+        return false;
+    }
 
-    if (pm.type == PipeMaterial::PMT_PM_LIST)
+    switch(evpHeader->cmd)
     {
-        PipeMaterial& facePM = ((PipeMaterial*)(pm.buffer))[1];
-        st_ff_vect_t& faceFeatures = *((st_ff_vect_t*)facePM.buffer);
-        LOG_NOTICE << "faceFeatures " << faceFeatures.size();
-        //#todo send result packet
+    case EVPCommand::EVPC_USER_DEFINE + 1:
+        return ev_proc_SensetimeFaceDetect(client);
+        break;
+    default:
+        LOG_WARN << "Unknown command";
+        ev_send_status_packet(client, EVPStatus::EVPS_PARAMETER_ERROR);
+        return false;
+        break;
     }
 
-    g_PipeLinePool.release(pipeLine);
-
+    // return false to disconnect
     return false;
 }
@@ -80,6 +122,7 @@
     initLogger(LV_DEBUG);
 
     PipeLine::register_global_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
+    PipeLine::register_global_elem_creator("PL_Gainer", create_PL_Gainer);
 
     g_PipeLinePool = new PipeLinePool(true);
@@ -87,17 +130,28 @@
     {
         PipeLine* pipeLine = new PipeLine;
 
-        {//payer//#todo
+        {
+            PL_Payer_Config config;
+            config.copyData = true;
+            PL_Gainer* ple = (PL_Gainer*)pipeLine->push_elem("PL_Gainer");
+            bool ret = ple->init(&config);
+            if (!ret)
+            {
+                LOG_ERROR << "ple init error";
+                exit(EXIT_FAILURE);
+            }
         }
 
         {
             SensetimeFaceTrackConfig config;
             config.draw_face_rect = false;
             config.draw_face_feature_point = false;
             config.generate_face_feature = true;
-            PL_SensetimeFaceTrack* sensetimeFaceTrack = (PL_SensetimeFaceTrack*)pipeLine->push_elem("PL_SensetimeFaceTrack");
-            bool ret = sensetimeFaceTrack->init(&config);
+            PL_SensetimeFaceTrack* ple = (PL_SensetimeFaceTrack*)pipeLine->push_elem("PL_SensetimeFaceTrack");
+            bool ret = ple->init(&config);
             if (!ret)
             {
-                LOG_ERROR << "sensetimeFaceTrack init error";
+                LOG_ERROR << "ple init error";
                 exit(EXIT_FAILURE);
             }
         }
@@ -107,22 +161,4 @@
     evclient_proc = ev_proc;
 
     return server_main(argc, argv);
-
-    while(true)
-    {
-        //LOG_ERROR << "begin pipe";
-
-        PipeMaterial pm;
-        if (pipeLine.pipe(&pm) == sensetimeFaceTrack);
-            sensetimeFaceTrack->gain(pm);
-
-        if (pm.type == PipeMaterial::PMT_PM_LIST)
-        {
-            PipeMaterial& facePM = ((PipeMaterial*)(pm.buffer))[1];
-            st_ff_vect_t& faceFeatures = *((st_ff_vect_t*)facePM.buffer);
-            LOG_NOTICE << "faceFeatures " << faceFeatures.size();
-        }
-
-        //LOG_ERROR << "end pipe";
-    }
 }
RtspFace/make.sh
@@ -49,7 +49,7 @@
 #g++ main.cpp $CFLAGS $CPPFLAGS -o main.o
 #g++ main_dump_st_face.cpp $CFLAGS $CPPFLAGS -o main.o
 g++ main_face_daemon.cpp $CFLAGS $CPPFLAGS -o main.o
-g++ face_daemon_server.cpp $CFLAGS $CPPFLAGS
+g++ ev_server.cpp $CFLAGS $CPPFLAGS
 g++ PipeLine.cpp $CFLAGS $CPPFLAGS
 g++ PipeLinePool.cpp $CFLAGS $CPPFLAGS
@@ -62,6 +62,8 @@
 g++ PL_Queue.cpp $CFLAGS $CPPFLAGS
 g++ PL_Scale.cpp $CFLAGS $CPPFLAGS
 g++ PL_Fork.cpp $CFLAGS $CPPFLAGS
+g++ PL_Payer.cpp $CFLAGS $CPPFLAGS
+g++ PL_Gainer.cpp $CFLAGS $CPPFLAGS
 
 g++ PL_SensetimeFaceTrack.cpp $CFLAGS $CPPFLAGS
@@ -73,8 +75,8 @@
 g++ -g -std=c++11 \
   main.o PipeLine.o PipeLinePool.o \
-  face_daemon_server.o \
-  PL_RTSPClient.o PL_H264Decoder.o PL_H264Encoder.o PL_AVFrameYUV420.o PL_AVFrameBGRA.o PL_Queue.o PL_Scale.o PL_Fork.o \
+  ev_server.o \
+  PL_RTSPClient.o PL_H264Decoder.o PL_H264Encoder.o PL_AVFrameYUV420.o PL_AVFrameBGRA.o PL_Queue.o PL_Scale.o PL_Fork.o PL_Payer.o PL_Gainer.o \
   PL_SensetimeFaceTrack.o \
   PL_DlibFaceTrack.o \
   $FFMPEGRTSPSERVER_OBJ PL_RTSPServer.o \