From 91bcfe2c50c8732c1ccf792ca1f8964966808352 Mon Sep 17 00:00:00 2001
From: houxiao <houxiao@454eff88-639b-444f-9e54-f578c98de674>
Date: Tue, 10 Jan 2017 17:25:40 +0800
Subject: [PATCH] add face daemon

---
 RtspFace/PL_SensetimeFaceDetect.cpp | 287 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 287 insertions(+), 0 deletions(-)

diff --git a/RtspFace/PL_SensetimeFaceDetect.cpp b/RtspFace/PL_SensetimeFaceDetect.cpp
index cc04c79..c79a5f7 100644
--- a/RtspFace/PL_SensetimeFaceDetect.cpp
+++ b/RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1 +1,288 @@
 #include "PL_SensetimeFaceDetect.h"
+#include "MaterialBuffer.h"
+#include "logger.h"
+
+#include <opencv2/opencv.hpp>
+#include <cv_face.h>
+
+struct PL_SensetimeFaceDetect_Internal
+{
+    //uint8_t buffer[1920*1080*4];
+    //size_t buffSize;
+    //size_t buffSizeMax;
+    MB_Frame lastFrame;
+    PipeMaterial pmList[2];
+    PL_SensetimeFaceDetectConfig config;
+    st_ff_vect_t faceFeatures;
+
+    bool payError;
+
+    cv_handle_t handle_track;
+
+    PL_SensetimeFaceDetect_Internal() :
+        //buffSize(0), buffSizeMax(sizeof(buffer)),
+        lastFrame(), pmList(), config(), faceFeatures(), payError(true),
+        handle_track(nullptr)
+    {
+    }
+
+    ~PL_SensetimeFaceDetect_Internal()
+    {
+    }
+
+    void reset()
+    {
+        //buffSize = 0;
+        payError = true;
+
+        MB_Frame _lastFrame;
+        lastFrame = _lastFrame;
+
+        PipeMaterial _pm;
+        pmList[0] = _pm;
+        pmList[1] = _pm;
+
+        PL_SensetimeFaceDetectConfig _config;
+        config = _config;
+
+        handle_track = nullptr;
+    }
+};
+
+PipeLineElem* create_PL_SensetimeFaceDetect()
+{
+    return new PL_SensetimeFaceDetect;
+}
+
+PL_SensetimeFaceDetect::PL_SensetimeFaceDetect() : internal(new PL_SensetimeFaceDetect_Internal)
+{
+}
+
+PL_SensetimeFaceDetect::~PL_SensetimeFaceDetect()
+{
+    delete (PL_SensetimeFaceDetect_Internal*)internal;
+    internal = nullptr;
+}
+
+bool PL_SensetimeFaceDetect::init(void* args)
+{
+    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
+    in->reset();
+
+    PL_SensetimeFaceDetectConfig* config = (PL_SensetimeFaceDetectConfig*)args;
+    in->config = *config;
+    if (in->config.point_size == 21)
+        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
+    else if (in->config.point_size == 106)
+        in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
+    else
+    {
+        LOG_ERROR << "alignment point size must be 21 or 106";
+        return false;
+    }
+
+    // init handle
+    cv_result_t cv_result = cv_face_create_tracker(&(in->handle_track), nullptr,
+                                in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
+    if (cv_result != CV_OK)
+    {
+        LOG_ERROR << "cv_face_create_tracker failed, error code: " << cv_result;
+        return false;
+    }
+
+    int val = 0;
+    cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
+    if (cv_result != CV_OK)
+    {
+        LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
+        return false;
+    }
+    else
+        LOG_ERROR << "detect face count limit : " << val;
+
+    return true;
+}
+
+void PL_SensetimeFaceDetect::finit()
+{
+    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
+
+    // destroy track handle
+    cv_face_destroy_tracker(in->handle_track);
+    in->handle_track = nullptr;
+}
+
+int doFaceTrack(PL_SensetimeFaceDetect_Internal* in,
+                uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
+{
+    //resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
+
+    int face_count = 0;
+    cv_result_t cv_result = CV_OK;
+    cv_face_t* p_face = nullptr;
+
+    // realtime track
+    cv_result = cv_face_track(in->handle_track, buffer, cvPixFmt,
+                              width, height, stride,
+                              CV_FACE_UP, &p_face, &face_count);
+    if (cv_result != CV_OK)
+    {
+        LOG_ERROR << "cv_face_track failed, error : " << cv_result;
+        cv_face_release_tracker_result(p_face, face_count);
+        return -1;
+    }
+
+    // draw the video
+    //cv::Mat yuvMat(cv::Size(width,height), CV_8UC3, buffer);
+    cv::Mat yMat(cv::Size(width,height), CV_8UC1, buffer); // wraps the Y (luma) plane only, so overlays are drawn in grayscale
+    for (int i = 0; i < face_count; i++)
+    {
+        SensetimeFaceFeature faceFeature;
+        faceFeature.rect.leftTop.x = p_face[i].rect.left;
+        faceFeature.rect.leftTop.y = p_face[i].rect.top;
+        faceFeature.rect.rightBottom.x = p_face[i].rect.right;
+        faceFeature.rect.rightBottom.y = p_face[i].rect.bottom;
+        faceFeature.id = p_face[i].ID;
+        faceFeature.yaw = p_face[i].yaw;
+        faceFeature.pitch = p_face[i].pitch;
+        faceFeature.roll = p_face[i].roll;
+        faceFeature.eyeDistance = p_face[i].eye_dist;
+
+        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
+            p_face[i].rect.left, p_face[i].rect.top,
+            p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
+
+        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
+            p_face[i].yaw,
+            p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);
+
+        if (in->config.draw_face_rect)
+        {
+            cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
+                                             p_face[i].ID * 93 % 256,
+                                             p_face[i].ID * 143 % 256);
+
+            //cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
+            //cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
+
+            cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
+                                            static_cast<float>(p_face[i].rect.top)),
+                                cv::Point2f(static_cast<float>(p_face[i].rect.right),
+                                            static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
+        }
+
+        for (int j = 0; j < p_face[i].points_count; j++)
+        {
+            FacePoint featurePoint;
+            featurePoint.x = p_face[i].points_array[j].x;
+            featurePoint.y = p_face[i].points_array[j].y;
+            faceFeature.featurePoints.push_back(featurePoint);
+
+            if (in->config.draw_face_feature_point)
+            {
+                cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
+                    p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
+            }
+        }
+
+        if (in->config.generate_face_feature)
+            in->faceFeatures.push_back(faceFeature);
+    }
+
+    //if (face_count > 0)
+    //{
+    //    static size_t f=0;
+    //    char fname[50];
+    //    sprintf(fname, "face-%u.yuv420", ++f);
+    //    FILE * pFile = fopen (fname,"wb");
+    //    fwrite (yuvMat.data , sizeof(char), 1920*1080*1.5, pFile);
+    //    printf("write face file %s\n", fname);
+    //    fclose(pFile);
+    //}
+
+    // release the memory of face
+    cv_face_release_tracker_result(p_face, face_count);
+
+    return face_count;
+}
+
+bool PL_SensetimeFaceDetect::pay(const PipeMaterial& pm)
+{
+    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
+
+    if (pm.type != PipeMaterial::PMT_FRAME)
+    {
+        LOG_ERROR << "Only support PMT_FRAME";
+        return false;
+    }
+
+    if (pm.buffer == nullptr)
+        return false;
+
+    MB_Frame* frame = (MB_Frame*)pm.buffer;
+    if (frame->type != MB_Frame::MBFT_YUV420)
+    {
+        LOG_ERROR << "Only support MBFT_YUV420";
+        return false;
+    }
+
+    in->faceFeatures.clear();
+    int face_count = doFaceTrack(
+        in, (uint8_t*)frame->buffer, frame->width, frame->height, frame->width, CV_PIX_FMT_YUV420P);
+    if (face_count < 0)
+    {
+        in->payError = true;
+        return false;
+    }
+    else
+        in->payError = false;
+
+    //in->buffer ready
+
+    in->lastFrame.type = MB_Frame::MBFT_YUV420;
+    in->lastFrame.buffer = frame->buffer; //#todo should copy
+    in->lastFrame.buffSize = frame->buffSize;
+    in->lastFrame.width = frame->width;
+    in->lastFrame.height = frame->height;
+    in->lastFrame.pts = frame->pts;
+
+    return true;
+}
+
+bool PL_SensetimeFaceDetect::gain(PipeMaterial& pm)
+{
+    PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
+
+    if (in->payError)
+    {
+        pm.former = this;
+        return false;
+    }
+
+    if (!in->config.generate_face_feature)
+    {
+        pm.type = PipeMaterial::PMT_FRAME;
+        pm.buffer = &(in->lastFrame);
+        pm.buffSize = 0;
+    }
+    else
+    {
+        in->pmList[0].type = PipeMaterial::PMT_FRAME;
+        in->pmList[0].buffer = &(in->lastFrame);
+        in->pmList[0].buffSize = 0;
+        in->pmList[0].former = this;
+
+        in->pmList[1].type = PipeMaterial::PMT_BYTES;
+        in->pmList[1].buffer = &(in->faceFeatures);
+        in->pmList[1].buffSize = 0;
+        in->pmList[1].former = this;
+
+        pm.type = PipeMaterial::PMT_PM_LIST;
+        pm.buffer = in->pmList;
+        pm.buffSize = sizeof(in->pmList) / sizeof(PipeMaterial);
+    }
+
+    pm.former = this;
+    return true;
+}
--
Gitblit v1.8.0
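
Usage sketch (not part of the patch): a minimal, hypothetical driver showing how the element added above could be exercised on a single decoded frame. Only PL_SensetimeFaceDetect, PL_SensetimeFaceDetectConfig, MB_Frame and PipeMaterial come from this repository; the standalone main(), the dummy 1920x1080 YUV420 buffer, and the chosen config values are assumptions for illustration.

#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"

#include <cstdint>
#include <vector>

int main()
{
    // Configure 21-point alignment and ask for the face-feature output as well (placeholder values).
    PL_SensetimeFaceDetectConfig config;
    config.point_size = 21;               // must be 21 or 106
    config.detect_face_cnt_limit = 5;     // placeholder limit
    config.draw_face_rect = true;         // boxes are drawn into the Y plane
    config.draw_face_feature_point = false;
    config.generate_face_feature = true;  // also emit st_ff_vect_t alongside the frame

    PL_SensetimeFaceDetect detector;
    if (!detector.init(&config))
        return 1;

    // A dummy 1920x1080 YUV420 frame standing in for the output of an upstream decoder.
    std::vector<uint8_t> yuv(1920 * 1080 * 3 / 2);
    MB_Frame frame;
    frame.type = MB_Frame::MBFT_YUV420;
    frame.buffer = yuv.data();
    frame.buffSize = yuv.size();
    frame.width = 1920;
    frame.height = 1080;

    PipeMaterial pmIn;
    pmIn.type = PipeMaterial::PMT_FRAME;
    pmIn.buffer = &frame;

    PipeMaterial pmOut;
    if (detector.pay(pmIn) && detector.gain(pmOut))
    {
        // With generate_face_feature set, pmOut.type is PMT_PM_LIST and pmOut.buffer points at a
        // two-element PipeMaterial list: [0] the (annotated) MB_Frame, [1] the st_ff_vect_t of faces.
    }

    detector.finit();
    return 0;
}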