From 21d2e4cd4e3ec5ec6f7f32ed5426b069a303adfb Mon Sep 17 00:00:00 2001
From: pans <pans@454eff88-639b-444f-9e54-f578c98de674>
Date: Tue, 10 Jan 2017 18:02:07 +0800
Subject: [PATCH] 

---
 RtspFace/PL_SensetimeFaceDetect.cpp |  127 ++++++++++++++++++++++++++++++-----------
 1 files changed, 92 insertions(+), 35 deletions(-)

diff --git a/RtspFace/PL_SensetimeFaceDetect.cpp b/RtspFace/PL_SensetimeFaceDetect.cpp
index 85245fb..c79a5f7 100644
--- a/RtspFace/PL_SensetimeFaceDetect.cpp
+++ b/RtspFace/PL_SensetimeFaceDetect.cpp
@@ -11,7 +11,9 @@
 	//size_t buffSize;
 	//size_t buffSizeMax;
 	MB_Frame lastFrame;
-	SensetimeFaceDetectConfig config;
+	PipeMaterial pmList[2];
+	PL_SensetimeFaceDetectConfig config;
+	st_ff_vect_t faceFeatures;
 
 	bool payError;
 	
@@ -19,7 +21,7 @@
 	
 	PL_SensetimeFaceDetect_Internal() : 
 		//buffSize(0), buffSizeMax(sizeof(buffer)), 
-		lastFrame(), config(), payError(true), 
+		lastFrame(), pmList(), config(), faceFeatures(), payError(true), 
 		handle_track(nullptr)
 	{
 	}
@@ -35,7 +37,12 @@
 		
 		MB_Frame _lastFrame;
 		lastFrame = _lastFrame;
-		SensetimeFaceDetectConfig _config;
+		
+		PipeMaterial _pm;
+		pmList[0] = _pm;
+		pmList[1] = _pm;
+		
+		PL_SensetimeFaceDetectConfig _config;
 		config = _config;
 		
 		handle_track = nullptr;
@@ -62,7 +69,7 @@
 	PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
 	in->reset();
 	
-	SensetimeFaceDetectConfig* config = (SensetimeFaceDetectConfig*)args;
+	PL_SensetimeFaceDetectConfig* config = (PL_SensetimeFaceDetectConfig*)args;
 	in->config = *config;
 	if (in->config.point_size == 21)
 		in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_21;
@@ -70,7 +77,7 @@
 		in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
 	else
 	{
-		LOG(ERROR) << "alignment point size must be 21 or 106";
+		LOG_ERROR << "alignment point size must be 21 or 106";
 		return false;
 	}
 
@@ -79,7 +86,7 @@
 								in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_create_tracker failed, error code" << cv_result;
+		LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
 		return false;
 	}
 
@@ -87,11 +94,11 @@
 	cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
+		LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
 		return false;
 	}
 	else
-		LOG(ERROR) << "detect face count limit : " << val;
+		LOG_ERROR << "detect face count limit : " << val;
 	
 	return true;
 }
@@ -105,7 +112,7 @@
 	in->handle_track = nullptr;
 }
 
-int doFaceDetect(PL_SensetimeFaceDetect_Internal* in, 
+int doFaceTrack(PL_SensetimeFaceDetect_Internal* in, 
 				uint8_t* buffer, size_t width, size_t height, size_t stride, cv_pixel_format cvPixFmt)
 {
 	//resize(bgr_frame, bgr_frame, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
@@ -120,43 +127,69 @@
 							CV_FACE_UP, &p_face, &face_count);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_track failed, error : " << cv_result;
+		LOG_ERROR << "cv_face_track failed, error : " << cv_result;
 		cv_face_release_tracker_result(p_face, face_count);
 		return -1;
 	}
 
 	// draw the video
-	cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);//#todo
-	cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
+	//cv::Mat yuvMat(cv::Size(width,height), CV_8UC3, buffer);
+	cv::Mat yMat(cv::Size(width,height), CV_8UC1, buffer);
 	for (int i = 0; i < face_count; i++)
 	{
-		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
+		SensetimeFaceFeature faceFeature;
+		faceFeature.rect.leftTop.x = p_face[i].rect.left;
+		faceFeature.rect.leftTop.y = p_face[i].rect.top;
+		faceFeature.rect.rightBottom.x = p_face[i].rect.right;
+		faceFeature.rect.rightBottom.y = p_face[i].rect.bottom;
+		faceFeature.id = p_face[i].ID;
+		faceFeature.yaw = p_face[i].yaw;
+		faceFeature.pitch = p_face[i].pitch;
+		faceFeature.roll = p_face[i].roll;
+		faceFeature.eyeDistance = p_face[i].eye_dist;
+		
+		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
 			p_face[i].rect.left, p_face[i].rect.top,
 			p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
 			
-		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
+		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
 			p_face[i].yaw,
 			p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);
 
-		cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
-			p_face[i].ID * 93 % 256,
-			p_face[i].ID * 143 % 256);
-		
-		//cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
-		//cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
-		
-		cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
-			static_cast<float>(p_face[i].rect.top)),
-			cv::Point2f(static_cast<float>(p_face[i].rect.right),
-			static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
-        
+		if (in->config.draw_face_rect)
+		{
+			cv::Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
+				p_face[i].ID * 93 % 256,
+				p_face[i].ID * 143 % 256);
+			
+			//cv::rectangle(yMat, cv::Point2f(0, 0), cv::Point2f(50, 50), scalar_color, 2);
+			//cv::rectangle(yMat, cv::Point2f(500, 500), cv::Point2f(550, 550), scalar_color, 2);
+			
+			cv::rectangle(yMat, cv::Point2f(static_cast<float>(p_face[i].rect.left),
+				static_cast<float>(p_face[i].rect.top)),
+				cv::Point2f(static_cast<float>(p_face[i].rect.right),
+				static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
+		}
+
+
 		for (int j = 0; j < p_face[i].points_count; j++)
 		{
-			cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
-				p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
+			FacePoint featurePoint;
+			featurePoint.x = p_face[i].points_array[j].x;
+			featurePoint.y = p_face[i].points_array[j].y;
+			faceFeature.featurePoints.push_back(featurePoint);
+			
+			if (in->config.draw_face_feature_point)
+			{
+				cv::circle(yMat, cv::Point2f(p_face[i].points_array[j].x,
+					p_face[i].points_array[j].y), 1, cv::Scalar(255, 255, 255));
+			}
 		}
+		
+		if (in->config.generate_face_feature)
+			in->faceFeatures.push_back(faceFeature);
 	}
-	
+
 	//if (face_count > 0)
 	//{
 	//	static size_t f=0;
@@ -180,7 +213,7 @@
 
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		LOG(ERROR) << "PL_H264Encoder::pay only support PMT_FRAME";
+		LOG_ERROR << "Only support PMT_FRAME";
 		return false;
 	}
 	
@@ -190,11 +223,13 @@
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->type != MB_Frame::MBFT_YUV420)
 	{
-		LOG(ERROR) << "PL_H264Encoder::pay only support MBFT_YUV420";
+		LOG_ERROR << "Only support MBFT_YUV420";
 		return false;
 	}
 
-	int face_count = doFaceDetect(in, (uint8_t*)frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);//#todo
+	in->faceFeatures.clear();
+	int face_count = doFaceTrack(
+						in, (uint8_t*)frame->buffer, frame->width, frame->height, frame->width, CV_PIX_FMT_YUV420P);
 	if (face_count < 0)
 	{
 		in->payError = true;
@@ -219,13 +254,35 @@
 {
 	PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
 
-	if (!in->payError)
+	if (in->payError)
+	{
+		pm.former = this;
+		return false;
+	}
+
+	if (!in->config.generate_face_feature)
 	{
 		pm.type = PipeMaterial::PMT_FRAME;
 		pm.buffer = &(in->lastFrame);
 		pm.buffSize = 0;
-		pm.former = this;
 	}
+	else
+	{
+		in->pmList[0].type = PipeMaterial::PMT_FRAME;
+		in->pmList[0].buffer = &(in->lastFrame);
+		in->pmList[0].buffSize = 0;
+		in->pmList[0].former = this;
+		
+		in->pmList[1].type = PipeMaterial::PMT_BYTES;
+		in->pmList[1].buffer = &(in->faceFeatures);
+		in->pmList[1].buffSize = 0;
+		in->pmList[1].former = this;
+		
+		pm.type = PipeMaterial::PMT_PM_LIST;
+		pm.buffer = in->pmList;
+		pm.buffSize = sizeof(in->pmList) / sizeof(PipeMaterial);
+	}
+	
 	pm.former = this;
-	return !in->payError;
+	return true;
 }

--
Gitblit v1.8.0