From 633e76c1d533c3d9c257b92df7ebdfd36c9fd8a0 Mon Sep 17 00:00:00 2001
From: houxiao <houxiao@454eff88-639b-444f-9e54-f578c98de674>
Date: Thu, 29 Dec 2016 18:42:50 +0800
Subject: [PATCH] unify log

---
 RtspFace/PL_SensetimeFaceDetect.cpp | 29 ++++++++++++++++-------------
 1 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/RtspFace/PL_SensetimeFaceDetect.cpp b/RtspFace/PL_SensetimeFaceDetect.cpp
index f6b22c1..85245fb 100644
--- a/RtspFace/PL_SensetimeFaceDetect.cpp
+++ b/RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1,5 +1,6 @@
 #include "PL_SensetimeFaceDetect.h"
 #include "MaterialBuffer.h"
+#include "logger.h"
 
 #include <opencv2/opencv.hpp>
 #include <cv_face.h>
@@ -69,7 +70,7 @@
 		in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
 	else
 	{
-		printf("alignment point size must be 21 or 106\n");
+		LOG(ERROR) << "alignment point size must be 21 or 106";
 		return false;
 	}
 
@@ -78,7 +79,7 @@
 									in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
 	if (cv_result != CV_OK)
 	{
-		printf("cv_face_create_tracker failed, error code %d\n", cv_result);
+		LOG(ERROR) << "cv_face_create_tracker failed, error code" << cv_result;
 		return false;
 	}
 
@@ -86,11 +87,11 @@
 	cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
 	if (cv_result != CV_OK)
 	{
-		printf("cv_face_track_set_detect_face_cnt_limit failed, error : %d\n", cv_result);
+		LOG(ERROR) << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
 		return false;
 	}
 	else
-		printf("detect face count limit : %d\n", val);
+		LOG(ERROR) << "detect face count limit : " << val;
 
 	return true;
 }
@@ -119,21 +120,21 @@
 								CV_FACE_UP, &p_face, &face_count);
 	if (cv_result != CV_OK)
 	{
-		printf("cv_face_track failed, error : %d\n", cv_result);
+		LOG(ERROR) << "cv_face_track failed, error : " << cv_result;
 		cv_face_release_tracker_result(p_face, face_count);
 		return -1;
 	}
 
 	// draw the video
-	cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);
+	cv::Mat yuvMat(cv::Size(1920,1080), CV_8UC3, buffer);//#todo
 	cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
 
 	for (int i = 0; i < face_count; i++)
 	{
-		printf("face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
+		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
 			p_face[i].rect.left, p_face[i].rect.top,
 			p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
-		printf("face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
+		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
 			p_face[i].yaw,
 			p_face[i].pitch, p_face[i].roll,
 			p_face[i].eye_dist);
@@ -179,7 +180,7 @@
 
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+		LOG(ERROR) << "PL_H264Encoder::pay only support PMT_FRAME";
 		return false;
 	}
 
@@ -189,11 +190,11 @@
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->type != MB_Frame::MBFT_YUV420)
 	{
-		printf("PL_H264Encoder::pay only support MBFT_YUV420\n");
+		LOG(ERROR) << "PL_H264Encoder::pay only support MBFT_YUV420";
 		return false;
 	}
 
-	int face_count = doFaceDetect(in, frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);
+	int face_count = doFaceDetect(in, (uint8_t*)frame->buffer, 1920, 1080, 1920, CV_PIX_FMT_YUV420P);//#todo
 	if (face_count < 0)
 	{
 		in->payError = true;
@@ -207,6 +208,8 @@
 	in->lastFrame.type = MB_Frame::MBFT_YUV420;
 	in->lastFrame.buffer = frame->buffer;//#todo should copy
 	in->lastFrame.buffSize = frame->buffSize;
+	in->lastFrame.width = frame->width;
+	in->lastFrame.height = frame->height;
 	in->lastFrame.pts = frame->pts;
 
 	return true;
@@ -219,8 +222,8 @@
 	if (!in->payError)
 	{
 		pm.type = PipeMaterial::PMT_FRAME;
-		pm.buffer = (uint8_t*)(&(in->lastFrame));
-		pm.buffSize = sizeof(in->lastFrame);
+		pm.buffer = &(in->lastFrame);
+		pm.buffSize = 0;
 		pm.former = this;
 	}
 	pm.former = this;
--
Gitblit v1.8.0
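
Note on the logging macros: logger.h is not included in this patch, so the exact definitions of LOG and LOGP are not shown here. The C++ sketch below is only an assumption-based illustration of the interface the hunks above rely on, namely a stream-style LOG(level) macro and a printf-style LOGP(level, fmt, ...) call; the real RtspFace/logger.h may be implemented differently (for example on top of an existing logging library).

// Minimal sketch, NOT the actual RtspFace logger.h (not shown by this patch).
// It only mirrors the call syntax used above:
//   LOG(ERROR) << "message " << value;
//   LOGP(DEBUG, "format %d\n", i);
#include <cstdarg>
#include <cstdio>
#include <iostream>
#include <sstream>

enum LogLevel { DEBUG, INFO, ERROR };

// Stream-style logging: a temporary object collects the message via operator<<
// and writes one line to stderr when it is destroyed at the end of the statement.
class LogLine
{
public:
	explicit LogLine(LogLevel level) : level_(level) {}
	~LogLine() { std::cerr << "[" << levelName(level_) << "] " << oss_.str() << std::endl; }

	template <typename T>
	LogLine& operator<<(const T& value) { oss_ << value; return *this; }

private:
	static const char* levelName(LogLevel level)
	{
		switch (level) { case DEBUG: return "DEBUG"; case INFO: return "INFO"; default: return "ERROR"; }
	}
	LogLevel level_;
	std::ostringstream oss_;
};

#define LOG(level) LogLine(level)

// printf-style logging, matching calls like LOGP(DEBUG, "face: %d ...\n", i, ...).
inline void LOGP(LogLevel level, const char* fmt, ...)
{
	std::fprintf(stderr, "[%d] ", static_cast<int>(level));
	va_list args;
	va_start(args, fmt);
	std::vfprintf(stderr, fmt, args);
	va_end(args);
}

// Usage example mirroring the converted call sites in the patch.
int main()
{
	int cv_result = -1;
	LOG(ERROR) << "cv_face_create_tracker failed, error code " << cv_result;
	LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", 0, 10, 20, 110, 120, 1);
	return 0;
}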