Changed files:
QiaoJiaSystem/testCodeMod/FaceDefine.h
QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.cpp
QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.h
QiaoJiaSystem/testCodeMod/main.cpp

QiaoJiaSystem/testCodeMod/FaceDefine.h
@@ -12,8 +12,30 @@
 using std::vector;
 using std::string;
 
 namespace BasicFace {
+
+    typedef vector<unsigned char> Feature;
+
+    struct InitParam {
+        int nDeviceID;               // device id for the GPU device, e.g. 0,1,2,3...
+        int nImageWidth;             // image width of the video
+        int nImageHeight;            // image height of the video
+        int nMaxFaceNum;             // max face number for tracking
+        int nSampleSize;             // down-sample size for face detection
+        int nDetectionIntervalFrame; // interval (in frames) between face detections during tracking
+
+        InitParam() {
+            nMaxFaceNum = 100;
+            nSampleSize = 640;
+            nDeviceID = 0;
+            nDetectionIntervalFrame = 5;
+        }
+    };
+
     struct FaceFeatureResult {
-        vector<unsigned char> feature;
+        Feature feature;
         float score;
     };
@@ -31,7 +53,7 @@
         float roll;  // rotation angle, negative to the left and positive to the right, in degrees
         float angle; // sqrt(yaw*yaw/3 + pitch*pitch/3 + roll*roll/3)
         vector<char> attributes;
-        float trackingId;
+        long trackingId;
     };
 
     struct DbSearchResult {
@@ -57,4 +79,7 @@
+        int stride;
         unsigned char *data;
     };
+
 }
+
 #endif //TESTCODE_FACEDEFINE_H
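A minimal usage sketch (illustrative only, not part of this commit): the InitParam constructor presets the tracking limits but leaves the frame size unset, and a cv::Mat maps onto FaceImage in the {width, height, stride, data} order that main.cpp below relies on. The helper names are hypothetical.

    #include <opencv2/opencv.hpp>
    #include "FaceDefine.h"

    // InitParam() presets nMaxFaceNum, nSampleSize, nDeviceID and
    // nDetectionIntervalFrame; the image size must be filled per stream.
    BasicFace::InitParam makeParam(int width, int height) {
        BasicFace::InitParam p;
        p.nImageWidth = width;
        p.nImageHeight = height;
        return p;
    }

    // Wrap an OpenCV frame; the field order mirrors the aggregate
    // initialization used in main.cpp below.
    BasicFace::FaceImage wrapMat(const cv::Mat &bgr) {
        return BasicFace::FaceImage{bgr.cols, bgr.rows, static_cast<int>(bgr.step), bgr.data};
    }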
QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.cpp
@@ -3,6 +3,8 @@
 //
 
 #include "FaceTrackingWrapper.h"
+#include "Debug.h"
+
 
 FaceTrackingWrapper::FaceTrackingWrapper() {
@@ -11,3 +13,96 @@
 FaceTrackingWrapper::~FaceTrackingWrapper() {
 
 }
+
+void FaceTrackingWrapper::setChannelParam(int channel, const BasicFace::InitParam &initParam) {
+    m_mapParam.insert(std::make_pair(channel, initParam));
+}
+
+bool FaceTrackingWrapper::initHandle() {
+    // todo add gpu support
+    int size = m_mapParam.size();
+    THFT_Param *param = new THFT_Param[size];
+    for (auto &item : m_mapParam) {
+        int pos = item.first;
+        auto t_param = item.second;
+        param[pos].nDeviceID = t_param.nDeviceID;
+        param[pos].nImageWidth = t_param.nImageWidth;
+        param[pos].nImageHeight = t_param.nImageHeight;
+        param[pos].nMaxFaceNum = t_param.nMaxFaceNum;
+        param[pos].nSampleSize = t_param.nSampleSize;
+        param[pos].nDetectionIntervalFrame = t_param.nDetectionIntervalFrame;
+    }
+    int nNum = -1;
+    nNum = THFT_Create(size, param);
+    delete[] param;
+    return (nNum > 0);
+}
+
+/***
+ * @todo
+ * @param image
+ * @return
+ */
+std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::detectFace(BasicFace::FaceImage image) {
+    return vector<BasicFace::FaceDetectResult>();
+}
+
+std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::trackingFace(int channel, BasicFace::FaceImage image) {
+    vector<BasicFace::FaceDetectResult> results;
+    // ClockTimer ct("CasiaFaceWapper::detectFace");
+    if (channel == -1) {
+        ERR("invalid face channel, face detect failed");
+        return results;
+    }
+    THFT_FaceInfo facePos[MAX_DETECT_FACE];
+    int faceNum = THFT_FaceTracking(channel, image.data, facePos);
+    //int faceNum = THFI_DetectFace(channel, image.data, 24, image.width, image.height, facePos, MAX_DETECT_FACE);
+    if (faceNum < 0) {
+        ERR("THFT_FaceTracking return " << faceNum);
+    } else {
+        results.resize(faceNum);
+        for (int i = 0; i < faceNum; i++) {
+            BasicFace::FaceDetectResult &result = results[i];
+            THFT_FaceInfo &face = facePos[i];
+            result.angle = sqrt(face.fAngle.pitch * face.fAngle.pitch / 3 +
+                                face.fAngle.roll * face.fAngle.roll / 3 +
+                                face.fAngle.yaw * face.fAngle.yaw / 3);
+            result.yaw = face.fAngle.yaw;
+            result.pitch = face.fAngle.pitch;
+            result.roll = face.fAngle.roll;
+            result.left = face.rcFace.left;
+            result.top = face.rcFace.top;
+            result.width = face.rcFace.right - face.rcFace.left;
+            result.height = face.rcFace.bottom - face.rcFace.top;
+            // result.score = face.nQuality / 100.0f;
+            result.score = face.fAngle.confidence;
+            result.trackingId = face.nFaceID;
+        }
+    }
+    return results;
+}
+
+/***
+ * @todo
+ * @param image
+ * @return
+ */
+vector<BasicFace::FaceFeatureResult> FaceTrackingWrapper::extractFace(BasicFace::FaceImage image) {
+    return vector<BasicFace::FaceFeatureResult>();
+}
+
+/***
+ * @todo
+ * @param feature1
+ * @param feature2
+ * @return
+ */
+float FaceTrackingWrapper::compareFeature(BasicFace::Feature &feature1, BasicFace::Feature &feature2) {
+    return 0;
+}
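compareFeature() above is still a stub. As a hedged sketch only: if the Feature bytes are assumed to be a packed array of floats (an assumption — the THFT SDK is not documented here and may provide its own comparison call), a plain cosine similarity could look like this:

    #include <cmath>
    #include <cstddef>
    #include "FaceDefine.h"

    // Hypothetical helper: treats the byte vectors as packed floats and
    // returns their cosine similarity in [-1, 1]; 0 on size mismatch.
    static float cosineSimilarity(const BasicFace::Feature &a, const BasicFace::Feature &b) {
        if (a.empty() || a.size() != b.size() || a.size() % sizeof(float) != 0)
            return 0.0f;
        const float *fa = reinterpret_cast<const float *>(a.data());
        const float *fb = reinterpret_cast<const float *>(b.data());
        std::size_t n = a.size() / sizeof(float);
        double dot = 0, na = 0, nb = 0;
        for (std::size_t i = 0; i < n; ++i) {
            dot += fa[i] * fb[i];
            na += fa[i] * fa[i];
            nb += fb[i] * fb[i];
        }
        if (na == 0 || nb == 0) return 0.0f;
        return static_cast<float>(dot / (std::sqrt(na) * std::sqrt(nb)));
    }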
QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.h
@@ -10,11 +10,18 @@
 #include <string.h>
 #include <sys/time.h>
 #include <time.h>
+#include <list>
+
+#include <opencv2/opencv.hpp>
+#include <FiStdDefEx.h>
+#include <THFaceTracking_i.h>
 #include "FaceDefine.h"
 
+//typedef std::list<FaceTrackingInfo> ObjectList;
+
+#define MAX_DETECT_FACE 50
 
 class FaceTrackingWrapper {
@@ -23,7 +30,26 @@
     virtual ~FaceTrackingWrapper();
 
-    // std::vector<FaceDetectResult>
-    void detectFace(FaceImage image);
+    void setChannelParam(int channel, const BasicFace::InitParam &);
+
+    bool initHandle();
+
+    std::vector<BasicFace::FaceDetectResult> detectFace(BasicFace::FaceImage image);
+
+    std::vector<BasicFace::FaceDetectResult> trackingFace(int channel, BasicFace::FaceImage image);
+
+    vector<BasicFace::FaceFeatureResult> extractFace(BasicFace::FaceImage image);
+
+    static float compareFeature(BasicFace::Feature &feature1, BasicFace::Feature &feature2);
+
+private:
+    int nGPUDeviceID = 0;
+
+    // SDK initialization parameters, keyed by channel
+    std::map<int, BasicFace::InitParam> m_mapParam;
+
+    // cache of the targets tracked in the previous pass
+    // std::map<int, ObjectList> m_objListCache;
 };
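A hedged sketch of how this per-channel API could drive several streams at once (stream URLs, loop structure and the helper name are assumptions, not part of the commit). Channel ids are kept contiguous from 0 because initHandle() uses each map key as an index into the THFT_Param array it passes to THFT_Create().

    #include <string>
    #include <vector>
    #include <opencv2/opencv.hpp>
    #include "FaceTrackingWrapper.h"

    void runChannels(const std::vector<std::string> &urls) {
        FaceTrackingWrapper wrapper;
        std::vector<cv::VideoCapture> caps(urls.size());
        // register one channel per stream, numbered 0..N-1
        for (size_t ch = 0; ch < urls.size(); ++ch) {
            caps[ch].open(urls[ch]);
            BasicFace::InitParam p;
            p.nImageWidth = (int) caps[ch].get(CV_CAP_PROP_FRAME_WIDTH);
            p.nImageHeight = (int) caps[ch].get(CV_CAP_PROP_FRAME_HEIGHT);
            wrapper.setChannelParam((int) ch, p);
        }
        if (!wrapper.initHandle()) return;   // THFT_Create failed
        cv::Mat frame;
        while (true) {
            for (size_t ch = 0; ch < caps.size(); ++ch) {
                caps[ch] >> frame;
                if (frame.empty()) continue;
                BasicFace::FaceImage img{frame.cols, frame.rows, (int) frame.step, frame.data};
                auto faces = wrapper.trackingFace((int) ch, img);
                // ... consume per-channel results ...
            }
        }
    }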
QiaoJiaSystem/testCodeMod/main.cpp
@@ -3,10 +3,95 @@
 //
 
 #include <Debug.h>
+#include "FaceTrackingWrapper.h"
+
+using namespace cv;
+
+// get the current system time in milliseconds
+double msecond() {
+    struct timeval tv;
+    gettimeofday(&tv, 0);
+    return (tv.tv_sec * 1.0e3 + tv.tv_usec * 1.0e-3);
+}
 
 int main(int argc, char **argv) {
+    ENABLEGLOG("./log/");
+
+    FaceTrackingWrapper faceTrackingWrapper;
+
+    bool bOpen;
+    VideoCapture vc;
+    // rtsp stream address
+    bOpen = vc.open("rtsp://admin:a1234567@192.168.1.188:554/h264/ch1/main/av_stream");
+    // camera
+    // if (1) {
+    //     bool bSet1 = vc.set(CV_CAP_PROP_FRAME_WIDTH, w);
+    //     bool bSet2 = vc.set(CV_CAP_PROP_FRAME_HEIGHT, h);
+    //     bOpen = vc.open(devID);
+    // }
+    // // video file
+    // else {
+    //     bOpen = vc.open("test.avi");
+    // }
+    if (!bOpen) {
+        printf("Open video source failed.");
+        return 0;
+    }
+
+    int nWidth = vc.get(CV_CAP_PROP_FRAME_WIDTH);
+    int nHeight = vc.get(CV_CAP_PROP_FRAME_HEIGHT);
+    printf("FRAME_WIDTH=%d,FRAME_HEIGHT=%d\n", nWidth, nHeight);
+
+    BasicFace::InitParam initParam;
+    initParam.nDeviceID = 0;
+    initParam.nImageWidth = nWidth;
+    initParam.nImageHeight = nHeight;
+    initParam.nMaxFaceNum = 50;
+    initParam.nSampleSize = nWidth / 2;
+    initParam.nDetectionIntervalFrame = 12;
+
+    faceTrackingWrapper.setChannelParam(0, initParam);
+    faceTrackingWrapper.setChannelParam(1, initParam);
+    faceTrackingWrapper.setChannelParam(2, initParam);
+    faceTrackingWrapper.initHandle();
+
+    Mat frame;
+    while (1) {
+        vc >> frame;
+        if (frame.empty()) {
+            waitKey(30);
+            continue;
+        }
+
+        int nNum = 0;
+        THFT_FaceInfo *pFaceInfos = new THFT_FaceInfo[50];
+
+        double t1, t2;
+        t1 = msecond();
+        // nNum = THFT_FaceTracking(2, frame.data, pFaceInfos);
+        BasicFace::FaceImage faceImage2{frame.cols, frame.rows, frame.step, frame.data};
+        auto t_lists = faceTrackingWrapper.trackingFace(2, faceImage2);
+        t2 = msecond();
+        delete[] pFaceInfos;
+
+        printf("face tracking time=%fms faceNum is %d\n", t2 - t1, (int) t_lists.size());
+
+        imshow("Face Tracking", frame);
+        waitKey(30);
+    }
+
+    destroyWindow("Face Tracking");
+    vc.release();
+    THFT_Release();
+
+    getchar();
+
     return 0;
-    INFO("test");
 }
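main.cpp displays the raw frame without visualizing the tracking results. As an illustrative addition (not part of this commit), a small helper could draw the FaceDetectResult boxes and ids before the imshow() call, e.g. drawResults(frame, t_lists); the function name is hypothetical and the left/top/width/height fields are cast to int in case they are declared as floats.

    #include <string>
    #include <vector>
    #include <opencv2/opencv.hpp>
    #include "FaceDefine.h"

    // Draw each tracked face and its id/score onto the frame.
    void drawResults(cv::Mat &frame, const std::vector<BasicFace::FaceDetectResult> &faces) {
        for (const auto &r : faces) {
            cv::Rect box((int) r.left, (int) r.top, (int) r.width, (int) r.height);
            cv::rectangle(frame, box, cv::Scalar(0, 255, 0), 2);
            std::string label = "id=" + std::to_string(r.trackingId) +
                                " score=" + std::to_string(r.score);
            cv::putText(frame, label, cv::Point(box.x, box.y - 5),
                        cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1);
        }
    }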