| | |
| | | memcpy(&face, &pos, sizeof(pos) - sizeof(pos.pFacialData)); |
| | | face.pFacialData.resize(sizeof(pos.pFacialData)); |
| | | memcpy(face.pFacialData.data(), pos.pFacialData, sizeof(pos.pFacialData)); |
| | | face.pfaceId = -1; |
| | | // DBG(face.fAngle.confidence); |
| | | faces.push_back(face); |
| | | } |
| | |
| | | ${LIBS} |
| | | ) |
| | | |
| | | #add_executable(AppPipeControllerTest |
| | | # AppPipeControllerTest.cpp |
| | | # ${SOURCES}) |
| | | #target_link_libraries(AppPipeControllerTest |
| | | # ${LIBS} |
| | | # ) |
New file |
| | |
| | | // |
| | | // Created by ps on 18-12-18. |
| | | // |
| | | |
| | | #ifndef TESTCODE_FACEDEFINE_H |
| | | #define TESTCODE_FACEDEFINE_H |
| | | |
| | | #include <vector> |
| | | #include <string> |
| | | |
| | | //using namespace std; |
| | | using std::vector; |
| | | using std::string; |
| | | |
| | | namespace BasicFace { |
| | | |
// Raw face-feature blob as produced by the SDK (opaque byte vector).
using Feature = std::vector<unsigned char>;
| | | |
| | | |
// Per-channel SDK initialisation parameters (one entry per tracking channel).
struct InitParam {
    int nDeviceID;               // device id for GPU device. eg: 0,1,2,3.....
    int nImageWidth;             // image width of video
    int nImageHeight;            // image height of video
    int nMaxFaceNum;             // max face number for tracking
    int nSampleSize;             // down sample size for face detection
    int nDetectionIntervalFrame; // interval frame number of face detection for face tracking

    // Defaults are unchanged; nImageWidth/nImageHeight are now zeroed instead
    // of being left indeterminate (callers must still set them to real values).
    InitParam()
            : nDeviceID(0),
              nImageWidth(0),
              nImageHeight(0),
              nMaxFaceNum(100),
              nSampleSize(640),
              nDetectionIntervalFrame(5) {}
};
| | | |
// Result of one feature extraction: the opaque feature blob plus its score.
struct FaceFeatureResult {
    Feature feature; // extracted feature bytes (see Feature typedef above)
    float score;     // NOTE(review): never default-initialized — every producer must set it; TODO confirm
};
| | | |
// One detected (and possibly tracked) face. Sentinel -1 means "unassigned".
struct FaceDetectResult {
    // Previously only attributes/trackingId were initialised and the other ten
    // members were left indeterminate; everything now gets a zero/sentinel value.
    FaceDetectResult()
            : id(-1), left(0), top(0), width(0), height(0), score(0.0f),
              yaw(0.0f), pitch(0.0f), roll(0.0f), angle(0.0f),
              attributes(256), trackingId(-1) {}

    int id;        // detection id (-1 until assigned)
    int left;      // bounding box, pixels
    int top;
    int width;
    int height;
    float score;   // detection confidence
    float yaw;     // horizontal turn, left negative / right positive, in degrees
    float pitch;   // up negative / down positive, in degrees
    float roll;    // left negative / right positive, in degrees
    float angle;   // sqrt(yaw*yaw/3 + pitch*pitch/3 + roll*roll/3)
    std::vector<char> attributes; // 256 attribute slots — semantics defined by the SDK; TODO confirm
    long trackingId; // tracker id (-1 when the face is not tracked)
};
| | | |
// One hit from a face-database search.
struct DbSearchResult {
    int index;        // presumably the index of the matched db entry — TODO confirm against producer
    float confidence; // match confidence
    string dbId;      // id of the matched database record
};
| | | |
// Combined result: bounding box of a query face plus its best database match.
// NOTE(review): all numeric members are left uninitialized by default — callers
// must fill every field; TODO confirm.
struct FaceSearchResult {
    int index;        // presumably the index of the face in the detection list — TODO confirm
    int left;         // query face bounding box, pixels
    int top;
    int width;
    int height;
    float score;      // detection score of the query face
    float confidence; // match confidence against the db entry
    string dbId;      // matched database id
};
| | | |
// Lightweight view of one image frame handed to the SDK. The struct does not
// free `data`; the caller keeps the pixel buffer alive for the duration of
// the call (see the cv::Mat-backed construction in FaceTrackingWrapper.cpp).
struct FaceImage {
    int width;
    int height;
    int stride;          // presumably bytes per row (fed from cv::Mat::step) — TODO confirm
    unsigned char *data; // non-owning pointer to the pixel buffer
};
| | | |
| | | } |
| | | |
| | | #endif //TESTCODE_FACEDEFINE_H |
| | |
| | | #include <QtCore/QString> |
| | | #include <basic/timer_counter/Clocktimer.h> |
| | | #include <basic/util/opencv/CvUtil.h> |
| | | #include "FaceTrackingWrapper.h" |
| | | |
| | | #define GETSCORE(IDENT) appPref.getFloatData(IDENT) == -1 ? 95 : appPref.getFloatData(IDENT); |
| | | |
| | |
| | | sharedMemory(nullptr), trackingTrigger(nullptr) { |
| | | sharedMemory = new QSharedMemory(QString(shareMemoryName.c_str())); |
| | | if (!sharedMemory->create(4608 * 2592 * 4)) { |
| | | sharedMemory->attach(); |
| | | sharedMemory-> |
| | | |
| | | attach(); |
| | | DBG("size is " << sharedMemory->size()); |
| | | } |
| | | |
| | | // string t_camIdex = getProperty("dev_id"); |
| | | //#todo |
| | | //#todo |
| | | string t_camIdex; |
| | | if (shareMemoryName.find("/")) { |
| | | string_replace(shareMemoryName, "//", "/"); |
| | | string_replace(shareMemoryName, |
| | | "//", "/"); |
| | | auto dev_pos = shareMemoryName.find("/cut/") + 5; |
| | | auto ch_pos = shareMemoryName.find("/", dev_pos) + 1; |
| | | auto str_device_id = shareMemoryName.substr(dev_pos, ch_pos - dev_pos - 1); |
| | |
| | | t_score = t_score / 100; |
| | | |
| | | trackingTrigger = new TrackingTrigger(t_score); |
| | | m_trackingRet = appPref.getIntData("FaceTrackingRet"); |
| | | } |
| | | |
| | | FaceRpcElement::~FaceRpcElement() { |
| | |
| | | try { |
| | | auto server = rpcClient.getServer(); |
| | | if (!server) ERR("server is null"); |
| | | faces = server->faceDetect(image.cols, image.rows, sharedMemory->key().toStdString()); |
| | | |
| | | if (m_trackingRet) { |
| | | // #todo xxxx.detectFace |
| | | faces = faceTrackingFunc(m_channel, image); |
| | | } else { |
| | | faces = server->faceDetect(image.cols, image.rows, sharedMemory->key().toStdString()); |
| | | } |
| | | // DBG("faces.size " << faces.size()); |
| | | for (auto face: faces) { |
| | | for (auto &face: faces) { |
| | | ::FaceDetect::RECT &rect = face.rcFace; |
| | | ScoredRect scoredRect; |
| | | int x = face.rcFace.left; |
| | |
| | | scoredRect.rect = {x, y, w, h}; |
| | | scoredRect.score = (float) face.fAngle.confidence; |
| | | |
| | | if (trackingTrigger->triggerOnce(scoredRect)) { |
| | | bool newFaceRet = m_trackingRet ? trackingTrigger->triggerOnce(scoredRect, face.pfaceId) |
| | | : trackingTrigger->triggerOnce(scoredRect); |
| | | if (newFaceRet) { |
| | | auto property = server->faceProperty(image.cols, image.rows, face, sharedMemory->key().toStdString()); |
| | | trackingTrigger->getLastRect().properties["id"] = to_string(scoredRect.id); |
| | | trackingTrigger->getLastRect().properties["age"] = to_string(property.age); |
| | |
| | | QSharedMemory *sharedMemory; |
| | | ::FaceDetect::Faces faces; |
| | | TrackingTrigger *trackingTrigger; |
| | | |
| | | // 当前帧新增人脸 ? |
| | | ::FaceDetect::Faces triggerFaces; |
| | | // 当前帧新增人脸图片? |
| | | std::vector<cv::Mat> triggerMats; |
| | | // 当前帧新增人脸位置? |
| | | std::vector<ScoredRect> triggerScoredRects; |
| | | |
| | | //录像触发 |
| | | TriggerElement m_triggerElement; |
| | | // 是否使用sdk跟踪 |
| | | bool m_trackingRet; |
| | | }; |
| | | |
| | | #endif // FACERPCELEMENT_H |
New file |
| | |
| | | // |
| | | // Created by ps on 18-12-18. |
| | | // |
| | | |
| | | #include "FaceTrackingWrapper.h" |
| | | #include "Debug.h" |
| | | |
| | | #include <FaceDetectServer/rpc/FaceServer.h> |
| | | #include <opencv2/opencv.hpp> |
| | | |
| | | static FaceTrackingWrapper g_faceTrackingWrapper; |
| | | static std::map<std::string, int> g_channelCache; |
| | | |
| | | static ::FaceDetect::Faces faceTrackingFunc(int channel, cv::Mat &image) { |
| | | FaceDetect::Faces faces; |
| | | int channel = 0; |
| | | BasicFace::FaceImage faceImage{image.cols, image.rows, image.step, image.data}; |
| | | |
| | | THFT_FaceInfo facePos[MAX_DETECT_FACE]; |
| | | int faceNum = THFT_FaceTracking(channel, image.data, facePos); |
| | | |
| | | if (faceNum > 0) { |
| | | for (int i = 0; i < faceNum; i++) { |
| | | FaceDetect::FacePos face; |
| | | auto &pos = facesPos[i]; |
| | | memcpy(&face, &pos, sizeof(pos) - sizeof(pos.pFacialData) - sizeof(pos.nFaceID)); |
| | | face.pFacialData.resize(sizeof(pos.pFacialData)); |
| | | memcpy(face.pFacialData.data(), pos.pFacialData, sizeof(pos.pFacialData)); |
| | | face.pfaceId = pos.nFaceID; |
| | | // DBG(face.fAngle.confidence); |
| | | faces.push_back(face); |
| | | } |
| | | } else { |
| | | DBG("Face num is 0"); |
| | | } |
| | | |
| | | } |
| | | |
| | | |
| | | FaceTrackingWrapper::FaceTrackingWrapper() { |
| | | |
| | | } |
| | | |
| | | FaceTrackingWrapper::~FaceTrackingWrapper() { |
| | | |
| | | } |
| | | |
| | | void FaceTrackingWrapper::setChannelParam(int channel, const BasicFace::InitParam &initParam) { |
| | | m_mapParam.insert(std::make_pair(channel, initParam)); |
| | | } |
| | | |
| | | bool FaceTrackingWrapper::initHandle() { |
| | | // todo add gpu support |
| | | int size = m_mapParam.size(); |
| | | THFT_Param *param = new THFT_Param[size]; |
| | | |
| | | for (auto &item :m_mapParam) { |
| | | int pos = item.first; |
| | | auto t_param = item.second; |
| | | param[pos].nDeviceID = t_param.nDeviceID; |
| | | param[pos].nImageWidth = t_param.nImageWidth; |
| | | param[pos].nImageHeight = t_param.nImageHeight; |
| | | param[pos].nMaxFaceNum = t_param.nMaxFaceNum; |
| | | param[pos].nSampleSize = t_param.nSampleSize; |
| | | param[pos].nDetectionIntervalFrame = t_param.nDetectionIntervalFrame; |
| | | } |
| | | int nNum = -1; |
| | | nNum = THFT_Create(size, param); |
| | | delete[] param; |
| | | return (nNum > 0); |
| | | } |
| | | |
| | | /*** |
| | | * @todo |
| | | * |
| | | * @param image |
| | | * @return |
| | | */ |
| | | std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::detectFace(const BasicFace::FaceImage &image) { |
| | | return vector<BasicFace::FaceDetectResult>(); |
| | | } |
| | | |
| | | std::vector<BasicFace::FaceDetectResult> |
| | | FaceTrackingWrapper::trackingFace(int channel, const BasicFace::FaceImage &image) { |
| | | vector<BasicFace::FaceDetectResult> results; |
| | | // ClockTimer ct("CasiaFaceWapper::detectFace"); |
| | | if (channel == -1) { |
| | | ERR("invalid face channel, face detect faild"); |
| | | return results; |
| | | } |
| | | THFT_FaceInfo facePos[MAX_DETECT_FACE]; |
| | | int faceNum = THFT_FaceTracking(channel, image.data, facePos); |
| | | //int faceNum = THFI_DetectFace(channel, image.data, 24, image.width, image.height, facePos, MAX_DETECT_FACE); |
| | | if (faceNum < 0) { |
| | | ERR("THFI_DetectFace return " << faceNum); |
| | | } else { |
| | | results.resize(faceNum); |
| | | for (int i = 0; i < faceNum; i++) { |
| | | BasicFace::FaceDetectResult &result = results[i]; |
| | | THFT_FaceInfo &face = facePos[i]; |
| | | result.angle = sqrt(face.fAngle.pitch * face.fAngle.pitch / 3 + |
| | | face.fAngle.roll * face.fAngle.roll / 3 + |
| | | face.fAngle.yaw * face.fAngle.yaw / 3); |
| | | result.yaw = face.fAngle.yaw; |
| | | result.pitch = face.fAngle.pitch; |
| | | result.roll = face.fAngle.roll; |
| | | result.left = face.rcFace.left; |
| | | result.top = face.rcFace.top; |
| | | result.width = face.rcFace.right - face.rcFace.left; |
| | | result.height = face.rcFace.bottom - face.rcFace.top; |
| | | // result.score = face.nQuality / 100.0f; |
| | | result.score = face.fAngle.confidence; |
| | | result.trackingId = face.nFaceID; |
| | | } |
| | | } |
| | | return results; |
| | | } |
| | | |
| | | /*** |
| | | * @todo |
| | | * |
| | | * @param image |
| | | * @return |
| | | */ |
| | | vector<BasicFace::FaceFeatureResult> FaceTrackingWrapper::extractFace(const BasicFace::FaceImage &image) { |
| | | return vector<BasicFace::FaceFeatureResult>(); |
| | | } |
| | | |
| | | /*** |
| | | * @todo |
| | | * @param feature1 |
| | | * @param feature2 |
| | | * @return |
| | | */ |
| | | float FaceTrackingWrapper::compareFeature(BasicFace::Feature &feature1, BasicFace::Feature &feature2) { |
| | | return 0; |
| | | } |
| | | |
| | | |
| | | |
New file |
| | |
| | | // |
| | | // Created by ps on 18-12-18. |
| | | // |
| | | |
| | | #ifndef TESTCODE_FACETRACKINGWRAPPER_H |
| | | #define TESTCODE_FACETRACKINGWRAPPER_H |
| | | |
| | | #include <stdio.h> |
| | | #include <stdlib.h> |
| | | #include <string.h> |
| | | #include <sys/time.h> |
| | | #include <time.h> |
| | | #include <list> |
| | | |
| | | #include <opencv2/opencv.hpp> |
| | | #include <FiStdDefEx.h> |
| | | |
| | | #include <THFaceTracking_i.h> |
| | | |
| | | #include "FaceDefine.h" |
| | | |
| | | |
| | | //typedef std::list<FaceTrackingInfo> ObjectList; |
| | | #define MAX_DETECT_FACE 50 |
| | | |
| | | |
/**
 * Thin wrapper around the THFaceTracking SDK: per-channel parameter setup,
 * handle creation, and per-frame tracking.
 *
 * detectFace/extractFace/compareFeature are unimplemented stubs in the
 * current .cpp (each is marked @todo there).
 */
class FaceTrackingWrapper {
public:
    explicit FaceTrackingWrapper();

    virtual ~FaceTrackingWrapper();

    // Registers SDK init parameters for one channel; call before initHandle().
    void setChannelParam(int channel, const BasicFace::InitParam &);

    // Creates the SDK handles (THFT_Create) for every registered channel.
    bool initHandle();

    // @todo unimplemented stub — always returns an empty vector.
    std::vector<BasicFace::FaceDetectResult> detectFace(const BasicFace::FaceImage &image);

    // One tracking pass (THFT_FaceTracking) on the given channel.
    std::vector<BasicFace::FaceDetectResult> trackingFace(int channel, const BasicFace::FaceImage &image);

    // @todo unimplemented stub — always returns an empty vector.
    vector<BasicFace::FaceFeatureResult> extractFace(const BasicFace::FaceImage &image);

    // @todo unimplemented stub — always returns 0.
    static float compareFeature(BasicFace::Feature &feature1, BasicFace::Feature &feature2);

private:
    int nGPUDeviceID = 0;

    // SDK initialisation parameters, keyed by channel id
    std::map<int, BasicFace::InitParam> m_mapParam;

    // targets of the previous tracking pass (currently disabled)
    // std::map<int, ObjectList> m_objListCache;
};
| | | |
| | | |
| | | #endif //TESTCODE_FACETRACKINGWRAPPER_H |
| | |
| | | using namespace std; |
| | | |
| | | struct ScoredRect { |
| | | ScoredRect() : id(-1) ,isMask(false){} |
| | | ScoredRect() : id(-1), isMask(false) {} |
| | | |
| | | bool isMask; |
| | | float score; |
| | | cv::Rect rect; |
| | |
| | | TrackingTrigger(float threshold) : |
| | | threshold(threshold), faceTrackingId(0) {} |
| | | |
| | | bool triggerOnce(ScoredRect &rect) { |
| | | |
    /**
     * Frame-wise trigger using an externally supplied tracking id (e.g. the
     * SDK's face id).
     *
     * faceId < 0 falls back to the private overlap-based overload. Otherwise:
     * a rect whose id was already present last frame is carried over (its
     * properties are copied) but does NOT re-trigger; an unseen id triggers
     * only when rect.score >= threshold.
     *
     * @param rect   in/out: id and properties are assigned on carry-over/trigger
     * @param faceId external tracking id, or negative for "no id"
     * @return true exactly when this rect is a newly triggered face
     */
    bool triggerOnce(ScoredRect &rect, long faceId = -1) {
        if (faceId < 0) {
            // no external id available — use the IoU/overlap-based matcher
            return triggerOnce(rect, false);
        } else {
            bool found = false;
            // NOTE(review): copies each ScoredRect per iteration; a const& would
            // avoid that — left untouched since this edit is comment-only.
            for (auto lastRect: lastScoreRects) {
                if (lastRect.id >= 0 && lastRect.id == faceId) {
                    // same face as last frame: keep it alive for the next frame
                    found = true;
                    rect.id = faceId;
                    rect.properties = lastRect.properties;
                    tempScoreRects.push_back(rect);
                    break;
                }
            }
            if (!found) {
                if (rect.score < threshold) {
                    // below threshold: dropped, not remembered for the next frame
                    // tempScoreRects.push_back(rect);
                    return false;
                } else {
                    rect.id = faceId;
                    tempScoreRects.push_back(rect);
                    return true;
                }
            }
            return false;
        }
    }
| | | |
| | | void triggerLine() { |
| | | lastScoreRects.swap(tempScoreRects);// = tempScoreRects; |
| | | tempScoreRects.clear(); |
| | | } |
| | | |
| | | ScoredRect &getLastRect() { |
| | | return tempScoreRects[tempScoreRects.size() - 1]; |
| | | } |
| | | |
    // Snapshot of the previous frame's rects.
    // NOTE(review): returns by value — a full copy per call; callers that only
    // iterate could take a const reference instead.
    std::vector<ScoredRect> getLastScoreRects() const {
        return lastScoreRects;
    }
| | | |
| | | private: |
| | | bool triggerOnce(ScoredRect &rect, bool) { |
| | | bool found = false; |
| | | for (auto lastRect: lastScoreRects) { |
| | | if (lastRect.id >= 0 && (rect.rect & lastRect.rect).area() > lastRect.rect.area() * 0.4) { |
| | |
| | | return false; |
| | | } |
| | | |
| | | void triggerLine() { |
| | | lastScoreRects = tempScoreRects; |
| | | tempScoreRects.clear(); |
| | | } |
| | | |
| | | ScoredRect &getLastRect() { |
| | | return tempScoreRects[tempScoreRects.size() - 1]; |
| | | } |
| | | |
| | | std::vector<ScoredRect> getLastScoreRects() const { |
| | | return lastScoreRects; |
| | | } |
| | | |
| | | private: |
| | | float threshold; |
| | | std::vector<ScoredRect> lastScoreRects; |
| | | std::vector<ScoredRect> tempScoreRects; |
| | | std::vector<int> lastScoreInts; |
| | | std::vector<int> tempScoreInts; |
| | | std::atomic<long> faceTrackingId; |
| | | }; |
| | | |
| | |
| | | ../StructureApp/RecordVideoElement.cpp |
| | | ../StructureApp/JudgmentRetrogradeTool.cpp |
| | | ../StructureApp/PerimeterElement.cpp |
| | | ../StructureApp/FaceTrackingWrapper.cpp |
| | | |
| | | ../StructureApp/NewRecordVideoElement.cpp |
| | | |
| | |
| | | } |
| | | |
| | | void RtspAnalysElement::init() { |
| | | appPref.setIntData("FaceTrackingRet", 1); |
| | | auto lst = m_lDBTool->searchCamDevTableAll(); |
| | | auto lst_dev = m_lDBTool->searchConfigTableWithinServerInfo(); |
| | | |
| | |
| | | vector<int> sdkDetCoVec = chnString2Vec(en_sdk.str_det_thr.toStdString()); |
| | | vector<int> sdkComCoVec = chnString2Vec(en_sdk.str_cmp_thr.toStdString()); |
| | | |
| | | int camId=item.str_cam_dev_id.toInt(); |
| | | int camId = item.str_cam_dev_id.toInt(); |
| | | int t_size = sdkVec.size(); |
| | | for (int i = 0; i < t_size; i++) { |
| | | switch (sdkVec[i]) { |
| | |
| | | SETSCORE(sdkDetCoVec, i, t_camIdex + "face.det"); |
| | | SETSCORE(sdkComCoVec, i, t_camIdex + "face.cmp"); |
| | | DBG(" TESTCODE " << sdkDetCoVec[i] << " " << sdkComCoVec[i]); |
| | | // #todo add param |
| | | |
| | | break; |
| | | } |
| | |
| | | json["perimeter.enable"] = "1"; |
| | | SETSCORE(sdkDetCoVec, i, t_camIdex + "perimeter.det"); |
| | | SETSCORE(sdkComCoVec, i, t_camIdex + "perimeter.cmp"); |
| | | setDataByType(4,json,camId); |
| | | setDataByType(4, json, camId); |
| | | break; |
| | | } |
| | | } |
| | | case 5: { |
| | | |
| | | json["crowd.enable"] = "1"; |
| | | SETSCORE(sdkDetCoVec, i, t_camIdex + "crowd.det"); |
| | | SETSCORE(sdkComCoVec, i, t_camIdex + "crowd.cmp"); |
| | | setDataByType(5,json,camId); |
| | | setDataByType(5, json, camId); |
| | | break; |
| | | } |
| | | case 6: { |
| | |
| | | json["keepRight.enable"] = "1"; |
| | | SETSCORE(sdkDetCoVec, i, t_camIdex + "keepRight.det"); |
| | | SETSCORE(sdkComCoVec, i, t_camIdex + "keepRight.cmp"); |
| | | setDataByType(6,json,camId); |
| | | setDataByType(6, json, camId); |
| | | break; |
| | | } |
| | | } |
| | |
| | | } |
| | | return result; |
| | | } |
| | | void RtspAnalysElement::setDataByType(int type,Json::Value& json,int camId) |
| | | { |
| | | |
| | | void RtspAnalysElement::setDataByType(int type, Json::Value &json, int camId) { |
| | | switch (type) { |
| | | case 4: |
| | | { |
| | | auto rule=m_lDBTool->searchPerimeterRuleByCamId(camId); |
| | | json["perimeter.area"]=rule.strAreas.toStdString(); |
| | | json["perimeter.num"]=rule.nAlarmPeopleNum; |
| | | json["perimeter.delay"]=rule.nTriggerDelay; |
| | | json["perimeter.tolerance"]=rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 5: |
| | | { |
| | | auto rule=m_lDBTool->searchCrowdRuleByCamId(camId); |
| | | json["crowd.area"]=rule.strAreas.toStdString(); |
| | | json["crowd.num"]=rule.nAlarmPeopleNum; |
| | | json["crowd.delay"]=rule.nTriggerDelay; |
| | | json["crowd.tolerance"]=rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 6: |
| | | { |
| | | auto rule=m_lDBTool->searchActRuleByCamId(camId); |
| | | json["keepRight.leftArea"]=rule.strAreas.toStdString(); |
| | | json["keepRight.leftLine"]=rule.strLine.toStdString(); |
| | | json["keepRight.rightArea"]=rule.strExAreas.toStdString(); |
| | | json["keepRight.rightLine"]=rule.strExLine.toStdString(); |
| | | json["keepRight.delay"]=rule.nTriggerDelay; |
| | | json["keepRight.tolerance"]=rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | default: |
| | | break; |
| | | case 4: { |
| | | auto rule = m_lDBTool->searchPerimeterRuleByCamId(camId); |
| | | json["perimeter.area"] = rule.strAreas.toStdString(); |
| | | json["perimeter.num"] = rule.nAlarmPeopleNum; |
| | | json["perimeter.delay"] = rule.nTriggerDelay; |
| | | json["perimeter.tolerance"] = rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 5: { |
| | | auto rule = m_lDBTool->searchCrowdRuleByCamId(camId); |
| | | json["crowd.area"] = rule.strAreas.toStdString(); |
| | | json["crowd.num"] = rule.nAlarmPeopleNum; |
| | | json["crowd.delay"] = rule.nTriggerDelay; |
| | | json["crowd.tolerance"] = rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 6: { |
| | | auto rule = m_lDBTool->searchActRuleByCamId(camId); |
| | | json["keepRight.leftArea"] = rule.strAreas.toStdString(); |
| | | json["keepRight.leftLine"] = rule.strLine.toStdString(); |
| | | json["keepRight.rightArea"] = rule.strExAreas.toStdString(); |
| | | json["keepRight.rightLine"] = rule.strExLine.toStdString(); |
| | | json["keepRight.delay"] = rule.nTriggerDelay; |
| | | json["keepRight.tolerance"] = rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | default: |
| | | break; |
| | | } |
| | | } |