2 files deleted
3 files renamed
10 files modified
| | |
| | | memcpy(&face, &pos, sizeof(pos) - sizeof(pos.pFacialData)); |
| | | face.pFacialData.resize(sizeof(pos.pFacialData)); |
| | | memcpy(face.pFacialData.data(), pos.pFacialData, sizeof(pos.pFacialData)); |
| | | face.pfaceId = -1; |
| | | // DBG(face.fAngle.confidence); |
| | | faces.push_back(face); |
| | | } |
| | |
| | | ${LIBS} |
| | | ) |
| | | |
| | | #add_executable(AppPipeControllerTest |
| | | # AppPipeControllerTest.cpp |
| | | # ${SOURCES}) |
| | | #target_link_libraries(AppPipeControllerTest |
| | | # ${LIBS} |
| | | # ) |
| | |
| | | #include <QtCore/QString> |
| | | #include <basic/timer_counter/Clocktimer.h> |
| | | #include <basic/util/opencv/CvUtil.h> |
| | | #include "FaceTrackingWrapper.h" |
| | | |
| | | #define GETSCORE(IDENT) (appPref.getFloatData(IDENT) == -1 ? 95 : appPref.getFloatData(IDENT)) |
| | | |
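For reference, a minimal usage sketch of this macro; the `t_camIdex + "face.det"` key and the divide-by-100 normalization appear in other hunks of this change, but the exact call site shown here is an assumption:

    // Hypothetical call site: read the per-camera detection score, which the macro
    // defaults to 95 whenever appPref.getFloatData() reports -1 (key not configured).
    float t_score = GETSCORE(t_camIdex + "face.det");
    t_score = t_score / 100;  // normalize to [0, 1] before handing it to TrackingTrigger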
| | |
| | | sharedMemory(nullptr), trackingTrigger(nullptr) { |
| | | sharedMemory = new QSharedMemory(QString(shareMemoryName.c_str())); |
| | | if (!sharedMemory->create(4608 * 2592 * 4)) { |
| | | sharedMemory->attach(); |
| | | DBG("size is " << sharedMemory->size()); |
| | | } |
| | | |
| | |
| | | //#todo |
| | | string t_camIdex; |
| | | if (shareMemoryName.find("/") != std::string::npos) { |
| | | string_replace(shareMemoryName, "//", "/"); |
| | | auto dev_pos = shareMemoryName.find("/cut/") + 5; |
| | | auto ch_pos = shareMemoryName.find("/", dev_pos) + 1; |
| | | auto str_device_id = shareMemoryName.substr(dev_pos, ch_pos - dev_pos - 1); |
| | |
| | | t_score = t_score / 100; |
| | | |
| | | trackingTrigger = new TrackingTrigger(t_score); |
| | | m_trackingRet = appPref.getIntData("FaceTrackingRet"); |
| | | } |
| | | |
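A worked example of the index arithmetic in the constructor above, using a hypothetical shared-memory name (the real naming scheme is whatever the caller passes as shareMemoryName):

    // shareMemoryName = "/cut/dev01/ch2"                      (hypothetical)
    // dev_pos = find("/cut/") + 5                             -> 5, the 'd' of "dev01"
    // ch_pos  = find("/", dev_pos) + 1                        -> 11, just past the next '/'
    // str_device_id = substr(dev_pos, ch_pos - dev_pos - 1)   -> "dev01"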
| | | FaceRpcElement::~FaceRpcElement() { |
| | |
| | | try { |
| | | auto server = rpcClient.getServer(); |
| | | if (!server) ERR("server is null"); |
| | | |
| | | if (m_trackingRet) { |
| | | // #todo xxxx.detectFace |
| | | faces = faceTrackingFunc(m_channel, image); |
| | | } else { |
| | | faces = server->faceDetect(image.cols, image.rows, sharedMemory->key().toStdString()); |
| | | } |
| | | // DBG("faces.size " << faces.size()); |
| | | for (auto face: faces) { |
| | | for (auto &face: faces) { |
| | | ::FaceDetect::RECT &rect = face.rcFace; |
| | | ScoredRect scoredRect; |
| | | int x = face.rcFace.left; |
| | |
| | | scoredRect.rect = {x, y, w, h}; |
| | | scoredRect.score = (float) face.fAngle.confidence; |
| | | |
| | | if (trackingTrigger->triggerOnce(scoredRect)) { |
| | | bool newFaceRet = m_trackingRet ? trackingTrigger->triggerOnce(scoredRect, face.pfaceId) |
| | | : trackingTrigger->triggerOnce(scoredRect); |
| | | if (newFaceRet) { |
| | | auto property = server->faceProperty(image.cols, image.rows, face, sharedMemory->key().toStdString()); |
| | | trackingTrigger->getLastRect().properties["id"] = to_string(scoredRect.id); |
| | | trackingTrigger->getLastRect().properties["age"] = to_string(property.age); |
| | |
| | | QSharedMemory *sharedMemory; |
| | | ::FaceDetect::Faces faces; |
| | | TrackingTrigger *trackingTrigger; |
| | | |
| | | // Faces newly added in the current frame? |
| | | ::FaceDetect::Faces triggerFaces; |
| | | // Images of faces newly added in the current frame? |
| | | std::vector<cv::Mat> triggerMats; |
| | | // Positions of faces newly added in the current frame? |
| | | std::vector<ScoredRect> triggerScoredRects; |
| | | |
| | | // Recording trigger |
| | | TriggerElement m_triggerElement; |
| | | // Whether to use SDK tracking |
| | | bool m_trackingRet; |
| | | }; |
| | | |
| | | #endif // FACERPCELEMENT_H |
File was renamed from QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.cpp |
| | |
| | | #include "FaceTrackingWrapper.h" |
| | | #include "Debug.h" |
| | | |
| | | #include <FaceDetectServer/rpc/FaceServer.h> |
| | | #include <opencv2/opencv.hpp> |
| | | |
| | | static FaceTrackingWrapper g_faceTrackingWrapper; |
| | | static std::map<std::string, int> g_channelCache; |
| | | |
| | | static ::FaceDetect::Faces faceTrackingFunc(int channel, cv::Mat &image) { |
| | | FaceDetect::Faces faces; |
| | | int channel = 0; |
| | | BasicFace::FaceImage faceImage{image.cols, image.rows, image.step, image.data}; |
| | | |
| | | THFT_FaceInfo facePos[MAX_DETECT_FACE]; |
| | | int faceNum = THFT_FaceTracking(channel, image.data, facePos); |
| | | |
| | | if (faceNum > 0) { |
| | | for (int i = 0; i < faceNum; i++) { |
| | | FaceDetect::FacePos face; |
| | | auto &pos = facePos[i]; |
| | | memcpy(&face, &pos, sizeof(pos) - sizeof(pos.pFacialData) - sizeof(pos.nFaceID)); |
| | | face.pFacialData.resize(sizeof(pos.pFacialData)); |
| | | memcpy(face.pFacialData.data(), pos.pFacialData, sizeof(pos.pFacialData)); |
| | | face.pfaceId = pos.nFaceID; |
| | | // DBG(face.fAngle.confidence); |
| | | faces.push_back(face); |
| | | } |
| | | } else { |
| | | DBG("Face num is 0"); |
| | | } |
| | | |
| | | return faces; |
| | | } |
| | | |
| | | |
| | | FaceTrackingWrapper::FaceTrackingWrapper() { |
| | | |
| | |
| | | * @param image |
| | | * @return |
| | | */ |
| | | std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::detectFace(BasicFace::FaceImage image) { |
| | | std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::detectFace(const BasicFace::FaceImage &image) { |
| | | return vector<BasicFace::FaceDetectResult>(); |
| | | } |
| | | |
| | | std::vector<BasicFace::FaceDetectResult> FaceTrackingWrapper::trackingFace(int channel, BasicFace::FaceImage image) { |
| | | std::vector<BasicFace::FaceDetectResult> |
| | | FaceTrackingWrapper::trackingFace(int channel, const BasicFace::FaceImage &image) { |
| | | vector<BasicFace::FaceDetectResult> results; |
| | | // ClockTimer ct("CasiaFaceWapper::detectFace"); |
| | | if (channel == -1) { |
| | |
| | | * @param image |
| | | * @return |
| | | */ |
| | | vector<BasicFace::FaceFeatureResult> FaceTrackingWrapper::extractFace(BasicFace::FaceImage image) { |
| | | vector<BasicFace::FaceFeatureResult> FaceTrackingWrapper::extractFace(const BasicFace::FaceImage &image) { |
| | | return vector<BasicFace::FaceFeatureResult>(); |
| | | } |
| | | |
File was renamed from QiaoJiaSystem/testCodeMod/FaceTrackingWrapper.h |
| | |
| | | |
| | | bool initHandle(); |
| | | |
| | | std::vector<BasicFace::FaceDetectResult> detectFace(BasicFace::FaceImage image); |
| | | std::vector<BasicFace::FaceDetectResult> detectFace(const BasicFace::FaceImage &image); |
| | | |
| | | std::vector<BasicFace::FaceDetectResult> trackingFace(int channel, BasicFace::FaceImage image); |
| | | std::vector<BasicFace::FaceDetectResult> trackingFace(int channel, const BasicFace::FaceImage &image); |
| | | |
| | | vector<BasicFace::FaceFeatureResult> extractFace(BasicFace::FaceImage image); |
| | | vector<BasicFace::FaceFeatureResult> extractFace(const BasicFace::FaceImage &image); |
| | | |
| | | static float compareFeature(BasicFace::Feature &feature1, BasicFace::Feature &feature2); |
| | | |
| | |
| | | |
| | | struct ScoredRect { |
| | | ScoredRect() : id(-1), isMask(false) {} |
| | | |
| | | bool isMask; |
| | | float score; |
| | | cv::Rect rect; |
| | |
| | | TrackingTrigger(float threshold) : |
| | | threshold(threshold), faceTrackingId(0) {} |
| | | |
| | | bool triggerOnce(ScoredRect &rect) { |
| | | |
| | | bool triggerOnce(ScoredRect &rect, long faceId = -1) { |
| | | if (faceId < 0) { |
| | | return triggerOnce(rect, false); |
| | | } else { |
| | | bool found = false; |
| | | for (auto lastRect: lastScoreRects) { |
| | | if (lastRect.id >= 0 && lastRect.id == faceId) { |
| | | found = true; |
| | | rect.id = faceId; |
| | | rect.properties = lastRect.properties; |
| | | tempScoreRects.push_back(rect); |
| | | break; |
| | | } |
| | | } |
| | | if (!found) { |
| | | if (rect.score < threshold) { |
| | | // tempScoreRects.push_back(rect); |
| | | return false; |
| | | } else { |
| | | rect.id = faceId; |
| | | tempScoreRects.push_back(rect); |
| | | return true; |
| | | } |
| | | } |
| | | return false; |
| | | } |
| | | } |
| | | |
| | | void triggerLine() { |
| | | lastScoreRects.swap(tempScoreRects);// = tempScoreRects; |
| | | tempScoreRects.clear(); |
| | | } |
| | | |
| | | ScoredRect &getLastRect() { |
| | | return tempScoreRects[tempScoreRects.size() - 1]; |
| | | } |
| | | |
| | | std::vector<ScoredRect> getLastScoreRects() const { |
| | | return lastScoreRects; |
| | | } |
| | | |
| | | private: |
| | | bool triggerOnce(ScoredRect &rect, bool) { |
| | | bool found = false; |
| | | for (auto lastRect: lastScoreRects) { |
| | | if (lastRect.id >= 0 && (rect.rect & lastRect.rect).area() > lastRect.rect.area() * 0.4) { |
| | |
| | | return false; |
| | | } |
| | | |
| | | void triggerLine() { |
| | | lastScoreRects = tempScoreRects; |
| | | tempScoreRects.clear(); |
| | | } |
| | | |
| | | ScoredRect &getLastRect() { |
| | | return tempScoreRects[tempScoreRects.size() - 1]; |
| | | } |
| | | |
| | | std::vector<ScoredRect> getLastScoreRects() const { |
| | | return lastScoreRects; |
| | | } |
| | | |
| | | private: |
| | | float threshold; |
| | | std::vector<ScoredRect> lastScoreRects; |
| | | std::vector<ScoredRect> tempScoreRects; |
| | | std::vector<int> lastScoreInts; |
| | | std::vector<int> tempScoreInts; |
| | | std::atomic<long> faceTrackingId; |
| | | }; |
| | | |
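To make the intended call pattern of TrackingTrigger explicit, here is a minimal per-frame sketch modeled on the FaceRpcElement loop earlier in this change; the 0.95 threshold is arbitrary, and the right/bottom field names are assumed to match FaceDetect::RECT, so treat it as an illustration rather than the exact pipeline:

    // One frame: offer every detection to triggerOnce(); it returns true only for faces
    // that are new this frame, and getLastRect() then exposes the stored copy so extra
    // properties can be attached. triggerLine() commits the frame once all faces are processed.
    TrackingTrigger trigger(0.95f);
    for (auto &face : faces) {                        // faces: ::FaceDetect::Faces
        ScoredRect sr;
        sr.rect  = {face.rcFace.left, face.rcFace.top,
                    face.rcFace.right - face.rcFace.left,
                    face.rcFace.bottom - face.rcFace.top};
        sr.score = (float) face.fAngle.confidence;
        if (trigger.triggerOnce(sr, face.pfaceId)) {  // new face in this frame
            trigger.getLastRect().properties["id"] = std::to_string(sr.id);
        }
    }
    trigger.triggerLine();                            // end of frame: temp list becomes "last"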
| | |
| | | ../StructureApp/RecordVideoElement.cpp |
| | | ../StructureApp/JudgmentRetrogradeTool.cpp |
| | | ../StructureApp/PerimeterElement.cpp |
| | | ../StructureApp/FaceTrackingWrapper.cpp |
| | | |
| | | ../StructureApp/NewRecordVideoElement.cpp |
| | | |
| | |
| | | } |
| | | |
| | | void RtspAnalysElement::init() { |
| | | appPref.setIntData("FaceTrackingRet", 1); |
| | | auto lst = m_lDBTool->searchCamDevTableAll(); |
| | | auto lst_dev = m_lDBTool->searchConfigTableWithinServerInfo(); |
| | | |
| | |
| | | SETSCORE(sdkDetCoVec, i, t_camIdex + "face.det"); |
| | | SETSCORE(sdkComCoVec, i, t_camIdex + "face.cmp"); |
| | | DBG(" TESTCODE " << sdkDetCoVec[i] << " " << sdkComCoVec[i]); |
| | | // #todo add param |
| | | |
| | | break; |
| | | } |
| | |
| | | } |
| | | return result; |
| | | } |
| | | void RtspAnalysElement::setDataByType(int type,Json::Value& json,int camId) |
| | | { |
| | | |
| | | void RtspAnalysElement::setDataByType(int type, Json::Value &json, int camId) { |
| | | switch (type) { |
| | | case 4: |
| | | { |
| | | case 4: { |
| | | auto rule=m_lDBTool->searchPerimeterRuleByCamId(camId); |
| | | json["perimeter.area"]=rule.strAreas.toStdString(); |
| | | json["perimeter.num"]=rule.nAlarmPeopleNum; |
| | |
| | | json["perimeter.tolerance"]=rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 5: |
| | | { |
| | | case 5: { |
| | | auto rule=m_lDBTool->searchCrowdRuleByCamId(camId); |
| | | json["crowd.area"]=rule.strAreas.toStdString(); |
| | | json["crowd.num"]=rule.nAlarmPeopleNum; |
| | |
| | | json["crowd.tolerance"]=rule.nTriggertolerance; |
| | | break; |
| | | } |
| | | case 6: |
| | | { |
| | | case 6: { |
| | | auto rule=m_lDBTool->searchActRuleByCamId(camId); |
| | | json["keepRight.leftArea"]=rule.strAreas.toStdString(); |
| | | json["keepRight.leftLine"]=rule.strLine.toStdString(); |