#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <unistd.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <map>
#include <thread>

#include "FiStdDefEx.h"
#include "detect.h"

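// Constructor: clamps the caller-supplied limits to the compile-time bounds
// (MAX_CHANNEL, MAX_FACE_NUM, MAX_ANGLE, ..., presumably defined in detect.h /
// FiStdDefEx.h), then opens the capture device and creates the THFT tracking
// channels.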
faceTracking::faceTracking(int devId, int chan, int maxFaceNum, int minWidth, int yaw, int pitch, int roll,
                           int confidence, int sampSize, int interval) {
    nChannel = chan < MAX_CHANNEL ? chan : MAX_CHANNEL;
    nMaxFaceNum = maxFaceNum < MAX_FACE_NUM ? maxFaceNum : MAX_FACE_NUM;
    nSampleSize = sampSize / 80 * 80;   // round the sample size down to a multiple of 80
    setThresVal(confidence, minWidth, yaw, pitch, roll);
    nDetectionIntervalFrame = interval < 1 ? 1 : interval;

    int ret = initDecode(devId);
    if (ret < 0) {
        printf("initDecode failed: ret=%d video=%d\n", ret, devId);
    } else {
        printf("initDecode: ret=%d video=%d\n", ret, devId);
    }

    ret = initTrack();
    printf("initTrack: ret=%d\n", ret);
}

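// Destructor: releases the THFT tracker and the OpenCV capture device.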
faceTracking::~faceTracking() {
    printf("~faceTracking()\n");
    THFT_Release();
    vc.release();
}

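// Opens the local capture device with OpenCV and records the frame size that
// is later used to configure the tracker. Returns 0 on success, -1 if the
// device cannot be opened.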
int faceTracking::initDecode(int devId) {
    if (devId < 0) {
        devId = 0;   // fall back to the default capture device
    }

    bool bOpen = vc.open(devId);
    if (!bOpen) {
        printf("Open devId %d failed.\n", devId);
        return -1;
    }
    nWidth = (int) vc.get(CV_CAP_PROP_FRAME_WIDTH);
    nHeight = (int) vc.get(CV_CAP_PROP_FRAME_HEIGHT);
    printf("FRAME_WIDTH=%d, FRAME_HEIGHT=%d\n", nWidth, nHeight);

    return 0;
}

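// Creates nChannel tracking channels in the THFT SDK and allocates one
// per-channel map of faceID -> best confidence seen so far, which
// sendFacesToExtract() uses to avoid re-sending the same face.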
int faceTracking::initTrack(void) {
    printf("THFT_Create: nChannel:%d\n", nChannel);
    THFT_Param detParam;
    detParam.nImageWidth = nWidth;
    detParam.nImageHeight = nHeight;
    detParam.nMaxFaceNum = nMaxFaceNum;
    detParam.nSampleSize = nSampleSize;
    detParam.nDetectionIntervalFrame = nDetectionIntervalFrame;

    int num = THFT_Create(nChannel, &detParam);
    if (num != nChannel) {
        printf("THFT_Create failed!(ret=%d)\n", num);
        return -1;
    }
    printf("THFT_Create:ret:%d\n", num);

    // One faceID -> confidence map per channel.
    for (int i = 0; i < nChannel; i++) {
        recChanFaces.insert(std::make_pair(i, std::map<long, float>()));
    }

    return 0;
}

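// Reconfigures an existing tracking channel for a new frame size
// (e.g. after the capture source changes resolution).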
int faceTracking::resetWH(int w, int h, int chan) {
    printf("THFT_Reset:%d %d %d\n", chan, w, h);
    if ((chan >= nChannel) || (w <= 0) || (h <= 0)) {
        printf("THFT_Reset params error!\n");
        return -1;
    }
    THFT_Param detParam;
    detParam.nImageWidth = w;
    detParam.nImageHeight = h;
    detParam.nMaxFaceNum = nMaxFaceNum;
    detParam.nSampleSize = nSampleSize;
    detParam.nDetectionIntervalFrame = nDetectionIntervalFrame;

    // Reset the requested channel with the new geometry; the remaining
    // tracking parameters are carried over unchanged.
    int ret = THFT_Reset(chan, &detParam);
    if (ret != 0) {
        printf("THFT_Reset failed!(ret=%d)\n", ret);
        return -1;
    }
    return 0;
}

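// Runs one synchronous tracking pass on a raw frame buffer (BGR, as delivered
// by OpenCV) and returns the number of faces found; negative SDK codes are
// mapped to -1. The caller owns both imgData and the outObjs array.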
int faceTracking::detect(int chan, BYTE *imgData, THFT_FaceInfo *outObjs) {
    double t1, t2;
    t1 = msecond();
    int num = THFT_FaceTracking(chan, imgData, outObjs);
    t2 = msecond();
    printf("THFT_FaceTracking time = %.3f ms\n", t2 - t1);

    if (num < 0) {
        printf("THFT_FaceTracking:(ret=%d)\n", num);
        return -1;
    }

    return num;
}

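// Draws a thin bounding box around the face plus thicker "corner bracket"
// ticks whose length is 1/8 of the box width/height.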
void faceTracking::drawFaceRect(cv::Mat& frame, RECT rcFace)
{
    const int lengthPercent = 8;   // corner tick length = box size / 8
    const cv::Scalar color(225, 225, 225, 128);
    const int thick = 2;
    const int dx = (rcFace.right - rcFace.left) / lengthPercent;
    const int dy = (rcFace.bottom - rcFace.top) / lengthPercent;

    // Thin outline of the whole face rectangle.
    cv::rectangle(frame, cv::Point(rcFace.left, rcFace.top),
                  cv::Point(rcFace.right, rcFace.bottom),
                  color, 1, CV_AA, 0);

    // Thicker ticks at the four corners: one horizontal and one vertical
    // segment per corner, running along the box edges.
    const cv::Point corners[4] = {
        cv::Point(rcFace.left,  rcFace.top),
        cv::Point(rcFace.left,  rcFace.bottom),
        cv::Point(rcFace.right, rcFace.bottom),
        cv::Point(rcFace.right, rcFace.top)
    };
    const cv::Point dirs[4] = {
        cv::Point(dx, dy), cv::Point(dx, -dy),
        cv::Point(-dx, -dy), cv::Point(-dx, dy)
    };
    for (int i = 0; i < 4; i++) {
        cv::line(frame, corners[i], corners[i] + cv::Point(dirs[i].x, 0),
                 color, thick, CV_AA, 0);
        cv::line(frame, corners[i], corners[i] + cv::Point(0, dirs[i].y),
                 color, thick, CV_AA, 0);
    }
}

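// Spawns a detached worker thread for the given channel: grab a frame,
// mirror it, run THFT_FaceTracking, draw the face rectangles, publish the
// RGB frame to the UI via drawImage(), and forward qualifying faces to the
// feature-extraction path. When no face has been seen for longer than
// screenSaveTime_ms, the advertisement screen is toggled on via
// signalAdvertise().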
void faceTracking::run(int chan) {
    printf("faceTracking chan: %d \n", chan);
    std::thread([this](short chan) {
        cv::Mat frame;
        double showT1 = msecond();   // time of the last frame that contained a face
        double showT2 = 0;
        static bool isAdvNow = false;   // shared by all run() threads
        while (true) {
            vc >> frame;
            if (frame.empty()) {
                usleep(30000);   // 30 ms
                continue;
            }

            cv::flip(frame, frame, 1);   // mirror for a selfie-style preview

            THFT_FaceInfo *pFaceInfos = new THFT_FaceInfo[nMaxFaceNum];
            // double t1 = msecond();
            int num = THFT_FaceTracking(chan, frame.data, pFaceInfos);
            // double t2 = msecond();
            // printf("THFT_FaceTracking time = %.3f ms, num: %d \n", t2 - t1, num);

            for (int i = 0; i < num; i++) {
                showT1 = msecond();
                if (isAdvNow) {
                    emit signalAdvertise(false);   // a face is present: hide the advertisement
                    isAdvNow = false;
                }
                // draw face rect
                drawFaceRect(frame, pFaceInfos[i].rcFace);

                // char title[128];
                // auto it = mIDName.find(pFaceInfos[i].nFaceID);
                // if (it != mIDName.end()) {
                //     sprintf(title, "id=%ld %s", pFaceInfos[i].nFaceID, it->second.c_str());
                // } else {
                //     sprintf(title, "id=%ld", pFaceInfos[i].nFaceID);
                // }
                // cv::putText(frame, title, cv::Point(pFaceInfos[i].rcFace.left + 1, pFaceInfos[i].rcFace.bottom - 5),
                //             cv::FONT_HERSHEY_COMPLEX, 1.0f, cvScalar(0, 255, 255), 1, 8, false);
            }

            // Publish an RGB copy of the annotated frame to the UI; the
            // receiving slot is expected to free imgS.imgData.
            cv::Mat frameNew;
            cv::cvtColor(frame, frameNew, cv::COLOR_BGR2RGB);
            ImgToShow imgS;
            imgS.nWidth = frameNew.cols;
            imgS.nHeight = frameNew.rows;
            imgS.imgData = new BYTE[frameNew.cols * frameNew.rows * 3];
            memcpy(imgS.imgData, frameNew.data, frameNew.cols * frameNew.rows * 3);
            emit drawImage(imgS);
            // std::unique_lock<std::mutex> lockfs(mtxFrameShow, std::defer_lock);
            // lockfs.lock();
            // qFrameShow.push(imgS);
            // lockfs.unlock();

            if (num <= 0) {
                delete[] pFaceInfos;
            } else {
                // Hand a BGR copy of the frame plus the face list to the
                // extraction path; sendFacesToExtract() takes ownership of both.
                char *img = new char[frame.cols * frame.rows * 3];
                memcpy(img, frame.data, frame.cols * frame.rows * 3);
                // printf("sendFacesToExtract: wxh = %d x %d img:%p\n", frame.cols, frame.rows, img);
                sendFacesToExtract(chan, pFaceInfos, num, (BYTE *) img, frame.cols, frame.rows);
            }

            showT2 = msecond();
            if (showT2 - showT1 > screenSaveTime_ms) {
                // std::cout << "showT2 - showT1:" << showT2 - showT1 << " > " << screenSaveTime_ms << std::endl;
                if (!isAdvNow) {
                    emit signalAdvertise(true);   // no face for a while: show the advertisement
                    isAdvNow = true;
                }
            }
        }
    }, (short) chan).detach();
}

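// Filters the tracked faces against the pose/size/confidence thresholds and
// forwards the accepted ones (at most once per faceID, unless the confidence
// improves by confidenceInc or the per-ID re-send interval has elapsed) to
// the extraction queue. Takes ownership of `faces` and `imgData`: `faces` is
// always freed here, `imgData` is freed here unless it is handed to
// pushQImg2Extr(), which presumably frees it downstream.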
bool faceTracking::sendFacesToExtract(int chan, THFT_FaceInfo *faces, int num, BYTE *imgData, int width, int height) {
    bool ok = false;
    ImgToExtract *img2extr = new ImgToExtract;

    int faceCount = 0;

    for (int i = 0; i < num; i++) {
        if ((faces[i].fAngle.confidence >= treshConfidence) &&
            (faces[i].rcFace.right - faces[i].rcFace.left >= treshMinWidth) &&
            (std::abs(faces[i].fAngle.yaw) <= treshYaw) &&
            (std::abs(faces[i].fAngle.pitch) <= treshPitch) &&
            (std::abs(faces[i].fAngle.roll) <= treshRoll)) {
            auto scoreIter = recChanFaces[chan].find(faces[i].nFaceID);
            if (scoreIter == recChanFaces[chan].end()) {
                // First time this faceID passes the quality filters.
                recChanFaces[chan].insert(std::pair<long, float>(faces[i].nFaceID, faces[i].fAngle.confidence));
                ok = true;
                img2extr->vFaces.emplace_back(new THFT_FaceInfo(faces[i]));
                faceCount++;

                if (mIDTime.find(faces[i].nFaceID) == mIDTime.end()) {
                    mIDTime[faces[i].nFaceID] = msecond();
                }
            } else if ((scoreIter->second + confidenceInc) < faces[i].fAngle.confidence) {
                // Already sent, but this shot is noticeably more confident.
                recChanFaces[chan][faces[i].nFaceID] = faces[i].fAngle.confidence;
                ok = true;
                img2extr->vFaces.emplace_back(new THFT_FaceInfo(faces[i]));
                faceCount++;
            } else {
                // Same face, similar quality: re-send only after `interval` ms.
                double nowT = msecond();
                auto it = mIDTime.find(faces[i].nFaceID);
                if ((it == mIDTime.end()) || (nowT > (interval + mIDTime[faces[i].nFaceID]))) {
                    mIDTime[faces[i].nFaceID] = nowT;
                    recChanFaces[chan][faces[i].nFaceID] = faces[i].fAngle.confidence;
                    ok = true;
                    img2extr->vFaces.emplace_back(new THFT_FaceInfo(faces[i]));
                    faceCount++;
                }
            }
        } else {
            // Face fails the pose/size/confidence filters: show a hint instead.
            auto it = mIDName.find(faces[i].nFaceID);
            if (it != mIDName.end()) {
                emit signalTips(std::string(it->second) + std::string(",请进"));   // ", please come in"
            } else {
                emit signalTips(std::string("请正视镜头"));   // "Please look straight at the camera"
            }
        }
    }
    if (ok) {
        img2extr->nWidth = width;
        img2extr->nHeight = height;
        img2extr->imgData = imgData;   // ownership moves to the extraction queue
        pushQImg2Extr(img2extr);
    } else {
        delete[] imgData;   // imgData was allocated with new[] in run()
        delete img2extr;
    }
    delete[] faces;

    return ok;
}

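// Clamps the filter thresholds used by sendFacesToExtract(): angles are
// capped at MAX_ANGLE, the minimum face width is floored at MIN_WIDTH, and
// the confidence is converted from a percentage to a 0..1 fraction with a
// lower bound of MIN_CONFIDENCE percent.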
void faceTracking::setThresVal(int confidence, int width, int yaw, int pitch, int roll) {
    treshYaw = yaw < MAX_ANGLE ? yaw : MAX_ANGLE;
    treshPitch = pitch < MAX_ANGLE ? pitch : MAX_ANGLE;
    treshRoll = roll < MAX_ANGLE ? roll : MAX_ANGLE;
    treshMinWidth = width > MIN_WIDTH ? width : MIN_WIDTH;
    treshConfidence = confidence > MIN_CONFIDENCE ? (float) confidence / 100 : (float) MIN_CONFIDENCE / 100;
}