#include <jni.h>
|
#include <stdio.h>
|
#include <stdlib.h>
|
#include <string.h>
|
#include <sys/time.h>
|
#include <time.h>
|
|
#include "RgbDetect.h"
|
#include "GrayDetect.h"
|
#include "LivenessDetect.h"
|
|
#include "FiStdDefEx.h"
|
//#include "THFaceLive_i.h"
|
|
#include <json/json.h>
|
#include <memory>
|
#include "THFeature_i.h"
|
#include "FaceDetectHelper.h"
|
#include "LogUtil.h"
|
#include <memory>
|
#include <sstream>
|
#include "TrackingTrigger.h"
|
#include "THFaceLive_i.h"
|
#include <map>
|
|
#include <memory>
|
#include "THFeature_i.h"
|
#include <sys/time.h>
|
#include <time.h>
|
#include <iostream>
|
#include <string>
|
#include <cstring>
|
|
|
#include <android/log.h>
|
#include <json_cpp/json/value.h>
|
|
//定义TAG之后,我们可以在LogCat通过TAG过滤出NDK打印的日志
|
#define TAG "jni ndk "
|
// 定义info信息
|
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,TAG,__VA_ARGS__)
|
// 定义debug信息
|
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
|
// 定义error信息
|
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,TAG,__VA_ARGS__)
|
|
using namespace std;
|
|
// Lazily-created process-wide singleton instance.
FaceDetectHelper *FaceDetectHelper::instance = nullptr;

// Returns the singleton helper, creating it on first use.
// NOTE(review): not thread-safe — two threads racing through the first call
// could each allocate an instance and leak one. The detection entry point
// locks a mutex, but that mutex is declared later in this file, so confirm
// all callers reach getInstance() from a single thread or add a lock here.
FaceDetectHelper *FaceDetectHelper::getInstance() {
    if (instance == nullptr) {
        instance = new FaceDetectHelper();
    }
    return instance;
}
|
|
//get current system time
|
// Returns the current wall-clock time in milliseconds
// (microsecond resolution, from gettimeofday).
double msecond()
{
    struct timeval now;
    gettimeofday(&now, 0);
    double millis = (double)now.tv_sec * 1.0e3;
    millis += (double)now.tv_usec * 1.0e-3;
    return millis;
}
|
|
// Default constructor: all SDK setup is deferred to initSdk().
FaceDetectHelper::FaceDetectHelper() {}

// Scratch buffer for features extracted from camera frames; allocated in
// initSdk() once EF_Size() is known.
BYTE* feature1 = nullptr;
// Scratch buffer for features extracted from still images (ID-card photos);
// allocated in initSdk().
BYTE* idcard_feature = nullptr;
// Size in bytes of one feature vector, as reported by EF_Size(); 0 until
// initSdk() has run successfully.
int featureSize = 0;
|
bool FaceDetectHelper::initSdk(const char* modelPath) {
|
if (!init_sdk_success) {
|
int nOmpThreadNum = 2;
|
THFI_Param detParam;
|
detParam.nMinFaceSize = 50;
|
detParam.nRollAngle = 30;
|
detParam.bOnlyDetect = false;
|
detParam.dwReserved = NULL;
|
THFI_SetDir(modelPath, modelPath);
|
int ret = THFI_Create_Ex(2, nOmpThreadNum);
|
if (ret < 0) {
|
printf("THFI_Create failed!(ret=%d)\n", ret);
|
init_sdk_success = false;
|
return init_sdk_success;
|
}
|
EF_SetDir(modelPath, modelPath);
|
ret=EF_Init_Ex(2, nOmpThreadNum);
|
if (ret < 0)
|
{
|
printf("EF_Init failed!(ret=%d)\n",ret);
|
THFI_Release();
|
init_sdk_success = false;
|
return init_sdk_success;
|
}
|
featureSize = EF_Size();
|
feature1 = new BYTE[featureSize];
|
idcard_feature = new BYTE[featureSize];
|
|
|
THFL_SDK_SetDir(modelPath, modelPath);
|
ret = THFL_Create_Ex(1, 2);
|
if (ret < 0) {
|
printf("THFL_Create_Ex failed!(ret=%d)\n", ret);
|
THFI_Release();
|
init_sdk_success = false;
|
}
|
|
init_sdk_success = true;
|
}
|
return init_sdk_success;
|
}
|
|
// Tracker that assigns stable ids to detected faces across frames.
// NOTE(review): 0.8 is the threshold passed to TrackingTrigger — its exact
// semantics (score/IoU) are defined in TrackingTrigger.h; confirm there.
TrackingTrigger trackingTrigger(0.8);
ScoredRect sr;            // NOTE(review): appears unused in this file
long track_id[10];        // per-face-slot track id from the tracker
bool liveness[10];        // per-face-slot most recent liveness result
string featureNames[10];  // per-face-slot path of the last written feature file

// Timestamp (ms) of the last successful feature extraction; used to
// throttle re-extraction to at most once per second per unchanged track.
double prevExtractFeatureTime=0;

#include <pthread.h>
#include <mutex>
// Serializes detectRealFace(): the detection buffers and the globals above
// are shared mutable state.
std::mutex myMutex;
|
|
/**
 * Detects faces in an RGB frame file (optionally cross-checking liveness
 * with the gray/IR frame), optionally extracts and persists face features,
 * and returns one record per face:
 *   "left,top,right,bottom,confidence,trackId,notDuplicate,活体|非活体,
 *    featureId,featurePath,yaw,pitch,roll"
 * with records separated by '|'. Returns "" when there is no RGB frame,
 * SDK init fails, or face detection fails.
 *
 * Thread-safe: the whole call is serialized on myMutex.
 */
string
FaceDetectHelper::detectRealFace(const char* modelPath, const char *rgbFileName, const char *grayFileName, int width,
                                 int height, const char* baseFeatureName, int shouldExtractFeature, bool useGrayCamera, int detectFaceCount) {
    std::lock_guard<std::mutex> guard(myMutex);
    int featureCount = 0;
    string faces = "";
    if (rgbFileName == nullptr) {
        // Nothing to detect. (The original reached a dead branch that logged
        // the misleading message "初始化SDK失败" in this case.)
        return faces;
    }
    trackingTrigger.triggerLine();
    // BUG FIX: the result of initSdk() was previously ignored, so a failed
    // init proceeded straight into detection.
    if (!initSdk(modelPath)) {
        LogUtil::log("初始化SDK失败");
        return faces;
    }
    GrayDetect grayDetect(grayFileName, width, height);
    RgbDetect rgbDetect(rgbFileName, width, height);
    LivenessDetect livenessDetect;
    if (!rgbDetect.detect(detectFaceCount)) {
        LogUtil::log("红外人脸检测失败");
        return faces;
    }
    stringstream ss;
    int face_nums = RgbDetect::face_nums;
    int face_nums_size = rgbDetect.facePos.get()->size();
    for (int i = 0; i < face_nums && i < face_nums_size; i++) {
        try {
            THFI_FacePos *fps = &rgbDetect.facePos.get()->at(i);
            ScoredRect scoredRect{fps[0].fAngle.confidence, {
                    fps[0].rcFace.left,
                    fps[0].rcFace.top,
                    fps[0].rcFace.right,
                    fps[0].rcFace.bottom
            }, -1, map<string, string>()};
            bool not_duplicate = trackingTrigger.triggerOnce(scoredRect);
            bool track_id_changed = (track_id[i] != scoredRect.id);
            track_id[i] = scoredRect.id;
            if (track_id[i] == -1) {
                // Face not (yet) accepted by the tracker; skip it.
                continue;
            }
            if (useGrayCamera && shouldExtractFeature > 0) {
                grayDetect.detect(detectFaceCount);
                liveness[i] = livenessDetect.detect(i);
            }
            int featureId = i;
            string featurePath = "";
            if (shouldExtractFeature > 0) {
                // Re-extract only for a new track id, or at most once a second.
                if (track_id_changed || (msecond() - prevExtractFeatureTime) > 1000) {
                    memset(feature1, 0, featureSize);
                    int ret = EF_Extract(0, rgbDetect.rgb_image_buffer.get()->data(), width, height, 3, &fps[0], feature1);
                    if (ret == 1) {
                        featureCount++;
                        prevExtractFeatureTime = msecond();
                        string featureName(baseFeatureName);
                        featureName.append("_");
                        featureName.append(to_string(track_id[i]));
                        featureName.append("_");
                        featureName.append(to_string(i));
                        featureNames[i] = featureName;
                        // BUG FIX: fopen() was unchecked; a failed open made
                        // fwrite() dereference a null FILE*.
                        FILE *file = fopen(featureName.c_str(), "wb");
                        if (file != nullptr) {
                            fwrite(feature1, sizeof(BYTE), featureSize, file);
                            fclose(file);
                        } else {
                            LogUtil::log("failed to open feature file for writing");
                            featureId = -1;
                        }
                    } else {
                        featureId = -1;
                    }
                }
                featurePath = featureNames[i];
            }
            if (shouldExtractFeature == 0) {
                featureId = -1;
                featurePath = "";
            }
            ss << fps[0].rcFace.left << "," << fps[0].rcFace.top << ","
               << fps[0].rcFace.right
               << "," << fps[0].rcFace.bottom << "," << fps[0].fAngle.confidence
               << "," << track_id[i] << "," << not_duplicate << "," << (liveness[i] ? "活体" : "非活体")
               << "," << featureId << "," << featurePath << "," << fps[0].fAngle.yaw << "," << fps[0].fAngle.pitch << "," << fps[0].fAngle.roll;
            // BUG FIX: the separator was only emitted after face 0, so three
            // or more face records ran together without a delimiter.
            if (face_nums > 1 && i != (face_nums - 1)) {
                ss << "|";
            }
        } catch (exception &e) {
            LogUtil::log(e.what());
        }
    }
    return ss.str();
}
|
|
/**
 * Variant of detectRealFace that takes the RGB frame as a Java byte[] of
 * BGR pixels (copied into RgbDetect's static buffer) instead of reading it
 * from a file. Output format is identical to detectRealFace.
 *
 * NOTE(review): unlike detectRealFace this variant does not lock myMutex —
 * confirm callers serialize access, since the same global buffers are used.
 */
string
FaceDetectHelper::detectRealFace2(JNIEnv *env, const char* modelPath, const char *rgbFileName, const char *grayFileName, int width,
                                  int height, const char* baseFeatureName,
                                  int shouldExtractFeature, bool useGrayCamera, int detectFaceCount, jbyteArray bgrArray) {
    int featureCount = 0;
    string faces = "";
    if (rgbFileName != nullptr)
        trackingTrigger.triggerLine();
    // BUG FIX: on initSdk() failure the original function logged and then
    // fell off the end of a non-void function (undefined behavior).
    if (!initSdk(modelPath)) {
        LogUtil::log("初始化SDK失败");
        return faces;
    }
    RgbDetect rgbDetect(nullptr, width, height);
    if (bgrArray != nullptr) {
        RgbDetect::width = width;
        RgbDetect::height = height;
        long sz = env->GetArrayLength(bgrArray);
        if (RgbDetect::rgb_image_buffer == nullptr) {
            RgbDetect::fileSize = sz;
            RgbDetect::rgb_image_buffer = make_shared<vector<unsigned char>>(sz);
        }
        if (RgbDetect::facePos == nullptr) {
            RgbDetect::facePos = make_shared<vector<THFI_FacePos>>(5);
        }
        // Copy the Java BGR pixels into the native detection buffer.
        unsigned char* img = (unsigned char*)env->GetByteArrayElements(bgrArray, 0);
        memcpy(RgbDetect::rgb_image_buffer.get()->data(), img, (size_t)sz);
        env->ReleaseByteArrayElements(bgrArray, (signed char*)img, 0);
    }
    GrayDetect grayDetect(grayFileName, width, height);
    LivenessDetect livenessDetect;
    if (rgbFileName == nullptr) {
        // Nothing to detect. (The original logged the misleading message
        // "初始化SDK失败" here.)
        return faces;
    }
    if (!rgbDetect.detect(detectFaceCount)) {
        LogUtil::log("红外人脸检测失败");
        return faces;
    }
    stringstream ss;
    int face_nums = RgbDetect::face_nums;
    int face_nums_size = rgbDetect.facePos.get()->size();
    for (int i = 0; i < face_nums && i < face_nums_size; i++) {
        try {
            THFI_FacePos *fps = &rgbDetect.facePos.get()->at(i);
            ScoredRect scoredRect{fps[0].fAngle.confidence, {
                    fps[0].rcFace.left,
                    fps[0].rcFace.top,
                    fps[0].rcFace.right,
                    fps[0].rcFace.bottom
            }, -1, map<string, string>()};
            bool not_duplicate = trackingTrigger.triggerOnce(scoredRect);
            bool track_id_changed = (track_id[i] != scoredRect.id);
            track_id[i] = scoredRect.id;
            if (track_id[i] == -1) {
                // Face not (yet) accepted by the tracker; skip it.
                continue;
            }
            if (useGrayCamera) {
                grayDetect.detect(detectFaceCount);
                liveness[i] = livenessDetect.detect(i);
            }
            int featureId = i;
            string featurePath = "";
            if (shouldExtractFeature > 0) {
                // Re-extract only for a new track id, or at most once a second.
                if (track_id_changed || (msecond() - prevExtractFeatureTime) > 1000) {
                    memset(feature1, 0, featureSize);
                    int ret = EF_Extract(0, rgbDetect.rgb_image_buffer.get()->data(), width, height, 3, &fps[0], feature1);
                    if (ret == 1) {
                        featureCount++;
                        prevExtractFeatureTime = msecond();
                        string featureName(baseFeatureName);
                        featureName.append("_");
                        featureName.append(to_string(track_id[i]));
                        featureName.append("_");
                        featureName.append(to_string(i));
                        featureNames[i] = featureName;
                        // BUG FIX: fopen() was unchecked; a failed open made
                        // fwrite() dereference a null FILE*.
                        FILE *file = fopen(featureName.c_str(), "wb");
                        if (file != nullptr) {
                            fwrite(feature1, sizeof(BYTE), featureSize, file);
                            fclose(file);
                        } else {
                            LogUtil::log("failed to open feature file for writing");
                            featureId = -1;
                        }
                    } else {
                        featureId = -1;
                    }
                }
                featurePath = featureNames[i];
            }
            if (shouldExtractFeature == 0) {
                featureId = -1;
                featurePath = "";
            }
            ss << fps[0].rcFace.left << "," << fps[0].rcFace.top << ","
               << fps[0].rcFace.right
               << "," << fps[0].rcFace.bottom << "," << fps[0].fAngle.confidence
               << "," << track_id[i] << "," << not_duplicate << "," << (liveness[i] ? "活体" : "非活体")
               << "," << featureId << "," << featurePath << "," << fps[0].fAngle.yaw << "," << fps[0].fAngle.pitch << "," << fps[0].fAngle.roll;
            // BUG FIX: the separator was only emitted after face 0, so three
            // or more face records ran together without a delimiter.
            if (face_nums > 1 && i != (face_nums - 1)) {
                ss << "|";
            }
        } catch (exception &e) {
            LogUtil::log(e.what());
        }
    }
    return ss.str();
}
|
|
|
/**
 * Multi-channel variant of detectRealFace2: uses a per-channel detection
 * buffer (rgb_image_buffer4/facePos4) and passes `channel` through to the
 * SDK calls. Output format is identical to detectRealFace.
 *
 * Differences preserved from the original: the tracker is not consulted
 * (not_duplicate is always true and track ids stay at their raw value),
 * faces are not skipped on id -1, and feature extraction runs on every
 * frame (the time/track throttle was deliberately disabled).
 */
string
FaceDetectHelper::detectRealFace4(JNIEnv *env, int channel, const char* modelPath, const char *rgbFileName, const char *grayFileName, int width,
                                  int height, const char* baseFeatureName,
                                  int shouldExtractFeature, bool useGrayCamera, int detectFaceCount, jbyteArray bgrArray) {
    int featureCount = 0;
    string faces = "";
    // BUG FIX: on initSdk() failure the original function fell off the end
    // of a non-void function (undefined behavior).
    if (!initSdk(modelPath)) {
        LogUtil::log("初始化SDK失败");
        return faces;
    }
    RgbDetect rgbDetect(nullptr, width, height);
    if (bgrArray != nullptr) {
        RgbDetect::width = width;
        RgbDetect::height = height;
        long sz = env->GetArrayLength(bgrArray);
        if (RgbDetect::rgb_image_buffer4 == nullptr) {
            RgbDetect::fileSize = sz;
            RgbDetect::rgb_image_buffer4 = make_shared<vector<unsigned char>>(sz);
        }
        if (RgbDetect::facePos4 == nullptr) {
            RgbDetect::facePos4 = make_shared<vector<THFI_FacePos>>(5);
        }
        // Copy the Java BGR pixels into the per-channel native buffer.
        unsigned char* img = (unsigned char*)env->GetByteArrayElements(bgrArray, 0);
        memcpy(RgbDetect::rgb_image_buffer4.get()->data(), img, (size_t)sz);
        env->ReleaseByteArrayElements(bgrArray, (signed char*)img, 0);
    }
    GrayDetect grayDetect(grayFileName, width, height);
    LivenessDetect livenessDetect;
    if (rgbFileName == nullptr) {
        // Nothing to detect. (The original logged the misleading message
        // "初始化SDK失败" here.)
        return faces;
    }
    if (!rgbDetect.detect4(channel, detectFaceCount)) {
        LogUtil::log("红外人脸检测失败");
        return faces;
    }
    stringstream ss;
    int face_nums = RgbDetect::face_nums4;
    int face_nums_size = rgbDetect.facePos4.get()->size();
    for (int i = 0; i < face_nums && i < face_nums_size; i++) {
        try {
            THFI_FacePos *fps = &rgbDetect.facePos4.get()->at(i);
            ScoredRect scoredRect{fps[0].fAngle.confidence, {
                    fps[0].rcFace.left,
                    fps[0].rcFace.top,
                    fps[0].rcFace.right,
                    fps[0].rcFace.bottom
            }, -1, map<string, string>()};
            // Tracker deliberately not consulted in this variant.
            bool not_duplicate = true;
            track_id[i] = scoredRect.id;
            if (useGrayCamera) {
                grayDetect.detect(channel, detectFaceCount);
                liveness[i] = livenessDetect.detect(i);
            }
            int featureId = i;
            string featurePath = "";
            if (shouldExtractFeature > 0) {
                // NOTE: the original wrapped this in "if (1==1 || ...)" —
                // i.e. extraction is forced on every frame; preserved.
                memset(feature1, 0, featureSize);
                int ret = EF_Extract(channel, rgbDetect.rgb_image_buffer4.get()->data(), width, height, 3, &fps[0], feature1);
                if (ret == 1) {
                    featureCount++;
                    prevExtractFeatureTime = msecond();
                    string featureName(baseFeatureName);
                    featureName.append("_");
                    featureName.append(to_string(track_id[i]));
                    featureName.append("_");
                    featureName.append(to_string(i));
                    featureNames[i] = featureName;
                    // BUG FIX: fopen() was unchecked; a failed open made
                    // fwrite() dereference a null FILE*.
                    FILE *file = fopen(featureName.c_str(), "wb");
                    if (file != nullptr) {
                        fwrite(feature1, sizeof(BYTE), featureSize, file);
                        fclose(file);
                    } else {
                        LogUtil::log("failed to open feature file for writing");
                        featureId = -1;
                    }
                } else {
                    featureId = -1;
                }
                featurePath = featureNames[i];
            }
            if (shouldExtractFeature == 0) {
                featureId = -1;
                featurePath = "";
            }
            ss << fps[0].rcFace.left << "," << fps[0].rcFace.top << ","
               << fps[0].rcFace.right
               << "," << fps[0].rcFace.bottom << "," << fps[0].fAngle.confidence
               << "," << track_id[i] << "," << not_duplicate << "," << (liveness[i] ? "活体" : "非活体")
               << "," << featureId << "," << featurePath << "," << fps[0].fAngle.yaw << "," << fps[0].fAngle.pitch << "," << fps[0].fAngle.roll;
            // BUG FIX: the separator was only emitted after face 0, so three
            // or more face records ran together without a delimiter.
            if (face_nums > 1 && i != (face_nums - 1)) {
                ss << "|";
            }
        } catch (exception &e) {
            LogUtil::log(e.what());
        }
    }
    return ss.str();
}
|
|
|
#undef clamp_g
// Clamp x into [minValue, maxValue].
#define clamp_g(x, minValue, maxValue) ((x) < (minValue) ? (minValue) : ((x) > (maxValue) ? (maxValue) : (x)))

// Converts an NV21 frame (full-resolution Y plane followed by an
// interleaved, 2x2-subsampled VU plane) into packed BGR bytes.
// destBGR must hold width*height*3 bytes. Always returns 0.
int NV21ToBGR(unsigned char * srcYVU, unsigned char * destBGR, int width, int height)
{
    unsigned char *vuPlane = srcYVU + width * height;

    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            const int yIdx  = row * width + col;
            // One VU pair covers a 2x2 block of luma samples.
            const int vuIdx = (row / 2 * width / 2 + col / 2) * 2;

            const int y = srcYVU[yIdx];
            const int v = vuPlane[vuIdx + 0];
            const int u = vuPlane[vuIdx + 1];

            // BT.601-style YUV -> RGB conversion (truncated to int).
            const int r = 1.164 * (y - 16) + 1.596 * (v - 128);
            const int g = 1.164 * (y - 16) - 0.813 * (v - 128) - 0.392 * (u - 128);
            const int b = 1.164 * (y - 16) + 2.017 * (u - 128);

            unsigned char *out = destBGR + yIdx * 3;
            out[0] = clamp_g(b, 0, 255);
            out[1] = clamp_g(g, 0, 255);
            out[2] = clamp_g(r, 0, 255);
        }
    }
    return 0;
}
|
|
/**
 * Variant of detectRealFace that takes the RGB frame as a Java byte[] in
 * NV21 layout, converts it to BGR into RgbDetect's static buffer, and runs
 * detection on the given channel. Output format is identical to
 * detectRealFace. Feature extraction runs on every frame (the time/track
 * throttle was deliberately disabled in the original).
 */
string
FaceDetectHelper::detectRealFace3(jint channel, JNIEnv *env, const char* modelPath, int width,
                                  int height, const char* baseFeatureName,
                                  int shouldExtractFeature, bool useGrayCamera, int detectFaceCount, jbyteArray nv21Array) {
    int featureCount = 0;
    string faces = "";
    if (nv21Array != nullptr)
        trackingTrigger.triggerLine();
    // BUG FIX: on initSdk() failure the original function fell off the end
    // of a non-void function (undefined behavior).
    if (!initSdk(modelPath)) {
        LogUtil::log("初始化SDK失败");
        return faces;
    }
    RgbDetect rgbDetect(nullptr, width, height);
    if (nv21Array != nullptr) {
        RgbDetect::width = width;
        RgbDetect::height = height;
        long sz = env->GetArrayLength(nv21Array);
        if (RgbDetect::rgb_image_buffer == nullptr) {
            // Buffer holds the converted BGR image, not the NV21 input.
            RgbDetect::fileSize = width * height * 3;
            RgbDetect::rgb_image_buffer = make_shared<vector<unsigned char>>(RgbDetect::fileSize);
        }
        if (RgbDetect::facePos == nullptr) {
            RgbDetect::facePos = make_shared<vector<THFI_FacePos>>(5);
        }
        unsigned char* nv12ArrayPtr = (unsigned char*)env->GetByteArrayElements(nv21Array, 0);
        NV21ToBGR(nv12ArrayPtr, RgbDetect::rgb_image_buffer.get()->data(), width, height);
        env->ReleaseByteArrayElements(nv21Array, (signed char*)nv12ArrayPtr, 0);
    }
    GrayDetect grayDetect(nullptr, width, height);
    LivenessDetect livenessDetect;
    if (nv21Array == nullptr) {
        // Nothing to detect. (The original logged the misleading message
        // "初始化SDK失败" here.)
        return faces;
    }
    if (!rgbDetect.detect(channel, detectFaceCount)) {
        LogUtil::log("红外人脸检测失败");
        return faces;
    }
    stringstream ss;
    int face_nums = RgbDetect::face_nums;
    int face_nums_size = rgbDetect.facePos.get()->size();
    for (int i = 0; i < face_nums && i < face_nums_size; i++) {
        try {
            THFI_FacePos *fps = &rgbDetect.facePos.get()->at(i);
            ScoredRect scoredRect{fps[0].fAngle.confidence, {
                    fps[0].rcFace.left,
                    fps[0].rcFace.top,
                    fps[0].rcFace.right,
                    fps[0].rcFace.bottom
            }, -1, map<string, string>()};
            bool not_duplicate = trackingTrigger.triggerOnce(scoredRect);
            track_id[i] = scoredRect.id;
            if (track_id[i] == -1) {
                // Face not (yet) accepted by the tracker; skip it.
                continue;
            }
            if (useGrayCamera) {
                grayDetect.detect(channel, detectFaceCount);
                liveness[i] = livenessDetect.detect(i);
            }
            int featureId = i;
            string featurePath = "";
            if (shouldExtractFeature > 0) {
                // NOTE: the original wrapped this in "if (1==1 || ...)" —
                // i.e. extraction is forced on every frame; preserved.
                memset(feature1, 0, featureSize);
                int ret = EF_Extract(channel, rgbDetect.rgb_image_buffer.get()->data(), width, height, 3, &fps[0], feature1);
                if (ret == 1) {
                    featureCount++;
                    prevExtractFeatureTime = msecond();
                    string featureName(baseFeatureName);
                    featureName.append("_");
                    featureName.append(to_string(track_id[i]));
                    featureName.append("_");
                    featureName.append(to_string(i));
                    featureNames[i] = featureName;
                    // BUG FIX: fopen() was unchecked; a failed open made
                    // fwrite() dereference a null FILE*.
                    FILE *file = fopen(featureName.c_str(), "wb");
                    if (file != nullptr) {
                        fwrite(feature1, sizeof(BYTE), featureSize, file);
                        fclose(file);
                    } else {
                        LogUtil::log("failed to open feature file for writing");
                        featureId = -1;
                    }
                } else {
                    featureId = -1;
                }
                featurePath = featureNames[i];
            }
            if (shouldExtractFeature == 0) {
                featureId = -1;
                featurePath = "";
            }
            ss << fps[0].rcFace.left << "," << fps[0].rcFace.top << ","
               << fps[0].rcFace.right
               << "," << fps[0].rcFace.bottom << "," << fps[0].fAngle.confidence
               << "," << track_id[i] << "," << not_duplicate << "," << (liveness[i] ? "活体" : "非活体")
               << "," << featureId << "," << featurePath << "," << fps[0].fAngle.yaw << "," << fps[0].fAngle.pitch << "," << fps[0].fAngle.roll;
            // BUG FIX: the separator was only emitted after face 0, so three
            // or more face records ran together without a delimiter.
            if (face_nums > 1 && i != (face_nums - 1)) {
                ss << "|";
            }
        } catch (exception &e) {
            LogUtil::log(e.what());
        }
    }
    return ss.str();
}
|
|
|
/**
 * Reads a JPEG from disk, detects exactly one face, extracts its feature
 * vector, and returns it as a new Java byte[].
 *
 * @return the feature bytes, or nullptr when the image cannot be read,
 *         the face count is not exactly 1, extraction fails, or the SDK
 *         has not been initialized yet.
 */
jbyteArray
FaceDetectHelper::extractFeature(JNIEnv *env, const char *jpgFileName) {
    // BUG FIX: idcard_feature/featureSize are only set up by initSdk();
    // calling EF_Extract with a null buffer crashed.
    if (idcard_feature == nullptr || featureSize <= 0) {
        LogUtil::log("extractFeature called before SDK initialization");
        return nullptr;
    }
    cv::Mat image;
    image = cv::imread(jpgFileName, 1);
    if( !image.data )
    {
        printf( " No image data \n " );
        return nullptr;
    }
    THFI_FacePos fps1[1];
    int height = image.rows;
    int width = image.cols;
    BYTE* bgrData = image.data;
    int face_nums = THFI_DetectFace(0, bgrData, 24, width, height, fps1, 1,360);
    if (face_nums == 1) {
        int ret=EF_Extract(0, bgrData, width, height, 3, fps1, idcard_feature);
        if (ret == 1) {
            jbyteArray featureByteArray = env->NewByteArray (featureSize);
            env->SetByteArrayRegion (featureByteArray, 0, featureSize, reinterpret_cast<jbyte*>(idcard_feature));
            return featureByteArray;
        }
    }
    return nullptr;
}
|
|
|
/**
 * Detects the most prominent face in a JPEG file and returns its bounding
 * box as "left,top,right,bottom". Returns "" when the image cannot be read
 * or no face is found.
 */
string
FaceDetectHelper::facePosition(JNIEnv *env, const char *jpgFileName) {
    cv::Mat image;
    image = cv::imread(jpgFileName, 1);
    if( !image.data )
    {
        printf( " No image data \n " );
        // BUG FIX: the original did "return nullptr;" here — constructing a
        // std::string from a null pointer is undefined behavior (crash).
        return "";
    }
    THFI_FacePos fps1[1];
    int height = image.rows;
    int width = image.cols;
    BYTE* bgrData = image.data;
    int face_nums = THFI_DetectFace(0, bgrData, 24, width, height, fps1, 1,360);
    stringstream str;
    if (face_nums >= 1) {
        str << to_string(fps1[0].rcFace.left) << ","
            << to_string(fps1[0].rcFace.top) << ","
            << to_string(fps1[0].rcFace.right)<< ","
            << to_string(fps1[0].rcFace.bottom);
    }
    return str.str();
}
|
|
|
|
/**
 * Detects exactly one face in a BGR pixel buffer passed from Java and
 * returns its extracted feature vector as a new Java byte[].
 *
 * NOTE(review): the buffer length is not validated against width*height*3 —
 * confirm callers always pass a correctly sized array.
 *
 * @return the feature bytes, or nullptr when the face count is not exactly
 *         1, extraction fails, or the SDK has not been initialized yet.
 */
jbyteArray
FaceDetectHelper::extractFeature(JNIEnv *env, jbyteArray bgrByteArray, int width, int height) {
    // BUG FIX: idcard_feature/featureSize are only set up by initSdk();
    // calling EF_Extract with a null buffer crashed.
    if (idcard_feature == nullptr || featureSize <= 0) {
        LogUtil::log("extractFeature called before SDK initialization");
        return nullptr;
    }
    THFI_FacePos fps1[1];
    int len = env->GetArrayLength (bgrByteArray);
    shared_ptr<vector<unsigned char>> rgb_image_buffer = make_shared<vector<unsigned char>>(len);
    env->GetByteArrayRegion (bgrByteArray, 0, len, reinterpret_cast<jbyte*>(rgb_image_buffer.get()->data()));
    BYTE* bgrData = rgb_image_buffer.get()->data();
    int face_nums = THFI_DetectFace(0, bgrData, 24, width, height, fps1, 1,360);
    if (face_nums == 1) {
        int ret=EF_Extract(0, bgrData, width, height, 3, fps1, idcard_feature);
        if (ret == 1) {
            jbyteArray featureByteArray = env->NewByteArray (featureSize);
            env->SetByteArrayRegion (featureByteArray, 0, featureSize, reinterpret_cast<jbyte*>(idcard_feature));
            return featureByteArray;
        }
    }
    return nullptr;
}
|