//
// Created by Scheaven on 2019/11/19.
//

#include "detecter_manager.h"
#include <thread>
#include <unistd.h>
#include <cstdlib>

DetecterManager* DetecterManager::instance = NULL;

// Singleton: create the instance on first use, otherwise return the existing one
DetecterManager* DetecterManager::getInstance()
{
    if(instance == NULL)
    {
        instance = new DetecterManager();
    }
    return instance;
}
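
// Caveat: this lazy initialization is not thread-safe. If getInstance() may be called from
// multiple threads concurrently, one possible fix (a sketch, not part of this project) is to
// guard creation with std::call_once or to return a function-local static instance instead.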

// Constructor: initialize and load the detection model
DetecterManager::DetecterManager()
{
    std::cout << "loading detector model......" << std::endl;
}

DetecterManager::~DetecterManager()
{
}

void DetecterManager::release()
{
    Detector::getInstance()->release();

    delete DetecterManager::instance;
    DetecterManager::instance = NULL;
}

// Run YOLO detection on the input image
void DetecterManager::detecter_main(cv::Mat &mat_image, TResult &t_result)
{
    std::vector<bbox_t> result_vec;

    result_vec = Detector::getInstance()->detect(mat_image); // the singleton detector performs person detection

    encoder_features(result_vec, t_result); // convert the raw detections into the required output format

    // show_result(result_vec);
    // draw_boxes(mat_image, result_vec);
}
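
/* Minimal usage sketch of the manager, under the assumption that the caller owns the input
 * frame and the result buffer; the image path, the loop body and the final free() of
 * t_result.targets (allocated with malloc in encoder_features) are illustrative only.
 *
 *   cv::Mat frame = cv::imread("test.jpg");
 *   TResult t_result;
 *   DetecterManager::getInstance()->detecter_main(frame, t_result);
 *   for (int i = 0; i < t_result.count; ++i)
 *   {
 *       const Target &t = t_result.targets[i];
 *       std::cout << "person " << t.id << " conf=" << t.confidence << std::endl;
 *   }
 *   free(t_result.targets);
 *   DetecterManager::getInstance()->release();
 */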

// Merge the detected result_box information into the output detections
void DetecterManager::encoder_features(std::vector<bbox_t> boxes, TResult &t_result)
{
    t_result.targets = (Target*)malloc(sizeof(Target) * boxes.size()); // output array: one Target slot per detection box
    int w_count = 0;

    std::vector<float> confidences;
    std::vector<int> human_index;

    int result_index = 0;

    // NMS: drop overlapping boxes -- start
    std::vector<bbox_t> n_boxes;
    float threshold = 0.99;
    // erase while iterating, so iterators are required
    for (std::vector<bbox_t>::iterator it = boxes.begin(); it != boxes.end();)
    {
        if(it->obj_id != 0) // move non-person detections straight to n_boxes
        {
            n_boxes.push_back(*it);
            it = boxes.erase(it);
        }
        else
            ++it;
    }

    // Greedy NMS: keep the highest-probability box, drop boxes that overlap it too much, repeat
    while(boxes.size() > 0)
    {
        std::sort(boxes.begin(), boxes.end(), [](bbox_t box1, bbox_t box2){
            return box1.prob > box2.prob;
        });
        n_boxes.push_back(boxes[0]);

        for (std::vector<bbox_t>::iterator it = boxes.begin() + 1; it != boxes.end();)
        {
            float iou_value = box_iou(*boxes.begin(), *it);
            if(iou_value > threshold)
            {
                it = boxes.erase(it);
                // printf("=====iou_value:%f,%f", iou_value, threshold);
                // exit(-1);
            }
            else
                it++;
        }
        boxes.erase(boxes.begin());
    }
    // NMS: drop overlapping boxes -- end
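
    // Example of the suppression above (sketch): for two person boxes A (prob 0.9) and
    // B (prob 0.8) with box_iou(A, B) = 0.995 > threshold, A is kept in n_boxes and B is
    // erased; pairs whose overlap ratio stays below the 0.99 threshold are both kept.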

    // Keep only the person boxes (obj_id == 0) that survived NMS and copy them into the output
    for (const auto &result_box : n_boxes)
    {
        // #ifdef S_DEBUG
        // printf("--%d-%d-%d-%d-%d---result_box-----\n", result_box.obj_id, result_box.x, result_box.y, result_box.w, result_box.h);
        // #endif
        if(result_box.obj_id == 0)
        {
            Target target;
            init_target(&target);

            target.rect.left = result_box.x; // bounding box passed on to the tracker
            target.rect.top = result_box.y;
            target.rect.right = result_box.x + result_box.w;
            target.rect.bottom = result_box.y + result_box.h;

            target.confidence = result_box.prob * 100;
            target.id = w_count;

            t_result.targets[w_count] = target;
            w_count++;

            // // cv::Rect box = cv::Rect(result_box.x,result_box.y,result_box.w,result_box.h);
            // // visualization of the detection results
            // // get_detections(DETECTBOX(box.x, box.y,box.width, box.height), confidences[idx],d);
            // DETECTION_ROW tmpRow;
            // tmpRow.tlwh = DETECTBOX(result_box.x,result_box.y,result_box.w,result_box.h); //DETECTBOX(x, y, w, h);
            // tmpRow.confidence = result_box.prob;
            // tmpRow.obj_id = result_box.obj_id;
            // tmpRow.is_hat = true;
            // tmpRow.is_mask = true;
            // tmpRow.is_smoke = true;
            // tmpRow.hatScore = 0;
            // tmpRow.maskScore = 0;
            // tmpRow.smokeScore = 0;
            // tmpRow.hasFace = false;
            //// tmpRow.human_face = nullptr;
            //
            // int sub_box_index = 0;
            // human_index.push_back(result_index);
            //
            // detection.push_back(tmpRow);
        }
        // result_index++;
    }
    t_result.count = w_count;

    // for(const auto &kk : human_index)
    // {
    //     printf("---%d--%d--%d--%d-\n", n_boxes.size(), human_index.size(), kk, n_boxes[kk].obj_id);
    // }
}

bool DetecterManager::sort_score(bbox_t box1, bbox_t box2)
{
    return (box1.prob > box2.prob);
}

float DetecterManager::box_iou(bbox_t box1, bbox_t box2)
{
    int x1 = std::max(box1.x, box2.x);
    int y1 = std::max(box1.y, box2.y);
    int x2 = std::min((box1.x + box1.w), (box2.x + box2.w));
    int y2 = std::min((box1.y + box1.h), (box2.y + box2.h));
    // clamp to zero so that non-overlapping boxes do not yield a spurious positive area
    float over_area = std::max(0, x2 - x1) * std::max(0, y2 - y1);
    // printf("over_ares---%f ----%d----%d\n", over_area, box1.w*box1.h, box2.w*box2.h);
    // the overlap is normalized by the smaller box area, not by the union
    float iou = over_area / ((box1.w*box1.h < box2.w*box2.h) ? box1.w*box1.h : box2.w*box2.h);
    return iou;
}
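
// Worked example (sketch): box1 = {x=0, y=0, w=10, h=10}, box2 = {x=5, y=0, w=10, h=10}
// gives an overlap of 5 * 10 = 50 and a smaller-box area of 100, so box_iou returns 0.5.
// With the 0.99 threshold used in encoder_features, only near-identical boxes are suppressed.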

void DetecterManager::init_target(Target *t)
{
    t->attribute = NULL;
    t->feature = NULL;
    t->id = 0;
    t->rect = TRect{0,0,0,0};
    t->confidence = 0;
    t->attribute_size = 0;
    t->feature_size = 0;
}