//
// Created by Scheaven on 2019/11/19.
//
 
#include "detecter_manager.h"
#include <thread>
#include <unistd.h>
#include <cstdlib>
 
DetecterManager* DetecterManager::instance = NULL;
 
// Singleton: create the instance on first access, otherwise return the existing one
DetecterManager* DetecterManager::getInstance()
{
    if(instance==NULL)
    {
        instance = new DetecterManager();
    }
    return instance;
}
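
// Note: the lazy initialization above is not thread-safe. A minimal sketch of a
// thread-safe alternative (assuming C++11 or newer; release() would then no longer
// delete the instance), kept here as a comment:
//
//     DetecterManager* DetecterManager::getInstance()
//     {
//         static DetecterManager inst; // function-local static, initialized once
//         return &inst;
//     }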
 
// Constructor: initialize and load the detection model
DetecterManager::DetecterManager()
{
    std::cout << "loading detector model......" << std::endl;
}
 
 
DetecterManager::~DetecterManager()
{
}
 
void DetecterManager::release()
{
    Detector::getInstance()->release();
 
    delete DetecterManager::instance;
    DetecterManager::instance = NULL;
}
 
// Run YOLO detection on the input image
void DetecterManager::detecter_main(cv::Mat &mat_image, TResult &t_result)
{
    std::vector<bbox_t> result_vec;

    result_vec = Detector::getInstance()->detect(mat_image); // call the static detector singleton to run (person) detection

    encoder_features(result_vec, t_result); // convert the raw detections into the TResult output format
 
  //  show_result(result_vec);
//    draw_boxes(mat_image, result_vec);
}
 
// Pack the detected boxes (result_box) into the TResult detections
void DetecterManager::encoder_features(std::vector<bbox_t> boxes, TResult &t_result)
{
    t_result.targets = (Target*)malloc(sizeof(Target) * boxes.size()); // allocate one Target slot per detected box
    int w_count = 0;

    std::vector<float> confidences;
    std::vector<int> human_index;

    int result_index=0;

    // NMS: remove overlapping boxes -- start
    std::vector<bbox_t> n_boxes;
    float threshold = 0.99;
    // erase while iterating, so use iterators
    for (std::vector<bbox_t>::iterator it = boxes.begin();it !=boxes.end();)
    {
        if(it->obj_id!=0) // move non-person detections (obj_id != 0) straight to n_boxes
        {
            n_boxes.push_back(*it);
            it = boxes.erase(it);
        }else
            ++it;
    }
 
    // Greedy NMS over the remaining person boxes: repeatedly keep the box with
    // the highest probability and drop every other box whose overlap with it
    // exceeds `threshold`.
    while(boxes.size()>0)
    {
        std::sort(boxes.begin(),boxes.end(),[](bbox_t box1, bbox_t box2){
            return box1.prob>box2.prob;
        });
        n_boxes.push_back(boxes[0]); // keep the highest-probability box

        for (std::vector<bbox_t>::iterator it = boxes.begin()+1; it !=(boxes.end());)
        {
            float iou_value = box_iou(*boxes.begin(),*it);
            if(iou_value>threshold) // heavily overlapping duplicate: discard
            {
                it = boxes.erase(it);
//                printf("=====iou_value:%f,%f", iou_value, threshold);
//                exit(-1);
            }
            else
                it++;
        }
        boxes.erase(boxes.begin());
    }
    // NMS: remove overlapping boxes -- end
 
 
    for (const auto &result_box:n_boxes)
    {
//        #ifdef S_DEBUG
//                printf("--%d-%d-%d-%d-%d---result_box-----\n", result_box.obj_id, result_box.x, result_box.y, result_box.w, result_box.h);
//        #endif
        if(result_box.obj_id == 0)
        {
            Target target;
            init_target(&target);
 
            target.rect.left = result_box.x; // bounding box used for tracking
            target.rect.top =  result_box.y;
            target.rect.right =  result_box.x + result_box.w;
            target.rect.bottom = result_box.y + result_box.h;
 
            target.confidence = result_box.prob*100;
            target.id = w_count;
 
            t_result.targets[w_count] = target;
            w_count ++;
 
 
//            // cv::Rect box = cv::Rect(result_box.x,result_box.y,result_box.w,result_box.h);
//            // visualization of the object detection output
//            // get_detections(DETECTBOX(box.x, box.y,box.width,  box.height), confidences[idx],d);
//            DETECTION_ROW tmpRow;
//            tmpRow.tlwh = DETECTBOX(result_box.x,result_box.y,result_box.w,result_box.h); //DETECTBOX(x, y, w, h);
//            tmpRow.confidence = result_box.prob;
//            tmpRow.obj_id = result_box.obj_id;
//            tmpRow.is_hat = true;
//            tmpRow.is_mask = true;
//            tmpRow.is_smoke = true;
//            tmpRow.hatScore = 0;
//            tmpRow.maskScore = 0;
//            tmpRow.smokeScore = 0;
//            tmpRow.hasFace = false;
////            tmpRow.human_face = nullptr;
//
//            int sub_box_index=0;
//            human_index.push_back(result_index);
//
//            detection.push_back(tmpRow);
        }
//        result_index++;
    }
    t_result.count = w_count;
 
//    for(const auto &kk:human_index)
//    {
//        printf("---%d--%d--%d--%d-\n", n_boxes.size(), human_index.size(), kk, n_boxes[kk].obj_id);
//    }
}
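
// Example (assumed values): if detect() returns two boxes with obj_id == 0 and
// probabilities 0.9 and 0.8 that overlap almost completely (overlap ratio above
// the 0.99 threshold), only the 0.9 box survives NMS, so t_result.count == 1
// and t_result.targets[0].confidence == 90.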
 
// Comparator: sort boxes by descending probability
bool DetecterManager::sort_score(bbox_t box1, bbox_t box2)
{
    return (box1.prob>box2.prob);
}
 
// Overlap ratio between two boxes: intersection area divided by the area of the
// smaller box
float DetecterManager::box_iou(bbox_t box1, bbox_t box2)
{
    int x1 = std::max(box1.x, box2.x);
    int y1 = std::max(box1.y, box2.y);
    int x2 = std::min((box1.x+box1.w),(box2.x+box2.w));
    int y2 = std::min((box1.y+box1.h),(box2.y+box2.h));
    if (x2 <= x1 || y2 <= y1)
        return 0.0f; // no overlap
    float over_area = (x2-x1)*(y2-y1);
//    printf("over_ares---%f ----%d----%d\n", over_area, box1.w*box1.h, box2.w*box2.h);
    float iou = over_area/((box1.w*box1.h<box2.w*box2.h)?box1.w*box1.h:box2.w*box2.h);
    return iou;
}
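
// Worked example (assumed values): box1 = {x=0, y=0, w=10, h=10} and
// box2 = {x=5, y=0, w=10, h=10} overlap in a 5x10 region, so over_area = 50
// and the returned ratio is 50 / min(100, 100) = 0.5.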
 
// Reset a Target struct to an empty/default state
void DetecterManager::init_target(Target *t)
{
    t->attribute = NULL;
    t->feature = NULL;
    t->id = 0;
    t->rect = TRect{0,0,0,0};
    t->confidence = 0;
    t->attribute_size = 0;
    t->feature_size = 0;
}
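
// Minimal usage sketch (assumptions: the project defines Detector, TResult and
// Target as used above; "demo.jpg" is a hypothetical input path). Kept as a
// comment so it does not affect the build:
//
//     cv::Mat frame = cv::imread("demo.jpg");
//     TResult result;
//     DetecterManager::getInstance()->detecter_main(frame, result);
//     for (int i = 0; i < result.count; ++i)
//         std::cout << "target " << result.targets[i].id
//                   << " confidence " << result.targets[i].confidence << std::endl;
//     free(result.targets); // allocated with malloc in encoder_features
//     DetecterManager::getInstance()->release();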