Derived from Algorithm/baseDetector

Scheaven
2021-01-05 6ae75cc17b2952c63a79ff2c86da841f0dbbf3c6
//
// Created by Scheaven on 2019/11/18.
//
#include "lib/core/tracker_manager.h"
#include <opencv2/opencv.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/platform/env.h"
#ifdef OPENCV
#include <opencv2/opencv.hpp>            // C++
#pragma comment(lib, "opencv_core249.lib")
#pragma comment(lib, "opencv_imgproc249.lib")
#pragma comment(lib, "opencv_highgui249.lib")
 
#endif    // OPENCV
using namespace std;
using namespace cv;
std::vector<cv::Rect> boxes; // stores detection boxes
 
 
int main(int argc, char *argv[])
{
    // open the two input video streams
    VideoCapture cap;
    VideoCapture cap2;
    int cam_id = 1;
    int cam_id2 = 2;
 
    cap.open("../data/cs01.avi");
    cap2.open("../data/cs02.avi");
    // VideoCapture::open() does not throw on failure, so check isOpened() instead
    if (!cap.isOpened() || !cap2.isOpened()) {
        cout << "failed to open input video" << endl;
        return -1;
    }
    Mat frame, blob;
    Mat frame2, blob2;
 
 
    TrackerManager TM;
    TM.add_cam(cam_id);
    TM.add_cam(cam_id2);
    while (true) {
        std::vector<int> cam_ids;
        std::vector<cv::Mat> frame_vec;
        std::vector<FRAME_RESULT> results_vec;
        cam_ids.push_back(cam_id);
        cam_ids.push_back(cam_id2);
        // read a frame from each camera
        cap >> frame;
        cap2 >> frame2;
        frame_vec.push_back(frame);
        frame_vec.push_back(frame2);
        cout << "start--" << endl;
        if(!frame.empty() && !frame2.empty()){
            FRAME_RESULT result;
            FRAME_RESULT result2;
            results_vec.push_back(result);
            results_vec.push_back(result2);
//            TM.single_detect_tracking(cam_id, frame, result);
            TM.mul_detect_tracking(cam_ids, frame_vec, results_vec);
//            cout<< "result: "<< result[0] << endl;
        }else{
            cout << "-----------------------over--" << endl;
            break;
        }
//        // performance timing (references a `net` object that is not defined in this file)
//        std::vector<double> layersTimes;
//        double freq = cv::getTickFrequency() / 1000;
//        double t = net.getPerfProfile(layersTimes) / freq;
//        std::string label = cv::format("Inference time for a frame : %.2f ms", t);
//        putText(frame, label, cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255));
//        waitKey(1);
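//        // A minimal timing sketch using OpenCV's tick counter (an assumed
//        // replacement for the DNN-profiling lines above, which rely on the
//        // undefined `net`); it would wrap the mul_detect_tracking() call:
//        // double t0 = (double)cv::getTickCount();
//        // TM.mul_detect_tracking(cam_ids, frame_vec, results_vec);
//        // double ms = ((double)cv::getTickCount() - t0) * 1000.0 / cv::getTickFrequency();
//        // putText(frame, cv::format("Tracking time: %.2f ms", ms),
//        //         cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255));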
    }
    return 0;
}
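
For a single camera, the commented-out single_detect_tracking call in the loop above suggests a simpler driver. A minimal sketch, assuming TrackerManager, FRAME_RESULT, add_cam, and single_detect_tracking(cam_id, frame, result) behave as used in main() above; the exact FRAME_RESULT layout is whatever tracker_manager.h defines:

// single_cam_demo.cpp -- hypothetical single-camera driver (a sketch, not part of the repo)
#include "lib/core/tracker_manager.h"
#include <opencv2/opencv.hpp>
#include <iostream>

int main(int argc, char *argv[])
{
    cv::VideoCapture cap("../data/cs01.avi");
    if (!cap.isOpened()) {
        std::cout << "failed to open input video" << std::endl;
        return -1;
    }

    TrackerManager TM;
    const int cam_id = 1;
    TM.add_cam(cam_id);

    cv::Mat frame;
    while (cap.read(frame)) {
        FRAME_RESULT result;                          // per-frame tracking output
        TM.single_detect_tracking(cam_id, frame, result);
        // result now holds this frame's detections/tracks; its layout is
        // defined in tracker_manager.h.
    }
    return 0;
}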