Forked from Algorithm/baseDetector

Scheaven
2021-06-03 168af40fe9a3cc81c6ee16b3e81f154780c36bdb
#include "remain_filter.h"
#include <cuda_runtime_api.h>
#include <torch/script.h>   // torch::jit::load / torch::jit::script::Module
#include <torch/torch.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <string>
#include "config_util.h"

using namespace std;
 
// Process-wide singleton instance, created lazily by getInstance().
RemainFilter* RemainFilter::instance = nullptr;
 
RemainFilter* RemainFilter::getInstance()
{
    if (instance == nullptr)
    {
        // Default to GPU 0; pass -1 to the constructor for CPU-only use.
        int gpu_id = 0;
        instance = new RemainFilter(gpu_id);
    }
    return instance;
}
 
RemainFilter::RemainFilter(int gpu_id)
{
    M_STATICSTRUCT config;
    // Device ids are zero-based, so require device_count() > gpu_id (the
    // original >= check accepted an id one past the last device).
    if (gpu_id != -1 && torch::cuda::is_available() && torch::cuda::device_count() > gpu_id)
    {
        this->gpu_id = gpu_id;
        cudaSetDevice(gpu_id);
        // Load the TorchScript model directly onto the target CUDA device.
        this->module = torch::jit::load(config.helmet_model, torch::Device(torch::DeviceType::CUDA, gpu_id));
        this->module.eval();
        this->is_gpu = true;
    }
    else
    {
        // CPU path: requested explicitly (gpu_id == -1) or no usable CUDA
        // device; the original left the module unloaded in the latter case.
        this->module = torch::jit::load(config.helmet_model);
        this->module.to(torch::kCPU);
        this->module.eval();
        this->is_gpu = false;
    }
    SLOG::getInstance()->addLog(0,"model load ok - gpu_id:" +  to_string(gpu_id));
}
RemainFilter::~RemainFilter(){}
 
// Classifies a 224x224 RGB crop and returns the top-1 confidence as a
// percentage; returns 0.0f when the region is classified as a person.
float RemainFilter::whichRemain(unsigned char *pBuf)
{
    SLOG::getInstance()->addLog(0,"remain_filter.cpp: start abandoned-object classification");
    auto t1 = std::chrono::steady_clock::now();
    // pBuf is assumed to hold a 224x224 RGB uint8 image in HWC order; the
    // original from_blob call read the byte buffer with the default float dtype.
    auto input_tensor = torch::from_blob(pBuf, {1, 224, 224, 3}, torch::kUInt8)
                            .to(torch::kFloat32).div_(255.0);
    input_tensor = input_tensor.permute({0, 3, 1, 2}); // NHWC -> NCHW
    // Per-channel ImageNet mean/std normalization.
    input_tensor[0][0].sub_(0.485).div_(0.229);
    input_tensor[0][1].sub_(0.456).div_(0.224);
    input_tensor[0][2].sub_(0.406).div_(0.225);
 
    if (is_gpu)
    {
        input_tensor = input_tensor.to(torch::Device(torch::DeviceType::CUDA, this->gpu_id));
    }
 
    torch::NoGradGuard no_grad; // inference only, skip autograd bookkeeping
    auto out = this->module.forward({input_tensor}).toTensor();
    auto results = out.sort(-1, true); // sort logits descending along the class dim
    auto t_len = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - t1).count();
    std::cout << "------- inference time (ms): " << t_len << std::endl << std::endl;
    SLOG::getInstance()->addLog(0,"module forward time (ms): " +  to_string(t_len));
 
    // Softmax of the descending-sorted logits and the matching class ids.
    auto pre_value = std::get<0>(results)[0].softmax(0);
    auto pre_index = std::get<1>(results)[0];
 
    // Debug dump of the top predictions, clamped to the model's class count.
    const int64_t top_k = std::min<int64_t>(16, pre_index.size(0));
    for (int64_t i = 0; i < top_k; ++i) {
        std::cout << "    ============= Top-" << i + 1
        << " =============" << std::endl;
        std::cout << "    Label:  " << pre_index[i].item<int>() << std::endl;
        std::cout << "    With Probability:  "
         << pre_value[i].item<float>() * 100.0f << "%" << std::endl;
    }
 
    // Class ids below 5 are treated as person classes here (inferred from the
    // original log message): a person in the region means the object is
    // attended, not abandoned.
    if(pre_index[0].item<int>() < 5)
    {
        SLOG::getInstance()->addLog(0,"candidate is a person, not abandoned; top-1 class id: " +  to_string(pre_index[0].item<int>()));
        return 0.0f;
    }
 
    // Otherwise return the top-1 confidence as a percentage.
    return pre_value[0].item<float>() * 100.0f;
 
 
}
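
For context, a minimal caller sketch (not part of the original file): it assumes the candidate region has already been cropped and resized to a 224x224 RGB uint8 buffer, and the helper name and the 50% acceptance threshold are illustrative assumptions, not values from this repository.

#include "remain_filter.h"

// Hypothetical helper, not from this repo: decides whether a cropped
// candidate region should be reported as an abandoned object.
bool isAbandonedObject(unsigned char* crop /* 224x224x3, RGB, HWC (assumed) */)
{
    // getInstance() lazily loads the model once (GPU 0 by default, CPU fallback).
    float confidence = RemainFilter::getInstance()->whichRemain(crop);
    // whichRemain returns 0.0f when the top-1 class is a person, otherwise
    // the top-1 probability in percent. The 50% cut-off is illustrative.
    return confidence > 50.0f;
}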