#include "model.h"
|
#include <memory>
|
#include <vector>
|
#include <fstream>
|
#include <iostream>
|
#include <sstream>
|
#include <iomanip>
|
|
using namespace nvinfer1;
|
// REGISTER_TENSORRT_PLUGIN(DetectPluginCreator);
|
|
|
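
// Loads the prebuilt serialized TensorRT engine from m_staticStruct::model_path,
// creates an execution context, allocates the host/device buffers, and creates
// the CUDA stream used for inference. The input resolution is hard-coded to
// 416x416x3; `type` selects the output-head layout (see setOutput()).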
Detecter::Detecter(const NetworkInfo& networkInfo, const InferParams& inferParams, int type) :
    m_InputBlobName(networkInfo.inputBlobName),
    m_InputH(416),
    m_InputW(416),
    m_InputC(3),
    m_InputSize(m_InputH * m_InputW * m_InputC),
    m_ProbThresh(inferParams.probThresh),
    m_NMSThresh(inferParams.nmsThresh),
    m_Logger(Logger()),
    m_Network(nullptr),
    m_Builder(nullptr),
    m_ModelStream(nullptr),
    m_Engine(nullptr),
    m_Context(nullptr),
    m_InputBindingIndex(-1),
    m_CudaStream(nullptr),
    m_PluginFactory(new PluginFactory)
{
    setOutput(type);
    m_EnginePath = m_staticStruct::model_path;
    DEBUG((boost::format("m_EnginePath:%s") % m_EnginePath).str());
    assert(m_PluginFactory != nullptr);
    m_Engine = loadTRTEngine(m_EnginePath, m_PluginFactory, m_Logger);
    assert(m_Engine != nullptr);
    m_Context = m_Engine->createExecutionContext();
    assert(m_Context != nullptr);
    m_InputBindingIndex = m_Engine->getBindingIndex(m_InputBlobName.c_str());
    assert(m_InputBindingIndex != -1);
    assert(m_BatchSize <= static_cast<uint32_t>(m_Engine->getMaxBatchSize()));
    allocateBuffers();
    NV_CUDA_CHECK(cudaStreamCreate(&m_CudaStream));
    assert(verifyEngine());
}
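
// Releases, in order: pinned host buffers, device buffers, the CUDA stream,
// and then the execution context, engine, and plugin factory.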
Detecter::~Detecter()
{
    for (auto& tensor : m_OutputTensors) NV_CUDA_CHECK(cudaFreeHost(tensor.hostBuffer));
    for (auto& deviceBuffer : m_DeviceBuffers) NV_CUDA_CHECK(cudaFree(deviceBuffer));
    NV_CUDA_CHECK(cudaStreamDestroy(m_CudaStream));

    if (m_Context)
    {
        m_Context->destroy();
        m_Context = nullptr;
    }

    if (m_Engine)
    {
        m_Engine->destroy();
        m_Engine = nullptr;
    }

    if (m_PluginFactory)
    {
        m_PluginFactory->destroy();
        m_PluginFactory = nullptr;
    }

    // m_TinyMaxpoolPaddingFormula.reset();
}
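
// Runs one batched inference pass on m_CudaStream: async host-to-device copy of
// the input, engine execution, then async device-to-host copies of every output
// head, followed by a stream synchronize. Note that `input` is typed as
// unsigned char* but batchSize * m_InputSize * sizeof(float) bytes are copied,
// so the caller must supply a float-packed buffer.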
void Detecter::doInference(const unsigned char* input, const uint32_t batchSize)
{
    Timer timer;
    assert(batchSize <= m_BatchSize && "Image batch size exceeds TRT engine's batch size");

    // Copy the input batch to the device buffer bound to the input blob.
    NV_CUDA_CHECK(cudaMemcpyAsync(m_DeviceBuffers.at(m_InputBindingIndex), input,
                                  batchSize * m_InputSize * sizeof(float), cudaMemcpyHostToDevice,
                                  m_CudaStream));

    m_Context->enqueue(batchSize, m_DeviceBuffers.data(), m_CudaStream, nullptr);

    // Copy every output head back into its pinned host buffer.
    for (auto& tensor : m_OutputTensors)
    {
        NV_CUDA_CHECK(cudaMemcpyAsync(tensor.hostBuffer, m_DeviceBuffers.at(tensor.bindingIndex),
                                      batchSize * tensor.volume * sizeof(float),
                                      cudaMemcpyDeviceToHost, m_CudaStream));
    }

    NV_CUDA_CHECK(cudaStreamSynchronize(m_CudaStream));
    timer.out("inference");
}
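
// Decodes and concatenates the box proposals from every output head for one
// image of the batch (non-max suppression is not applied here).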
std::vector<BBoxInfo> Detecter::decodeDetections(const int& imageIdx,
                                                 const int& imageH,
                                                 const int& imageW)
{
    Timer timer;
    std::vector<BBoxInfo> binfo;
    for (auto& tensor : m_OutputTensors)
    {
        std::vector<BBoxInfo> curBInfo = decodeTensor(imageIdx, imageH, imageW, tensor);
        binfo.insert(binfo.end(), curBInfo.begin(), curBInfo.end());
    }
    timer.out("decodeDetections");
    return binfo;
}
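
// Allocates one device buffer per engine binding (the input plus each output
// head) and a pinned host buffer per output tensor, all sized for the maximum
// batch.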
void Detecter::allocateBuffers()
{
    m_DeviceBuffers.resize(m_Engine->getNbBindings(), nullptr);
    assert(m_InputBindingIndex != -1 && "Invalid input binding index");
    NV_CUDA_CHECK(cudaMalloc(&m_DeviceBuffers.at(m_InputBindingIndex),
                             m_BatchSize * m_InputSize * sizeof(float)));

    for (auto& tensor : m_OutputTensors)
    {
        tensor.bindingIndex = m_Engine->getBindingIndex(tensor.blobName.c_str());
        assert((tensor.bindingIndex != -1) && "Invalid output binding index");
        NV_CUDA_CHECK(cudaMalloc(&m_DeviceBuffers.at(tensor.bindingIndex),
                                 m_BatchSize * tensor.volume * sizeof(float)));
        NV_CUDA_CHECK(
            cudaMallocHost(&tensor.hostBuffer, tensor.volume * m_BatchSize * sizeof(float)));
    }
}
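
// Sanity-checks that the deserialized engine matches the configuration built by
// setOutput(): binding count, blob names, and tensor volumes.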
bool Detecter::verifyEngine()
{
    assert((m_Engine->getNbBindings() == (1 + m_OutputTensors.size())
            && "Binding info doesn't match between cfg and engine file \n"));

    for (const auto& tensor : m_OutputTensors)
    {
        assert(!strcmp(m_Engine->getBindingName(tensor.bindingIndex), tensor.blobName.c_str())
               && "Blob names don't match between cfg and engine file \n");
        assert(get3DTensorVolume(m_Engine->getBindingDimensions(tensor.bindingIndex))
                   == tensor.volume
               && "Tensor volumes don't match between cfg and engine file \n");
    }

    assert(m_Engine->bindingIsInput(m_InputBindingIndex) && "Incorrect input binding index \n");
    assert(m_Engine->getBindingName(m_InputBindingIndex) == m_InputBlobName
           && "Input blob name doesn't match between config and engine file");
    assert(get3DTensorVolume(m_Engine->getBindingDimensions(m_InputBindingIndex)) == m_InputSize);
    return true;
}
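
// Tears down builder-side objects left over from engine construction and frees
// the raw weight buffers.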
void Detecter::destroyNetworkUtils(std::vector<nvinfer1::Weights>& trtWeights)
{
    if (m_Network) m_Network->destroy();
    if (m_Engine) m_Engine->destroy();
    if (m_Builder) m_Builder->destroy();
    if (m_ModelStream) m_ModelStream->destroy();

    // Deallocate the weights.
    for (auto& trtWeight : trtWeights)
    {
        if (trtWeight.count > 0) free(const_cast<void*>(trtWeight.values));
    }
}
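
// Decodes one YOLO head for one image. The head buffer is channel-major per box:
// for grid cell (x, y) and box b, channel c lives at
//     bbindex + numGridCells * (b * (5 + numClasses) + c)
// where channels 0..3 are the box coordinates, 4 is objectness, and 5.. are the
// per-class scores. The raw values are used directly, so any activations are
// expected to have been applied inside the engine.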
std::vector<BBoxInfo> Detecter::decodeTensor(const int imageIdx, const int imageH, const int imageW, const TensorInfo& tensor)
{
    float scale_h = 1.f;
    float scale_w = 1.f;
    int xOffset = 0;
    int yOffset = 0;

    const float* detections = &tensor.hostBuffer[imageIdx * tensor.volume];

    std::vector<BBoxInfo> binfo;
    for (uint32_t y = 0; y < tensor.grid_h; ++y)
    {
        for (uint32_t x = 0; x < tensor.grid_w; ++x)
        {
            for (uint32_t b = 0; b < tensor.numBBoxes; ++b)
            {
                // Anchor (prior) width/height for this box, selected via the mask.
                const float pw = tensor.anchors[tensor.masks[b] * 2];
                const float ph = tensor.anchors[tensor.masks[b] * 2 + 1];

                const int numGridCells = tensor.grid_h * tensor.grid_w;
                const int bbindex = y * tensor.grid_w + x;
                const float bx
                    = x + detections[bbindex + numGridCells * (b * (5 + tensor.numClasses) + 0)];
                const float by
                    = y + detections[bbindex + numGridCells * (b * (5 + tensor.numClasses) + 1)];
                const float bw
                    = pw * detections[bbindex + numGridCells * (b * (5 + tensor.numClasses) + 2)];
                const float bh
                    = ph * detections[bbindex + numGridCells * (b * (5 + tensor.numClasses) + 3)];

                const float objectness
                    = detections[bbindex + numGridCells * (b * (5 + tensor.numClasses) + 4)];

                // Pick the highest-scoring class for this box.
                float maxProb = 0.0f;
                int maxIndex = -1;
                for (uint32_t i = 0; i < tensor.numClasses; ++i)
                {
                    float prob
                        = (detections[bbindex
                                      + numGridCells * (b * (5 + tensor.numClasses) + (5 + i))]);
                    if (prob > maxProb)
                    {
                        maxProb = prob;
                        maxIndex = i;
                    }
                }
                maxProb = objectness * maxProb;

                if (maxProb > m_ProbThresh)
                {
                    add_bbox_proposal(bx, by, bw, bh, tensor.stride_h, tensor.stride_w, scale_h,
                                      scale_w, xOffset, yOffset, maxIndex, maxProb, imageW, imageH,
                                      binfo);
                }
            }
        }
    }
    return binfo;
}
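
// Configures the expected output heads. type == 2 selects a two-head layout
// (13x13 and 26x26 grids for the 416x416 input, with what appears to be the
// stock yolov3-tiny anchor set); any other type selects a three-head layout
// (52x52, 26x26, 13x13 grids for i = 0..2, with the stock yolov4 anchors).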
void Detecter::setOutput(int type)
{
    m_OutputTensors.clear();
    printf("setOutput: model type %d\n", type);
    if (type == 2)
    {
        for (int i = 0; i < 2; ++i)
        {
            TensorInfo outputTensor;
            outputTensor.numClasses = CLASS_BUM;
            outputTensor.blobName = "yolo_" + std::to_string(i);
            outputTensor.gridSize = (m_InputH / 32) * pow(2, i);
            outputTensor.grid_h = (m_InputH / 32) * pow(2, i);
            outputTensor.grid_w = (m_InputW / 32) * pow(2, i);
            outputTensor.stride = m_InputH / outputTensor.gridSize;
            outputTensor.stride_h = m_InputH / outputTensor.grid_h;
            outputTensor.stride_w = m_InputW / outputTensor.grid_w;
            outputTensor.numBBoxes = 3;
            outputTensor.volume = outputTensor.grid_h * outputTensor.grid_w
                * (outputTensor.numBBoxes * (5 + outputTensor.numClasses));
            // Tiny layout: the 13x13 head uses anchors 3-5, the 26x26 head uses 1-3.
            if (i == 0)
            {
                outputTensor.masks = {3, 4, 5};
            }
            if (i == 1)
            {
                outputTensor.masks = {1, 2, 3};
            }
            outputTensor.anchors = {10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319};

            // Default class names are just the numeric indices.
            if (m_ClassNames.empty())
            {
                for (uint32_t j = 0; j < outputTensor.numClasses; ++j)
                {
                    m_ClassNames.push_back(std::to_string(j));
                }
            }
            m_OutputTensors.push_back(outputTensor);
        }
    }
    else
    {
        for (int i = 0; i < 3; ++i)
        {
            TensorInfo outputTensor;
            outputTensor.numClasses = CLASS_BUM;
            outputTensor.blobName = "yolo_" + std::to_string(i);
            // if (i==0) outputTensor.blobName = "139_convolutional_reshape_2";
            // if (i==1) outputTensor.blobName = "150_convolutional_reshape_2";
            // if (i==2) outputTensor.blobName = "161_convolutional_reshape_2";
            outputTensor.gridSize = (m_InputH / 32) * pow(2, 2 - i);
            outputTensor.grid_h = (m_InputH / 32) * pow(2, 2 - i);
            outputTensor.grid_w = (m_InputW / 32) * pow(2, 2 - i);
            outputTensor.stride = m_InputH / outputTensor.gridSize;
            outputTensor.stride_h = m_InputH / outputTensor.grid_h;
            outputTensor.stride_w = m_InputW / outputTensor.grid_w;
            outputTensor.numBBoxes = 3;
            outputTensor.volume = outputTensor.grid_h * outputTensor.grid_w
                * (outputTensor.numBBoxes * (5 + outputTensor.numClasses));
            // Largest grid first: 52x52 -> anchors 0-2, 26x26 -> 3-5, 13x13 -> 6-8.
            if (i == 0)
            {
                outputTensor.masks = {0, 1, 2};
            }
            if (i == 1)
            {
                outputTensor.masks = {3, 4, 5};
            }
            if (i == 2)
            {
                outputTensor.masks = {6, 7, 8};
            }
            outputTensor.anchors = {12, 16,  19, 36,  40, 28,  36, 75,  76,  55,
                                    72, 146, 142, 110, 192, 243, 459, 401};

            // Default class names are just the numeric indices.
            if (m_ClassNames.empty())
            {
                for (uint32_t j = 0; j < outputTensor.numClasses; ++j)
                {
                    m_ClassNames.push_back(std::to_string(j));
                }
            }
            m_OutputTensors.push_back(outputTensor);
        }
    }
}