// Derived from Algorithm/baseDetector
//
// Author: Sun Tianyu (孙天宇)
// 2022-07-12 ce9d187fd294cca192a27f52719094e9df7b1b62
// src/detecter_tools/trt_utils.h
// NOTE(review): stray patch hunk marker "@@ -67,6 +67,34 @@" was left in the
// file by a bad merge/paste; commented out so the header compiles.
        }
    }
};
// ---------------------------------------------------------------------------
// Layer-construction helpers: each builds one TensorRT layer from a
// darknet-style cfg section (`block` maps cfg key -> value) and appends it to
// `network` on top of `input`, returning the created layer.
//
// Shared parameter conventions (presumed from the signatures — confirm in the
// .cpp): `weights` is the flat array read from the darknet .weights file,
// `weightPtr` is the read cursor into it (passed by non-const ref, so it is
// advanced by the call), `trtWeights` collects the nvinfer1::Weights objects
// created so their buffers can be released after engine build, and
// `inputChannels` carries the channel count between layers.
// ---------------------------------------------------------------------------

// Adds a max-pooling layer described by `block`.
nvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,
                                nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
// Adds a convolution with linear (identity) activation.
nvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,
                                   std::vector<float>& weights,
                                   std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
                                   int& inputChannels, nvinfer1::ITensor* input,
                                   nvinfer1::INetworkDefinition* network);
// Adds convolution + batch-norm + Mish activation (YOLOv4-style block).
nvinfer1::ILayer* net_conv_bn_mish(int layerIdx,
    std::map<std::string, std::string>& block,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    int& inputChannels,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);
// Adds convolution + batch-norm + leaky-ReLU activation (YOLOv3-style block).
nvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,
                                    std::vector<float>& weights,
                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
                                    int& inputChannels, nvinfer1::ITensor* input,
                                    nvinfer1::INetworkDefinition* network);

// Formats a Dims value for logging/diagnostics.
std::string dimsToString(const nvinfer1::Dims d);
// Loads the raw float weights for `networkType` from the file at `weightsFilePath`.
std::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType);
// Returns the channel count of tensor `t`.
int getNumChannels(nvinfer1::ITensor* t);
// String trimming helpers: trim() strips both ends; triml()/trimr() strip the
// characters listed in `t` from the left/right end respectively.
std::string trim(std::string s);
std::string triml(std::string s, const char* t);
std::string trimr(std::string s, const char* t);
// Clamps `val` into [minVal, maxVal].
float clamp(const float val, const float minVal, const float maxVal);
// Common helper functions
// Packs a batch of DsImages into a single blob (declaration is truncated here
// by the stray patch marker on the next line — remaining parameters are in the
// upstream header; confirm against the .cpp).
cv::Mat blobFromDsImages(const std::vector<DsImage>& inputImages, const int& inputH,
// NOTE(review): stray patch hunk marker "@@ -77,9 +105,53 @@" was left in the
// file by a bad merge/paste; it truncates the blobFromDsImages declaration
// above — restore the missing parameters from the upstream header.
// Deserializes a prebuilt TensorRT engine from `planFilePath`, using
// `pluginFactory` to recreate custom plugin layers and `logger` for TRT messages.
nvinfer1::ICudaEngine* loadTRTEngine(const std::string planFilePath, PluginFactory* pluginFactory,
                                     Logger& logger);
// Returns the element count (volume) of a 3-D tensor shape.
uint64_t get3DTensorVolume(nvinfer1::Dims inputDims);
// Reads image paths from `filename`, one per line, prepending `prefix` to each.
std::vector<std::string> loadImageList(const std::string filename, const std::string prefix);
// Runs non-maximum suppression per class over `binfo`; `model_type` selects
// model-specific handling (see the .cpp for the recognized values).
std::vector<BBoxInfo> nmsAllClasses(const float nmsThresh, std::vector<BBoxInfo>& binfo,
                                    const uint32_t numClasses, const std::string &model_type);
// Prints one formatted row of the network-construction summary table.
void printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,
                    std::string layerOutput, std::string weightPtr);
// Single-class non-maximum suppression over `binfo` (takes a copy).
std::vector<BBoxInfo> nonMaximumSuppression(const float nmsThresh, std::vector<BBoxInfo> binfo);
// Splits a delimiter-separated list of layer indices (e.g. "-1, 36") into ints.
std::vector<int> split_layer_index(const std::string &s_,const std::string &delimiter_);
// Adds a split/slice layer taking part of `input_` (darknet "route group" style —
// presumed from the name; confirm in the .cpp).
nvinfer1::ILayer * layer_split(const int n_layer_index_,
    nvinfer1::ITensor *input_,
    nvinfer1::INetworkDefinition* network);
// Adds an upsample layer described by `block`.
nvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,
                                 std::vector<float>& weights,
                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,
                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
// Reads `filename` into a vector of lines.
std::vector<std::string> loadListFromTextFile(const std::string filename);
class YoloTinyMaxpoolPaddingFormula : public nvinfer1::IOutputDimensionsFormula
{
private:
    std::set<std::string> m_SamePaddingLayers;
    nvinfer1::DimsHW compute(nvinfer1::DimsHW inputDims, nvinfer1::DimsHW kernelSize,
                             nvinfer1::DimsHW stride, nvinfer1::DimsHW padding,
                             nvinfer1::DimsHW dilation, const char* layerName) const override
    {
     //   assert(inputDims.d[0] == inputDims.d[1]);
        assert(kernelSize.d[0] == kernelSize.d[1]);
        assert(stride.d[0] == stride.d[1]);
        assert(padding.d[0] == padding.d[1]);
        int output_h, output_w;
        // Only layer maxpool_12 makes use of same padding
        if (m_SamePaddingLayers.find(layerName) != m_SamePaddingLayers.end())
        {
            output_h = (inputDims.d[0] + 2 * padding.d[0]) / stride.d[0];
            output_w = (inputDims.d[1] + 2 * padding.d[1]) / stride.d[1];
        }
        // Valid Padding
        else
        {
            output_h = (inputDims.d[0] - kernelSize.d[0]) / stride.d[0] + 1;
            output_w = (inputDims.d[1] - kernelSize.d[1]) / stride.d[1] + 1;
        }
        return nvinfer1::DimsHW{output_h, output_w};
    }
public:
    void addSamePaddingLayer(std::string input) { m_SamePaddingLayers.insert(input); }
};
#endif