NVIDIA DeepStream SDK API Reference

6.4 Release
trt_utils.h
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __TRT_UTILS_H__
#define __TRT_UTILS_H__

#include <set>
#include <map>
#include <string>
#include <vector>
#include <cassert>
#include <iostream>
#include <fstream>

#include "NvInfer.h"

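// UNUSED(expr) casts an expression to void to silence unused-value warnings.
// DIVUP(n, d) is integer division rounded up, e.g. DIVUP(10, 4) == 3.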
#define UNUSED(expr) (void)(expr)
#define DIVUP(n, d) (((n) + (d) - 1) / (d))

// Trims leading and trailing whitespace from a string.
std::string trim(std::string s);
// Clamps val to the range [minVal, maxVal].
float clamp(const float val, const float minVal, const float maxVal);
// Returns true if fileName exists on disk (verbose controls logging).
bool fileExists(const std::string fileName, bool verbose = true);
// Loads the weights file for the given network type into a flat float vector.
std::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType);
// Formats tensor dimensions as a human-readable string.
std::string dimsToString(const nvinfer1::Dims d);
// Returns the number of channels of the given tensor.
int getNumChannels(nvinfer1::ITensor* t);
// Returns the element count (C * H * W) of a 3-D tensor.
uint64_t get3DTensorVolume(nvinfer1::Dims inputDims);

// Helper functions to create the YOLO engine.
// Each netAdd* helper appends the corresponding layer, configured from a block
// of key/value configuration strings, to the TensorRT network definition and
// returns the resulting layer.
nvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,
                                nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,
                                   std::vector<float>& weights,
                                   std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
                                   int& inputChannels, nvinfer1::ITensor* input,
                                   nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,
                                    std::vector<float>& weights,
                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
                                    int& inputChannels, nvinfer1::ITensor* input,
                                    nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,
                                 std::vector<float>& weights,
                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,
                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
// Prints one formatted summary row (index, name, input/output dims, weight pointer) per layer.
void printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,
                    std::string layerOutput, std::string weightPtr);

#endif
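A minimal usage sketch for the general utilities declared above, assuming a darknet-style weights file and a "yolov3" network-type string; the accepted networkType values and the weights-file layout are defined by the implementation, so the path and type used here are illustrative only.

#include <iostream>
#include <string>
#include <vector>
#include "NvInfer.h"
#include "trt_utils.h"

int main()
{
    const std::string wtsPath = "yolov3.weights"; // hypothetical weights file
    if (!fileExists(wtsPath))                     // verbose defaults to true
        return 1;

    // Flat list of floats in the order the network consumes them.
    std::vector<float> weights = loadWeights(wtsPath, "yolov3");
    std::cout << "Loaded " << weights.size() << " weights" << std::endl;

    // Element count of a CHW input, e.g. 3 x 416 x 416 = 519168.
    nvinfer1::Dims3 inputDims{3, 416, 416};
    std::cout << dimsToString(inputDims) << " -> "
              << get3DTensorVolume(inputDims) << " elements" << std::endl;
    return 0;
}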
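A hedged sketch of how the layer helpers might be chained when building a network. The keys in the block map ("batch_normalize", "filters", "size", "stride", "pad", "activation") follow darknet .cfg conventions and are assumptions for illustration; the exact keys each helper reads, and the weight ordering it expects, are defined by the implementation.

#include <cassert>
#include <map>
#include <string>
#include <vector>
#include "NvInfer.h"
#include "trt_utils.h"

// Appends one convolution + batch-norm + leaky-ReLU block to the network and
// returns its output tensor; weightPtr advances past the weights consumed.
nvinfer1::ITensor* addConvBNLeakyBlock(nvinfer1::INetworkDefinition* network,
                                       nvinfer1::ITensor* input,
                                       std::vector<float>& weights,
                                       std::vector<nvinfer1::Weights>& trtWeights,
                                       int& weightPtr, int& inputChannels)
{
    // Hypothetical cfg block describing the convolution.
    std::map<std::string, std::string> block = {
        {"type", "convolutional"}, {"batch_normalize", "1"},
        {"filters", "32"}, {"size", "3"}, {"stride", "1"},
        {"pad", "1"}, {"activation", "leaky"}};

    nvinfer1::ILayer* conv = netAddConvBNLeaky(/*layerIdx=*/0, block, weights,
                                               trtWeights, weightPtr,
                                               inputChannels, input, network);
    assert(conv != nullptr);

    printLayerInfo("0", "conv_bn_leaky",
                   dimsToString(input->getDimensions()),
                   dimsToString(conv->getOutput(0)->getDimensions()),
                   std::to_string(weightPtr));
    return conv->getOutput(0);
}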