Jetson Linux API Reference

32.7.5 Release
trt_inference.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * * Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * * Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  * * Neither the name of NVIDIA CORPORATION nor the names of its
13  * contributors may be used to endorse or promote products derived
14  * from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 #ifndef TRT_INFERENCE_H_
29 #define TRT_INFERENCE_H_
30 
31 #include <fstream>
32 #include <queue>
33 #include "NvInfer.h"
34 #include "NvCaffeParser.h"
35 #include "NvOnnxParser.h"
36 #include "opencv2/video/tracking.hpp"
37 #include "opencv2/imgproc/imgproc.hpp"
38 #include "opencv2/highgui/highgui.hpp"
39 #include <opencv2/objdetect/objdetect.hpp>
40 using namespace nvinfer1;
41 using namespace nvcaffeparser1;
42 using namespace nvonnxparser;
43 using namespace std;
44 
45 // Model Index
46 #define GOOGLENET_SINGLE_CLASS 0
47 #define GOOGLENET_THREE_CLASS 1
48 #define RESNET_THREE_CLASS 2
49 
50 class Logger;
51 
52 class Profiler;
53 
54 class TRT_Context
55 {
56 public:
57  //net related parameter
58  int getNetWidth() const; // network input width, in pixels
59 
60  int getNetHeight() const; // network input height, in pixels
61 
62  uint32_t getBatchSize() const;
63 
64  int getChannel() const; // channel count of the input file
65 
66  int getModelClassCnt() const; // class count of the active model (classCnt below)
67 
68  void* getScales() const; // per-channel scale buffer (scales_gpu)
69 
70  void* getOffsets() const; // per-channel offset buffer (offset_gpu)
71 
72  // Buffer is allocated in TRT_Context;
73  // expose this interface for inputting data.
74  void*& getBuffer(const int& index); // binding buffer at the given index
75 
76  float*& getInputBuf();
77 
78  uint32_t getNumTrtInstances() const; // inference channel count (trtinstance_num)
79 
80  //0 fp16 1 fp32 2 int8 (matches Mode_type below)
81  void setMode(const int& mode);
82 
83  void setBatchSize(const uint32_t& batchsize);
84 
85  void setDumpResult(const bool& dump_result); // enable dumping results (see result_file)
86 
87  void setTrtProfilerEnabled(const bool& enable_trt_profiler);
88 
89  int getFilterNum() const;
90  void setFilterNum(const unsigned int& filter_num);
91 
92  TRT_Context();
93 
94  void setModelIndex(int modelIndex); // selects a gModelNetAttr entry; use the Model Index defines
95 
96  void buildTrtContext(const string& deployfile,
97  const string& modelfile, bool bUseCPUBuf = false, bool isOnnxModel = false); // builds engine from a Caffe or ONNX model
98 
99  void doInference(
100  queue< vector<cv::Rect> >* rectList_queue,
101  float *input = NULL); // runs inference; detected rects delivered via rectList_queue
102 
103  void destroyTrtContext(bool bUseCPUBuf = false);
104 
105  ~TRT_Context();
106 
107 private:
108  int net_width;
109  int net_height;
110  int filter_num;
111  void **buffers; // TRT binding buffers, indexed by inputIndex/outputIndex/outputIndexBBOX
112  float *input_buf;
113  float *output_cov_buf; // coverage (class confidence) output buffer
114  float *output_bbox_buf; // bounding-box output buffer
115  void* offset_gpu;
116  void* scales_gpu;
117  float helnet_scale[4];
118  IRuntime *runtime;
119  ICudaEngine *engine;
120  IExecutionContext *context;
121  uint32_t *pResultArray;
122  int channel; //input file's channel
123  int num_bindings;
124  int trtinstance_num; //inference channel num
125  int batch_size;
126  int mode; // Mode_type value: 0 fp16, 1 fp32, 2 int8
127  bool dump_result;
128  ofstream fstream; // NOTE(review): member name shadows std::fstream (file has 'using namespace std')
129  bool enable_trt_profiler;
130  bool is_onnx_model;
131  IHostMemory *trtModelStream{nullptr}; // serialized engine stream
132  vector<string> outputs;
133  string result_file;
134  Logger *pLogger;
135  Profiler *pProfiler;
136  int frame_num;
137  uint64_t elapsed_frame_num;
138  uint64_t elapsed_time;
139  int inputIndex;
140  int outputIndex;
141  int outputIndexBBOX;
142  Dims3 inputDims;
143  Dims3 outputDims;
144  Dims3 outputDimsBBOX;
145  size_t inputSize;
146  size_t outputSize;
147  size_t outputSizeBBOX;
148 
149  // Per-model network attributes; g_pModelNetAttr points at the entry
150  struct {
151  const int classCnt; // number of detection classes
152  float THRESHOLD[3]; // per-class coverage threshold
153  const char *INPUT_BLOB_NAME;
154  const char *OUTPUT_BLOB_NAME; // coverage blob name
155  const char *OUTPUT_BBOX_NAME; // bbox blob name
156  const int STRIDE;
157  const int WORKSPACE_SIZE; // TRT build workspace size, bytes
158  int offsets[3]; // per-channel offsets (cf. offset_gpu)
159  float input_scale[3]; // per-channel input scales (cf. scales_gpu)
160  float bbox_output_scales[4];
161  const int ParseFunc_ID; // presumably 0 -> parseBbox, 1 -> ParseResnet10Bbox — confirm in .cpp
162  } *g_pModelNetAttr, gModelNetAttr[4] = {
163  {
164  // GOOGLENET_SINGLE_CLASS
165  1,
166  {0.8, 0, 0},
167  "data",
168  "coverage",
169  "bboxes",
170  4,
171  450 * 1024 * 1024,
172  {0, 0, 0},
173  {1.0f, 1.0f, 1.0f},
174  {1, 1, 1, 1},
175  0
176  },
177 
178  {
179  // GOOGLENET_THREE_CLASS
180  3,
181  {0.6, 0.6, 1.0}, //People, Motorbike, Car
182  "data",
183  "Layer16_cov",
184  "Layer16_bbox",
185  16,
186  110 * 1024 * 1024,
187  {124, 117, 104},
188  {1.0f, 1.0f, 1.0f},
189  {-640, -368, 640, 368},
190  0
191  },
192 
193  {
194  // RESNET_THREE_CLASS
195  4, // NOTE(review): classCnt is 4 for the "three class" model — confirm intended
196  {0.1, 0.1, 0.1}, //People, Motorbike, Car
197  "data",
198  "Layer7_cov",
199  "Layer7_bbox",
200  16,
201  110 * 1024 * 1024,
202  {0, 0, 0},
203  {0.0039215697906911373, 0.0039215697906911373, 0.0039215697906911373}, // ~1/255
204  {-640, -368, 640, 368},
205  1
206  },
207  };
208  // Precision modes accepted by setMode()
209  enum Mode_type{
210  MODE_FP16 = 0,
211  MODE_FP32 = 1,
212  MODE_INT8 = 2
213  };
214  int parseNet(const string& deployfile);
215  void parseBbox(vector<cv::Rect>* rectList, int batch_th);
216  void ParseResnet10Bbox(vector<cv::Rect>* rectList, int batch_th);
217  void allocateMemory(bool bUseCPUBuf);
218  void releaseMemory(bool bUseCPUBuf);
219  void caffeToTRTModel(const string& deployfile, const string& modelfile);
220  void onnxToTRTModel(const string& modelfile);
221 };
222 
223 #endif
220 
221 #endif
TRT_Context::OUTPUT_BBOX_NAME
const char * OUTPUT_BBOX_NAME
Definition: trt_inference.h:154
TRT_Context::OUTPUT_BLOB_NAME
const char * OUTPUT_BLOB_NAME
Definition: trt_inference.h:153
TRT_Context::INPUT_BLOB_NAME
const char * INPUT_BLOB_NAME
Definition: trt_inference.h:152
TRT_Context::classCnt
const int classCnt
Definition: trt_inference.h:150
TRT_Context::WORKSPACE_SIZE
const int WORKSPACE_SIZE
Definition: trt_inference.h:156
TRT_Context::ParseFunc_ID
const int ParseFunc_ID
Definition: trt_inference.h:160
TRT_Context::STRIDE
const int STRIDE
Definition: trt_inference.h:155
TRT_Context
Definition: trt_inference.h:54