NVIDIA DeepStream SDK API Reference

6.4 Release
nvdsinfer_model_builder.h
Go to the documentation of this file.
1 
12 #ifndef __NVDSINFER_MODEL_BUILDER_H__
13 #define __NVDSINFER_MODEL_BUILDER_H__
14 
15 #include <stdarg.h>
16 #include <algorithm>
17 #include <condition_variable>
18 #include <map>
19 #include <memory>
20 #include <mutex>
21 #include <queue>
22 #include <string>
23 #include <unordered_map>
24 
25 #include <NvCaffeParser.h>
26 #include <NvInfer.h>
27 #include <NvInferRuntime.h>
28 #include <NvOnnxParser.h>
29 
30 #include <nvdsinfer_custom_impl.h>
31 #include "nvdsinfer_func_utils.h"
32 #include "nvdsinfer_tlt.h"
33 
34 /* This file provides APIs for building models from Caffe/UFF/ONNX files. It
35  * also defines an interface where users can provide custom model parsers for
36  * custom networks. A helper class (TrtEngine) written on top of TensorRT's
37  * nvinfer1::ICudaEngine is also defined in this file.
38  *
39  * These interfaces/APIs are used by NvDsInferContextImpl class. */
40 
41 namespace nvdsinfer {
42 
44 
45 static const size_t kWorkSpaceSize = 450 * 1024 * 1024; // 450MB
46 
51 class BaseModelParser : public IModelParser
52 {
53 public:
55  const std::shared_ptr<DlLibHandle>& dllib)
56  : m_ModelParams(params), m_LibHandle(dllib) {}
57  virtual ~BaseModelParser() {}
58  virtual bool isValid() const = 0;
59 
60 private:
61  DISABLE_CLASS_COPY(BaseModelParser);
62 
63 protected:
65  std::shared_ptr<DlLibHandle> m_LibHandle;
66 };
67 
73 {
74 public:
76  const std::shared_ptr<DlLibHandle>& handle = nullptr);
77  ~CaffeModelParser() override;
78  bool isValid() const override { return m_CaffeParser.get(); }
79  const char* getModelName() const override { return m_ModelPath.c_str(); }
80  bool hasFullDimsSupported() const override { return true; }
81 
82  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
83 
84 private:
85  NvDsInferStatus setPluginFactory();
86 
87 private:
88  std::string m_ProtoPath;
89  std::string m_ModelPath;
90  std::vector<std::string> m_OutputLayers;
91  NvDsInferPluginFactoryCaffe m_CaffePluginFactory{nullptr};
92  UniquePtrWDestroy<nvcaffeparser1::ICaffeParser> m_CaffeParser;
93 };
94 
100 {
101 public:
102  struct ModelParams
103  {
104  std::string uffFilePath;
105  nvuffparser::UffInputOrder inputOrder;
106  std::vector<std::string> inputNames;
107  std::vector<nvinfer1::Dims> inputDims;
108  std::vector<std::string> outputNames;
109  };
110 
111 public:
112  UffModelParser(const NvDsInferContextInitParams& initParams,
113  const std::shared_ptr<DlLibHandle>& handle = nullptr);
114  ~UffModelParser() override;
115  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
116  bool isValid() const override { return m_UffParser.get(); }
117  const char* getModelName() const override
118  {
119  return m_ModelParams.uffFilePath.c_str();
120  }
121  bool hasFullDimsSupported() const override { return false; }
122 
123 protected:
127 };
128 
134 {
135 public:
137  const std::shared_ptr<DlLibHandle>& handle = nullptr)
138  : BaseModelParser(initParams, handle),
139  m_ModelName(initParams.onnxFilePath) {}
140  ~OnnxModelParser() override = default;
141  bool isValid() const override { return !m_ModelName.empty(); }
142  const char* getModelName() const override { return m_ModelName.c_str(); }
143  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
144  bool hasFullDimsSupported() const override { return true; }
145 
146 private:
147  std::string m_ModelName;
148 
149 protected:
151 };
152 
161 {
162 public:
164  const std::shared_ptr<DlLibHandle>& handle);
165 
167 
168  bool isValid() const override
169  {
170  return (bool)m_CustomParser;
171  }
172 
173  const char* getModelName() const override
174  {
175  return isValid() ? safeStr(m_CustomParser->getModelName()) : "";
176  }
177 
178  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
179  bool hasFullDimsSupported() const override
180  {
181  return m_CustomParser->hasFullDimsSupported();
182  }
183 
184 private:
185  std::unique_ptr<IModelParser> m_CustomParser;
186 };
187 
189 class TrtModelBuilder;
190 
196 {
197  using TensorIOFormat =
198  std::tuple<nvinfer1::DataType, nvinfer1::TensorFormats>;
199  using LayerDevicePrecision =
200  std::tuple<nvinfer1::DataType, nvinfer1::DeviceType>;
201 
205  int dlaCore = -1;
206  std::unordered_map<std::string, TensorIOFormat> inputFormats;
207  std::unordered_map<std::string, TensorIOFormat> outputFormats;
208  std::unordered_map<std::string, LayerDevicePrecision> layerDevicePrecisions;
209 
210 public:
211  virtual ~BuildParams(){};
212  virtual NvDsInferStatus configBuilder(TrtModelBuilder& builder) = 0;
213  virtual bool sanityCheck() const;
214 };
215 
220 {
221  int maxBatchSize = 0;
222  std::vector<nvinfer1::Dims> inputDims;
223 
224 private:
225  NvDsInferStatus configBuilder(TrtModelBuilder& builder) override;
226 
227  bool sanityCheck() const override;
228 };
229 
230 using ProfileDims = std::array<nvinfer1::Dims,
231  nvinfer1::EnumMax<nvinfer1::OptProfileSelector>()>;
232 
237 {
238  // profileSelector, dims without batchSize
239  // each input must have 3 selectors (MIN/OPT/MAX) for profile0;
240  // multiple profiles are not supported
241  std::vector<ProfileDims> inputProfileDims;
242  int minBatchSize = 1;
243  int optBatchSize = 1;
244  int maxBatchSize = 1;
246 
247 private:
248  NvDsInferStatus configBuilder(TrtModelBuilder& builder) override;
249  bool sanityCheck() const override;
250 };
251 
256 {
257 public:
259  : m_Engine(std::move(engine)), m_DlaCore(dlaCore) {}
260 
262  const SharedPtrWDestroy<nvinfer1::IRuntime>& runtime, int dlaCore = -1,
263  const std::shared_ptr<DlLibHandle>& dlHandle = nullptr,
264  nvinfer1::IPluginFactory* pluginFactory = nullptr);
265 
266  ~TrtEngine();
267 
268  bool hasDla() const { return m_DlaCore >= 0; }
269  int getDlaCore() const { return m_DlaCore; }
270 
272  std::vector<NvDsInferBatchDimsLayerInfo>& layersInfo);
274  int profileIdx, std::vector<NvDsInferBatchDimsLayerInfo>& layersInfo);
276 
277  void printEngineInfo();
278 
279  nvinfer1::ICudaEngine& engine()
280  {
281  assert(m_Engine);
282  return *m_Engine;
283  }
284 
285  nvinfer1::ICudaEngine* operator->()
286  {
287  assert(m_Engine);
288  return m_Engine.get();
289  }
290 
291 private:
292  DISABLE_CLASS_COPY(TrtEngine);
293 
296  std::shared_ptr<DlLibHandle> m_DlHandle;
297  nvinfer1::IPluginFactory* m_RuntimePluginFactory = nullptr;
298  int m_DlaCore = -1;
299 
300  friend bool ::NvDsInferCudaEngineGetFromTltModel( nvinfer1::IBuilder * const builder,
301  nvinfer1::IBuilderConfig * const builderConfig,
302  const NvDsInferContextInitParams * const initParams,
303  nvinfer1::DataType dataType,
304  nvinfer1::ICudaEngine *& cudaEngine);
305 };
306 
316 {
317 public:
318  TrtModelBuilder(int gpuId, nvinfer1::ILogger& logger,
319  const std::shared_ptr<DlLibHandle>& dlHandle = nullptr);
320 
322  m_Parser.reset();
323  }
324 
325  void setInt8Calibrator(std::unique_ptr<nvinfer1::IInt8Calibrator>&& calibrator)
326  {
327  m_Int8Calibrator = std::move(calibrator);
328  }
329 
330  /* Populate INetworkDefinition by parsing the model, build the engine and
331  * return it as TrtEngine instance. Also, returns a suggested path for
332  * writing the serialized engine to.
333  *
334  * Suggested path has the following format:
335  * suggested path = [modelName]_b[#batchSize]_[#device]_[#dataType].engine
336  */
337  std::unique_ptr<TrtEngine> buildModel(
338  const NvDsInferContextInitParams& initParams,
339  std::string& suggestedPathName);
340 
341  /* Builds the engine from an already populated INetworkDefinition based on
342  * the BuildParams passed to it. Returns the engine in the form of TrtEngine
343  * instance.
344  */
345  std::unique_ptr<TrtEngine> buildEngine(
346  nvinfer1::INetworkDefinition& network, BuildParams& options);
347 
348  /* Serialize engine to file
349  */
351  const std::string& path, nvinfer1::ICudaEngine& engine);
352 
353  /* Deserialize engine from file
354  */
355  std::unique_ptr<TrtEngine> deserializeEngine(
356  const std::string& path, int dla = -1);
357 
358 private:
359  /* Parses a model file using an IModelParser implementation for
360  * Caffe/UFF/ONNX formats or from custom IModelParser implementation.
361  */
362  NvDsInferStatus buildNetwork(const NvDsInferContextInitParams& initParams);
363 
364  /* build cudaEngine from Network; be careful about implicitBatch and
365  * explicitBatch.
366  */
367  std::unique_ptr<TrtEngine> buildEngine();
368 
369  /* Calls a custom library's implementation of NvDsInferCudaEngineGet function
370  * to get a built ICudaEngine. */
371  std::unique_ptr<TrtEngine> getCudaEngineFromCustomLib(
372  NvDsInferCudaEngineGetFcnDeprecated cudaEngineGetDeprecatedFcn,
373  NvDsInferEngineCreateCustomFunc cudaEngineGetFcn,
374  const NvDsInferContextInitParams& initParams,
375  NvDsInferNetworkMode &networkMode);
376 
377 
378  /* config builder options */
379  NvDsInferStatus configCommonOptions(BuildParams& params);
380  NvDsInferStatus configImplicitOptions(ImplicitBuildParams& params);
381  NvDsInferStatus configExplicitOptions(ExplicitBuildParams& params);
382 
383  std::unique_ptr<BuildParams> createImplicitParams(
384  const NvDsInferContextInitParams& initParams);
385  std::unique_ptr<BuildParams> createDynamicParams(
386  const NvDsInferContextInitParams& initParams);
387  void initCommonParams(
388  BuildParams& params, const NvDsInferContextInitParams& initParams);
389 
390  DISABLE_CLASS_COPY(TrtModelBuilder);
391 
392  int m_GpuId = 0;
393  nvinfer1::ILogger& m_Logger;
394  std::shared_ptr<DlLibHandle> m_DlLib;
395  std::shared_ptr<BaseModelParser> m_Parser;
396  std::unique_ptr<BuildParams> m_Options;
400  std::shared_ptr<nvinfer1::IInt8Calibrator> m_Int8Calibrator;
401 
402  friend class BuildParams;
403  friend class ImplicitBuildParams;
404  friend class ExplicitBuildParams;
405 
406  friend bool ::NvDsInferCudaEngineGetFromTltModel( nvinfer1::IBuilder * const builder,
407  nvinfer1::IBuilderConfig * const builderConfig,
408  const NvDsInferContextInitParams * const initParams,
409  nvinfer1::DataType dataType,
410  nvinfer1::ICudaEngine *& cudaEngine);
411 };
412 
413 } // end of namespace nvdsinfer
414 
415 #endif
nvdsinfer::UffModelParser::ModelParams::inputNames
std::vector< std::string > inputNames
Definition: nvdsinfer_model_builder.h:106
nvdsinfer::CaffeModelParser::~CaffeModelParser
~CaffeModelParser() override
nvdsinfer::UffModelParser::UffModelParser
UffModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
nvdsinfer::OnnxModelParser::~OnnxModelParser
~OnnxModelParser() override=default
nvdsinfer::OnnxModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:144
nvdsinfer::BuildParams::inputFormats
std::unordered_map< std::string, TensorIOFormat > inputFormats
Definition: nvdsinfer_model_builder.h:206
nvdsinfer::CustomModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:168
nvdsinfer::CaffeModelParser
Implementation of ModelParser for Caffe models, derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:72
nvdsinfer::TrtEngine::getLayerInfo
NvDsInferStatus getLayerInfo(int idx, NvDsInferLayerInfo &layer)
nvdsinfer::TrtEngine::getImplicitLayersInfo
NvDsInferStatus getImplicitLayersInfo(std::vector< NvDsInferBatchDimsLayerInfo > &layersInfo)
nvdsinfer::BuildParams::sanityCheck
virtual bool sanityCheck() const
NvDsInferTensorOrder
NvDsInferTensorOrder
Defines UFF input layer orders.
Definition: nvdsinfer_context.h:170
nvdsinfer::ImplicitBuildParams
Holds build parameters required for implicit batch dimension network.
Definition: nvdsinfer_model_builder.h:219
nvdsinfer::CustomModelParser
Implementation of ModelParser for custom models.
Definition: nvdsinfer_model_builder.h:160
nvdsinfer::TrtEngine
Helper class written on top of nvinfer1::ICudaEngine.
Definition: nvdsinfer_model_builder.h:255
nvdsinfer::UffModelParser::ModelParams
Definition: nvdsinfer_model_builder.h:102
nvdsinfer::UffModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:121
nvdsinfer::UffModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::BaseModelParser::m_LibHandle
std::shared_ptr< DlLibHandle > m_LibHandle
Definition: nvdsinfer_model_builder.h:65
nvdsinfer::UffModelParser::m_ModelParams
ModelParams m_ModelParams
Definition: nvdsinfer_model_builder.h:125
nvdsinfer::CustomModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::ExplicitBuildParams::inputProfileDims
std::vector< ProfileDims > inputProfileDims
Definition: nvdsinfer_model_builder.h:241
nvdsinfer::UffModelParser::ModelParams::inputDims
std::vector< nvinfer1::Dims > inputDims
Definition: nvdsinfer_model_builder.h:107
ds3d::DataType
DataType
Definition: idatatype.h:77
nvdsinfer::BuildParams::workspaceSize
size_t workspaceSize
Definition: nvdsinfer_model_builder.h:202
nvdsinfer::OnnxModelParser
Implementation of ModelParser for ONNX models derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:133
NvDsInferCudaEngineGetFromTltModel
bool NvDsInferCudaEngineGetFromTltModel(nvinfer1::IBuilder *const builder, nvinfer1::IBuilderConfig *const builderConfig, const NvDsInferContextInitParams *const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine)
Decodes and creates a CUDA engine file from a TLT encoded model.
nvdsinfer::ImplicitBuildParams::maxBatchSize
int maxBatchSize
Definition: nvdsinfer_model_builder.h:221
nvdsinfer::TrtModelBuilder
Helper class to build models and generate the TensorRT ICudaEngine required for inference.
Definition: nvdsinfer_model_builder.h:315
nvdsinfer::TrtEngine::printEngineInfo
void printEngineInfo()
nvdsinfer::BuildParams::outputFormats
std::unordered_map< std::string, TensorIOFormat > outputFormats
Definition: nvdsinfer_model_builder.h:207
nvdsinfer::BaseModelParser::BaseModelParser
BaseModelParser(const NvDsInferContextInitParams &params, const std::shared_ptr< DlLibHandle > &dllib)
Definition: nvdsinfer_model_builder.h:54
nvdsinfer::BuildParams::int8CalibrationFilePath
std::string int8CalibrationFilePath
Definition: nvdsinfer_model_builder.h:204
nvdsinfer::CaffeModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:79
nvdsinfer::CustomModelParser::~CustomModelParser
~CustomModelParser()
Definition: nvdsinfer_model_builder.h:166
nvdsinfer::BuildParams::dlaCore
int dlaCore
Definition: nvdsinfer_model_builder.h:205
nvdsinfer::UffModelParser::ModelParams::inputOrder
nvuffparser::UffInputOrder inputOrder
Definition: nvdsinfer_model_builder.h:105
nvdsinfer::TrtModelBuilder::serializeEngine
NvDsInferStatus serializeEngine(const std::string &path, nvinfer1::ICudaEngine &engine)
nvdsinfer::BuildParams::networkMode
NvDsInferNetworkMode networkMode
Definition: nvdsinfer_model_builder.h:203
nvdsinfer::TrtEngine::TrtEngine
TrtEngine(UniquePtrWDestroy< nvinfer1::ICudaEngine > &&engine, int dlaCore=-1)
Definition: nvdsinfer_model_builder.h:258
NvDsInferCudaEngineGet
bool NvDsInferCudaEngineGet(nvinfer1::IBuilder *builder, NvDsInferContextInitParams *initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine) __attribute__((deprecated("Use 'engine-create-func-name' config parameter instead")))
The NvDsInferCudaEngineGet interface has been deprecated and has been replaced by NvDsInferEngineCreateCustomFunc.
NvDsInferNetworkMode
NvDsInferNetworkMode
Defines internal data formats used by the inference engine.
Definition: nvdsinfer_context.h:115
nvdsinfer
Copyright (c) 2019-2021, NVIDIA CORPORATION.
Definition: nvdsinfer_model_builder.h:41
nvdsinfer::BuildParams::TensorIOFormat
std::tuple< nvinfer1::DataType, nvinfer1::TensorFormats > TensorIOFormat
Definition: nvdsinfer_model_builder.h:198
nvdsinfer::OnnxModelParser::m_OnnxParser
UniquePtrWDestroy< nvonnxparser::IParser > m_OnnxParser
Definition: nvdsinfer_model_builder.h:150
nvdsinfer::TrtModelBuilder::~TrtModelBuilder
~TrtModelBuilder()
Definition: nvdsinfer_model_builder.h:321
NvDsInferNetworkMode_FP32
@ NvDsInferNetworkMode_FP32
Definition: nvdsinfer_context.h:117
nvdsinfer::UffModelParser::m_UffParser
UniquePtrWDestroy< nvuffparser::IUffParser > m_UffParser
Definition: nvdsinfer_model_builder.h:126
nvdsinfer::CaffeModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::TrtEngine::engine
nvinfer1::ICudaEngine & engine()
Definition: nvdsinfer_model_builder.h:279
nvdsinfer::ExplicitBuildParams::optBatchSize
int optBatchSize
Definition: nvdsinfer_model_builder.h:243
NvDsInferEngineCreateCustomFunc
bool(* NvDsInferEngineCreateCustomFunc)(nvinfer1::IBuilder *const builder, nvinfer1::IBuilderConfig *const builderConfig, const NvDsInferContextInitParams *const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine)
Type definition for functions that build and return a CudaEngine for custom models.
Definition: nvdsinfer_custom_impl.h:374
nvdsinfer::TrtEngine::hasDla
bool hasDla() const
Definition: nvdsinfer_model_builder.h:268
nvdsinfer::CaffeModelParser::CaffeModelParser
CaffeModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
nvdsinfer::BaseModelParser::isValid
virtual bool isValid() const =0
NvDsInferLayerInfo
Holds information about one layer in the model.
Definition: nvdsinfer.h:87
nvdsinfer::SharedPtrWDestroy< nvinfer1::IRuntime >
nvdsinfer_custom_impl.h
nvdsinfer::CaffeModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:78
nvdsinfer::TrtModelBuilder::deserializeEngine
std::unique_ptr< TrtEngine > deserializeEngine(const std::string &path, int dla=-1)
nvdsinfer::ProfileDims
std::array< nvinfer1::Dims, nvinfer1::EnumMax< nvinfer1::OptProfileSelector >()> ProfileDims
Definition: nvdsinfer_model_builder.h:231
NvDsInferPluginFactoryCaffe
Holds a pointer to a heap-allocated Plugin Factory object required during Caffe model parsing.
Definition: nvdsinfer_custom_impl.h:407
nvdsinfer::BaseModelParser::~BaseModelParser
virtual ~BaseModelParser()
Definition: nvdsinfer_model_builder.h:57
nvdsinfer::BaseModelParser::m_ModelParams
NvDsInferContextInitParams m_ModelParams
Definition: nvdsinfer_model_builder.h:64
nvdsinfer::ExplicitBuildParams::maxBatchSize
int maxBatchSize
Definition: nvdsinfer_model_builder.h:244
nvdsinfer::BuildParams::LayerDevicePrecision
std::tuple< nvinfer1::DataType, nvinfer1::DeviceType > LayerDevicePrecision
Definition: nvdsinfer_model_builder.h:200
nvdsinfer::OnnxModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:141
nvdsinfer::OnnxModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:142
nvdsinfer::UffModelParser::~UffModelParser
~UffModelParser() override
NvDsInferTensorOrder_kNCHW
@ NvDsInferTensorOrder_kNCHW
Definition: nvdsinfer_context.h:171
nvdsinfer::OnnxModelParser::OnnxModelParser
OnnxModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
Definition: nvdsinfer_model_builder.h:136
nvdsinfer::TrtModelBuilder::buildModel
std::unique_ptr< TrtEngine > buildModel(const NvDsInferContextInitParams &initParams, std::string &suggestedPathName)
nvdsinfer::UniquePtrWDestroy< nvuffparser::IUffParser >
nvdsinfer::TrtEngine::getDlaCore
int getDlaCore() const
Definition: nvdsinfer_model_builder.h:269
nvdsinfer::UffModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:117
nvdsinfer::BaseModelParser
ModelParser base.
Definition: nvdsinfer_model_builder.h:51
nvdsinfer::UffModelParser
Implementation of ModelParser for UFF models derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:99
_NvDsInferContextInitParams
Holds the initialization parameters required for the NvDsInferContext interface.
Definition: nvdsinfer_context.h:233
nvdsinfer::BuildParams::~BuildParams
virtual ~BuildParams()
Definition: nvdsinfer_model_builder.h:211
nvdsinfer::BuildParams::configBuilder
virtual NvDsInferStatus configBuilder(TrtModelBuilder &builder)=0
nvdsinfer::NvDsInferCudaEngineGetFcnDeprecated
decltype(&NvDsInferCudaEngineGet) NvDsInferCudaEngineGetFcnDeprecated
Definition: nvdsinfer_model_builder.h:43
nvdsinfer::TrtEngine::getFullDimsLayersInfo
NvDsInferStatus getFullDimsLayersInfo(int profileIdx, std::vector< NvDsInferBatchDimsLayerInfo > &layersInfo)
nvdsinfer::CustomModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:173
nvdsinfer::UffModelParser::ModelParams::outputNames
std::vector< std::string > outputNames
Definition: nvdsinfer_model_builder.h:108
nvdsinfer::TrtEngine::operator->
nvinfer1::ICudaEngine * operator->()
Definition: nvdsinfer_model_builder.h:285
nvdsinfer::safeStr
const char * safeStr(const char *str)
Definition: nvdsinfer_func_utils.h:80
nvdsinfer::UffModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:116
nvdsinfer::ImplicitBuildParams::inputDims
std::vector< nvinfer1::Dims > inputDims
Definition: nvdsinfer_model_builder.h:222
nvdsinfer_tlt.h
nvdsinfer::BuildParams::layerDevicePrecisions
std::unordered_map< std::string, LayerDevicePrecision > layerDevicePrecisions
Definition: nvdsinfer_model_builder.h:208
nvdsinfer::ExplicitBuildParams
Holds build parameters required for full dimensions network.
Definition: nvdsinfer_model_builder.h:236
nvdsinfer::BuildParams
Holds build parameters common to implicit batch dimension/full dimension networks.
Definition: nvdsinfer_model_builder.h:195
nvdsinfer::ExplicitBuildParams::inputOrder
NvDsInferTensorOrder inputOrder
Definition: nvdsinfer_model_builder.h:245
nvdsinfer::kWorkSpaceSize
static const size_t kWorkSpaceSize
Definition: nvdsinfer_model_builder.h:45
nvdsinfer::UffModelParser::ModelParams::uffFilePath
std::string uffFilePath
Definition: nvdsinfer_model_builder.h:104
nvdsinfer::CustomModelParser::CustomModelParser
CustomModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle)
nvdsinfer::TrtEngine::~TrtEngine
~TrtEngine()
nvdsinfer::OnnxModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::ExplicitBuildParams::minBatchSize
int minBatchSize
Definition: nvdsinfer_model_builder.h:242
nvdsinfer::CustomModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:179
nvdsinfer::TrtModelBuilder::TrtModelBuilder
TrtModelBuilder(int gpuId, nvinfer1::ILogger &logger, const std::shared_ptr< DlLibHandle > &dlHandle=nullptr)
nvdsinfer::CaffeModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:80
nvdsinfer::TrtModelBuilder::setInt8Calibrator
void setInt8Calibrator(std::unique_ptr< nvinfer1::IInt8Calibrator > &&calibrator)
Definition: nvdsinfer_model_builder.h:325
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218
nvdsinfer::UffModelParser::initParser
NvDsInferStatus initParser()
nvdsinfer_func_utils.h