NVIDIA DeepStream SDK API Reference

7.0 Release
nvdsinfer_model_builder.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
13 #ifndef __NVDSINFER_MODEL_BUILDER_H__
14 #define __NVDSINFER_MODEL_BUILDER_H__
15 
16 #include <stdarg.h>
17 #include <algorithm>
18 #include <condition_variable>
19 #include <map>
20 #include <memory>
21 #include <mutex>
22 #include <queue>
23 #include <string>
24 #include <unordered_map>
25 
26 #include <NvCaffeParser.h>
27 #include <NvInfer.h>
28 #include <NvInferRuntime.h>
29 #include <NvOnnxParser.h>
30 
31 #include <nvdsinfer_custom_impl.h>
32 #include "nvdsinfer_func_utils.h"
33 #include "nvdsinfer_tlt.h"
34 
35 /* This file provides APIs for building models from Caffe/UFF/ONNX files. It
36  * also defines an interface where users can provide custom model parsers for
37  * custom networks. A helper class (TrtEngine) written on top of TensorRT's
38  * nvinfer1::ICudaEngine is also defined in this file.
39  *
40  * These interfaces/APIs are used by NvDsInferContextImpl class. */
41 
42 namespace nvdsinfer {
43 
45 
46 static const size_t kWorkSpaceSize = 450 * 1024 * 1024; // 450MB
47 
52 class BaseModelParser : public IModelParser
53 {
54 public:
56  const std::shared_ptr<DlLibHandle>& dllib)
57  : m_ModelParams(params), m_LibHandle(dllib) {}
58  virtual ~BaseModelParser() {}
59  virtual bool isValid() const = 0;
60 
61 private:
62  DISABLE_CLASS_COPY(BaseModelParser);
63 
64 protected:
66  std::shared_ptr<DlLibHandle> m_LibHandle;
67 };
68 
74 {
75 public:
77  const std::shared_ptr<DlLibHandle>& handle = nullptr);
78  ~CaffeModelParser() override;
79  bool isValid() const override { return m_CaffeParser.get(); }
80  const char* getModelName() const override { return m_ModelPath.c_str(); }
81  bool hasFullDimsSupported() const override { return true; }
82 
83  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
84 
85 private:
86  NvDsInferStatus setPluginFactory();
87 
88 private:
89  std::string m_ProtoPath;
90  std::string m_ModelPath;
91  std::vector<std::string> m_OutputLayers;
92  NvDsInferPluginFactoryCaffe m_CaffePluginFactory{nullptr};
93  UniquePtrWDestroy<nvcaffeparser1::ICaffeParser> m_CaffeParser;
94 };
95 
101 {
102 public:
103  struct ModelParams
104  {
105  std::string uffFilePath;
106  nvuffparser::UffInputOrder inputOrder;
107  std::vector<std::string> inputNames;
108  std::vector<nvinfer1::Dims> inputDims;
109  std::vector<std::string> outputNames;
110  };
111 
112 public:
113  UffModelParser(const NvDsInferContextInitParams& initParams,
114  const std::shared_ptr<DlLibHandle>& handle = nullptr);
115  ~UffModelParser() override;
116  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
117  bool isValid() const override { return m_UffParser.get(); }
118  const char* getModelName() const override
119  {
120  return m_ModelParams.uffFilePath.c_str();
121  }
122  bool hasFullDimsSupported() const override { return false; }
123 
124 protected:
128 };
129 
135 {
136 public:
138  const std::shared_ptr<DlLibHandle>& handle = nullptr)
139  : BaseModelParser(initParams, handle),
140  m_ModelName(initParams.onnxFilePath) {}
141  ~OnnxModelParser() override = default;
142  bool isValid() const override { return !m_ModelName.empty(); }
143  const char* getModelName() const override { return m_ModelName.c_str(); }
144  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
145  bool hasFullDimsSupported() const override { return true; }
146 
147 private:
148  std::string m_ModelName;
149 
150 protected:
152 };
153 
162 {
163 public:
165  const std::shared_ptr<DlLibHandle>& handle);
166 
168 
169  bool isValid() const override
170  {
171  return (bool)m_CustomParser;
172  }
173 
174  const char* getModelName() const override
175  {
176  return isValid() ? safeStr(m_CustomParser->getModelName()) : "";
177  }
178 
179  NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
180  bool hasFullDimsSupported() const override
181  {
182  return m_CustomParser->hasFullDimsSupported();
183  }
184 
185 private:
186  std::unique_ptr<IModelParser> m_CustomParser;
187 };
188 
190 class TrtModelBuilder;
191 
197 {
198  using TensorIOFormat =
199  std::tuple<nvinfer1::DataType, nvinfer1::TensorFormats>;
200  using LayerDevicePrecision =
201  std::tuple<nvinfer1::DataType, nvinfer1::DeviceType>;
202 
206  int dlaCore = -1;
207  std::unordered_map<std::string, TensorIOFormat> inputFormats;
208  std::unordered_map<std::string, TensorIOFormat> outputFormats;
209  std::unordered_map<std::string, LayerDevicePrecision> layerDevicePrecisions;
210 
211 public:
212  virtual ~BuildParams(){};
213  virtual NvDsInferStatus configBuilder(TrtModelBuilder& builder) = 0;
214  virtual bool sanityCheck() const;
215 };
216 
221 {
222  int maxBatchSize = 0;
223  std::vector<nvinfer1::Dims> inputDims;
224 
225 private:
226  NvDsInferStatus configBuilder(TrtModelBuilder& builder) override;
227 
228  bool sanityCheck() const override;
229 };
230 
231 using ProfileDims = std::array<nvinfer1::Dims,
232  nvinfer1::EnumMax<nvinfer1::OptProfileSelector>()>;
233 
238 {
 239  // profileSelector: dims without batchSize.
 240  // Each input must have 3 selectors (MIN/OPT/MAX) for profile0;
 241  // multiple profiles are not supported.
242  std::vector<ProfileDims> inputProfileDims;
243  int minBatchSize = 1;
244  int optBatchSize = 1;
245  int maxBatchSize = 1;
247 
248 private:
249  NvDsInferStatus configBuilder(TrtModelBuilder& builder) override;
250  bool sanityCheck() const override;
251 };
252 
257 {
258 public:
260  : m_Engine(std::move(engine)), m_DlaCore(dlaCore) {}
261 
263  const SharedPtrWDestroy<nvinfer1::IRuntime>& runtime, int dlaCore = -1,
264  const std::shared_ptr<DlLibHandle>& dlHandle = nullptr,
265  nvinfer1::IPluginFactory* pluginFactory = nullptr);
266 
267  ~TrtEngine();
268 
269  bool hasDla() const { return m_DlaCore >= 0; }
270  int getDlaCore() const { return m_DlaCore; }
271 
273  std::vector<NvDsInferBatchDimsLayerInfo>& layersInfo);
275  int profileIdx, std::vector<NvDsInferBatchDimsLayerInfo>& layersInfo);
277 
278  void printEngineInfo();
279 
280  nvinfer1::ICudaEngine& engine()
281  {
282  assert(m_Engine);
283  return *m_Engine;
284  }
285 
286  nvinfer1::ICudaEngine* operator->()
287  {
288  assert(m_Engine);
289  return m_Engine.get();
290  }
291 
292 private:
293  DISABLE_CLASS_COPY(TrtEngine);
294 
297  std::shared_ptr<DlLibHandle> m_DlHandle;
298  nvinfer1::IPluginFactory* m_RuntimePluginFactory = nullptr;
299  int m_DlaCore = -1;
300 
301  friend bool ::NvDsInferCudaEngineGetFromTltModel( nvinfer1::IBuilder * const builder,
302  nvinfer1::IBuilderConfig * const builderConfig,
303  const NvDsInferContextInitParams * const initParams,
304  nvinfer1::DataType dataType,
305  nvinfer1::ICudaEngine *& cudaEngine);
306 };
307 
317 {
318 public:
319  TrtModelBuilder(int gpuId, nvinfer1::ILogger& logger,
320  const std::shared_ptr<DlLibHandle>& dlHandle = nullptr);
321 
323  m_Parser.reset();
324  }
325 
326  void setInt8Calibrator(std::unique_ptr<nvinfer1::IInt8Calibrator>&& calibrator)
327  {
328  m_Int8Calibrator = std::move(calibrator);
329  }
330 
331  /* Populate INetworkDefinition by parsing the model, build the engine and
332  * return it as TrtEngine instance. Also, returns a suggested path for
333  * writing the serialized engine to.
334  *
335  * Suggested path has the following format:
336  * suggested path = [modelName]_b[#batchSize]_[#device]_[#dataType].engine
337  */
338  std::unique_ptr<TrtEngine> buildModel(
339  const NvDsInferContextInitParams& initParams,
340  std::string& suggestedPathName);
341 
342  /* Builds the engine from an already populated INetworkDefinition based on
343  * the BuildParams passed to it. Returns the engine in the form of TrtEngine
344  * instance.
345  */
346  std::unique_ptr<TrtEngine> buildEngine(
347  nvinfer1::INetworkDefinition& network, BuildParams& options);
348 
349  /* Serialize engine to file
350  */
352  const std::string& path, nvinfer1::ICudaEngine& engine);
353 
354  /* Deserialize engine from file
355  */
356  std::unique_ptr<TrtEngine> deserializeEngine(
357  const std::string& path, int dla = -1);
358 
359 private:
360  /* Parses a model file using an IModelParser implementation for
361  * Caffe/UFF/ONNX formats or from custom IModelParser implementation.
362  */
363  NvDsInferStatus buildNetwork(const NvDsInferContextInitParams& initParams);
364 
 365  /* Build cudaEngine from the network; be careful about implicitBatch and
 366  * explicitBatch.
367  */
368  std::unique_ptr<TrtEngine> buildEngine();
369 
 370  /* Calls a custom library's implementation of the NvDsInferCudaEngineGet function
371  * to get a built ICudaEngine. */
372  std::unique_ptr<TrtEngine> getCudaEngineFromCustomLib(
373  NvDsInferCudaEngineGetFcnDeprecated cudaEngineGetDeprecatedFcn,
374  NvDsInferEngineCreateCustomFunc cudaEngineGetFcn,
375  const NvDsInferContextInitParams& initParams,
376  NvDsInferNetworkMode &networkMode);
377 
378 
379  /* config builder options */
380  NvDsInferStatus configCommonOptions(BuildParams& params);
381  NvDsInferStatus configImplicitOptions(ImplicitBuildParams& params);
382  NvDsInferStatus configExplicitOptions(ExplicitBuildParams& params);
383 
384  std::unique_ptr<BuildParams> createImplicitParams(
385  const NvDsInferContextInitParams& initParams);
386  std::unique_ptr<BuildParams> createDynamicParams(
387  const NvDsInferContextInitParams& initParams);
388  void initCommonParams(
389  BuildParams& params, const NvDsInferContextInitParams& initParams);
390 
391  DISABLE_CLASS_COPY(TrtModelBuilder);
392 
393  int m_GpuId = 0;
394  nvinfer1::ILogger& m_Logger;
395  std::shared_ptr<DlLibHandle> m_DlLib;
396  std::shared_ptr<BaseModelParser> m_Parser;
397  std::unique_ptr<BuildParams> m_Options;
401  std::shared_ptr<nvinfer1::IInt8Calibrator> m_Int8Calibrator;
402 
403  friend class BuildParams;
404  friend class ImplicitBuildParams;
405  friend class ExplicitBuildParams;
406 
407  friend bool ::NvDsInferCudaEngineGetFromTltModel( nvinfer1::IBuilder * const builder,
408  nvinfer1::IBuilderConfig * const builderConfig,
409  const NvDsInferContextInitParams * const initParams,
410  nvinfer1::DataType dataType,
411  nvinfer1::ICudaEngine *& cudaEngine);
412 };
413 
414 } // end of namespace nvdsinfer
415 
416 #endif
nvdsinfer::UffModelParser::ModelParams::inputNames
std::vector< std::string > inputNames
Definition: nvdsinfer_model_builder.h:107
nvdsinfer::CaffeModelParser::~CaffeModelParser
~CaffeModelParser() override
nvdsinfer::UffModelParser::UffModelParser
UffModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
nvdsinfer::OnnxModelParser::~OnnxModelParser
~OnnxModelParser() override=default
nvdsinfer::OnnxModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:145
nvdsinfer::BuildParams::inputFormats
std::unordered_map< std::string, TensorIOFormat > inputFormats
Definition: nvdsinfer_model_builder.h:207
nvdsinfer::CustomModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:169
nvdsinfer::CaffeModelParser
Implementation of ModelParser for caffemodels derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:73
nvdsinfer::TrtEngine::getLayerInfo
NvDsInferStatus getLayerInfo(int idx, NvDsInferLayerInfo &layer)
nvdsinfer::TrtEngine::getImplicitLayersInfo
NvDsInferStatus getImplicitLayersInfo(std::vector< NvDsInferBatchDimsLayerInfo > &layersInfo)
nvdsinfer::BuildParams::sanityCheck
virtual bool sanityCheck() const
NvDsInferTensorOrder
NvDsInferTensorOrder
Defines UFF input layer orders.
Definition: nvdsinfer_context.h:176
nvdsinfer::ImplicitBuildParams
Holds build parameters required for implicit batch dimension network.
Definition: nvdsinfer_model_builder.h:220
nvdsinfer::CustomModelParser
Implementation of ModelParser for custom models.
Definition: nvdsinfer_model_builder.h:161
nvdsinfer::TrtEngine
Helper class written on top of nvinfer1::ICudaEngine.
Definition: nvdsinfer_model_builder.h:256
nvdsinfer::UffModelParser::ModelParams
Definition: nvdsinfer_model_builder.h:103
nvdsinfer::UffModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:122
nvdsinfer::UffModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::BaseModelParser::m_LibHandle
std::shared_ptr< DlLibHandle > m_LibHandle
Definition: nvdsinfer_model_builder.h:66
nvdsinfer::UffModelParser::m_ModelParams
ModelParams m_ModelParams
Definition: nvdsinfer_model_builder.h:126
nvdsinfer::CustomModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::ExplicitBuildParams::inputProfileDims
std::vector< ProfileDims > inputProfileDims
Definition: nvdsinfer_model_builder.h:242
nvdsinfer::UffModelParser::ModelParams::inputDims
std::vector< nvinfer1::Dims > inputDims
Definition: nvdsinfer_model_builder.h:108
ds3d::DataType
DataType
Definition: idatatype.h:77
nvdsinfer::BuildParams::workspaceSize
size_t workspaceSize
Definition: nvdsinfer_model_builder.h:203
nvdsinfer::OnnxModelParser
Implementation of ModelParser for ONNX models derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:134
NvDsInferCudaEngineGetFromTltModel
bool NvDsInferCudaEngineGetFromTltModel(nvinfer1::IBuilder *const builder, nvinfer1::IBuilderConfig *const builderConfig, const NvDsInferContextInitParams *const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine)
Decodes and creates a CUDA engine file from a TLT encoded model.
nvdsinfer::ImplicitBuildParams::maxBatchSize
int maxBatchSize
Definition: nvdsinfer_model_builder.h:222
nvdsinfer::TrtModelBuilder
Helper class to build models and generate the TensorRT ICudaEngine required for inference.
Definition: nvdsinfer_model_builder.h:316
nvdsinfer::TrtEngine::printEngineInfo
void printEngineInfo()
nvdsinfer::BuildParams::outputFormats
std::unordered_map< std::string, TensorIOFormat > outputFormats
Definition: nvdsinfer_model_builder.h:208
nvdsinfer::BaseModelParser::BaseModelParser
BaseModelParser(const NvDsInferContextInitParams &params, const std::shared_ptr< DlLibHandle > &dllib)
Definition: nvdsinfer_model_builder.h:55
nvdsinfer::BuildParams::int8CalibrationFilePath
std::string int8CalibrationFilePath
Definition: nvdsinfer_model_builder.h:205
nvdsinfer::CaffeModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:80
nvdsinfer::CustomModelParser::~CustomModelParser
~CustomModelParser()
Definition: nvdsinfer_model_builder.h:167
nvdsinfer::BuildParams::dlaCore
int dlaCore
Definition: nvdsinfer_model_builder.h:206
nvdsinfer::UffModelParser::ModelParams::inputOrder
nvuffparser::UffInputOrder inputOrder
Definition: nvdsinfer_model_builder.h:106
nvdsinfer::TrtModelBuilder::serializeEngine
NvDsInferStatus serializeEngine(const std::string &path, nvinfer1::ICudaEngine &engine)
nvdsinfer::BuildParams::networkMode
NvDsInferNetworkMode networkMode
Definition: nvdsinfer_model_builder.h:204
nvdsinfer::TrtEngine::TrtEngine
TrtEngine(UniquePtrWDestroy< nvinfer1::ICudaEngine > &&engine, int dlaCore=-1)
Definition: nvdsinfer_model_builder.h:259
NvDsInferCudaEngineGet
bool NvDsInferCudaEngineGet(nvinfer1::IBuilder *builder, NvDsInferContextInitParams *initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine) __attribute__((deprecated("Use 'engine-create-func-name' config parameter instead")))
The NvDsInferCudaEngineGet interface has been deprecated and has been replaced by NvDsInferEngineCreateCustomFunc.
NvDsInferNetworkMode
NvDsInferNetworkMode
Defines internal data formats used by the inference engine.
Definition: nvdsinfer_context.h:120
nvdsinfer
Definition: nvdsinfer_model_builder.h:42
nvdsinfer::BuildParams::TensorIOFormat
std::tuple< nvinfer1::DataType, nvinfer1::TensorFormats > TensorIOFormat
Definition: nvdsinfer_model_builder.h:199
nvdsinfer::OnnxModelParser::m_OnnxParser
UniquePtrWDestroy< nvonnxparser::IParser > m_OnnxParser
Definition: nvdsinfer_model_builder.h:151
nvdsinfer::TrtModelBuilder::~TrtModelBuilder
~TrtModelBuilder()
Definition: nvdsinfer_model_builder.h:322
NvDsInferNetworkMode_FP32
@ NvDsInferNetworkMode_FP32
Definition: nvdsinfer_context.h:122
nvdsinfer::UffModelParser::m_UffParser
UniquePtrWDestroy< nvuffparser::IUffParser > m_UffParser
Definition: nvdsinfer_model_builder.h:127
nvdsinfer::CaffeModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::TrtEngine::engine
nvinfer1::ICudaEngine & engine()
Definition: nvdsinfer_model_builder.h:280
nvdsinfer::ExplicitBuildParams::optBatchSize
int optBatchSize
Definition: nvdsinfer_model_builder.h:244
NvDsInferEngineCreateCustomFunc
bool(* NvDsInferEngineCreateCustomFunc)(nvinfer1::IBuilder *const builder, nvinfer1::IBuilderConfig *const builderConfig, const NvDsInferContextInitParams *const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine *&cudaEngine)
Type definition for functions that build and return a CudaEngine for custom models.
Definition: nvdsinfer_custom_impl.h:374
nvdsinfer::TrtEngine::hasDla
bool hasDla() const
Definition: nvdsinfer_model_builder.h:269
nvdsinfer::CaffeModelParser::CaffeModelParser
CaffeModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
nvdsinfer::BaseModelParser::isValid
virtual bool isValid() const =0
NvDsInferLayerInfo
Holds information about one layer in the model.
Definition: nvdsinfer.h:87
nvdsinfer::SharedPtrWDestroy< nvinfer1::IRuntime >
nvdsinfer_custom_impl.h
nvdsinfer::CaffeModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:79
nvdsinfer::TrtModelBuilder::deserializeEngine
std::unique_ptr< TrtEngine > deserializeEngine(const std::string &path, int dla=-1)
nvdsinfer::ProfileDims
std::array< nvinfer1::Dims, nvinfer1::EnumMax< nvinfer1::OptProfileSelector >()> ProfileDims
Definition: nvdsinfer_model_builder.h:232
NvDsInferPluginFactoryCaffe
Holds a pointer to a heap-allocated Plugin Factory object required during Caffe model parsing.
Definition: nvdsinfer_custom_impl.h:407
nvdsinfer::BaseModelParser::~BaseModelParser
virtual ~BaseModelParser()
Definition: nvdsinfer_model_builder.h:58
nvdsinfer::BaseModelParser::m_ModelParams
NvDsInferContextInitParams m_ModelParams
Definition: nvdsinfer_model_builder.h:65
nvdsinfer::ExplicitBuildParams::maxBatchSize
int maxBatchSize
Definition: nvdsinfer_model_builder.h:245
nvdsinfer::BuildParams::LayerDevicePrecision
std::tuple< nvinfer1::DataType, nvinfer1::DeviceType > LayerDevicePrecision
Definition: nvdsinfer_model_builder.h:201
nvdsinfer::OnnxModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:142
nvdsinfer::OnnxModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:143
nvdsinfer::UffModelParser::~UffModelParser
~UffModelParser() override
NvDsInferTensorOrder_kNCHW
@ NvDsInferTensorOrder_kNCHW
Definition: nvdsinfer_context.h:177
nvdsinfer::OnnxModelParser::OnnxModelParser
OnnxModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle=nullptr)
Definition: nvdsinfer_model_builder.h:137
nvdsinfer::TrtModelBuilder::buildModel
std::unique_ptr< TrtEngine > buildModel(const NvDsInferContextInitParams &initParams, std::string &suggestedPathName)
nvdsinfer::UniquePtrWDestroy< nvuffparser::IUffParser >
nvdsinfer::TrtEngine::getDlaCore
int getDlaCore() const
Definition: nvdsinfer_model_builder.h:270
nvdsinfer::UffModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:118
nvdsinfer::BaseModelParser
ModelParser base.
Definition: nvdsinfer_model_builder.h:52
nvdsinfer::UffModelParser
Implementation of ModelParser for UFF models derived from BaseModelParser.
Definition: nvdsinfer_model_builder.h:100
_NvDsInferContextInitParams
Holds the initialization parameters required for the NvDsInferContext interface.
Definition: nvdsinfer_context.h:239
nvdsinfer::BuildParams::~BuildParams
virtual ~BuildParams()
Definition: nvdsinfer_model_builder.h:212
nvdsinfer::BuildParams::configBuilder
virtual NvDsInferStatus configBuilder(TrtModelBuilder &builder)=0
nvdsinfer::NvDsInferCudaEngineGetFcnDeprecated
decltype(&NvDsInferCudaEngineGet) NvDsInferCudaEngineGetFcnDeprecated
Definition: nvdsinfer_model_builder.h:44
nvdsinfer::TrtEngine::getFullDimsLayersInfo
NvDsInferStatus getFullDimsLayersInfo(int profileIdx, std::vector< NvDsInferBatchDimsLayerInfo > &layersInfo)
nvdsinfer::CustomModelParser::getModelName
const char * getModelName() const override
Definition: nvdsinfer_model_builder.h:174
nvdsinfer::UffModelParser::ModelParams::outputNames
std::vector< std::string > outputNames
Definition: nvdsinfer_model_builder.h:109
nvdsinfer::TrtEngine::operator->
nvinfer1::ICudaEngine * operator->()
Definition: nvdsinfer_model_builder.h:286
nvdsinfer::safeStr
const char * safeStr(const char *str)
Definition: nvdsinfer_func_utils.h:81
nvdsinfer::UffModelParser::isValid
bool isValid() const override
Definition: nvdsinfer_model_builder.h:117
nvdsinfer::ImplicitBuildParams::inputDims
std::vector< nvinfer1::Dims > inputDims
Definition: nvdsinfer_model_builder.h:223
nvdsinfer_tlt.h
nvdsinfer::BuildParams::layerDevicePrecisions
std::unordered_map< std::string, LayerDevicePrecision > layerDevicePrecisions
Definition: nvdsinfer_model_builder.h:209
nvdsinfer::ExplicitBuildParams
Holds build parameters required for full dimensions network.
Definition: nvdsinfer_model_builder.h:237
nvdsinfer::BuildParams
Holds build parameters common to implicit batch dimension/full dimension networks.
Definition: nvdsinfer_model_builder.h:196
nvdsinfer::ExplicitBuildParams::inputOrder
NvDsInferTensorOrder inputOrder
Definition: nvdsinfer_model_builder.h:246
nvdsinfer::kWorkSpaceSize
static const size_t kWorkSpaceSize
Definition: nvdsinfer_model_builder.h:46
nvdsinfer::UffModelParser::ModelParams::uffFilePath
std::string uffFilePath
Definition: nvdsinfer_model_builder.h:105
nvdsinfer::CustomModelParser::CustomModelParser
CustomModelParser(const NvDsInferContextInitParams &initParams, const std::shared_ptr< DlLibHandle > &handle)
nvdsinfer::TrtEngine::~TrtEngine
~TrtEngine()
nvdsinfer::OnnxModelParser::parseModel
NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
nvdsinfer::ExplicitBuildParams::minBatchSize
int minBatchSize
Definition: nvdsinfer_model_builder.h:243
nvdsinfer::CustomModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:180
nvdsinfer::TrtModelBuilder::TrtModelBuilder
TrtModelBuilder(int gpuId, nvinfer1::ILogger &logger, const std::shared_ptr< DlLibHandle > &dlHandle=nullptr)
nvdsinfer::CaffeModelParser::hasFullDimsSupported
bool hasFullDimsSupported() const override
Definition: nvdsinfer_model_builder.h:81
nvdsinfer::TrtModelBuilder::setInt8Calibrator
void setInt8Calibrator(std::unique_ptr< nvinfer1::IInt8Calibrator > &&calibrator)
Definition: nvdsinfer_model_builder.h:326
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218
nvdsinfer::UffModelParser::initParser
NvDsInferStatus initParser()
nvdsinfer_func_utils.h