NVIDIA DeepStream SDK API Reference

6.1.1 Release

nvdsinfer_custom_impl.h
/*
 * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#ifndef _NVDSINFER_CUSTOM_IMPL_H_
#define _NVDSINFER_CUSTOM_IMPL_H_

#include <string>
#include <vector>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "NvCaffeParser.h"
#include "NvUffParser.h"
#pragma GCC diagnostic pop

#include "nvdsinfer.h"

/*
 * C++ interfaces
 */
#ifdef __cplusplus

/**
 * Interface for parsing a custom model and filling a TensorRT network
 * definition from it.
 */
class IModelParser
{
public:
    IModelParser() = default;

    virtual ~IModelParser() = default;

    /** Parses the custom network model into the TensorRT network definition. */
    virtual NvDsInferStatus parseModel(
        nvinfer1::INetworkDefinition& network) = 0;

    /** Returns true if the parser supports full-dimensions networks. */
    virtual bool hasFullDimsSupported() const = 0;

    /** Returns the name of the model being parsed. */
    virtual const char* getModelName() const = 0;
};
#endif

/*
 * C interfaces
 */

#ifdef __cplusplus
extern "C"
{
#endif

/** Holds the detection parameters required for parsing objects. */
typedef struct
{
  /** Holds the number of classes requested to be parsed, starting with
   * class ID 0. */
  unsigned int numClassesConfigured;
  /** Holds a per-class vector of detection confidence thresholds to be
   * applied prior to the clustering operation. */
  std::vector<float> perClassPreclusterThreshold;
  /* Per-class threshold to be applied after the clustering operation. */
  std::vector<float> perClassPostclusterThreshold;

  /* Alias kept for backward compatibility; prefer perClassPreclusterThreshold. */
  std::vector<float> &perClassThreshold = perClassPreclusterThreshold;
} NvDsInferParseDetectionParams;

/** Type definition for the custom bounding box parsing function. */
typedef bool (* NvDsInferParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferObjectDetectionInfo> &objectList);

/** Validates that a custom parsing function has the prototype of
 * NvDsInferParseCustomFunc; place it after the function definition. */
#define CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
           NvDsInferNetworkInfo const &networkInfo, \
           NvDsInferParseDetectionParams const &detectionParams, \
           std::vector<NvDsInferObjectDetectionInfo> &objectList);

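The sketch below is not part of the header; it shows the general shape of a custom bounding-box parsing function together with its compile-time prototype check. The function name NvDsInferParseCustomExample, the single output layer, its assumed row layout of (classId, confidence, left, top, width, height), and the fixed detection count are illustrative assumptions only; a real parser must decode the actual output tensors of the model. Such a function lives in the user's custom shared library and is selected through the Gst-nvinfer parse-bbox-func-name and custom-lib-path configuration keys.

    /* Illustrative sketch only -- not part of the SDK header. */
    #include <vector>
    #include "nvdsinfer_custom_impl.h"

    extern "C" bool NvDsInferParseCustomExample(
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferObjectDetectionInfo> &objectList)
    {
        (void) networkInfo; /* unused in this sketch */
        if (outputLayersInfo.empty() || !outputLayersInfo[0].buffer)
            return false;

        /* Assumed layout: numDetections rows of
         * (classId, confidence, left, top, width, height). */
        const float *data = static_cast<const float *>(outputLayersInfo[0].buffer);
        const unsigned int numDetections = 100; /* assumed fixed row count */

        for (unsigned int i = 0; i < numDetections; i++) {
            const float *row = data + i * 6;
            unsigned int classId = static_cast<unsigned int>(row[0]);
            float confidence = row[1];

            /* Emit only configured classes that pass the per-class
             * pre-cluster threshold. */
            if (classId >= detectionParams.numClassesConfigured)
                continue;
            if (confidence < detectionParams.perClassPreclusterThreshold[classId])
                continue;

            NvDsInferObjectDetectionInfo obj;
            obj.classId = classId;
            obj.detectionConfidence = confidence;
            obj.left = row[2];
            obj.top = row[3];
            obj.width = row[4];
            obj.height = row[5];
            objectList.push_back(obj);
        }
        return true;
    }

    /* Compile-time check that the prototype matches NvDsInferParseCustomFunc. */
    CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomExample);

The instance-mask variant declared next (NvDsInferInstanceMaskParseCustomFunc) follows the same pattern, additionally filling the mask-related fields of NvDsInferInstanceMaskInfo.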
/** Type definition for the custom bounding box and instance mask parsing
 * function. */
typedef bool (* NvDsInferInstanceMaskParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferInstanceMaskInfo> &objectList);

/** Validates that a custom instance-mask parsing function has the prototype of
 * NvDsInferInstanceMaskParseCustomFunc; place it after the function definition. */
#define CHECK_CUSTOM_INSTANCE_MASK_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferInstanceMaskParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
           NvDsInferNetworkInfo const &networkInfo, \
           NvDsInferParseDetectionParams const &detectionParams, \
           std::vector<NvDsInferInstanceMaskInfo> &objectList);

/** Type definition for the custom classifier output parsing function. */
typedef bool (* NvDsInferClassiferParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        float classifierThreshold,
        std::vector<NvDsInferAttribute> &attrList,
        std::string &descString);

/** Validates that a custom classifier parsing function has the prototype of
 * NvDsInferClassiferParseCustomFunc; place it after the function definition. */
#define CHECK_CUSTOM_CLASSIFIER_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferClassiferParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
           NvDsInferNetworkInfo const &networkInfo, \
           float classifierThreshold, \
           std::vector<NvDsInferAttribute> &attrList, \
           std::string &descString);

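A comparable sketch (again, not part of the header) for a custom classifier output parser, selected via the Gst-nvinfer parse-classifier-func-name key. The single softmax output layer and the three-entry label table are assumptions; the attribute label is duplicated with strdup on the assumption that, as in the SDK sample parsers, the caller releases it.

    /* Illustrative sketch only -- not part of the SDK header. */
    #include <cstring>
    #include <string>
    #include <vector>
    #include "nvdsinfer_custom_impl.h"

    extern "C" bool NvDsInferClassiferParseCustomExample(
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        float classifierThreshold,
        std::vector<NvDsInferAttribute> &attrList,
        std::string &descString)
    {
        (void) networkInfo; /* unused in this sketch */
        static const char *kLabels[] = { "red", "green", "blue" }; /* assumed labels */
        const unsigned int kNumLabels = 3;

        if (outputLayersInfo.empty() || !outputLayersInfo[0].buffer)
            return false;

        const float *probs = static_cast<const float *>(outputLayersInfo[0].buffer);
        unsigned int numClasses = outputLayersInfo[0].inferDims.numElements;
        if (numClasses > kNumLabels)
            numClasses = kNumLabels;

        /* Pick the highest-probability class. */
        unsigned int best = 0;
        for (unsigned int c = 1; c < numClasses; c++)
            if (probs[c] > probs[best])
                best = c;

        if (numClasses == 0 || probs[best] < classifierThreshold)
            return true; /* nothing above threshold; no attribute emitted */

        NvDsInferAttribute attr;
        attr.attributeIndex = 0;                     /* single attribute in this sketch */
        attr.attributeValue = best;
        attr.attributeConfidence = probs[best];
        attr.attributeLabel = strdup(kLabels[best]); /* assumed to be freed by the caller */
        attrList.push_back(attr);
        descString = kLabels[best];
        return true;
    }

    CHECK_CUSTOM_CLASSIFIER_PARSE_FUNC_PROTOTYPE(NvDsInferClassiferParseCustomExample);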
/** Forward declaration of the structure holding the initialization parameters
 * required for the NvDsInferContext interface. */
typedef struct _NvDsInferContextInitParams NvDsInferContextInitParams;

/** Type definition for functions that build and return a CudaEngine for
 * custom models. */
typedef bool (* NvDsInferEngineCreateCustomFunc) (
        nvinfer1::IBuilder * const builder, nvinfer1::IBuilderConfig * const builderConfig,
        const NvDsInferContextInitParams * const initParams,
        nvinfer1::DataType dataType,
        nvinfer1::ICudaEngine *& cudaEngine);

/** Validates that a custom engine-creation function has the prototype of
 * NvDsInferEngineCreateCustomFunc; place it after the function definition. */
#define CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE(customEngineCreateFunc) \
    static void checkFunc_ ## customEngineCreateFunc (NvDsInferEngineCreateCustomFunc = customEngineCreateFunc) \
        { checkFunc_ ## customEngineCreateFunc(); }; \
    extern "C" bool customEngineCreateFunc ( \
        nvinfer1::IBuilder * const builder, \
        nvinfer1::IBuilderConfig * const builderConfig, \
        const NvDsInferContextInitParams * const initParams, \
        nvinfer1::DataType dataType, \
        nvinfer1::ICudaEngine *& cudaEngine);

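The sketch below (not part of the header) illustrates one possible engine-creation function of this type that builds a CUDA engine from an ONNX model with TensorRT's nvonnxparser. The fixed "model.onnx" path, the FP16-only precision handling, and the no-op logger are assumptions made for brevity; a real implementation would typically take the model path and precision from initParams. The function is registered through the Gst-nvinfer engine-create-func-name key, as noted in the deprecation message of NvDsInferCudaEngineGet() further below.

    /* Illustrative sketch only -- not part of the SDK header. */
    #include "NvOnnxParser.h"            /* TensorRT ONNX parser, assumed available */
    #include "nvdsinfer_custom_impl.h"

    extern "C" bool NvDsInferEngineCreateCustomExample(
        nvinfer1::IBuilder * const builder,
        nvinfer1::IBuilderConfig * const builderConfig,
        const NvDsInferContextInitParams * const initParams,
        nvinfer1::DataType dataType,
        nvinfer1::ICudaEngine *& cudaEngine)
    {
        (void) initParams; /* a real implementation would read the model path from here */

        /* Minimal no-op logger required by the ONNX parser. */
        static class : public nvinfer1::ILogger {
            void log(Severity, const char *) noexcept override {}
        } gLogger;

        const auto flags = 1U << static_cast<unsigned>(
            nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
        nvinfer1::INetworkDefinition *network = builder->createNetworkV2(flags);
        nvonnxparser::IParser *parser = nvonnxparser::createParser(*network, gLogger);

        /* "model.onnx" is a placeholder path. */
        bool ok = parser->parseFromFile("model.onnx",
            static_cast<int>(nvinfer1::ILogger::Severity::kWARNING));

        if (ok) {
            if (dataType == nvinfer1::DataType::kHALF)
                builderConfig->setFlag(nvinfer1::BuilderFlag::kFP16);
            cudaEngine = builder->buildEngineWithConfig(*network, *builderConfig);
            ok = (cudaEngine != nullptr);
        }

        parser->destroy();
        network->destroy();
        return ok;
    }

    CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE(NvDsInferEngineCreateCustomExample);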
/** Specifies the type of the Plugin Factory. */
typedef enum
{
  /** Specifies nvcaffeparser1::IPluginFactoryV2. */
  PLUGIN_FACTORY_V2 = 2
} NvDsInferPluginFactoryType;

/** Holds a pointer to a heap-allocated Plugin Factory object required during
 * Caffe model parsing. */
typedef union
{
  nvcaffeparser1::IPluginFactoryV2 *pluginFactoryV2;
} NvDsInferPluginFactoryCaffe;

/** Gets a new instance of a Plugin Factory interface to be used during
 * parsing of Caffe models. */
bool NvDsInferPluginFactoryCaffeGet (NvDsInferPluginFactoryCaffe &pluginFactory,
        NvDsInferPluginFactoryType &type);

/** Destroys a Plugin Factory instance created by
 * NvDsInferPluginFactoryCaffeGet(). */
void NvDsInferPluginFactoryCaffeDestroy (NvDsInferPluginFactoryCaffe &pluginFactory);

/** Returns a new instance of a Plugin Factory interface to be used during
 * deserialization of CUDA engines. */
bool NvDsInferPluginFactoryRuntimeGet (nvinfer1::IPluginFactory *& pluginFactory);

/** Destroys a Plugin Factory instance created by
 * NvDsInferPluginFactoryRuntimeGet(). */
void NvDsInferPluginFactoryRuntimeDestroy (nvinfer1::IPluginFactory * pluginFactory);

/** Initializes the input layers for inference. */
bool NvDsInferInitializeInputLayers (std::vector<NvDsInferLayerInfo> const &inputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        unsigned int maxBatchSize);

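A minimal sketch (not part of the header) of NvDsInferInitializeInputLayers() that zero-fills the non-image input layers. It assumes the buffers passed here are host-writable and hold FLOAT data, and it touches only a single batch entry per layer; the exact buffer sizing and memory contract, including how maxBatchSize is to be honored, should be confirmed against the DeepStream documentation for your release.

    /* Illustrative sketch only -- not part of the SDK header. */
    #include <cstring>
    #include <vector>
    #include "nvdsinfer_custom_impl.h"

    extern "C" bool NvDsInferInitializeInputLayers(
        std::vector<NvDsInferLayerInfo> const &inputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        unsigned int maxBatchSize)
    {
        (void) networkInfo;
        (void) maxBatchSize; /* a full implementation may need to initialize
                                data for every batch entry up to this count */

        for (const NvDsInferLayerInfo &layer : inputLayersInfo) {
            if (!layer.buffer)
                return false;
            if (layer.dataType != FLOAT)
                continue; /* only FLOAT handled in this sketch */
            /* Zero one batch entry's worth of elements (assumed host memory). */
            std::memset(layer.buffer, 0,
                (size_t) layer.inferDims.numElements * sizeof(float));
        }
        return true;
    }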
/** Deprecated: the NvDsInferCudaEngineGet interface has been replaced by
 * NvDsInferEngineCreateCustomFunc. */
bool NvDsInferCudaEngineGet(nvinfer1::IBuilder *builder,
        NvDsInferContextInitParams *initParams,
        nvinfer1::DataType dataType,
        nvinfer1::ICudaEngine *& cudaEngine)
    __attribute__((deprecated("Use 'engine-create-func-name' config parameter instead")));

/** Creates a customized neural network parser for user-defined models. */
IModelParser* NvDsInferCreateModelParser(
    const NvDsInferContextInitParams* initParams);

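Finally, a skeleton (not part of the header) of the C++ route: a custom IModelParser implementation returned from NvDsInferCreateModelParser(). The class name SampleModelParser, the returned model name, and the empty parseModel() body are placeholders; a real parser would populate the TensorRT INetworkDefinition from the user's model format, typically using paths carried in initParams.

    /* Illustrative sketch only -- not part of the SDK header. */
    #include "nvdsinfer_custom_impl.h"

    class SampleModelParser : public IModelParser
    {
    public:
        explicit SampleModelParser(const NvDsInferContextInitParams *initParams)
            : m_InitParams(initParams) {}

        NvDsInferStatus parseModel(nvinfer1::INetworkDefinition &network) override
        {
            /* A real parser would add the model's layers and weights to
             * 'network' through the TensorRT INetworkDefinition API here. */
            (void) network;
            return NVDSINFER_SUCCESS;
        }

        bool hasFullDimsSupported() const override { return true; }

        const char *getModelName() const override { return "sample-custom-model"; }

    private:
        const NvDsInferContextInitParams *m_InitParams;
    };

    /* Factory entry point resolved from the custom library by the SDK. */
    extern "C" IModelParser *NvDsInferCreateModelParser(
        const NvDsInferContextInitParams *initParams)
    {
        return new SampleModelParser(initParams);
    }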
#ifdef __cplusplus
}
#endif

#endif