NVIDIA DeepStream SDK API Reference

7.0 Release
nvdsinfer_context_impl.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
13 #ifndef __NVDSINFER_CONTEXT_IMPL_H__
14 #define __NVDSINFER_CONTEXT_IMPL_H__
15 
16 #include <stdarg.h>
17 #include <condition_variable>
18 #include <functional>
19 #include <list>
20 #include <memory>
21 #include <mutex>
22 #include <queue>
23 #include <iostream>
24 #include <fstream>
25 
26 #include <NvCaffeParser.h>
27 #include <NvInfer.h>
28 #include <cuda_runtime_api.h>
29 
30 #pragma GCC diagnostic push
31 #if __GNUC__ >= 8
32 #pragma GCC diagnostic ignored "-Wclass-memaccess"
33 #endif
34 #ifdef WITH_OPENCV
35 #include <opencv2/objdetect/objdetect.hpp>
36 #endif
37 #pragma GCC diagnostic pop
38 
39 #include <nvdsinfer_context.h>
40 #include <nvdsinfer_custom_impl.h>
41 #include <nvdsinfer_utils.h>
42 #include <nvdsinfer_logger.h>
43 
44 #include "nvdsinfer_backend.h"
45 
46 namespace nvdsinfer {
47 
48 using NvDsInferLoggingFunc =
49  std::function<void(NvDsInferLogLevel, const char* msg)>;
50 
54 typedef struct
55 {
56  std::vector<void*> m_DeviceBuffers;
57  std::vector<std::unique_ptr<CudaHostBuffer>> m_HostBuffers;
58 
59  std::vector<std::unique_ptr<CudaDeviceBuffer>> m_OutputDeviceBuffers;
60 
61  unsigned int m_BatchSize = 0;
62  std::unique_ptr<CudaEvent> m_OutputCopyDoneEvent = nullptr;
63  bool m_BuffersWithContext = true;
64 
65 } NvDsInferBatch;
66 
71 {
72 public:
74  const NvDsInferBatchDimsLayerInfo& layerInfo, int id = 0);
75  virtual ~InferPreprocessor() = default;
76 
78  {
79  m_LoggingFunc = func;
80  }
81  bool setScaleOffsets(float scale, const std::vector<float>& offsets = {});
82  bool setMeanFile(const std::string& file);
83  bool setInputOrder(const NvDsInferTensorOrder order);
84 
87 
89  void* devBuf, CudaStream& mainStream, CudaEvent* waitingEvent);
90 
91 private:
92  NvDsInferStatus readMeanImageFile();
93  DISABLE_CLASS_COPY(InferPreprocessor);
94 
95 private:
96  int m_UniqueID = 0;
97  NvDsInferLoggingFunc m_LoggingFunc;
98 
99  NvDsInferNetworkInfo m_NetworkInfo = {0};
101  NvDsInferFormat m_NetworkInputFormat = NvDsInferFormat_RGB;
103  NvDsInferBatchDimsLayerInfo m_NetworkInputLayer;
104  float m_Scale = 1.0f;
105  std::vector<float> m_ChannelMeans; // same as channels
106  std::string m_MeanFile;
107 
108  std::unique_ptr<CudaStream> m_PreProcessStream;
109  /* Cuda Event for synchronizing completion of pre-processing. */
110  std::shared_ptr<CudaEvent> m_PreProcessCompleteEvent;
111  std::unique_ptr<CudaDeviceBuffer> m_MeanDataBuffer;
112 };
113 
118 {
119 protected:
120  InferPostprocessor(NvDsInferNetworkType type, int id, int gpuId)
121  : m_NetworkType(type), m_UniqueID(id), m_GpuID(gpuId) {}
122 
123 public:
124  virtual ~InferPostprocessor();
125  void setDlHandle(const std::shared_ptr<DlLibHandle>& dlHandle)
126  {
127  m_CustomLibHandle = dlHandle;
128  }
130  {
131  m_NetworkInfo = info;
132  }
133  void setAllLayerInfo(std::vector<NvDsInferBatchDimsLayerInfo>& info)
134  {
135  m_AllLayerInfo.resize(info.size());
136  std::copy(info.begin(), info.end(), m_AllLayerInfo.begin());
137  }
138  void setOutputLayerInfo(std::vector<NvDsInferBatchDimsLayerInfo>& info)
139  {
140  m_OutputLayerInfo.resize(info.size());
141  std::copy(info.begin(), info.end(), m_OutputLayerInfo.begin());
142  }
144  {
145  m_LoggingFunc = func;
146  }
147  const std::vector<std::vector<std::string>>& getLabels() const
148  {
149  return m_Labels;
150  }
151  bool needInputCopy() const { return m_CopyInputToHostBuffers; }
152 
154 
156  const NvDsInferContextInitParams& initParams);
157 
158  /* Copy inference output from device to host memory. */
160  NvDsInferBatch& buffer, CudaStream& mainStream);
161 
164 
165  void freeBatchOutput(NvDsInferContextBatchOutput& batchOutput);
166 
167 private:
168  /* Parse the output of each frame in batch. */
169  virtual NvDsInferStatus parseEachBatch(
170  const std::vector<NvDsInferLayerInfo>& outputLayers,
171  NvDsInferFrameOutput& result) = 0;
172 
173 protected:
174  NvDsInferStatus parseLabelsFile(const std::string& path);
176  void releaseFrameOutput(NvDsInferFrameOutput& frameOutput);
177 
178 private:
179  DISABLE_CLASS_COPY(InferPostprocessor);
180 
181 protected:
182  /* Processor type */
184 
185  int m_UniqueID = 0;
186  uint32_t m_GpuID = 0;
188 
189  /* Custom library implementation. */
190  std::shared_ptr<DlLibHandle> m_CustomLibHandle;
193  bool m_DumpOpTensor = false;
194  std::vector<std::pair<std::string, std::string>> m_DumpOpTensorFiles;
195  bool m_OverwriteOpTensor = false;
196  std::vector<std::pair<std::string, int>> m_OverwriteOpTensorFilePairs;
197  std::vector<std::ifstream *> m_OverwriteOpTensorFiles;
198  /* Network input information. */
200  std::vector<NvDsInferLayerInfo> m_AllLayerInfo;
201  std::vector<NvDsInferLayerInfo> m_OutputLayerInfo;
202 
203  /* Holds the string labels for classes. */
204  std::vector<std::vector<std::string>> m_Labels;
205 };
206 
209 {
210 public:
211  DetectPostprocessor(int id, int gpuId = 0)
213  ~DetectPostprocessor() override = default;
214 
216  const NvDsInferContextInitParams& initParams) override;
217 
218 private:
219  NvDsInferStatus parseEachBatch(
220  const std::vector<NvDsInferLayerInfo>& outputLayers,
221  NvDsInferFrameOutput& result) override;
222 
223  bool parseBoundingBox(
224  std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
225  NvDsInferNetworkInfo const& networkInfo,
226  NvDsInferParseDetectionParams const& detectionParams,
227  std::vector<NvDsInferObjectDetectionInfo>& objectList);
228 
229  std::vector<int> nonMaximumSuppression
230  (std::vector<std::pair<float, int>>& scoreIndex,
231  std::vector<NvDsInferParseObjectInfo>& bbox,
232  const float nmsThreshold);
233  void clusterAndFillDetectionOutputNMS(NvDsInferDetectionOutput &output);
234  void clusterAndFillDetectionOutputCV(NvDsInferDetectionOutput& output);
235  void clusterAndFillDetectionOutputDBSCAN(NvDsInferDetectionOutput& output);
236  void clusterAndFillDetectionOutputHybrid(NvDsInferDetectionOutput& output);
237  void fillUnclusteredOutput(NvDsInferDetectionOutput& output);
238  NvDsInferStatus fillDetectionOutput(
239  const std::vector<NvDsInferLayerInfo>& outputLayers,
240  NvDsInferDetectionOutput& output);
241  void preClusteringThreshold(NvDsInferParseDetectionParams const &detectionParams,
242  std::vector<NvDsInferObjectDetectionInfo> &objectList);
243  void filterTopKOutputs(const int topK,
244  std::vector<NvDsInferObjectDetectionInfo> &objectList);
245 
246 private:
247  _DS_DEPRECATED_("Use m_ClusterMode instead")
248  bool m_UseDBScan = false;
249  std::shared_ptr<NvDsInferDBScan> m_DBScanHandle;
250  NvDsInferClusterMode m_ClusterMode;
251 
252  /* Number of classes detected by the model. */
253  uint32_t m_NumDetectedClasses = 0;
254 
255  /* Detection / grouping parameters. */
256  std::vector<NvDsInferDetectionParams> m_PerClassDetectionParams;
257  NvDsInferParseDetectionParams m_DetectionParams = {0, {}, {}};
258 
259  /* Vector for all parsed objects. */
260  std::vector<NvDsInferObjectDetectionInfo> m_ObjectList;
261 #ifdef WITH_OPENCV
262  /* Vector of cv::Rect vectors for each class. */
263  std::vector<std::vector<cv::Rect>> m_PerClassCvRectList;
264 #endif
265  /* Vector of NvDsInferObjectDetectionInfo vectors for each class. */
266  std::vector<std::vector<NvDsInferObjectDetectionInfo>> m_PerClassObjectList;
267 
268  NvDsInferParseCustomFunc m_CustomBBoxParseFunc = nullptr;
269 };
270 
273 {
274 public:
275  InstanceSegmentPostprocessor(int id, int gpuId = 0)
277  ~InstanceSegmentPostprocessor() override = default;
278 
280  const NvDsInferContextInitParams& initParams) override;
281 
282 private:
283  NvDsInferStatus parseEachBatch(
284  const std::vector<NvDsInferLayerInfo>& outputLayers,
285  NvDsInferFrameOutput& result) override;
286 
287  void fillUnclusteredOutput(NvDsInferDetectionOutput& output);
288  NvDsInferStatus fillDetectionOutput(
289  const std::vector<NvDsInferLayerInfo>& outputLayers,
290  NvDsInferDetectionOutput& output);
291  void preClusteringThreshold(NvDsInferParseDetectionParams const &detectionParams,
292  std::vector<NvDsInferInstanceMaskInfo> &objectList);
293  void filterTopKOutputs(const int topK,
294  std::vector<NvDsInferInstanceMaskInfo> &objectList);
295 
296 private:
297  NvDsInferClusterMode m_ClusterMode;
298 
299  /* Number of classes detected by the model. */
300  uint32_t m_NumDetectedClasses = 0;
301 
302  /* Detection / grouping parameters. */
303  std::vector<NvDsInferDetectionParams> m_PerClassDetectionParams;
304  NvDsInferParseDetectionParams m_DetectionParams = {0, {}, {}};
305 
306  /* Vector for all parsed instance masks. */
307  std::vector<NvDsInferInstanceMaskInfo> m_InstanceMaskList;
308  /* Vector of NvDsInferInstanceMaskInfo vectors for each class. */
309  std::vector<std::vector<NvDsInferInstanceMaskInfo>> m_PerClassInstanceMaskList;
310 
311  NvDsInferInstanceMaskParseCustomFunc m_CustomParseFunc = nullptr;
312 };
313 
316 {
317 public:
318  ClassifyPostprocessor(int id, int gpuId = 0)
320 
322  const NvDsInferContextInitParams& initParams) override;
323 
324 private:
325  NvDsInferStatus parseEachBatch(
326  const std::vector<NvDsInferLayerInfo>& outputLayers,
327  NvDsInferFrameOutput& result) override;
328 
329  NvDsInferStatus fillClassificationOutput(
330  const std::vector<NvDsInferLayerInfo>& outputLayers,
332 
333  bool parseAttributesFromSoftmaxLayers(
334  std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
335  NvDsInferNetworkInfo const& networkInfo, float classifierThreshold,
336  std::vector<NvDsInferAttribute>& attrList, std::string& attrString);
337 
338 private:
339  float m_ClassifierThreshold = 0.0f;
340  NvDsInferClassiferParseCustomFunc m_CustomClassifierParseFunc = nullptr;
341 };
342 
345 {
346 public:
347  SegmentPostprocessor(int id, int gpuId = 0)
349 
351  const NvDsInferContextInitParams& initParams) override;
352 
353 private:
354  NvDsInferStatus parseEachBatch(
355  const std::vector<NvDsInferLayerInfo>& outputLayers,
356  NvDsInferFrameOutput& result) override;
357 
358  NvDsInferStatus fillSegmentationOutput(
359  const std::vector<NvDsInferLayerInfo>& outputLayers,
361 
362 private:
363  float m_SegmentationThreshold = 0.0f;
364  NvDsInferTensorOrder m_SegmentationOutputOrder = NvDsInferTensorOrder_kNCHW;
365 };
366 
368 {
369 public:
370  OtherPostprocessor(int id, int gpuId = 0)
372 
374  const NvDsInferContextInitParams& initParams) override;
375 
376 private:
377  NvDsInferStatus parseEachBatch(
378  const std::vector<NvDsInferLayerInfo>& outputLayers,
379  NvDsInferFrameOutput& result) override {
380  return NVDSINFER_SUCCESS;
381  }
382 };
383 
384 class BackendContext;
385 
389 class NvDsInferContextImpl : public INvDsInferContext
390 {
391 public:
396 
402  void *userCtx, NvDsInferContextLoggingFunc logFunc);
403 
404 private:
408  ~NvDsInferContextImpl() override;
409 
410  /* Implementation of the public methods of INvDsInferContext interface. */
411  NvDsInferStatus queueInputBatch(NvDsInferContextBatchInput &batchInput) override;
412  NvDsInferStatus queueInputBatchPreprocessed(NvDsInferContextBatchPreprocessedInput &batchInput) override;
413  NvDsInferStatus dequeueOutputBatch(NvDsInferContextBatchOutput &batchOutput) override;
414  void releaseBatchOutput(NvDsInferContextBatchOutput &batchOutput) override;
415  void fillLayersInfo(std::vector<NvDsInferLayerInfo> &layersInfo) override;
416  void getNetworkInfo(NvDsInferNetworkInfo &networkInfo) override;
417  const std::vector<std::vector<std::string>>& getLabels() override;
418  void destroy() override;
419 
420  /* Other private methods. */
421  NvDsInferStatus initInferenceInfo(
422  const NvDsInferContextInitParams& initParams, BackendContext& ctx);
423  NvDsInferStatus preparePreprocess(
424  const NvDsInferContextInitParams& initParams);
425  NvDsInferStatus preparePostprocess(
426  const NvDsInferContextInitParams& initParams);
427 
428  std::unique_ptr<BackendContext> generateBackendContext(
429  NvDsInferContextInitParams& initParams);
430  std::unique_ptr<BackendContext> buildModel(
431  NvDsInferContextInitParams& initParams);
432  bool deserializeEngineAndBackend(const std::string enginePath, int dla,
433  std::shared_ptr<TrtEngine>& engine,
434  std::unique_ptr<BackendContext>& backend);
435  NvDsInferStatus checkBackendParams(
436  BackendContext& ctx, const NvDsInferContextInitParams& initParams);
437 
438  NvDsInferStatus getBoundLayersInfo();
439  NvDsInferStatus resizeOutputBufferpool(uint32_t numBuffers);
440  NvDsInferStatus allocateBuffers();
441  NvDsInferStatus initNonImageInputLayers();
442 
443  /* Input layer has a binding index of 0 */
444  static const int INPUT_LAYER_INDEX = 0;
445 
448  uint32_t m_UniqueID = 0;
449  uint32_t m_GpuID = 0;
450 
451  /* Custom unique_ptrs. These TensorRT objects will get deleted automatically
452  * when the NvDsInferContext object is deleted. */
453  std::unique_ptr<BackendContext> m_BackendContext;
454  std::shared_ptr<DlLibHandle> m_CustomLibHandle;
455 
456  std::unique_ptr<InferPreprocessor> m_Preprocessor;
457  std::unique_ptr<InferPostprocessor> m_Postprocessor;
458 
459  uint32_t m_MaxBatchSize = 0;
460  /* Network input information. */
461  NvDsInferNetworkInfo m_NetworkInfo;
462 
463  /* Vectors for holding information about bound layers. */
464  std::vector<NvDsInferBatchDimsLayerInfo> m_AllLayerInfo;
465  std::vector<NvDsInferBatchDimsLayerInfo> m_OutputLayerInfo;
466  NvDsInferBatchDimsLayerInfo m_InputImageLayerInfo;
467 
468  std::vector<void *> m_BindingBuffers;
469  std::vector<std::unique_ptr<CudaDeviceBuffer>> m_InputDeviceBuffers;
470 
471  uint32_t m_OutputBufferPoolSize = NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE;
472  std::vector<std::shared_ptr<NvDsInferBatch>> m_Batches;
473  std::mutex m_BatchesMutex;
474 
475  /* Queues and synchronization members for processing multiple batches
476  * in parallel.
477  */
478  GuardQueue<std::list<NvDsInferBatch*>> m_FreeBatchQueue;
479  GuardQueue<std::list<NvDsInferBatch*>> m_ProcessBatchQueue;
480 
481  std::unique_ptr<CudaStream> m_InferStream;
482  std::unique_ptr<CudaStream> m_PostprocessStream;
483 
484  /* Cuda Event for synchronizing input consumption by TensorRT CUDA engine. */
485  std::shared_ptr<CudaEvent> m_InputConsumedEvent;
486 
487  /* Cuda Event for synchronizing infer completion by TensorRT CUDA engine. */
488  std::shared_ptr<CudaEvent> m_InferCompleteEvent;
489 
490  NvDsInferLoggingFunc m_LoggingFunc;
491 
492  bool m_Initialized = false;
493  uint32_t m_AutoIncMem = 1;
494  double m_MaxGPUMem = 99;
495  bool m_DumpIpTensor = false;
496  std::string m_DumpIpTensorFilePath = " ";
497  bool m_OverwriteIpTensor = false;
498  std::string m_OverwriteIpTensorFilePath = " ";
499  std::ifstream m_OverwriteIpTensorFile;
500 };
501 
502 }
503 
/* Core logging macro used by the NvDsInferContextImpl implementation.
 * Formats the message as
 *   "<tag_str> NvDsInferContextImpl::<func>() <file:line> [UID = id]: <fmt...>"
 * using the basename of __FILE__, then routes it to the registered
 * m_LoggingFunc callback when one is set, otherwise to stderr.
 * NOTE(review): expects m_UniqueID, m_LoggingFunc and _MAX_STR_LENGTH to be
 * visible in the expansion scope (class members / project-wide constant). */
504 #define printMsg(level, tag_str, fmt, ...) \
505  do { \
506  char* baseName = strrchr((char*)__FILE__, '/'); \
507  baseName = (baseName) ? (baseName + 1) : (char*)__FILE__; \
508  char logMsgBuffer[5 * _MAX_STR_LENGTH + 1]; \
509  snprintf(logMsgBuffer, 5 * _MAX_STR_LENGTH, \
510  tag_str " NvDsInferContextImpl::%s() <%s:%d> [UID = %d]: " fmt, \
511  __func__, baseName, __LINE__, m_UniqueID, ##__VA_ARGS__); \
512  if (m_LoggingFunc) { \
513  m_LoggingFunc(level, logMsgBuffer); \
514  } else { \
515  fprintf(stderr, "%s\n", logMsgBuffer); \
516  } \
517  } while (0)
518 
/* Logs a formatted message at NVDSINFER_LOG_ERROR level, tagged "Error in". */
519 #define printError(fmt, ...) \
520  do { \
521  printMsg (NVDSINFER_LOG_ERROR, "Error in", fmt, ##__VA_ARGS__); \
522  } while (0)
523 
/* Logs a formatted message at NVDSINFER_LOG_WARNING level, tagged "Warning from". */
524 #define printWarning(fmt, ...) \
525  do { \
526  printMsg (NVDSINFER_LOG_WARNING, "Warning from", fmt, ##__VA_ARGS__); \
527  } while (0)
528 
/* Logs a formatted message at NVDSINFER_LOG_INFO level, tagged "Info from". */
529 #define printInfo(fmt, ...) \
530  do { \
531  printMsg (NVDSINFER_LOG_INFO, "Info from", fmt, ##__VA_ARGS__); \
532  } while (0)
533 
/* Logs a formatted message at NVDSINFER_LOG_DEBUG level, tagged "DEBUG". */
534 #define printDebug(fmt, ...) \
535  do { \
536  printMsg (NVDSINFER_LOG_DEBUG, "DEBUG", fmt, ##__VA_ARGS__); \
537  } while (0)
538 
539 #endif
nvdsinfer::InferPostprocessor::setAllLayerInfo
void setAllLayerInfo(std::vector< NvDsInferBatchDimsLayerInfo > &info)
Definition: nvdsinfer_context_impl.h:133
nvdsinfer::DetectPostprocessor::~DetectPostprocessor
~DetectPostprocessor() override=default
nvdsinfer::InferPreprocessor::setLoggingFunc
void setLoggingFunc(const NvDsInferLoggingFunc &func)
Definition: nvdsinfer_context_impl.h:77
nvdsinfer::NvDsInferLoggingFunc
std::function< void(NvDsInferLogLevel, const char *msg)> NvDsInferLoggingFunc
Definition: nvdsinfer_context_impl.h:49
nvdsinfer::InferPreprocessor::setMeanFile
bool setMeanFile(const std::string &file)
nvdsinfer_utils.h
NvDsInferTensorOrder
NvDsInferTensorOrder
Defines UFF input layer orders.
Definition: nvdsinfer_context.h:176
nvdsinfer::GuardQueue
Definition: nvdsinfer_func_utils.h:169
NvDsInferNetworkType_Classifier
@ NvDsInferNetworkType_Classifier
Specifies a classifier.
Definition: nvdsinfer_context.h:138
nvdsinfer::DetectPostprocessor::DetectPostprocessor
DetectPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:211
nvdsinfer::InferPostprocessor::initResource
virtual NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams)
NvDsInferFormat
NvDsInferFormat
Defines color formats.
Definition: nvdsinfer_context.h:156
nvdsinfer::InferPostprocessor::setLoggingFunc
void setLoggingFunc(const NvDsInferLoggingFunc &func)
Definition: nvdsinfer_context_impl.h:143
nvdsinfer::BackendContext
Abstract interface for managing the actual inferencing implementation.
Definition: nvdsinfer_backend.h:168
nvdsinfer::InferPostprocessor::needOutputCopyB4Processing
bool needOutputCopyB4Processing() const
Definition: nvdsinfer_context_impl.h:153
nvdsinfer::InferPostprocessor::~InferPostprocessor
virtual ~InferPostprocessor()
nvdsinfer::CudaStream
Helper class for managing Cuda Streams.
Definition: nvdsinfer_backend.h:40
nvdsinfer::InferPostprocessor::m_GpuID
uint32_t m_GpuID
Definition: nvdsinfer_context_impl.h:186
nvdsinfer_backend.h
nvdsinfer::InferPostprocessor::releaseFrameOutput
void releaseFrameOutput(NvDsInferFrameOutput &frameOutput)
nvdsinfer::InferPostprocessor::m_CustomLibHandle
std::shared_ptr< DlLibHandle > m_CustomLibHandle
Definition: nvdsinfer_context_impl.h:190
nvdsinfer::InferPostprocessor::getLabels
const std::vector< std::vector< std::string > > & getLabels() const
Definition: nvdsinfer_context_impl.h:147
NvDsInferSegmentationOutput
Holds the information parsed from segmentation network output for one frame.
Definition: infer_post_datatypes.h:75
nvdsinfer::InferPostprocessor::allocDeviceResource
NvDsInferStatus allocDeviceResource()
nvdsinfer::InferPostprocessor::setDlHandle
void setDlHandle(const std::shared_ptr< DlLibHandle > &dlHandle)
Definition: nvdsinfer_context_impl.h:125
nvdsinfer::InferPostprocessor::needInputCopy
bool needInputCopy() const
Definition: nvdsinfer_context_impl.h:151
NVDSINFER_SUCCESS
@ NVDSINFER_SUCCESS
NvDsInferContext operation succeeded.
Definition: nvdsinfer.h:220
nvdsinfer::InferPostprocessor::postProcessHost
virtual NvDsInferStatus postProcessHost(NvDsInferBatch &buffer, NvDsInferContextBatchOutput &output)
nvdsinfer::InstanceSegmentPostprocessor
Implementation of post-processing class for instance segmentation networks.
Definition: nvdsinfer_context_impl.h:272
nvdsinfer::InferPostprocessor::setOutputLayerInfo
void setOutputLayerInfo(std::vector< NvDsInferBatchDimsLayerInfo > &info)
Definition: nvdsinfer_context_impl.h:138
nvdsinfer::InferPostprocessor::freeBatchOutput
void freeBatchOutput(NvDsInferContextBatchOutput &batchOutput)
nvdsinfer::InferPostprocessor::m_disableOutputHostCopy
bool m_disableOutputHostCopy
Definition: nvdsinfer_context_impl.h:192
nvdsinfer::InferPostprocessor::m_OverwriteOpTensor
bool m_OverwriteOpTensor
Definition: nvdsinfer_context_impl.h:195
nvdsinfer::InferPostprocessor
Base class for post-processing on inference output.
Definition: nvdsinfer_context_impl.h:117
nvdsinfer::InferPostprocessor::m_OverwriteOpTensorFilePairs
std::vector< std::pair< std::string, int > > m_OverwriteOpTensorFilePairs
Definition: nvdsinfer_context_impl.h:196
NvDsInferLogLevel
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
Definition: nvdsinfer.h:249
nvdsinfer::InferPostprocessor::copyBuffersToHostMemory
virtual NvDsInferStatus copyBuffersToHostMemory(NvDsInferBatch &buffer, CudaStream &mainStream)
nvdsinfer::ClassifyPostprocessor::ClassifyPostprocessor
ClassifyPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:318
NvDsInferClassificationOutput
Holds information on all attributes classifed by a classifier network for one frame.
Definition: nvdsinfer_context.h:543
NvDsInferNetworkType_Detector
@ NvDsInferNetworkType_Detector
Specifies a detector.
Definition: nvdsinfer_context.h:135
nvdsinfer
Definition: nvdsinfer_model_builder.h:42
NvDsInferParseDetectionParams
Holds the detection parameters required for parsing objects.
Definition: nvdsinfer_custom_impl.h:191
NvDsInferContextBatchInput
Holds information about one batch to be inferred.
Definition: nvdsinfer_context.h:468
nvdsinfer::DetectPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferFormat_RGB
@ NvDsInferFormat_RGB
Specifies 24-bit interleaved R-G-B format.
Definition: nvdsinfer_context.h:159
NvDsInferInstanceMaskParseCustomFunc
bool(* NvDsInferInstanceMaskParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, NvDsInferParseDetectionParams const &detectionParams, std::vector< NvDsInferInstanceMaskInfo > &objectList)
Type definition for the custom bounding box and instance mask parsing function.
Definition: nvdsinfer_custom_impl.h:251
CudaStream
Helper class for managing Cuda Streams.
Definition: nvdspreprocess_impl.h:97
NvDsInferClusterMode
NvDsInferClusterMode
Enum for clustering mode for detectors.
Definition: nvdsinfer_context.h:228
nvdsinfer::InferPostprocessor::m_OverwriteOpTensorFiles
std::vector< std::ifstream * > m_OverwriteOpTensorFiles
Definition: nvdsinfer_context_impl.h:197
nvdsinfer::InferPostprocessor::setNetworkInfo
void setNetworkInfo(const NvDsInferNetworkInfo &info)
Definition: nvdsinfer_context_impl.h:129
nvdsinfer::NvDsInferContextImpl::NvDsInferContextImpl
NvDsInferContextImpl()
Default constructor.
NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE
#define NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE
Defines the minimum number of sets of output buffers that must be allocated.
Definition: nvdsinfer_context.h:111
nvdsinfer::NvDsInferBatch::m_OutputDeviceBuffers
std::vector< std::unique_ptr< CudaDeviceBuffer > > m_OutputDeviceBuffers
Definition: nvdsinfer_context_impl.h:59
nvdsinfer::SegmentPostprocessor::SegmentPostprocessor
SegmentPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:347
nvdsinfer::InferPreprocessor::transform
NvDsInferStatus transform(NvDsInferContextBatchInput &batchInput, void *devBuf, CudaStream &mainStream, CudaEvent *waitingEvent)
nvdsinfer::SegmentPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferDetectionOutput
Holds the information on all objects detected by a detector network in one frame.
Definition: infer_post_datatypes.h:43
nvdsinfer::InferPostprocessor::m_OutputLayerInfo
std::vector< NvDsInferLayerInfo > m_OutputLayerInfo
Definition: nvdsinfer_context_impl.h:201
nvdsinfer::NvDsInferBatch::m_DeviceBuffers
std::vector< void * > m_DeviceBuffers
Definition: nvdsinfer_context_impl.h:56
NvDsInferNetworkInfo
Holds information about the model network.
Definition: nvdsinfer.h:110
nvdsinfer::InferPreprocessor
Provides pre-processing functionality like mean subtraction and normalization.
Definition: nvdsinfer_context_impl.h:70
nvdsinfer::NvDsInferContextImpl
Implementation of the INvDsInferContext interface.
Definition: nvdsinfer_context_impl.h:389
nvdsinfer::NvDsInferBatch::m_HostBuffers
std::vector< std::unique_ptr< CudaHostBuffer > > m_HostBuffers
Definition: nvdsinfer_context_impl.h:57
nvdsinfer_context.h
nvdsinfer_custom_impl.h
nvdsinfer::InferPostprocessor::m_NetworkInfo
NvDsInferNetworkInfo m_NetworkInfo
Definition: nvdsinfer_context_impl.h:199
nvdsinfer::OtherPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferTensorOrder_kNCHW
@ NvDsInferTensorOrder_kNCHW
Definition: nvdsinfer_context.h:177
NvDsInferClassiferParseCustomFunc
bool(* NvDsInferClassiferParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, float classifierThreshold, std::vector< NvDsInferAttribute > &attrList, std::string &descString)
Type definition for the custom classifier output parsing function.
Definition: nvdsinfer_custom_impl.h:282
nvdsinfer::InferPostprocessor::parseLabelsFile
NvDsInferStatus parseLabelsFile(const std::string &path)
nvdsinfer_logger.h
nvdsinfer::InferPostprocessor::m_Labels
std::vector< std::vector< std::string > > m_Labels
Definition: nvdsinfer_context_impl.h:204
nvdsinfer::InferPreprocessor::setScaleOffsets
bool setScaleOffsets(float scale, const std::vector< float > &offsets={})
NvDsInferParseCustomFunc
bool(* NvDsInferParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, NvDsInferParseDetectionParams const &detectionParams, std::vector< NvDsInferObjectDetectionInfo > &objectList)
Type definition for the custom bounding box parsing function.
Definition: nvdsinfer_custom_impl.h:222
nvdsinfer::InstanceSegmentPostprocessor::InstanceSegmentPostprocessor
InstanceSegmentPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:275
NvDsInferNetworkType_InstanceSegmentation
@ NvDsInferNetworkType_InstanceSegmentation
Specifies an instance segmentation network.
Definition: nvdsinfer_context.h:145
_NvDsInferContextInitParams
Holds the initialization parameters required for the NvDsInferContext interface.
Definition: nvdsinfer_context.h:239
nvdsinfer::InferPostprocessor::InferPostprocessor
InferPostprocessor(NvDsInferNetworkType type, int id, int gpuId)
Definition: nvdsinfer_context_impl.h:120
nvdsinfer::InferPostprocessor::m_CopyInputToHostBuffers
bool m_CopyInputToHostBuffers
Definition: nvdsinfer_context_impl.h:191
nvdsinfer::InstanceSegmentPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
nvdsinfer::InferPostprocessor::m_NetworkType
NvDsInferNetworkType m_NetworkType
Definition: nvdsinfer_context_impl.h:183
nvdsinfer::InferPostprocessor::m_DumpOpTensorFiles
std::vector< std::pair< std::string, std::string > > m_DumpOpTensorFiles
Definition: nvdsinfer_context_impl.h:194
nvdsinfer::ClassifyPostprocessor
Implementation of post-processing class for classification networks.
Definition: nvdsinfer_context_impl.h:315
NvDsInferContextLoggingFunc
void(* NvDsInferContextLoggingFunc)(NvDsInferContextHandle handle, unsigned int uniqueID, NvDsInferLogLevel logLevel, const char *logMessage, void *userCtx)
Type declaration for a logging callback.
Definition: nvdsinfer_context.h:647
_DS_DEPRECATED_
#define _DS_DEPRECATED_(STR)
Definition: nvdsinfer.h:41
NvDsInferNetworkType_Segmentation
@ NvDsInferNetworkType_Segmentation
Specifies a segmentation network.
Definition: nvdsinfer_context.h:141
NvDsInferDetectionParams
Holds detection and bounding box grouping parameters.
Definition: nvdsinfer_context.h:192
nvdsinfer::InferPostprocessor::m_DumpOpTensor
bool m_DumpOpTensor
Definition: nvdsinfer_context_impl.h:193
NvDsInferFrameOutput
Holds the information inferred by the network on one frame.
Definition: nvdsinfer_context.h:579
nvdsinfer::InferPreprocessor::~InferPreprocessor
virtual ~InferPreprocessor()=default
nvdsinfer::OtherPostprocessor
Definition: nvdsinfer_context_impl.h:367
NvDsInferNetworkType
NvDsInferNetworkType
Defines network types.
Definition: nvdsinfer_context.h:131
NvDsInferContextBatchPreprocessedInput
Definition: nvdsinfer_context.h:486
NvDsInferContextBatchOutput
Holds the output for all of the frames in a batch (an array of frame), and related buffer information...
Definition: nvdsinfer_context.h:605
nvdsinfer::InferPostprocessor::m_UniqueID
int m_UniqueID
Definition: nvdsinfer_context_impl.h:185
nvdsinfer::ClassifyPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferNetworkType_Other
@ NvDsInferNetworkType_Other
Specifies other.
Definition: nvdsinfer_context.h:150
nvdsinfer::NvDsInferContextImpl::initialize
NvDsInferStatus initialize(NvDsInferContextInitParams &initParams, void *userCtx, NvDsInferContextLoggingFunc logFunc)
Initializes the Infer engine, allocates layer buffers and other required initialization steps.
nvdsinfer::InferPreprocessor::InferPreprocessor
InferPreprocessor(const NvDsInferNetworkInfo &info, NvDsInferFormat format, const NvDsInferBatchDimsLayerInfo &layerInfo, int id=0)
nvdsinfer::DetectPostprocessor
Implementation of post-processing class for object detection networks.
Definition: nvdsinfer_context_impl.h:208
nvdsinfer::NvDsInferBatch
Holds information for one batch for processing.
Definition: nvdsinfer_context_impl.h:54
nvdsinfer::InstanceSegmentPostprocessor::~InstanceSegmentPostprocessor
~InstanceSegmentPostprocessor() override=default
nvdsinfer::OtherPostprocessor::OtherPostprocessor
OtherPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:370
nvdsinfer::InferPreprocessor::setInputOrder
bool setInputOrder(const NvDsInferTensorOrder order)
nvdsinfer::InferPostprocessor::m_LoggingFunc
NvDsInferLoggingFunc m_LoggingFunc
Definition: nvdsinfer_context_impl.h:187
nvdsinfer::SegmentPostprocessor
Implementation of post-processing class for segmentation networks.
Definition: nvdsinfer_context_impl.h:344
nvdsinfer::InferPreprocessor::allocateResource
NvDsInferStatus allocateResource()
nvdsinfer::InferPostprocessor::m_AllLayerInfo
std::vector< NvDsInferLayerInfo > m_AllLayerInfo
Definition: nvdsinfer_context_impl.h:200
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218
nvdsinfer::InferPreprocessor::syncStream
NvDsInferStatus syncStream()