NVIDIA DeepStream SDK API Reference

7.0 Release
infer_base_context.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
23 #ifndef __INFER_BASE_CONTEXT_H__
24 #define __INFER_BASE_CONTEXT_H__
25 
26 #include <stdarg.h>
27 #include <condition_variable>
28 #include <functional>
29 #include <list>
30 #include <memory>
31 #include <mutex>
32 #include <queue>
33 
34 #include "infer_base_backend.h"
35 #include "infer_common.h"
36 #include "infer_icontext.h"
37 #include "nvdsinferserver_config.pb.h"
38 
39 namespace ic = nvdsinferserver::config;
40 
41 namespace nvdsinferserver {
42 
// Forward declaration of the RAII wrapper around a dynamically loaded
// custom library (see SharedDllHandle below); defined elsewhere.
class DlLibHandle;

// Completion callback for one inference request: receives the final status
// and the batch array holding the output buffers.
using InferCompleted = std::function<void(NvDsInferStatus, SharedBatchArray)>;
52 class InferBaseContext : public IInferContext {
53 public:
55  ~InferBaseContext() override;
56 
58  const std::string& prototxt, InferLoggingFunc logFunc) final;
59  NvDsInferStatus run(SharedIBatchArray input, InferOutputCb outputCb) final;
60  NvDsInferStatus deinit() override;
61 
62 private:
63  virtual NvDsInferStatus createNNBackend(const ic::BackendParams& params,
64  int maxBatchSize, UniqBackend& backend) = 0;
65  virtual NvDsInferStatus fixateInferenceInfo(
66  const ic::InferenceConfig& config, BaseBackend& backend) = 0;
67  virtual NvDsInferStatus createPreprocessor(
68  const ic::PreProcessParams& params,
69  std::vector<UniqPreprocessor>& processors) = 0;
70  virtual NvDsInferStatus createPostprocessor(
71  const ic::PostProcessParams& params, UniqPostprocessor& processor) = 0;
72  virtual NvDsInferStatus allocateResource(
73  const ic::InferenceConfig& config) = 0;
74 
75  virtual NvDsInferStatus preInference(
76  SharedBatchArray& inputs, const ic::InferenceConfig& config)
77  {
78  return NVDSINFER_SUCCESS;
79  }
80 
81  virtual NvDsInferStatus extraOutputTensorCheck(
82  SharedBatchArray& outputs, SharedOptions inOptions)
83  {
84  return NVDSINFER_SUCCESS;
85  }
86  virtual void notifyError(NvDsInferStatus status) = 0;
87 
88  void rawDataInferDone(
89  NvDsInferStatus status, SharedBatchArray outputs, SharedOptions inOptions,
90  InferCompleted done);
91 
92 protected:
93  virtual void backendConsumedInputs(SharedBatchArray inputs) {
94  inputs.reset();
95  }
96  virtual SharedCuStream& mainStream() = 0;
97 
98  const ic::InferenceConfig& config() const { return m_Config; }
99  int maxBatchSize() const { return m_MaxBatchSize; }
100  int uniqueId() const { return m_UniqueID; }
101  BaseBackend* backend() { return m_Backend.get(); }
102  const SharedDllHandle& customLib() const { return m_CustomLib; }
103  bool needCopyInputToHost() const;
104  void print(NvDsInferLogLevel l, const char* msg);
105  bool needPreprocess() const;
106 
107 private:
108  NvDsInferStatus buidNextPreprocMap();
109  NvDsInferStatus forEachPreprocess(
110  IPreprocessor* cur, SharedBatchArray input, InferCompleted done);
111  NvDsInferStatus doInference(SharedBatchArray inputs, InferCompleted done);
112  NvDsInferStatus doPostCudaProcess(
113  SharedBatchArray inputs, InferCompleted done);
114  NvDsInferStatus doPostHostProcess(
115  SharedBatchArray inputs, InferCompleted done);
116 
117 private:
118  InferLoggingFunc m_LoggingFunc;
119  uint32_t m_UniqueID = 0;
123  uint32_t m_MaxBatchSize = 1;
124  bool m_Initialized = false;
125 
131  std::vector<UniqPreprocessor> m_Preprocessors;
132  std::unordered_map<IPreprocessor*, IPreprocessor*> m_NextPreprocMap;
133  UniqBackend m_Backend;
134  UniqPostprocessor m_Postprocessor;
135  SharedDllHandle m_CustomLib;
138  ic::InferenceConfig m_Config;
139 };
140 
141 } // namespace nvdsinferserver
142 
// Maximum length (bytes, including the terminating NUL) of one formatted
// log message.
#define _MAX_LOG_LENGTH 4096

// Formats "<tag> func() <file:line> [UID = id]: <fmt...>" into a bounded
// buffer and forwards it to this->print(). Must be expanded inside an
// InferBaseContext member function (relies on this->print() / uniqueId()).
// Fix: strrchr() on the __FILE__ literal yields a pointer into const data;
// keep it const char* instead of casting constness away with (char*).
#define printMsg(level, tag_str, fmt, ...) \
    do { \
        const char* baseName = strrchr(__FILE__, '/'); \
        baseName = (baseName) ? (baseName + 1) : __FILE__; \
        std::vector<char> logMsgBuffer(_MAX_LOG_LENGTH, 0); \
        snprintf(logMsgBuffer.data(), _MAX_LOG_LENGTH - 1, \
            tag_str " %s() <%s:%d> [UID = %d]: " fmt, \
            __func__, baseName, __LINE__, uniqueId(), ##__VA_ARGS__); \
        this->print(level, logMsgBuffer.data()); \
    } while (0)
154 
// Per-level convenience wrappers over printMsg(); usable only inside
// InferBaseContext member functions (printMsg expands this->print()).

// Log an error message for this context.
#define printError(fmt, ...) \
    do { \
        printMsg(NVDSINFER_LOG_ERROR, "Error in", fmt, ##__VA_ARGS__); \
    } while (0)

// Log a warning message for this context.
#define printWarning(fmt, ...) \
    do { \
        printMsg(NVDSINFER_LOG_WARNING, "Warning from", fmt, ##__VA_ARGS__); \
    } while (0)

// Log an informational message for this context.
#define printInfo(fmt, ...) \
    do { \
        printMsg(NVDSINFER_LOG_INFO, "Info from", fmt, ##__VA_ARGS__); \
    } while (0)

// Log a debug message for this context.
#define printDebug(fmt, ...) \
    do { \
        printMsg(NVDSINFER_LOG_DEBUG, "DEBUG", fmt, ##__VA_ARGS__); \
    } while (0)
174 
// On failure of @a err, print an error and return from the enclosing
// function. NOTE(review): `ifStatus` is not declared here — presumably it is
// bound inside CHECK_NVINFER_ERROR_PRINT (defined elsewhere); verify there.
#define CTX_RETURN_NVINFER_ERROR(err, fmt, ...) \
    CHECK_NVINFER_ERROR_PRINT( \
        err, return ifStatus, printError, fmt, ##__VA_ARGS__)

// On a CUDA failure of @a err, print an error and return
// NVDSINFER_CUDA_ERROR from the enclosing function.
#define CTX_RETURN_CUDA_ERR(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION( \
        err, return NVDSINFER_CUDA_ERROR, printError, fmt, ##__VA_ARGS__)
182 
183 #endif /* __INFER_BASE_CONTEXT_H__ */
nvdsinferserver
This is a header file for pre-processing cuda kernels with normalization and mean subtraction require...
Definition: infer_custom_process.h:24
nvdsinferserver::InferBaseContext::initialize
NvDsInferStatus initialize(const std::string &prototxt, InferLoggingFunc logFunc) final
nvdsinferserver::SharedDllHandle
std::shared_ptr< DlLibHandle > SharedDllHandle
Definition: infer_common.h:111
nvdsinferserver::InferBaseContext::maxBatchSize
int maxBatchSize() const
Definition: infer_base_context.h:99
nvdsinferserver::InferBaseContext::customLib
const SharedDllHandle & customLib() const
Definition: infer_base_context.h:102
nvdsinferserver::InferCompleted
std::function< void(NvDsInferStatus, SharedBatchArray)> InferCompleted
Definition: infer_base_context.h:45
NVDSINFER_SUCCESS
@ NVDSINFER_SUCCESS
NvDsInferContext operation succeeded.
Definition: nvdsinfer.h:220
NvDsInferLogLevel
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
Definition: nvdsinfer.h:249
infer_common.h
Header file of the common declarations for the nvinferserver library.
nvdsinferserver::InferBaseContext::needPreprocess
bool needPreprocess() const
nvdsinferserver::UniqBackend
std::unique_ptr< BaseBackend > UniqBackend
Definition: infer_base_backend.h:212
nvdsinferserver::SharedIBatchArray
std::shared_ptr< IBatchArray > SharedIBatchArray
Definition: infer_datatypes.h:205
nvdsinferserver::InferBaseContext::deinit
NvDsInferStatus deinit() override
infer_icontext.h
Inference context library interface header file.
nvdsinferserver::SharedOptions
std::shared_ptr< IOptions > SharedOptions
Definition: infer_common.h:73
nvdsinferserver::SharedCuStream
std::shared_ptr< CudaStream > SharedCuStream
Cuda based pointers.
Definition: infer_common.h:84
nvdsinferserver::InferBaseContext::backend
BaseBackend * backend()
Definition: infer_base_context.h:101
nvdsinferserver::InferBaseContext
The base class for handling the inference context.
Definition: infer_base_context.h:52
nvdsinferserver::InferBaseContext::print
void print(NvDsInferLogLevel l, const char *msg)
nvdsinferserver::InferBaseContext::config
const ic::InferenceConfig & config() const
Definition: infer_base_context.h:98
nvdsinferserver::InferBaseContext::~InferBaseContext
~InferBaseContext() override
nvdsinferserver::UniqPostprocessor
std::unique_ptr< BasePostprocessor > UniqPostprocessor
Processor interfaces.
Definition: infer_common.h:98
nvdsinferserver::BaseBackend
Base class of inference backend processing.
Definition: infer_base_backend.h:40
infer_base_backend.h
Header file for inference processing backend base class.
nvdsinferserver::InferBaseContext::InferBaseContext
InferBaseContext()
nvdsinferserver::InferBaseContext::run
NvDsInferStatus run(SharedIBatchArray input, InferOutputCb outputCb) final
nvdsinferserver::IPreprocessor
Preprocessor interface class.
Definition: infer_iprocess.h:41
nvdsinferserver::InferBaseContext::mainStream
virtual SharedCuStream & mainStream()=0
nvdsinferserver::InferBaseContext::uniqueId
int uniqueId() const
Definition: infer_base_context.h:100
nvdsinferserver::InferBaseContext::needCopyInputToHost
bool needCopyInputToHost() const
nvdsinferserver::SharedBatchArray
std::shared_ptr< BaseBatchArray > SharedBatchArray
Definition: infer_common.h:75
nvdsinferserver::InferBaseContext::backendConsumedInputs
virtual void backendConsumedInputs(SharedBatchArray inputs)
Definition: infer_base_context.h:93
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218