NVIDIA DeepStream SDK API Reference

7.0 Release
nvdsinfer_func_utils.h
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */

#ifndef __NVDSINFER_FUNC_UTILS_H__
#define __NVDSINFER_FUNC_UTILS_H__

#include <dlfcn.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_set>

#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <nvdsinfer.h>
#include <nvdsinfer_context.h>
#include <nvdsinfer_logger.h>

/* This file provides APIs/macros for some frequently used functionality. */

#define DISABLE_CLASS_COPY(NoCopyClass)       \
    NoCopyClass(const NoCopyClass&) = delete; \
    void operator=(const NoCopyClass&) = delete

#define SIMPLE_MOVE_COPY(Cls)        \
    Cls& operator=(Cls&& o) {        \
        move_copy(std::move(o));     \
        return *this;                \
    }                                \
    Cls(Cls&& o) { move_copy(std::move(o)); }
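
The two macros above are meant to be placed inside a class body: DISABLE_CLASS_COPY deletes the copy operations, and SIMPLE_MOVE_COPY forwards both move operations to a user-provided move_copy() helper. A minimal sketch of a class using them (the Buffer class and its move_copy() body are hypothetical, assuming this header is included):

#include <utility>

class Buffer
{
public:
    Buffer() = default;
    ~Buffer() { delete[] m_Data; }

    /* Moving transfers ownership of the raw pointer; copying is disabled. */
    SIMPLE_MOVE_COPY(Buffer);
    DISABLE_CLASS_COPY(Buffer);

private:
    /* Called by both the move constructor and the move assignment operator
     * generated by SIMPLE_MOVE_COPY. */
    void move_copy(Buffer&& o)
    {
        delete[] m_Data;
        m_Data = o.m_Data;
        o.m_Data = nullptr;
    }

    char* m_Data{nullptr};
};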

#define CHECK_NVINFER_ERROR(err, action, fmt, ...)                         \
    do {                                                                   \
        NvDsInferStatus ifStatus = (err);                                  \
        if (ifStatus != NVDSINFER_SUCCESS) {                               \
            auto errStr = NvDsInferStatus2Str(ifStatus);                   \
            dsInferError(fmt ", nvinfer error:%s", ##__VA_ARGS__, errStr); \
            action;                                                        \
        }                                                                  \
    } while (0)

#define RETURN_NVINFER_ERROR(err, fmt, ...) \
    CHECK_NVINFER_ERROR(err, return ifStatus, fmt, ##__VA_ARGS__)
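
CHECK_NVINFER_ERROR evaluates an expression of type NvDsInferStatus, logs a formatted message through dsInferError() on failure, and then runs the caller-supplied action; RETURN_NVINFER_ERROR uses the propagated status as that action. A sketch of typical use inside a function that itself returns NvDsInferStatus (initEngine() and warmupEngine() are hypothetical helpers):

/* Hypothetical helpers returning NvDsInferStatus; only the macro usage matters. */
NvDsInferStatus initEngine();
NvDsInferStatus warmupEngine();

NvDsInferStatus setupEngine(int gpuId)
{
    /* On failure: logs "..., nvinfer error:<status string>" and returns the status. */
    RETURN_NVINFER_ERROR(initEngine(),
        "Failed to initialize engine on GPU %d", gpuId);

    /* On failure: logs only, then falls through (empty action argument). */
    CHECK_NVINFER_ERROR(warmupEngine(), , "Engine warmup failed");

    return NVDSINFER_SUCCESS;
}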

#define CHECK_CUDA_ERR_W_ACTION(err, action, fmt, ...)                       \
    do {                                                                     \
        cudaError_t errnum = (err);                                          \
        if (errnum != cudaSuccess) {                                         \
            dsInferError(fmt ", cuda err_no:%d, err_str:%s", ##__VA_ARGS__,  \
                (int)errnum, cudaGetErrorName(errnum));                      \
            action;                                                          \
        }                                                                    \
    } while (0)

#define CHECK_CUDA_ERR_NO_ACTION(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION(err, , fmt, ##__VA_ARGS__)

#define RETURN_CUDA_ERR(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION(           \
        err, return NVDSINFER_CUDA_ERROR, fmt, ##__VA_ARGS__)
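
The CUDA variants follow the same pattern for cudaError_t values: log the error name and number, then run the given action or return NVDSINFER_CUDA_ERROR. A hedged sketch of a device allocation helper (the function and parameter names are illustrative):

#include <cuda_runtime_api.h>

NvDsInferStatus allocateDeviceBuffer(void** devPtr, size_t bytes)
{
    /* Logs "..., cuda err_no:<n>, err_str:<name>" and returns
     * NVDSINFER_CUDA_ERROR if cudaMalloc fails. */
    RETURN_CUDA_ERR(cudaMalloc(devPtr, bytes),
        "Failed to allocate %zu bytes of device memory", bytes);

    RETURN_CUDA_ERR(cudaMemset(*devPtr, 0, bytes),
        "Failed to zero-initialize device buffer");

    return NVDSINFER_SUCCESS;
}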

#define READ_SYMBOL(lib, func_name) \
    lib->symbol<decltype(&func_name)>(#func_name)

namespace nvdsinfer {

inline const char* safeStr(const char* str)
{
    return !str ? "" : str;
}

inline const char* safeStr(const std::string& str)
{
    return str.c_str();
}

inline bool string_empty(const char* str)
{
    return !str || strlen(str) == 0;
}

inline bool file_accessible(const char* path)
{
    assert(path);
    return (access(path, F_OK) != -1);
}

inline bool file_accessible(const std::string& path)
{
    return (!path.empty()) && file_accessible(path.c_str());
}
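
These inline helpers guard against null C strings and missing files before values reach printf-style logging or file loading. A short usage sketch (the function and messages are illustrative):

#include <cstdio>

void printModelInfo(const char* modelPath, const std::string& labelsPath)
{
    /* safeStr() maps a null pointer to "" so the format string never sees nullptr. */
    std::printf("model: %s, labels: %s\n",
        nvdsinfer::safeStr(modelPath), nvdsinfer::safeStr(labelsPath));

    if (nvdsinfer::string_empty(modelPath) ||
        !nvdsinfer::file_accessible(modelPath)) {
        std::printf("model file is missing or not readable\n");
    }
}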

std::string dims2Str(const nvinfer1::Dims& d);
std::string dims2Str(const NvDsInferDims& d);
std::string batchDims2Str(const NvDsInferBatchDims& d);

std::string dataType2Str(const nvinfer1::DataType type);
std::string dataType2Str(const NvDsInferDataType type);
std::string networkMode2Str(const NvDsInferNetworkMode type);

/* Custom unique_ptr subclass with deleter functions for TensorRT objects. */
template <class T>
class UniquePtrWDestroy : public std::unique_ptr<T, void (*)(T*)>
{
public:
    UniquePtrWDestroy(T* t = nullptr)
        : std::unique_ptr<T, void (*)(T*)>(t, [](T* t) {
              if (t)
                  t->destroy();
          }) {}
};

template <class T>
class SharedPtrWDestroy : public std::shared_ptr<T>
{
public:
    SharedPtrWDestroy(T* t = nullptr)
        : std::shared_ptr<T>(t, [](T* t) {
              if (t)
                  t->destroy();
          }) {}
};
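
Both wrappers call destroy() on the owned object instead of delete, matching the lifetime convention of TensorRT objects created through its factory functions. A minimal sketch of the mechanics using a stand-in type (FakeTrtObject is invented for illustration; in real code T would be a TensorRT class exposing destroy()):

struct FakeTrtObject
{
    /* TensorRT-style teardown: the object releases itself. */
    void destroy() { delete this; }
};

void useWrappers()
{
    /* destroy() is invoked automatically when the smart pointers go out of scope. */
    nvdsinfer::UniquePtrWDestroy<FakeTrtObject> unique(new FakeTrtObject);
    nvdsinfer::SharedPtrWDestroy<FakeTrtObject> shared(new FakeTrtObject);
}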

/* Wrapper class for loading a dynamic library (dlopen/dlsym) and reading
 * typed symbols from it. */
class DlLibHandle
{
public:
    DlLibHandle(const std::string& path, int mode = RTLD_LAZY);
    ~DlLibHandle();

    bool isValid() const { return m_LibHandle; }
    const std::string& getPath() const { return m_LibPath; }

    template <typename FuncPtr>
    FuncPtr symbol(const char* func)
    {
        assert(!string_empty(func));
        if (!m_LibHandle)
            return nullptr;
        return (FuncPtr)dlsym(m_LibHandle, func);
    }

    template <typename FuncPtr>
    FuncPtr symbol(const std::string& func)
    {
        return symbol<FuncPtr>(func.c_str());
    }

private:
    void* m_LibHandle{nullptr};
    const std::string m_LibPath;
};
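
DlLibHandle owns a dlopen() handle and provides typed symbol lookup; the READ_SYMBOL macro defined earlier derives both the pointer type and the symbol name from a function declaration so the two cannot drift apart. A sketch of loading a custom-library entry point (the library path and customLibInit() are made up for the example):

#include <memory>

/* Hypothetical entry point exported by a custom shared library. */
extern "C" bool customLibInit();

void loadCustomLib()
{
    auto lib = std::make_shared<nvdsinfer::DlLibHandle>(
        "/path/to/libnvds_custom.so", RTLD_LAZY);
    if (!lib->isValid())
        return;

    /* Expands to lib->symbol<bool (*)()>("customLibInit"). */
    auto initFunc = READ_SYMBOL(lib, customLibInit);
    if (initFunc)
        initFunc();
}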

/* Simple thread-safe queue template guarded by a mutex and condition variable. */
template <typename Container>
class GuardQueue
{
public:
    typedef typename Container::value_type T;
    void push(const T& data)
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Queue.push_back(data);
        m_Cond.notify_one();
    }
    T pop()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Cond.wait(lock, [this]() { return !m_Queue.empty(); });
        assert(!m_Queue.empty());
        T ret = std::move(*m_Queue.begin());
        m_Queue.erase(m_Queue.begin());
        return ret;
    }
    bool isEmpty()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        return m_Queue.empty();
    }
    void clear()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Queue.clear();
    }

private:
    std::mutex m_Mutex;
    std::condition_variable m_Cond;
    Container m_Queue;
};
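
GuardQueue wraps any sequence container that provides push_back/begin/erase/empty/clear with a mutex and a condition variable, so pop() blocks until an item is available. A small producer/consumer sketch (container choice and payload are arbitrary):

#include <deque>
#include <thread>

void guardQueueDemo()
{
    nvdsinfer::GuardQueue<std::deque<int>> jobs;

    std::thread producer([&jobs]() {
        for (int i = 0; i < 4; ++i)
            jobs.push(i);              /* wakes one waiting consumer */
    });

    std::thread consumer([&jobs]() {
        for (int i = 0; i < 4; ++i) {
            int job = jobs.pop();      /* blocks until an item is queued */
            (void)job;                 /* payload would be processed here */
        }
    });

    producer.join();
    consumer.join();
}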

/**
 * Get the size of the element from the data type.
 */
inline uint32_t
getElementSize(NvDsInferDataType t)
{
    switch (t)
    {
    case INT32:
    case FLOAT:
        return 4;
    case HALF:
        return 2;
    case INT8:
        return 1;
    default:
        dsInferError(
            "Failed to get element size on Unknown datatype:%d", (int)t);
        return 0;
    }
}
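
Combined with a layer's dimensions, getElementSize() yields the byte size of a tensor buffer. A hedged sketch (it assumes NvDsInferDims exposes numElements, as declared in nvdsinfer.h; the helper name is invented):

/* Bytes needed for one batch of a layer with the given dims and data type. */
inline size_t layerBufferBytes(
    const NvDsInferDims& dims, NvDsInferDataType type, int batchSize)
{
    return static_cast<size_t>(batchSize) * dims.numElements *
           nvdsinfer::getElementSize(type);
}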

/* Convert between TRT's nvinfer1::Dims representation and DeepStream's
 * NvDsInferDimsCHW/NvDsInferDims representation. */
nvinfer1::Dims ds2TrtDims(const NvDsInferDimsCHW& dims);
nvinfer1::Dims ds2TrtDims(const NvDsInferDims& dims);
NvDsInferDims trt2DsDims(const nvinfer1::Dims& dims);

/* Add batch size to provided dims to get full dims as nvinfer1::Dims. */
nvinfer1::Dims CombineDimsBatch(const NvDsInferDims& dims, int batch);
/* Split full dims provided in the form of nvinfer1::Dims into batch size and
 * layer dims. */
void SplitFullDims(
    const nvinfer1::Dims& fullDims, NvDsInferDims& dims, int& batch);

/* Convert from TRT's nvinfer1::Dims representation to DeepStream's
 * NvDsInferBatchDims representation. */
inline void
convertFullDims(const nvinfer1::Dims& fullDims, NvDsInferBatchDims& batchDims)
{
    SplitFullDims(fullDims, batchDims.dims, batchDims.batchSize);
}
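
These helpers translate between the TensorRT and DeepStream dimension structs and add or strip the leading batch dimension. A usage sketch (field order for NvDsInferDimsCHW follows nvdsinfer.h; the concrete values are arbitrary):

void dimsRoundTrip()
{
    /* A 3-channel 224x224 layer described in DeepStream terms. */
    NvDsInferDimsCHW chw{3, 224, 224};

    /* DeepStream -> TensorRT, then prepend a batch dimension of 8. */
    nvinfer1::Dims trtDims = nvdsinfer::ds2TrtDims(chw);
    NvDsInferDims dsDims = nvdsinfer::trt2DsDims(trtDims);
    nvinfer1::Dims fullDims = nvdsinfer::CombineDimsBatch(dsDims, 8);

    /* TensorRT full dims -> DeepStream per-sample dims plus batch size. */
    NvDsInferBatchDims batchDims;
    nvdsinfer::convertFullDims(fullDims, batchDims);   /* batchSize == 8 */
}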

void normalizeDims(NvDsInferDims& dims);

bool hasWildcard(const nvinfer1::Dims& dims);
bool hasWildcard(const NvDsInferDims& dims);

/* Equality / inequality operators implementation for nvinfer1::Dims */
bool operator<=(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator>(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator==(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator!=(const nvinfer1::Dims& a, const nvinfer1::Dims& b);

/* Equality / inequality operators implementation for NvDsInferDims */
bool operator<=(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator>(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator==(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator!=(const NvDsInferDims& a, const NvDsInferDims& b);


bool isValidOutputFormat(const std::string& fmt);
bool isValidOutputDataType(const std::string& dataType);
nvinfer1::DataType str2DataType(const std::string& dataType);
uint32_t str2TensorFormat(const std::string& fmt);

struct BuildParams;
bool validateIOTensorNames(const BuildParams& params,
    const nvinfer1::INetworkDefinition& network);
bool isValidDeviceType(const std::string& fmt);
bool isValidPrecisionType(const std::string& dataType);
nvinfer1::DataType str2PrecisionType(const std::string& dataType);
nvinfer1::DeviceType str2DeviceType(const std::string& deviceType);
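
The string helpers above validate and convert configuration values (for example per-layer precision or device placement strings) before they are applied to a TensorRT network. A minimal, hedged sketch; the literal value passed here is an assumption, since the accepted strings are defined in the implementation file:

#include <string>

void applyLayerPrecision(const std::string& precisionStr)
{
    /* Reject unsupported strings before converting to a TensorRT enum. */
    if (!nvdsinfer::isValidPrecisionType(precisionStr))
        return;

    nvinfer1::DataType precision = nvdsinfer::str2PrecisionType(precisionStr);
    (void)precision;   /* would then be applied, e.g. via ILayer::setPrecision() */
}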

} // namespace nvdsinfer

#endif