NVIDIA DeepStream SDK API Reference

6.4 Release
gstnvinfer_impl.h
#ifndef __GSTNVINFER_IMPL_H__
#define __GSTNVINFER_IMPL_H__

#include <string.h>
#include <sys/time.h>
#include <glib.h>
#include <gst/gst.h>

#include <vector>
#include <list>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>

#include "nvbufsurftransform.h"
#include "nvdsinfer_context.h"
#include "nvdsinfer_func_utils.h"
#include "nvdsmeta.h"
#include "nvdspreprocess_meta.h"
#include "nvtx3/nvToolsExt.h"
G_BEGIN_DECLS
typedef struct _GstNvInfer GstNvInfer;

/* Logging callback for messages emitted by the NvDsInferContext owned by a
 * gst-nvinfer instance identified by unique_id. */
void gst_nvinfer_logger(NvDsInferContextHandle handle, unsigned int unique_id,
    NvDsInferLogLevel log_level, const char* log_message, void* user_ctx);

G_END_DECLS

using NvDsInferContextInitParamsPtr = std::unique_ptr<NvDsInferContextInitParams>;
using NvDsInferContextPtr = std::shared_ptr<INvDsInferContext>;
/**
 * Holds info about one frame in a batch for inferencing.
 */
typedef struct {
  /** Ratios by which the frame / object crop was scaled to the network
   * resolution, in the horizontal and vertical directions. */
  gdouble scale_ratio_x = 0;
  gdouble scale_ratio_y = 0;
  /** Left / top padding offsets applied while scaling to network resolution. */
  guint offset_left = 0;
  guint offset_top = 0;
  /** Left / top coordinates of the ROI on which inference is done. */
  guint roi_left = 0;
  guint roi_top = 0;
  /** Metadata of the object / frame / ROI this entry corresponds to. */
  NvDsObjectMeta *obj_meta = nullptr;
  NvDsFrameMeta *frame_meta = nullptr;
  NvDsRoiMeta *roi_meta = nullptr;
  /** Index of this frame in the batched input GstBuffer. */
  guint batch_index = 0;
  /** Frame number of the frame from the source. */
  gulong frame_num = 0;
  /** Surface parameters of the input buffer the frame / crop was converted from. */
  NvBufSurfaceParams *input_surf_params = nullptr;
  /** Pointer to the converted (scaled / format-converted) frame memory given to
   * NvDsInferContext as input. */
  gpointer converted_frame_ptr = nullptr;
  /** Pointer to the structure holding inference history for the object. */
  std::weak_ptr<GstNvInferObjectHistory> history;
} GstNvInferFrame;

using GstNvInferObjHistory_MetaPair =
    std::pair<std::weak_ptr<GstNvInferObjectHistory>, NvDsObjectMeta *>;
/** Holds information about the batch of frames to be inferred. */
typedef struct {
  /** Vector of frames in the batch. */
  std::vector<GstNvInferFrame> frames;
  /** Input GstBuffer from which the batch was formed. */
  GstBuffer *inbuf = nullptr;
  /** Batch number of the input buffer. */
  gulong inbuf_batch_num = 0;
  /** Indicates that the output thread should only push the buffer downstream
   * without waiting for inference output. */
  gboolean push_buffer = FALSE;
  /** Marks this batch as an event marker; used only for NVTX ranges. */
  gboolean event_marker = FALSE;
  /** Buffer containing the intermediate conversion output for the batch. */
  GstBuffer *conv_buf = nullptr;
  nvtxRangeId_t nvtx_complete_buf_range = 0;
  /** Sync object for the asynchronous NvBufSurfTransform conversion call. */
  NvBufSurfTransformSyncObj_t sync_obj = NULL;
  /** Objects not inferred on in the current batch but pending attachment of
   * the latest available classification metadata. */
  std::vector<GstNvInferObjHistory_MetaPair> objs_pending_meta_attach;
} GstNvInferBatch;

/** Data type used for refcounting and managing the usage of NvDsInferContext's batch output and the output tensor buffers it contains. */
typedef struct
{
  /** Parent type. */
  GstMiniObject mini_object;
  /** NvDsInferContext pointer which holds the resource. */
  NvDsInferContextPtr infer_context;
  /** NvDsInferContextBatchOutput instance whose output tensor buffers are sent as meta data. */
  NvDsInferContextBatchOutput batch_output;
} GstNvInferTensorOutputObject;
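
/* Illustrative note (not part of the original header): because the leading
 * member is a GstMiniObject, gst-nvinfer can manage the lifetime of this
 * object with the standard GStreamer refcounting calls, e.g. (variable name
 * is hypothetical):
 *
 *   GstNvInferTensorOutputObject *out = ...;
 *   gst_mini_object_ref (GST_MINI_OBJECT (out));    // one ref per consumer of the tensor meta
 *   gst_mini_object_unref (GST_MINI_OBJECT (out));  // batch_output can be returned to the context on the last unref
 */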

namespace gstnvinfer {

/** Holds runtime model update status along with the error message, if any. */
struct ModelStatus
{
  /** Status of the model update. */
  NvDsInferStatus status;
  /** Config file used for the model update. */
  std::string cfg_file;
  /* Error message string if any. */
  std::string message;
};

/** C++ helper class written on top of GMutex/GCond. */
class LockGMutex
{
public:
  LockGMutex (GMutex &mutex)
    : m (mutex) {
    lock ();
  }
  ~LockGMutex () {
    if (locked)
      unlock ();
  }
  void lock ();
  void unlock ();
  void wait (GCond &cond);

private:
  GMutex &m;
  bool locked = false;
};
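
/* Usage sketch (illustrative, not part of the original header): the guard
 * follows RAII, so the mutex is released when the guard leaves scope; the
 * names below are hypothetical.
 *
 *   {
 *     LockGMutex guard (some_mutex);   // locks immediately
 *     guard.wait (some_cond);          // waits on the GCond, holds the lock again on return
 *   }                                  // destructor unlocks if still locked
 */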

/** Enum for the type of model update required. */
enum ModelLoadType
{
  /** Load a new model by just replacing the model engine; assumes no network architecture changes. */
  MODEL_LOAD_FROM_ENGINE,
  /** Load a new model with other configuration changes. */
  MODEL_LOAD_FROM_CONFIG,
  /** Request the model load thread to stop. */
  MODEL_LOAD_STOP
};

/* Helper class to manage the NvDsInferContext and runtime model updates. The
 * model can be updated at runtime by setting the "config-file-path" and/or
 * "model-engine-file" properties with the new config file / model engine file.
 *
 * The runtime update implementation creates and initializes a new
 * NvDsInferContext with the new parameters and, if successful, replaces the
 * current NvDsInferContext instance with the new one while taking care of
 * processing synchronization.
 *
 * Constraints of runtime model update:
 * - Model input resolution and channels should not change.
 * - Batch-size of the new model engine should be equal to or greater than
 *   gst-nvinfer's batch-size.
 * - Type of the model (Detection/Classification/Segmentation) should not
 *   change.
 *
 * Check the deepstream-test5-app README for more details on OTA / runtime
 * model update and sample test steps. A minimal application-side sketch is
 * also shown after this listing. */
class DsNvInferImpl
{
public:
  using ContextReplacementPtr =
      std::unique_ptr<std::tuple<NvDsInferContextPtr, NvDsInferContextInitParamsPtr, std::string>>;

  DsNvInferImpl (GstNvInfer *infer);
  ~DsNvInferImpl ();
  /* Start the model load thread. */
  NvDsInferStatus start ();
  /* Stop the model load thread. Release the NvDsInferContext. */
  void stop ();

  bool isContextReady () const { return m_InferCtx.get(); }

  /** Trigger loading of a new model in a separate thread. */
  bool triggerNewModel (const std::string &modelPath, ModelLoadType loadType);

  /** Replace the context with a newly loaded one, if pending; called from
   * submit_input_buffer. */
  NvDsInferStatus ensureReplaceNextContext ();
  /** Notify the application of the model update status. */
  void notifyLoadModelStatus (const ModelStatus &res);

  /** NvDsInferContext to be used for inferencing. */
  NvDsInferContextPtr m_InferCtx;

  /** NvDsInferContext initialization params. */
  NvDsInferContextInitParamsPtr m_InitParams;

private:
  /** Helper thread that loads new models in the background. */
  class ModelLoadThread
  {
  public:
    using ModelItem = std::tuple<std::string, ModelLoadType>;

    ModelLoadThread (DsNvInferImpl &impl);
    ~ModelLoadThread ();
    void queueModel (const std::string &modelPath, ModelLoadType type) {
      m_PendingModels.push (ModelItem (modelPath, type));
    }
  private:
    void Run ();

    DsNvInferImpl &m_Impl;
    std::thread m_Thread;
    nvdsinfer::GuardQueue<std::list<ModelItem>> m_PendingModels;
  };

  bool initNewInferModelParams (
      NvDsInferContextInitParams &newParams,
      const std::string &newModelPath, ModelLoadType loadType,
      const NvDsInferContextInitParams &oldParams);
  bool isNewContextValid (
      INvDsInferContext &newCtx, NvDsInferContextInitParams &newParam);
  bool triggerContextReplace (
      NvDsInferContextInitParamsPtr params, NvDsInferContextPtr ctx,
      const std::string &path);
  void loadModel (const std::string &path, ModelLoadType loadType);

  ContextReplacementPtr getNextReplacementUnlock ();
  NvDsInferStatus flushDataUnlock (LockGMutex &lock);
  NvDsInferStatus resetContextUnlock (
      NvDsInferContextPtr ctx, NvDsInferContextInitParamsPtr params,
      const std::string &path);

  GstNvInfer *m_GstInfer = nullptr;
  /** Thread used to load new models at runtime. */
  std::unique_ptr<ModelLoadThread> m_ModelLoadThread;
  ContextReplacementPtr m_NextContextReplacement;
};

}  // namespace gstnvinfer

#endif /* __GSTNVINFER_IMPL_H__ */
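
The runtime model-update path managed by DsNvInferImpl is driven from the application side purely through GObject properties, as noted in the class comment above. The sketch below is illustrative only and not part of this header; it assumes a running pipeline whose gst-nvinfer element is named "pgie", and a new config file that respects the constraints listed in the class comment (same input resolution/channels, equal or larger batch size, same network type). The element name, pipeline variable, and file path are placeholders.

/* Illustrative application-side sketch: trigger an OTA model update on a
 * running gst-nvinfer element. */
#include <gst/gst.h>

static void
request_model_update (GstPipeline *pipeline)
{
  GstElement *pgie = gst_bin_get_by_name (GST_BIN (pipeline), "pgie");

  /* Setting "config-file-path" (and/or "model-engine-file") on the running
   * element queues a model load in DsNvInferImpl's model load thread; the
   * active NvDsInferContext is swapped only after the new context has been
   * created and validated. */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "/path/to/updated_infer_config.txt",
      NULL);

  gst_object_unref (pgie);
}

On failure (for example, a config violating the constraints above), the current context keeps serving inference and the status is reported through notifyLoadModelStatus; see the deepstream-test5-app README referenced in the class comment for the supported end-to-end OTA flow.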