NVIDIA DeepStream SDK API Reference

6.4 Release
infer_lstm.h
Go to the documentation of this file.
1 
12 #ifndef __INFER_LSTM_CONTROL_H__
13 #define __INFER_LSTM_CONTROL_H__
14 
15 #include "infer_base_context.h"
16 #include "infer_common.h"
17 #include "infer_datatypes.h"
18 #include "infer_proto_utils.h"
19 #include "infer_utils.h"
20 
21 namespace nvdsinferserver {
22 
24 public:
25  LstmController(const ic::LstmParams& params, int devId, int maxBatchSize)
26  {
27  m_Params.CopyFrom(params);
28  m_DevId = devId;
29  m_MaxBatchSize = maxBatchSize;
30  }
// Default destructor: all members (protobuf message, containers, shared
// pointers) release their resources via RAII.
 31  ~LstmController() = default;
 32 
// Reports an inference failure to the controller. Declared here, defined
// out of line. NOTE(review): presumably wakes threads blocked on m_Cond
// (e.g. in waitAndGetInputs) so they can abort with `status` — confirm
// against the .cpp implementation.
 36  void notifyError(NvDsInferStatus status);
37  void destroy()
38  {
39  UniqLock locker(m_Mutex);
40  m_InProgress = 0;
41  m_Cond.notify_all();
42  locker.unlock();
43  m_LoopStateMap.clear();
44  m_LstmInputs.clear();
45  }
46 
 47 private:
// Verifies the backend's loop tensors are mutually compatible.
 48  // check input/output tensor names/dims/datatype must be the same
 49  NvDsInferStatus checkTensorInfo(BaseBackend& backend);
// Per-loop bookkeeping: associates one state output tensor with the
// input tensor it is fed back into on the next iteration.
 50  struct LoopState {
 51  std::string inputName;
 52  SharedCudaTensorBuf inputTensor;
 53  SharedBatchBuf outputTensor;
 54  bool keepOutputParsing = false;
 55  };
56 
 57 private:
// Owned copy of the LSTM loop configuration (set in the constructor).
 58  ic::LstmParams m_Params;
// CUDA device id passed at construction.
 59  int m_DevId = 0;
// Maximum batch size passed at construction.
 60  int m_MaxBatchSize = 1;
 61  // map<outputName, loopState>
 62  std::unordered_map<std::string, LoopState> m_LoopStateMap;
// Cached CUDA input tensor buffers for the loop; released by destroy().
 63  std::vector<SharedCudaTensorBuf> m_LstmInputs;
// In-progress counter; reset to 0 by destroy() before waking waiters.
 64  std::atomic<int32_t> m_InProgress{0};
// Mutex/condvar pair guarding the in-progress state (see destroy()).
 65  std::mutex m_Mutex;
 66  std::condition_variable m_Cond;
// NOTE(review): presumably recorded/awaited when feedback inputs become
// ready, on m_LstmStream — usage is not visible in this header; confirm
// in the .cpp.
 67  SharedCuEvent m_InputReadyEvent;
 68  SharedCuStream m_LstmStream;
69 };
70 
71 } // namespace nvdsinferserver
72 
73 #endif
nvdsinferserver
Copyright (c) 2021, NVIDIA CORPORATION.
Definition: infer_custom_process.h:28
nvdsinferserver::LstmController::destroy
void destroy()
Definition: infer_lstm.h:37
nvdsinferserver::SharedBatchBuf
std::shared_ptr< BaseBatchBuffer > SharedBatchBuf
Common buffer interfaces (internal).
Definition: infer_common.h:71
infer_datatypes.h
Header file for the data types used in the inference processing.
infer_utils.h
Header file containing utility functions and classes used by the nvinferserver low level library.
nvdsinferserver::LstmController::waitAndGetInputs
NvDsInferStatus waitAndGetInputs(SharedBatchArray &inputs)
infer_common.h
Header file of the common declarations for the nvinferserver library.
nvdsinferserver::LstmController::~LstmController
~LstmController()=default
nvdsinferserver::LstmController
Definition: infer_lstm.h:23
infer_proto_utils.h
nvdsinferserver::SharedCuStream
std::shared_ptr< CudaStream > SharedCuStream
Cuda based pointers.
Definition: infer_common.h:84
nvdsinferserver::LstmController::notifyError
void notifyError(NvDsInferStatus status)
nvdsinferserver::LstmController::LstmController
LstmController(const ic::LstmParams &params, int devId, int maxBatchSize)
Definition: infer_lstm.h:25
nvdsinferserver::SharedCuEvent
std::shared_ptr< CudaEvent > SharedCuEvent
Definition: infer_common.h:86
nvdsinferserver::BaseBackend
Base class of inference backend processing.
Definition: infer_base_backend.h:40
nvdsinferserver::LstmController::initInputState
NvDsInferStatus initInputState(BaseBackend &backend)
nvdsinferserver::LstmController::feedbackInputs
NvDsInferStatus feedbackInputs(SharedBatchArray &outTensors)
nvdsinferserver::UniqLock
std::unique_lock< std::mutex > UniqLock
Miscellaneous declarations.
Definition: infer_common.h:108
nvdsinferserver::SharedBatchArray
std::shared_ptr< BaseBatchArray > SharedBatchArray
Definition: infer_common.h:75
infer_base_context.h
Header file of the base class for inference context.
nvdsinferserver::SharedCudaTensorBuf
std::shared_ptr< CudaTensorBuf > SharedCudaTensorBuf
Definition: infer_common.h:91
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218