Wrapper class for a Triton inference request.
Definition at line 68 of file sources/libs/nvdsinferserver/infer_trtis_server.h.
|
| | TrtServerRequest (TrtServerPtr server) |
| | Constructor. More...
|
| |
| NvDsInferStatus | init (const std::string &model, int64_t version, SharedBatchArray &inputs, const std::vector< std::string > &outputs, uint64_t reqId, const std::vector< TritonClassParams > &clasList) |
| | Create a new Triton inference request with the specified inputs and parameters. More...
|
| |
| NvDsInferStatus | setRequestComplete (TRITONSERVER_InferenceRequestReleaseFn_t requestCompleteCb, void *userPtr) |
| | Set the release callback function for the request. More...
|
| |
| NvDsInferStatus | setResponseComplete (ShrTritonAllocator &allocator, TRITONSERVER_InferenceResponseCompleteFn_t responseCompleteCb, void *responseUserPtr) |
| | Set the allocator and response callback for the request. More...
|
| |
| | TrtServerRequest (TrtServerPtr server) |
| | Constructor. More...
|
| |
| NvDsInferStatus | init (const std::string &model, int64_t version, SharedBatchArray &inputs, const std::vector< std::string > &outputs, uint64_t reqId, const std::vector< TritonClassParams > &clasList) |
| | Create a new Triton inference request with the specified inputs and parameters. More...
|
| |
| NvDsInferStatus | setRequestComplete (TRITONSERVER_InferenceRequestReleaseFn_t requestCompleteCb, void *userPtr) |
| | Set the release callback function for the request. More...
|
| |
| NvDsInferStatus | setResponseComplete (ShrTritonAllocator &allocator, TRITONSERVER_InferenceResponseCompleteFn_t responseCompleteCb, void *responseUserPtr) |
| | Set the allocator and response callback for the request. More...
|
| |
|
| static void | RequestOnRelease (TRITONSERVER_InferenceRequest *request, const uint32_t flags, void *userp) |
| | The callback function to release the request instance. More...
|
| |
| static void | RequestOnRelease (TRITONSERVER_InferenceRequest *request, const uint32_t flags, void *userp) |
| | The callback function to release the request instance. More...
|
| |
◆ TrtServerRequest() [1/2]
| nvdsinferserver::TrtServerRequest::TrtServerRequest |
( |
TrtServerPtr |
server | ) |
|
|
protected |
Constructor.
Save the server instance pointer and register the Triton request deletion function.
- Parameters
-
| [in] | server | Pointer to the Triton server instance. |
◆ ~TrtServerRequest() [1/2]
| nvdsinferserver::TrtServerRequest::~TrtServerRequest |
( |
| ) |
|
Destructor.
Releases the Triton inference request instance.
◆ TrtServerRequest() [2/2]
| nvdsinferserver::TrtServerRequest::TrtServerRequest |
( |
TrtServerPtr |
server | ) |
|
|
protected |
Constructor.
Save the server instance pointer and register the Triton request deletion function.
- Parameters
-
| [in] | server | Pointer to the Triton server instance. |
◆ ~TrtServerRequest() [2/2]
| nvdsinferserver::TrtServerRequest::~TrtServerRequest |
( |
| ) |
|
Destructor.
Releases the Triton inference request instance.
◆ bufId() [1/2]
| uint64_t nvdsinferserver::TrtServerRequest::bufId |
( |
| ) |
const |
|
inline |
◆ bufId() [2/2]
| uint64_t nvdsinferserver::TrtServerRequest::bufId |
( |
| ) |
const |
|
inline |
◆ classParams() [1/2]
| const std::map<std::string, TritonClassParams>& nvdsinferserver::TrtServerRequest::classParams |
( |
| ) |
const |
|
inline |
◆ classParams() [2/2]
| const std::map<std::string, TritonClassParams>& nvdsinferserver::TrtServerRequest::classParams |
( |
| ) |
const |
|
inline |
◆ id() [1/2]
| uint64_t nvdsinferserver::TrtServerRequest::id |
( |
| ) |
const |
|
inline |
◆ id() [2/2]
| uint64_t nvdsinferserver::TrtServerRequest::id |
( |
| ) |
const |
|
inline |
◆ init() [1/2]
| NvDsInferStatus nvdsinferserver::TrtServerRequest::init |
( |
const std::string & |
model, |
|
|
int64_t |
version, |
|
|
SharedBatchArray & |
inputs, |
|
|
const std::vector< std::string > & |
outputs, |
|
|
uint64_t |
reqId, |
|
|
const std::vector< TritonClassParams > & |
clasList |
|
) |
| |
|
protected |
Create a new Triton inference request with the specified inputs and parameters.
- Parameters
-
| [in] | model | Model name. |
| [in] | version | Model version. |
| [in] | inputs | Array of input batch buffers. |
| [in] | outputs | List of names of required output tensors. |
| [in] | reqId | ID of this request. |
| [in] | clasList | Triton classification parameters, if any. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ init() [2/2]
| NvDsInferStatus nvdsinferserver::TrtServerRequest::init |
( |
const std::string & |
model, |
|
|
int64_t |
version, |
|
|
SharedBatchArray & |
inputs, |
|
|
const std::vector< std::string > & |
outputs, |
|
|
uint64_t |
reqId, |
|
|
const std::vector< TritonClassParams > & |
clasList |
|
) |
| |
|
protected |
Create a new Triton inference request with the specified inputs and parameters.
- Parameters
-
| [in] | model | Model name. |
| [in] | version | Model version. |
| [in] | inputs | Array of input batch buffers. |
| [in] | outputs | List of names of required output tensors. |
| [in] | reqId | ID of this request. |
| [in] | clasList | Triton classification parameters, if any. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ model() [1/2]
| const std::string& nvdsinferserver::TrtServerRequest::model |
( |
| ) |
const |
|
inline |
◆ model() [2/2]
| const std::string& nvdsinferserver::TrtServerRequest::model |
( |
| ) |
const |
|
inline |
◆ outputs() [1/2]
| const std::vector<std::string>& nvdsinferserver::TrtServerRequest::outputs |
( |
| ) |
const |
|
inline |
◆ outputs() [2/2]
| const std::vector<std::string>& nvdsinferserver::TrtServerRequest::outputs |
( |
| ) |
const |
|
inline |
◆ ptr() [1/2]
| TRITONSERVER_InferenceRequest* nvdsinferserver::TrtServerRequest::ptr |
( |
| ) |
|
|
inline |
◆ ptr() [2/2]
| TRITONSERVER_InferenceRequest* nvdsinferserver::TrtServerRequest::ptr |
( |
| ) |
|
|
inline |
◆ releaseInputs() [1/2]
◆ releaseInputs() [2/2]
◆ RequestOnRelease() [1/2]
| static void nvdsinferserver::TrtServerRequest::RequestOnRelease |
( |
TRITONSERVER_InferenceRequest * |
request, |
|
|
const uint32_t |
flags, |
|
|
void * |
userp |
|
) |
| |
|
static protected
The callback function to release the request instance.
- Parameters
-
| [in] | request | Pointer to the request. |
| [in] | flags | Flags associated with the callback. |
| [in] | userp | User data pointer. |
◆ RequestOnRelease() [2/2]
| static void nvdsinferserver::TrtServerRequest::RequestOnRelease |
( |
TRITONSERVER_InferenceRequest * |
request, |
|
|
const uint32_t |
flags, |
|
|
void * |
userp |
|
) |
| |
|
static protected
The callback function to release the request instance.
- Parameters
-
| [in] | request | Pointer to the request. |
| [in] | flags | Flags associated with the callback. |
| [in] | userp | User data pointer. |
◆ setRequestComplete() [1/2]
Set the release callback function for the request.
- Parameters
-
| [in] | requestCompleteCb | The request release callback function. |
| [in] | userPtr | The user data pointer for the callback. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ setRequestComplete() [2/2]
Set the release callback function for the request.
- Parameters
-
| [in] | requestCompleteCb | The request release callback function. |
| [in] | userPtr | The user data pointer for the callback. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ setResponseComplete() [1/2]
Set the allocator and response callback for the request.
- Parameters
-
| [in] | allocator | Pointer to the output allocator instance. |
| [in] | responseCompleteCb | The response callback function. |
| [in] | responseUserPtr | The user data pointer. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ setResponseComplete() [2/2]
Set the allocator and response callback for the request.
- Parameters
-
| [in] | allocator | Pointer to the output allocator instance. |
| [in] | responseCompleteCb | The response callback function. |
| [in] | responseUserPtr | The user data pointer. |
- Returns
- NVDSINFER_SUCCESS or NVDSINFER_TRITON_ERROR.
◆ TrtISServer
The documentation for this class was generated from the following file: