NVIDIA DeepStream SDK API Reference

6.4 Release
infer_icontext.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
28 #ifndef __NVDSINFERSERVER_ICONTEXT_H__
29 #define __NVDSINFERSERVER_ICONTEXT_H__
30 
31 #ifdef __cplusplus
32 
33 #include <stdarg.h>
34 #include <condition_variable>
35 #include <functional>
36 #include <list>
37 #include <memory>
38 #include <mutex>
39 #include <queue>
40 
41 #include <infer_datatypes.h>
42 
43 namespace nvdsinferserver {
44 
// Callback type handed to IInferContext::run(): invoked with the completion
// status and the batch array holding the inference outputs.
48 using InferOutputCb = std::function<void(NvDsInferStatus, SharedIBatchArray)>;
49 
// Logging callback type: receives the log level and a null-terminated
// message string.
53 using InferLoggingFunc =
54  std::function<void(NvDsInferLogLevel, const char* msg)>;
55 
// Abstract inference-context interface. Concrete instances are produced by
// the factory functions declared below (TensorRT-IS, Triton simple, Triton
// gRPC). Virtual destructor is defaulted so deleting through this base is
// safe.
59 class IInferContext {
60 public:
61  virtual ~IInferContext() = default;
62 
// Initializes the context from a configuration string and installs the
// logging callback. Returns an NvDsInferStatus code.
// NOTE(review): parameter name suggests the config is prototxt text rather
// than a file path — confirm against the SDK documentation.
71  virtual NvDsInferStatus initialize(
72  const std::string& prototxt, InferLoggingFunc logFunc) = 0;
73 
// Runs inference on one input batch; outputCb receives the completion
// status and the output batch (see InferOutputCb above).
82  virtual NvDsInferStatus run(
83  SharedIBatchArray input, InferOutputCb outputCb) = 0;
84 
// Deinitializes the context and returns a status code.
90  virtual NvDsInferStatus deinit() = 0;
91 
// Fills networkInfo with the model network's input information.
97  virtual void getNetworkInputInfo(NvDsInferNetworkInfo &networkInfo) = 0;
98 };
99 
104 class ITritonServerInstance;
105 
106 } // namespace nvdsinferserver
107 
108 extern "C" {
109 
// Creates a TensorRT-Inference-Server inference context from a config
// string of configStrLen bytes. Returns a heap-allocated IInferContext;
// ownership transfers to the caller.
INFER_EXPORT_API nvdsinferserver::IInferContext* createInferTrtISContext(
 119  const char* configStr, uint32_t configStrLen);
120 
// Creates a simple Triton inference context; takes no configuration here.
// Returns a heap-allocated IInferContext; ownership transfers to the caller.
INFER_EXPORT_API nvdsinferserver::IInferContext*
127 createInferTritonSimpleContext();
128 
// Creates a Triton gRPC inference context from a config string of
// configStrLen bytes. Returns a heap-allocated IInferContext; ownership
// transfers to the caller.
INFER_EXPORT_API nvdsinferserver::IInferContext*
138 createInferTritonGrpcContext(const char* configStr, uint32_t configStrLen);
139 
// Initializes the Triton server instance from a config string of
// configStrLen bytes, returning the instance through *instance and a
// status code. Pair with NvDsTritonServerDeinit (declared below).
INFER_EXPORT_API NvDsInferStatus NvDsTritonServerInit(
 150  nvdsinferserver::ITritonServerInstance** instance, const char* configStr,
 151  uint32_t configStrLen);
152 
161 NvDsTritonServerDeinit(nvdsinferserver::ITritonServerInstance* instance);
162 
174  void* buf, size_t bufBytes,
175  const nvdsinferserver::InferBufferDescription& desc, uint32_t batchSize,
176  std::function<void(void* data)> freeFunc);
177 
184 NvDsInferServerCreateBatchArray();
185 
199 NvDsInferServerCreateStrBuf(
200  const std::vector<std::string>& strings,
201  const nvdsinferserver::InferDims& dims, uint32_t batchSize,
202  const std::string& name, bool isInput);
203 }
204 
205 #endif
206 
207 #endif
208 
nvdsinferserver
Copyright (c) 2021, NVIDIA CORPORATION.
Definition: infer_custom_process.h:28
nvdsinferserver::SharedIBatchBuffer
std::shared_ptr< IBatchBuffer > SharedIBatchBuffer
Definition: infer_datatypes.h:204
infer_datatypes.h
Header file for the data types used in the inference processing.
NvDsInferLogLevel
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
Definition: nvdsinfer.h:249
NvDsInferNetworkInfo
Holds information about the model network.
Definition: nvdsinfer.h:110
nvdsinferserver::SharedIBatchArray
std::shared_ptr< IBatchArray > SharedIBatchArray
Definition: infer_datatypes.h:205
INFER_EXPORT_API
Definition: infer_utils.h:33
nvdsinferserver::InferDims
Holds the information about the dimensions of a neural network layer.
Definition: infer_datatypes.h:146
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218
nvdsinferserver::InferBufferDescription
Holds the information about an inference buffer.
Definition: infer_datatypes.h:168