NVIDIA DeepStream SDK API Reference

7.0 Release
infer_icontext.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
23 #ifndef __NVDSINFERSERVER_ICONTEXT_H__
24 #define __NVDSINFERSERVER_ICONTEXT_H__
25 
26 #ifdef __cplusplus
27 
28 #include <stdarg.h>
29 #include <condition_variable>
30 #include <functional>
31 #include <list>
32 #include <memory>
33 #include <mutex>
34 #include <queue>
35 
36 #include <infer_datatypes.h>
37 
38 namespace nvdsinferserver {
39 
/**
 * @brief Callback invoked when an inference request completes; receives the
 * completion status and the batch array holding the output buffers.
 */
using InferOutputCb = std::function<void(NvDsInferStatus, SharedIBatchArray)>;

/**
 * @brief Logging callback used by the context to emit messages at a given
 * NvDsInferLogLevel.
 */
using InferLoggingFunc =
    std::function<void(NvDsInferLogLevel, const char* msg)>;
50 
/**
 * @brief Abstract interface for an inference context: configure it once,
 * run batched inference requests, then deinitialize.
 *
 * NOTE(review): the initialize() -> run() -> deinit() ordering is presumed
 * from the method names — confirm against the implementing classes.
 */
class IInferContext {
public:
    /** Virtual destructor: instances are deleted through this base pointer. */
    virtual ~IInferContext() = default;

    /**
     * @brief Initialize the context from a configuration string.
     * @param prototxt configuration text; the parameter name suggests
     *                 protobuf text format — confirm with the parser.
     * @param logFunc  callback the context uses for log output.
     * @return NvDsInferStatus status code.
     */
    virtual NvDsInferStatus initialize(
        const std::string& prototxt, InferLoggingFunc logFunc) = 0;

    /**
     * @brief Run inference on one input batch.
     * @param input    batch array carrying the input buffers.
     * @param outputCb invoked with the completion status and output batch
     *                 (see InferOutputCb).
     * @return NvDsInferStatus status code for submitting the request.
     */
    virtual NvDsInferStatus run(
        SharedIBatchArray input, InferOutputCb outputCb) = 0;

    /**
     * @brief Deinitialize the context.
     * @return NvDsInferStatus status code.
     */
    virtual NvDsInferStatus deinit() = 0;

    /**
     * @brief Query the model network input information.
     * @param[out] networkInfo filled with the network input description.
     */
    virtual void getNetworkInputInfo(NvDsInferNetworkInfo &networkInfo) = 0;
};
94 
99 class ITritonServerInstance;
100 
101 } // namespace nvdsinferserver
102 
extern "C" {

/**
 * @brief Creates a Triton Inference Server (TRT-IS) inference context.
 * @param configStr    configuration text for the context.
 * @param configStrLen length of @a configStr in bytes.
 * @return new context, or presumably nullptr on failure — confirm; the
 *         caller appears to own the returned pointer (confirm disposal API).
 */
INFER_EXPORT_API nvdsinferserver::IInferContext* createInferTrtISContext(
    const char* configStr, uint32_t configStrLen);

/**
 * @brief Creates a simple Triton inference context (no config string taken).
 */
INFER_EXPORT_API nvdsinferserver::IInferContext*
createInferTritonSimpleContext();

/**
 * @brief Creates a Triton inference context that talks to the server over
 * gRPC, configured from the given configuration text.
 * @param configStr    configuration text for the context.
 * @param configStrLen length of @a configStr in bytes.
 */
INFER_EXPORT_API nvdsinferserver::IInferContext*
createInferTritonGrpcContext(const char* configStr, uint32_t configStrLen);

/**
 * @brief Initializes a Triton server instance from configuration text.
 * @param[out] instance receives the created ITritonServerInstance handle.
 * @param configStr     configuration text for the server instance.
 * @param configStrLen  length of @a configStr in bytes.
 * @return NvDsInferStatus status code.
 */
INFER_EXPORT_API NvDsInferStatus NvDsTritonServerInit(
    nvdsinferserver::ITritonServerInstance** instance, const char* configStr,
    uint32_t configStrLen);
147 
156 NvDsTritonServerDeinit(nvdsinferserver::ITritonServerInstance* instance);
157 
169  void* buf, size_t bufBytes,
170  const nvdsinferserver::InferBufferDescription& desc, uint32_t batchSize,
171  std::function<void(void* data)> freeFunc);
172 
179 NvDsInferServerCreateBatchArray();
180 
194 NvDsInferServerCreateStrBuf(
195  const std::vector<std::string>& strings,
196  const nvdsinferserver::InferDims& dims, uint32_t batchSize,
197  const std::string& name, bool isInput);
198 }
199 
200 #endif
201 
202 #endif
nvdsinferserver
This is a header file for pre-processing CUDA kernels with normalization and mean subtraction require...
Definition: infer_custom_process.h:24
nvdsinferserver::SharedIBatchBuffer
std::shared_ptr< IBatchBuffer > SharedIBatchBuffer
Definition: infer_datatypes.h:204
infer_datatypes.h
Header file for the data types used in the inference processing.
NvDsInferLogLevel
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
Definition: nvdsinfer.h:249
NvDsInferNetworkInfo
Holds information about the model network.
Definition: nvdsinfer.h:110
nvdsinferserver::SharedIBatchArray
std::shared_ptr< IBatchArray > SharedIBatchArray
Definition: infer_datatypes.h:205
INFER_EXPORT_API
Definition: infer_utils.h:33
nvdsinferserver::InferDims
Holds the information about the dimensions of a neural network layer.
Definition: infer_datatypes.h:146
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218
nvdsinferserver::InferBufferDescription
Holds the information about an inference buffer.
Definition: infer_datatypes.h:168