NVIDIA DeepStream SDK API Reference

7.0 Release
infer_extra_processor.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
20 #ifndef __NVDSINFERSERVER_EXTRA_PROCESSOR_H__
21 #define __NVDSINFERSERVER_EXTRA_PROCESSOR_H__
22 
23 #include "infer_base_backend.h"
24 #include "infer_common.h"
25 #include "infer_custom_process.h"
26 #include "infer_datatypes.h"
27 #include "infer_utils.h"
28 
29 namespace nvdsinferserver {
30 
31 using TensorMapPool = MapBufferPool<std::string, UniqCudaTensorBuf>;
32 using TensorMapPoolPtr = std::unique_ptr<TensorMapPool>;
33 
38 public:
41 
42  /*
43  * @brief Load custom processor from custom library.
44  */
46  SharedDllHandle dlHandle, const std::string& funcName, const std::string& config);
47 
48  /*
49  * @brief Allocate extra input resources including both CPU/GPU buffers.
50  */
52  BaseBackend& backend, const std::set<std::string>& excludes, int32_t poolSize, int gpuId);
53 
54  /*
55  * @brief Process extra input tensors per batched input.
56  */
58 
59  /*
60  * @brief Notify errors.
61  */
62  void notifyError(NvDsInferStatus status);
63 
64  /*
65  * @brief Inference done callback outputs.
66  */
68 
69  /*
70  * @brief Destroy all resources including custom processors.
71  */
73 
74 private:
75  bool requireLoop() const { return m_RequireInferLoop; }
76 
77  /*
78  * @brief Stores all input layers except primary input.
79  */
80  LayerDescriptionList m_ExtraInputLayers;
81  /*
82  * @brief Stores all input layers.
83  */
84  LayerDescriptionList m_FullInputLayers;
85  /*
86  * @brief Max batch size, 0 indicates no batching.
87  */
88  uint32_t m_maxBatch = 0;
89  /*
90  * @brief Flag indicating first dimension is dynamic size batch.
91  * Only valid if m_maxBatch == 0
92  */
93  bool m_firstDimDynamicBatch = false;
94 
95  bool m_RequireInferLoop = false;
96  UniqStreamManager m_StreamManager;
97  InferCustomProcessorPtr m_CustomProcessor;
98  TensorMapPoolPtr m_ExtInputHostPool;
99  TensorMapPoolPtr m_ExtInputGpuPool;
100  SharedCuStream m_Host2GpuStream;
101 };
102 
103 } // namespace nvdsinferserver
104 
105 #endif
nvdsinferserver
This is a header file for pre-processing cuda kernels with normalization and mean subtraction required by nvdsinfer.
Definition: infer_custom_process.h:24
nvdsinferserver::InferExtraProcessor::processExtraInputs
NvDsInferStatus processExtraInputs(SharedBatchArray &inputs)
nvdsinferserver::SharedDllHandle
std::shared_ptr< DlLibHandle > SharedDllHandle
Definition: infer_common.h:111
nvdsinferserver::InferCustomProcessorPtr
std::shared_ptr< IInferCustomProcessor > InferCustomProcessorPtr
Definition: infer_common.h:141
infer_datatypes.h
Header file for the data types used in the inference processing.
nvdsinferserver::LayerDescriptionList
std::vector< LayerDescription > LayerDescriptionList
Definition: infer_ibackend.h:58
infer_utils.h
Header file containing utility functions and classes used by the nvinferserver low level library.
nvdsinferserver::InferExtraProcessor::InferExtraProcessor
InferExtraProcessor()
nvdsinferserver::InferExtraProcessor::checkInferOutputs
NvDsInferStatus checkInferOutputs(SharedBatchArray &outputs, SharedOptions inOptions)
infer_common.h
Header file of the common declarations for the nvinferserver library.
nvdsinferserver::InferExtraProcessor::~InferExtraProcessor
~InferExtraProcessor()
nvdsinferserver::SharedOptions
std::shared_ptr< IOptions > SharedOptions
Definition: infer_common.h:73
nvdsinferserver::SharedCuStream
std::shared_ptr< CudaStream > SharedCuStream
Cuda based pointers.
Definition: infer_common.h:84
nvdsinferserver::InferExtraProcessor
Extra processing pre/post inference.
Definition: infer_extra_processor.h:37
infer_custom_process.h
nvdsinferserver::TensorMapPool
MapBufferPool< std::string, UniqCudaTensorBuf > TensorMapPool
Definition: infer_extra_processor.h:31
nvdsinferserver::TensorMapPoolPtr
std::unique_ptr< TensorMapPool > TensorMapPoolPtr
Definition: infer_extra_processor.h:32
nvdsinferserver::UniqStreamManager
std::unique_ptr< StreamManager > UniqStreamManager
Definition: infer_common.h:131
nvdsinferserver::InferExtraProcessor::destroy
NvDsInferStatus destroy()
nvdsinferserver::InferExtraProcessor::notifyError
void notifyError(NvDsInferStatus status)
nvdsinferserver::BaseBackend
Base class of inference backend processing.
Definition: infer_base_backend.h:40
infer_base_backend.h
Header file for inference processing backend base class.
nvdsinferserver::InferExtraProcessor::allocateExtraInputs
NvDsInferStatus allocateExtraInputs(BaseBackend &backend, const std::set< std::string > &excludes, int32_t poolSize, int gpuId)
nvdsinferserver::SharedBatchArray
std::shared_ptr< BaseBatchArray > SharedBatchArray
Definition: infer_common.h:75
nvdsinferserver::InferExtraProcessor::initCustomProcessor
NvDsInferStatus initCustomProcessor(SharedDllHandle dlHandle, const std::string &funcName, const std::string &config)
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:218