NVIDIA DeepStream SDK API Reference, 8.0 Release
infer_base_context.h (source listing)
  
 
 
#ifndef __INFER_BASE_CONTEXT_H__
#define __INFER_BASE_CONTEXT_H__

// ...
#include <condition_variable>
// ...
#include "nvdsinferserver_config.pb.h"

namespace ic = nvdsinferserver::config;

// ... (declaration of class InferBaseContext, abridged in this listing)

    NvDsInferStatus initialize(
        const std::string& prototxt, InferLoggingFunc logFunc) final;

    virtual NvDsInferStatus createNNBackend(
        const ic::BackendParams& params, /* ... */

    // ...
        const ic::PreProcessParams& params,
        std::vector<UniqPreprocessor>& processors) = 0;

    // ...
        const ic::InferenceConfig& config) = 0;

    void rawDataInferDone(/* ... */

    const ic::InferenceConfig& config() const { return m_Config; }

    InferLoggingFunc m_LoggingFunc;
    uint32_t m_UniqueID = 0;
    uint32_t m_MaxBatchSize = 1;
    bool m_Initialized = false;

    std::vector<UniqPreprocessor> m_Preprocessors;
    std::unordered_map<IPreprocessor*, IPreprocessor*> m_NextPreprocMap;

    ic::InferenceConfig m_Config;
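The m_NextPreprocMap member above links each preprocessor to the preprocessor that should consume its output, so the configured preprocessors form a simple chain. A minimal sketch of how such a next-pointer map can be walked; the names first, nextMap, and the function itself are illustrative and not taken from this header:

// Illustrative sketch only: iterate a preprocessor chain encoded as a
// "current -> next" map, in the style of m_NextPreprocMap above.
#include <unordered_map>

inline void forEachPreprocessor(
    IPreprocessor* first,                                              // hypothetical chain head
    const std::unordered_map<IPreprocessor*, IPreprocessor*>& nextMap) // current -> next links
{
    for (IPreprocessor* cur = first; cur != nullptr;) {
        // ... run `cur` on the batch here ...
        auto it = nextMap.find(cur);
        cur = (it != nextMap.end()) ? it->second : nullptr;            // stop when no successor
    }
}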
 
#define _MAX_LOG_LENGTH 4096

#define printMsg(level, tag_str, fmt, ...)                                  \
    do {                                                                    \
        char* baseName = strrchr((char*)__FILE__, '/');                     \
        baseName = (baseName) ? (baseName + 1) : (char*)__FILE__;           \
        std::vector<char> logMsgBuffer(_MAX_LOG_LENGTH, 0);                 \
        snprintf(logMsgBuffer.data(), _MAX_LOG_LENGTH - 1,                  \
            tag_str " %s() <%s:%d> [UID = %d]: " fmt,                       \
            __func__, baseName, __LINE__, uniqueId(), ##__VA_ARGS__);       \
        this->print(level, logMsgBuffer.data());                            \
    } while (0)
#define printError(fmt, ...)                                                 \
    do {                                                                     \
        printMsg(NVDSINFER_LOG_ERROR, "Error in", fmt, ##__VA_ARGS__);       \
    } while (0)

#define printWarning(fmt, ...)                                               \
    do {                                                                     \
        printMsg(NVDSINFER_LOG_WARNING, "Warning from", fmt, ##__VA_ARGS__); \
    } while (0)

#define printInfo(fmt, ...)                                                  \
    do {                                                                     \
        printMsg(NVDSINFER_LOG_INFO, "Info from", fmt, ##__VA_ARGS__);       \
    } while (0)

#define printDebug(fmt, ...)                                                 \
    do {                                                                     \
        printMsg(NVDSINFER_LOG_DEBUG, "DEBUG", fmt, ##__VA_ARGS__);          \
    } while (0)
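The print* macros above expand to calls on this->print() and uniqueId(), so they are meant to be used inside member functions of the context class or a subclass. A minimal usage sketch; the derived class MyContext and its loadModel() method are hypothetical and only illustrate the printf-style arguments:

// Hypothetical member function of a context subclass, showing typical use of
// the logging macros defined above (printf-style format plus arguments).
NvDsInferStatus MyContext::loadModel(const std::string& modelPath)
{
    printInfo("loading model from %s", modelPath.c_str());
    if (modelPath.empty()) {
        printError("model path is empty");
        return NVDSINFER_CONFIG_FAILED;   // NvDsInferStatus error code
    }
    printDebug("model %s scheduled for loading", modelPath.c_str());
    return NVDSINFER_SUCCESS;
}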
#define CTX_RETURN_NVINFER_ERROR(err, fmt, ...) \
    CHECK_NVINFER_ERROR_PRINT(                  \
        err, return ifStatus, printError, fmt, ##__VA_ARGS__)

#define CTX_RETURN_CUDA_ERR(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION(               \
        err, return NVDSINFER_CUDA_ERROR, printError, fmt, ##__VA_ARGS__)

// ...

#endif  // __INFER_BASE_CONTEXT_H__
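The two CTX_RETURN_* helpers wrap the CHECK_* macros from the common nvinferserver header: on failure they log through printError and return from the enclosing function (CTX_RETURN_CUDA_ERR maps a failed CUDA call to NVDSINFER_CUDA_ERROR, CTX_RETURN_NVINFER_ERROR returns the captured status). A hedged sketch of typical use; the member function, buffers, and the ensureInputLayout() helper are illustrative only:

// Hypothetical member function of a context subclass, showing the
// error-check-and-return helpers defined above.
#include <cuda_runtime_api.h>

NvDsInferStatus MyContext::copyInputToDevice(
    void* dst, const void* src, size_t bytes, cudaStream_t stream)
{
    // Returns NVDSINFER_CUDA_ERROR (and logs via printError) if the copy fails.
    CTX_RETURN_CUDA_ERR(
        cudaMemcpyAsync(dst, src, bytes, cudaMemcpyHostToDevice, stream),
        "failed to copy %zu input bytes to device", bytes);

    // Returns the failing NvDsInferStatus if the checked call does not succeed.
    CTX_RETURN_NVINFER_ERROR(
        ensureInputLayout(), "input layout check failed");   // hypothetical helper

    return NVDSINFER_SUCCESS;
}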
  
 
Referenced declarations and brief descriptions from this page:

InferBaseContext (the base class for handling the inference context):
    NvDsInferStatus initialize(const std::string& prototxt, InferLoggingFunc logFunc) final
    NvDsInferStatus run(SharedIBatchArray input, InferOutputCb outputCb) final
    NvDsInferStatus deinit() override
    ~InferBaseContext() override
    const ic::InferenceConfig& config() const
    const SharedDllHandle& customLib() const
    bool needPreprocess() const
    bool needCopyInputToHost() const
    void print(NvDsInferLogLevel l, const char* msg)
    virtual SharedCuStream& mainStream() = 0
    virtual void backendConsumedInputs(SharedBatchArray inputs)

Type aliases:
    SharedDllHandle   = std::shared_ptr<DlLibHandle>
    UniqBackend       = std::unique_ptr<BaseBackend>
    UniqPostprocessor = std::unique_ptr<BasePostprocessor>    (processor interfaces)
    SharedIBatchArray = std::shared_ptr<IBatchArray>
    SharedBatchArray  = std::shared_ptr<BaseBatchArray>
    SharedOptions     = std::shared_ptr<IOptions>
    SharedCuStream    = std::shared_ptr<CudaStream>           (CUDA-based pointers)
    InferCompleted    = std::function<void(NvDsInferStatus, SharedBatchArray)>

Enumerations:
    NvDsInferStatus     Enum for the status codes returned by NvDsInferContext.
    NVDSINFER_SUCCESS   NvDsInferContext operation succeeded.
    NvDsInferLogLevel   Enum for the log levels of NvDsInferContext.

Related classes and headers:
    BaseBackend: base class of inference backend processing.
    IPreprocessor: preprocessor interface class.
    Header file of the common declarations for the nvinferserver library.
    Inference context library interface header file.
    Header file for inference processing backend base class.
    This is a header file for pre-processing cuda kernels with normalization and mean subtraction require...
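Taken together, the signatures above describe the public lifecycle of a context: initialize() with a text-format (prototxt) configuration and a logging callback, run() per input batch with a completion callback, then deinit(). A minimal caller-side sketch under the following assumptions, none of which come from this page: the concrete context object is created elsewhere (no factory is declared here), InferLoggingFunc accepts a lambda taking (NvDsInferLogLevel, const char*), and the run() output callback is compatible with the InferCompleted signature listed above:

// Sketch only: drive a context through initialize -> run -> deinit.
#include <cstdio>
#include <memory>
#include <string>

NvDsInferStatus inferOnce(
    std::unique_ptr<InferBaseContext> ctx,   // assumed: concrete context created elsewhere
    const std::string& configText,           // prototxt-format nvdsinferserver config
    SharedIBatchArray batch)                 // assumed: already-populated input batch
{
    // Logging callback; signature assumed compatible with InferLoggingFunc.
    auto logFunc = [](NvDsInferLogLevel level, const char* msg) {
        std::fprintf(stderr, "[nvdsinferserver %d] %s\n", static_cast<int>(level), msg);
    };

    NvDsInferStatus status = ctx->initialize(configText, logFunc);
    if (status != NVDSINFER_SUCCESS)
        return status;

    // Completion callback; assumed compatible with
    // InferCompleted = std::function<void(NvDsInferStatus, SharedBatchArray)>.
    status = ctx->run(batch, [](NvDsInferStatus s, SharedBatchArray outputs) {
        // Consume `outputs` here once inference for this batch has completed.
        (void)s;
        (void)outputs;
    });

    ctx->deinit();
    return status;
}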