DriveWorks SDK Reference
3.0.4260 Release
For Test and Development only

DNN.h
Go to the documentation of this file.
1 // This code contains NVIDIA Confidential Information and is disclosed
3 // under the Mutual Non-Disclosure Agreement.
4 //
5 // Notice
6 // ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS." NVIDIA MAKES
7 // NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
8 // THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
9 // MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
10 //
11 // NVIDIA Corporation assumes no responsibility for the consequences of use of such
12 // information or for any infringement of patents or other rights of third parties that may
13 // result from its use. No license is granted by implication or otherwise under any patent
14 // or patent rights of NVIDIA Corporation. No third party distribution is allowed unless
15 // expressly authorized by NVIDIA. Details are subject to change without notice.
16 // This code supersedes and replaces all information previously supplied.
17 // NVIDIA Corporation products are not authorized for use as critical
18 // components in life support devices or systems without express written approval of
19 // NVIDIA Corporation.
20 //
21 // Copyright (c) 2016-2020 NVIDIA Corporation. All rights reserved.
22 //
23 // NVIDIA Corporation and its licensors retain all intellectual property and proprietary
24 // rights in and to this software and related documentation and any modifications thereto.
25 // Any use, reproduction, disclosure or distribution of this software and related
26 // documentation without an express license agreement from NVIDIA Corporation is
27 // strictly prohibited.
28 //
30 
48 #ifndef DW_DNN_H_
49 #define DW_DNN_H_
50 
#include <dw/core/Config.h>
#include <dw/core/Context.h>
#include <dw/core/Exports.h>
#include <dw/core/Status.h>
#include <dw/dnn/DataConditioner.h>
#include <dw/dnn/Tensor.h>

#include <driver_types.h>
57 
58 #ifdef __cplusplus
59 extern "C" {
60 #endif
61 
/// Handle representing a Deep Neural Network interface.
typedef struct dwDNNObject* dwDNNHandle_t;
/// Const (read-only) handle representing a Deep Neural Network interface.
typedef struct dwDNNObject const* dwConstDNNHandle_t;
66 
69 typedef struct
70 {
73 
76 typedef struct
77 {
79  const char8_t* layerName;
81 
84 typedef struct
85 {
87  size_t numCustomLayers;
89 
127 dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t* network, const char8_t* modelFilename,
128  const dwDNNPluginConfiguration* pluginConfiguration,
129  dwProcessorType processorType, dwContextHandle_t context);
130 
152 dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t* network,
153  const char8_t* modelContent,
154  uint32_t modelContentSize,
155  const dwDNNPluginConfiguration* pluginConfiguration,
156  dwProcessorType processorType, dwContextHandle_t context);
166 dwStatus dwDNN_reset(dwDNNHandle_t network);
167 
177 dwStatus dwDNN_release(dwDNNHandle_t network);
178 
204 dwStatus dwDNN_inferSIO(float32_t* d_output, const float32_t* d_input, uint32_t batchsize,
205  dwDNNHandle_t network);
206 
230 dwStatus dwDNN_inferRaw(float32_t* const* d_output, const float32_t* const* d_input, uint32_t batchsize,
231  dwDNNHandle_t network);
232 
245 dwStatus dwDNN_setCUDAStream(cudaStream_t stream, dwDNNHandle_t network);
246 
257 dwStatus dwDNN_getCUDAStream(cudaStream_t* stream, dwDNNHandle_t network);
258 
271 dwStatus dwDNN_getInputSize(dwBlobSize* blobSize, uint32_t blobIndex, dwDNNHandle_t network);
272 
285 dwStatus dwDNN_getOutputSize(dwBlobSize* blobSize, uint32_t blobIndex, dwDNNHandle_t network);
286 
299 dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties* tensorProps, uint32_t blobIndex, dwDNNHandle_t network);
300 
313 dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties* tensorProps, uint32_t blobIndex, dwDNNHandle_t network);
314 
325 dwStatus dwDNN_getInputBlobCount(uint32_t* count, dwDNNHandle_t network);
326 
336 dwStatus dwDNN_getOutputBlobCount(uint32_t* count, dwDNNHandle_t network);
337 
350 dwStatus dwDNN_getInputIndex(uint32_t* blobIndex, const char8_t* blobName, dwDNNHandle_t network);
351 
364 dwStatus dwDNN_getOutputIndex(uint32_t* blobIndex, const char8_t* blobName, dwDNNHandle_t network);
365 
377 dwStatus dwDNN_getMetaData(dwDNNMetaData* metaData, dwDNNHandle_t network);
378 
391 dwStatus dwDNN_infer(dwDNNTensorHandle_t* outputTensors, uint32_t outputTensorCount,
392  dwConstDNNTensorHandle_t* inputTensors, uint32_t inputTensorCount, dwDNNHandle_t network);
393 
394 #ifdef __cplusplus
395 }
396 #endif
397 
398 #endif // DW_DNN_H_
DW_API_PUBLIC dwStatus dwDNN_inferSIO(float32_t *d_output, const float32_t *d_input, uint32_t batchsize, dwDNNHandle_t network)
Forward pass from the first input blob to the first output blob (a shortcut for a single-input, single-output network).
DW_API_PUBLIC dwStatus dwDNN_getOutputBlobCount(uint32_t *count, dwDNNHandle_t network)
Gets the output blob count.
DW_API_PUBLIC dwStatus dwDNN_getOutputIndex(uint32_t *blobIndex, const char8_t *blobName, dwDNNHandle_t network)
Gets the index of an output blob with a given blob name.
float float32_t
Specifies POD types.
Definition: Types.h:70
Specified plugin configuration.
Definition: DNN.h:84
DW_API_PUBLIC dwStatus dwDNN_getInputBlobCount(uint32_t *count, dwDNNHandle_t network)
Gets the input blob count.
DW_API_PUBLIC dwStatus dwDNN_getCUDAStream(cudaStream_t *stream, dwDNNHandle_t network)
Gets the CUDA stream used by the network for infer operations.
Specifies custom layer.
Definition: DNN.h:76
DW_API_PUBLIC dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties *tensorProps, uint32_t blobIndex, dwDNNHandle_t network)
Gets the output tensor properties at blobIndex.
NVIDIA DriveWorks API: Core Methods
DW_API_PUBLIC dwStatus dwDNN_getInputSize(dwBlobSize *blobSize, uint32_t blobIndex, dwDNNHandle_t network)
Gets the input blob size at blobIndex.
Holds blob dimensions.
Definition: DNNTypes.h:60
DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t *network, const char8_t *modelFilename, const dwDNNPluginConfiguration *pluginConfiguration, dwProcessorType processorType, dwContextHandle_t context)
Creates and initializes a TensorRT Network from file.
DW_API_PUBLIC dwStatus dwDNN_inferRaw(float32_t *const *d_output, const float32_t *const *d_input, uint32_t batchsize, dwDNNHandle_t network)
Forwards pass from all input blobs to all output blobs.
DW_API_PUBLIC dwStatus dwDNN_reset(dwDNNHandle_t network)
Resets a given network.
NVIDIA DriveWorks API: DNNTensor Structures and Methods
DW_API_PUBLIC dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties *tensorProps, uint32_t blobIndex, dwDNNHandle_t network)
Gets the input tensor properties at blobIndex.
DW_API_PUBLIC dwStatus dwDNN_setCUDAStream(cudaStream_t stream, dwDNNHandle_t network)
Sets the CUDA stream for infer operations.
const char8_t * pluginLibraryPath
Path to a plugin shared object.
Definition: DNN.h:78
dwStatus
Status definition.
Definition: Status.h:166
size_t numCustomLayers
Number of custom layers.
Definition: DNN.h:87
dwProcessorType
Processor type definitions.
Definition: Types.h:159
NVIDIA DriveWorks API: Data Conditioner Methods
DW_API_PUBLIC dwStatus dwDNN_getInputIndex(uint32_t *blobIndex, const char8_t *blobName, dwDNNHandle_t network)
Gets the index of an input blob with a given blob name.
struct dwDNNTensorObject const * dwConstDNNTensorHandle_t
Definition: Tensor.h:60
DW_API_PUBLIC dwStatus dwDNN_getOutputSize(dwBlobSize *blobSize, uint32_t blobIndex, dwDNNHandle_t network)
Gets the output blob size at blobIndex.
struct dwDNNObject const * dwConstDNNHandle_t
Definition: DNN.h:65
const char8_t * layerName
Name of the custom layer.
Definition: DNN.h:79
DW_API_PUBLIC dwStatus dwDNN_infer(dwDNNTensorHandle_t *outputTensors, uint32_t outputTensorCount, dwConstDNNTensorHandle_t *inputTensors, uint32_t inputTensorCount, dwDNNHandle_t network)
Runs inference pipeline on the given input.
Specifies DNNTensor properties.
Definition: Tensor.h:93
struct dwDNNObject * dwDNNHandle_t
Handles representing Deep Neural Network interface.
Definition: DNN.h:64
const dwDNNCustomLayer * customLayers
Array of custom layers.
Definition: DNN.h:86
struct dwContextObject * dwContextHandle_t
Context handle.
Definition: Context.h:80
struct dwDNNTensorObject * dwDNNTensorHandle_t
Handle representing a Deep Neural Network tensor.
Definition: Tensor.h:59
dwDataConditionerParams dataConditionerParams
DataConditioner parameters for running this network.
Definition: DNN.h:71
char char8_t
Definition: Types.h:72
Specifies TensorRT model header.
Definition: DNN.h:69
DW_API_PUBLIC dwStatus dwDNN_getMetaData(dwDNNMetaData *metaData, dwDNNHandle_t network)
Returns the metadata for the associated network model.
DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t *network, const char8_t *modelContent, uint32_t modelContentSize, const dwDNNPluginConfiguration *pluginConfiguration, dwProcessorType processorType, dwContextHandle_t context)
Creates and initializes a TensorRT Network from memory.
DW_API_PUBLIC dwStatus dwDNN_release(dwDNNHandle_t network)
Releases a given network.
#define DW_API_PUBLIC
Definition: Exports.h:56
NVIDIA DriveWorks API: Core Status Methods