DriveWorks SDK Reference
4.0.0 Release
For Test and Development only

DNN.h
1 // This code contains NVIDIA Confidential Information and is disclosed
3 // under the Mutual Non-Disclosure Agreement.
4 //
5 // Notice
6 // ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS" NVIDIA MAKES
7 // NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
8 // THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
9 // MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
10 //
11 // NVIDIA Corporation assumes no responsibility for the consequences of use of such
12 // information or for any infringement of patents or other rights of third parties that may
13 // result from its use. No license is granted by implication or otherwise under any patent
14 // or patent rights of NVIDIA Corporation. No third party distribution is allowed unless
15 // expressly authorized by NVIDIA. Details are subject to change without notice.
16 // This code supersedes and replaces all information previously supplied.
17 // NVIDIA Corporation products are not authorized for use as critical
18 // components in life support devices or systems without express written approval of
19 // NVIDIA Corporation.
20 //
21 // Copyright (c) 2016-2020 NVIDIA Corporation. All rights reserved.
22 //
23 // NVIDIA Corporation and its licensors retain all intellectual property and proprietary
24 // rights in and to this software and related documentation and any modifications thereto.
25 // Any use, reproduction, disclosure or distribution of this software and related
26 // documentation without an express license agreement from NVIDIA Corporation is
27 // strictly prohibited.
28 //
30 
46 #ifndef DW_DNN_H_
47 #define DW_DNN_H_
48 
49 #include <dw/dnn/DataConditioner.h>
50 #include <dw/core/Config.h>
51 #include <dw/core/base/Status.h>
53 #include <dw/dnn/tensor/Tensor.h>
54 #include <driver_types.h>
55 
56 #ifdef __cplusplus
57 extern "C" {
58 #endif
59 
62 typedef struct dwDNNObject* dwDNNHandle_t;
63 typedef struct dwDNNObject const* dwConstDNNHandle_t;
64 
67 typedef struct
68 {
69  dwDataConditionerParams dataConditionerParams;
70 } dwDNNMetaData;
71 
74 typedef struct
75 {
76  const char8_t* pluginLibraryPath;
77  const char8_t* layerName;
78 } dwDNNCustomLayer;
79 
82 typedef struct
83 {
84  const dwDNNCustomLayer* customLayers;
85  size_t numCustomLayers;
86 } dwDNNPluginConfiguration;
87 
125 dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t* const network, const char8_t* const modelFilename,
126  const dwDNNPluginConfiguration* const pluginConfiguration,
127  dwProcessorType const processorType, dwContextHandle_t const context);
128 
150 dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t* const network,
151  const char8_t* const modelContent,
152  uint32_t const modelContentSize,
153  const dwDNNPluginConfiguration* const pluginConfiguration,
154  dwProcessorType const processorType, dwContextHandle_t const context);
164 dwStatus dwDNN_reset(dwDNNHandle_t const network);
165 
175 dwStatus dwDNN_release(dwDNNHandle_t const network);
176 
202 dwStatus dwDNN_inferSIO(float32_t* const dOutput, const float32_t* const dInput, uint32_t const batchsize,
203  dwDNNHandle_t const network);
204 
228 dwStatus dwDNN_inferRaw(float32_t* const* const dOutput, const float32_t* const* const dInput,
229  uint32_t const batchsize, dwDNNHandle_t const network);
230 
243 dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network);
244 
255 dwStatus dwDNN_getCUDAStream(cudaStream_t* const stream, dwDNNHandle_t const network);
256 
269 dwStatus dwDNN_getInputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);
270 
283 dwStatus dwDNN_getOutputSize(dwBlobSize* const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network);
284 
297 dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);
298 
311 dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties* const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network);
312 
323 dwStatus dwDNN_getInputBlobCount(uint32_t* const count, dwDNNHandle_t const network);
324 
334 dwStatus dwDNN_getOutputBlobCount(uint32_t* const count, dwDNNHandle_t const network);
335 
348 dwStatus dwDNN_getInputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);
349 
362 dwStatus dwDNN_getOutputIndex(uint32_t* const blobIndex, const char8_t* const blobName, dwDNNHandle_t const network);
363 
375 dwStatus dwDNN_getMetaData(dwDNNMetaData* const metaData, dwDNNHandle_t const network);
376 
389 dwStatus dwDNN_infer(dwDNNTensorHandle_t* const outputTensors, uint32_t const outputTensorCount,
390  dwConstDNNTensorHandle_t* const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network);
391 
392 #ifdef __cplusplus
393 }
394 #endif
395 
396 #endif // DW_DNN_H_
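The interface above follows a simple lifecycle: create the network from a serialized TensorRT model, query the tensor properties of its bound input and output blobs, optionally bind a CUDA stream, run inference, and release the handle. The sketch below illustrates that flow for a single-input, single-output model. It is not part of DNN.h: it assumes an already initialized dwContextHandle_t, a model serialized for the GPU, and the dwDNNTensor_create()/dwDNNTensor_destroy() helpers from Tensor.h, and it omits most error handling.

#include <cuda_runtime_api.h>
#include <dw/dnn/DNN.h>
#include <dw/dnn/tensor/Tensor.h>

// Sketch: run one inference pass on a single-input, single-output network.
// 'context' must already be initialized; 'modelFile' must point to a TensorRT
// model serialized for the current GPU.
dwStatus runInferenceOnce(const char8_t* modelFile, dwContextHandle_t context)
{
    dwDNNHandle_t dnn = DW_NULL_HANDLE;

    // No custom layers in this model, so no plugin configuration is passed.
    dwStatus status = dwDNN_initializeTensorRTFromFile(&dnn, modelFile, NULL,
                                                       DW_PROCESSOR_TYPE_GPU, context);
    if (status != DW_SUCCESS)
        return status;

    // Allocate one tensor per bound blob, matching the properties the network reports.
    dwDNNTensorProperties inProps;
    dwDNNTensorProperties outProps;
    dwDNN_getInputTensorProperties(&inProps, 0, dnn);
    dwDNN_getOutputTensorProperties(&outProps, 0, dnn);

    dwDNNTensorHandle_t inputTensor  = DW_NULL_HANDLE;
    dwDNNTensorHandle_t outputTensor = DW_NULL_HANDLE;
    dwDNNTensor_create(&inputTensor, &inProps, context);   // from Tensor.h
    dwDNNTensor_create(&outputTensor, &outProps, context);

    // Bind a dedicated CUDA stream for the infer operations.
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    dwDNN_setCUDAStream(stream, dnn);

    // ... fill 'inputTensor', e.g. through the DataConditioner ...

    // dwDNN_infer() takes arrays of output and input tensor handles plus their counts.
    dwConstDNNTensorHandle_t inputs[1]  = {inputTensor};
    dwDNNTensorHandle_t      outputs[1] = {outputTensor};
    status = dwDNN_infer(outputs, 1, inputs, 1, dnn);

    // ... interpret 'outputTensor' ...

    dwDNNTensor_destroy(inputTensor);
    dwDNNTensor_destroy(outputTensor);
    cudaStreamDestroy(stream);
    dwDNN_release(dnn);
    return status;
}

dwDNN_inferSIO() and dwDNN_inferRaw() are alternative entry points that operate on raw device pointers instead of tensor handles.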
DW_API_PUBLIC dwStatus dwDNN_reset(dwDNNHandle_t const network)
Resets a given network.
DW_API_PUBLIC dwStatus dwDNN_infer(dwDNNTensorHandle_t *const outputTensors, uint32_t const outputTensorCount, dwConstDNNTensorHandle_t *const inputTensors, uint32_t const inputTensorCount, dwDNNHandle_t const network)
Runs inference pipeline on the given input.
DW_API_PUBLIC dwStatus dwDNN_getInputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
Gets the input blob size at blobIndex.
float float32_t
Specifies POD types.
Definition: Types.h:70
Specifies plugin configuration.
Definition: DNN.h:82
DW_API_PUBLIC dwStatus dwDNN_getInputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
Gets the index of an input blob with a given blob name.
DW_API_PUBLIC dwStatus dwDNN_getInputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
Gets the input blob count.
DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromFile(dwDNNHandle_t *const network, const char8_t *const modelFilename, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
Creates and initializes a TensorRT Network from file.
Specifies plugin configuration.
Definition: DNN.h:74
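When the model contains layers implemented as TensorRT plugins, a dwDNNPluginConfiguration describing the plugin libraries and layer names is passed at initialization time. A minimal sketch, where the library path, layer name, and model file name are hypothetical placeholders and 'context' is an already initialized DriveWorks context:

#include <dw/dnn/DNN.h>

dwStatus initDNNWithPlugin(dwContextHandle_t context, dwDNNHandle_t* dnnOut)
{
    // Hypothetical values: the layer name must match the custom layer in the
    // model, and the shared object must implement the DriveWorks DNN plugin interface.
    dwDNNCustomLayer customLayer = {0};
    customLayer.pluginLibraryPath = "libmy_dnn_plugin.so";
    customLayer.layerName         = "MyCustomLayer";

    dwDNNPluginConfiguration pluginConfig = {0};
    pluginConfig.customLayers    = &customLayer;
    pluginConfig.numCustomLayers = 1;

    return dwDNN_initializeTensorRTFromFile(dnnOut, "model.bin", &pluginConfig,
                                            DW_PROCESSOR_TYPE_GPU, context);
}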
DW_API_PUBLIC dwStatus dwDNN_getInputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
Gets the input tensor properties at blobIndex.
DW_API_PUBLIC dwStatus dwDNN_inferRaw(float32_t *const *const dOutput, const float32_t *const *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from all input blobs to all output blobs.
const char8_t * pluginLibraryPath
Path to a plugin shared object.
Definition: DNN.h:76
dwStatus
Status definition.
Definition: Status.h:180
size_t numCustomLayers
Number of custom layers.
Definition: DNN.h:85
DW_API_PUBLIC dwStatus dwDNN_initializeTensorRTFromMemory(dwDNNHandle_t *const network, const char8_t *const modelContent, uint32_t const modelContentSize, const dwDNNPluginConfiguration *const pluginConfiguration, dwProcessorType const processorType, dwContextHandle_t const context)
Creates and initializes a TensorRT Network from memory.
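Initializing from memory is the same operation with the serialized model supplied as a byte buffer plus its size, which is convenient when the model is embedded in the binary or loaded through a custom I/O layer. A minimal sketch, assuming the bytes have already been read elsewhere:

#include <dw/dnn/DNN.h>

// Sketch: 'modelData'/'modelSize' describe a serialized TensorRT model that has
// already been loaded into memory; how it got there is up to the application.
dwStatus initDNNFromMemory(const char8_t* modelData, uint32_t modelSize,
                           dwContextHandle_t context, dwDNNHandle_t* dnnOut)
{
    return dwDNN_initializeTensorRTFromMemory(dnnOut, modelData, modelSize,
                                              NULL /* no custom layers */,
                                              DW_PROCESSOR_TYPE_GPU, context);
}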
DW_API_PUBLIC dwStatus dwDNN_getOutputBlobCount(uint32_t *const count, dwDNNHandle_t const network)
Gets the output blob count.
dwProcessorType
Processor type definitions.
Definition: Types.h:159
DW_API_PUBLIC dwStatus dwDNN_getOutputIndex(uint32_t *const blobIndex, const char8_t *const blobName, dwDNNHandle_t const network)
Gets the index of an output blob with a given blob name.
NVIDIA DriveWorks API: Data Conditioner Methods
Holds blob dimensions.
Definition: Types.h:560
DW_API_PUBLIC dwStatus dwDNN_setCUDAStream(cudaStream_t const stream, dwDNNHandle_t const network)
Sets the CUDA stream for infer operations.
DW_API_PUBLIC dwStatus dwDNN_getMetaData(dwDNNMetaData *const metaData, dwDNNHandle_t const network)
Returns the metadata for the associated network model.
DW_API_PUBLIC dwStatus dwDNN_getOutputSize(dwBlobSize *const blobSize, uint32_t const blobIndex, dwDNNHandle_t const network)
Gets the output blob size at blobIndex.
NVIDIA DriveWorks API: DNNTensor Structures and Methods
struct dwDNNTensorObject const * dwConstDNNTensorHandle_t
Definition: Tensor.h:58
struct dwDNNObject const * dwConstDNNHandle_t
Definition: DNN.h:63
const char8_t * layerName
Name of the custom layer.
Definition: DNN.h:77
Specifies DNNTensor properties.
Definition: Tensor.h:91
DW_API_PUBLIC dwStatus dwDNN_release(dwDNNHandle_t const network)
Releases a given network.
struct dwDNNObject * dwDNNHandle_t
Handles representing Deep Neural Network interface.
Definition: DNN.h:62
const dwDNNCustomLayer * customLayers
Array of custom layers.
Definition: DNN.h:84
struct dwContextObject * dwContextHandle_t
Context handle.
Definition: Context.h:79
struct dwDNNTensorObject * dwDNNTensorHandle_t
Handles representing DNN tensors.
Definition: Tensor.h:57
dwDataConditionerParams dataConditionerParams
DataConditioner parameters for running this network.
Definition: DNN.h:69
NVIDIA DriveWorks API: Core Methods
char char8_t
Definition: Types.h:72
Specifies TensorRT model header.
Definition: DNN.h:67
DW_API_PUBLIC dwStatus dwDNN_inferSIO(float32_t *const dOutput, const float32_t *const dInput, uint32_t const batchsize, dwDNNHandle_t const network)
Runs a forward pass from the first input blob to the first output blob (a shortcut for a single input - single output network).
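dwDNN_inferSIO() and dwDNN_inferRaw() work directly on device buffers sized according to the blob dimensions reported by dwDNN_getInputSize()/dwDNN_getOutputSize(). A minimal sketch of the single-input/single-output path; the dwBlobSize field names (batchsize, channels, height, width) are assumed from Types.h, and 'dnn' is an already initialized network handle:

#include <cuda_runtime_api.h>
#include <dw/dnn/DNN.h>

// Sketch: raw-buffer inference on the first input/output blob pair.
void inferWithRawBuffers(dwDNNHandle_t dnn)
{
    // Query the dimensions of the first input and output blobs.
    dwBlobSize inSize;
    dwBlobSize outSize;
    dwDNN_getInputSize(&inSize, 0, dnn);
    dwDNN_getOutputSize(&outSize, 0, dnn);

    // Device buffers must hold the full blob volume
    // (dwBlobSize field names assumed from Types.h).
    size_t inElems  = (size_t)inSize.batchsize * inSize.channels * inSize.height * inSize.width;
    size_t outElems = (size_t)outSize.batchsize * outSize.channels * outSize.height * outSize.width;

    float32_t* dInput  = NULL;
    float32_t* dOutput = NULL;
    cudaMalloc((void**)&dInput, inElems * sizeof(float32_t));
    cudaMalloc((void**)&dOutput, outElems * sizeof(float32_t));

    // ... copy preprocessed input data into dInput ...

    // Forward pass from the first input blob to the first output blob.
    dwDNN_inferSIO(dOutput, dInput, inSize.batchsize, dnn);

    // ... copy results out of dOutput ...

    cudaFree(dInput);
    cudaFree(dOutput);
}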
DW_API_PUBLIC dwStatus dwDNN_getOutputTensorProperties(dwDNNTensorProperties *const tensorProps, uint32_t const blobIndex, dwDNNHandle_t const network)
Gets the output tensor properties at blobIndex.
DW_API_PUBLIC dwStatus dwDNN_getCUDAStream(cudaStream_t *const stream, dwDNNHandle_t const network)
Gets the CUDA stream used for infer operations.
#define DW_API_PUBLIC
Definition: Exports.h:54
NVIDIA DriveWorks API: Core Status Methods