NVIDIA NvNeural SDK  2022.2
GPU inference framework for NVIDIA Nsight Deep Learning Designer
CudaTypes.h
Go to the documentation of this file.
1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23 
25 
26 #ifndef NVNEURAL_CUDATYPES_H
27 #define NVNEURAL_CUDATYPES_H
28 
29 #include <nvneural/CoreTypes.h>
30 #include <cuda.h>
31 
32 namespace nvneural {
33 
34 class INetworkBackendCuda;
36 #define NVNEURAL_INETWORKBACKENDCUDA_OBJECTCLASS "com.nvidia.backendcuda"
38 #define NVNEURAL_ICUDAMEMORYALLOCATOR_OBJECTCLASS "com.nvidia.memoryallocatorcuda"
39 
52 {
53 public:
// Interface TypeId for InterfaceOf purposes.
55  static const IRefObject::TypeId typeID = 0x121e6098096e5c97ul;
56 
// Allocates a new memory block of byteCount bytes with the given semantic and
// returns an opaque handle through pHandleOut.
64  virtual NeuralResult allocateMemoryBlock(MemoryHandle* pHandleOut, std::size_t byteCount, MemorySemantic semantic) noexcept = 0;
65 
// Frees a memory block previously returned by allocateMemoryBlock.
72  virtual NeuralResult freeMemoryBlock(MemoryHandle handle) noexcept = 0;
73 
// Converts a memory handle to a GPU virtual address.
78  virtual void* getAddressForMemoryBlock(MemoryHandle handle) const noexcept = 0;
79 
// Returns the buffer size associated with a memory handle.
89  virtual std::size_t getSizeForMemoryBlock(MemoryHandle handle) const noexcept = 0;
90 
// Adds a lock to a preexisting memory block.
// NOTE(review): lock semantics (pinning vs. refcounting) are not visible in
// this extract — confirm against the full header documentation.
104  virtual NeuralResult lockMemoryBlock(MemoryHandle handle) noexcept = 0;
105 
// Removes a lock from a preexisting memory block.
115  virtual NeuralResult unlockMemoryBlock(MemoryHandle handle) noexcept = 0;
116 
// Signals the allocator to release unused memory blocks back to the system.
127  virtual NeuralResult compactMemory() noexcept = 0;
128 };
129 
133 {
134 public:
// Interface TypeId for InterfaceOf purposes.
136  static const IRefObject::TypeId typeID = 0xd350a56002e2a077ul;
137 
// NOTE(review): a declaration on original line 139 was dropped by this
// extract (possibly a using-declaration for the base allocateMemoryBlock
// overload) — confirm against the full header.
139 
// Allocates a memory block of the requested size and associates it with the
// given tracking key/subkey pair for memory-usage reporting.
159  virtual NeuralResult allocateMemoryBlock(MemoryHandle* pHandleOut, size_t byteCount, MemorySemantic semantic, const char* pTrackingKey, const char* pTrackingSubkey) noexcept = 0;
160 
// Returns memory tracking data gathered for the given key and subkey.
168  virtual const MemoryTrackingData* getMemoryTrackingData(const char* pTrackingKey, const char* pTrackingSubkey) const noexcept = 0;
169 
// Returns an IStringList of the currently tracked keys.
181  virtual NeuralResult getMemoryTrackingKeys(IStringList** ppKeysOut) noexcept = 0;
182 
// Returns an IStringList of the subkeys of the given tracking key.
194  virtual NeuralResult getMemoryTrackingSubkeys(const char* pTrackingKey, IStringList** ppKeysOut) noexcept = 0;
195 
// Sets a potential tracking key.
206  virtual NeuralResult setMemoryTrackingKey(const char* pTrackingKey, const char* pTrackingSubkey) noexcept = 0;
207 };
208 
211 {
212 public:
// Interface TypeId for InterfaceOf purposes.
214  static const IRefObject::TypeId typeID = 0x467d6d0e91bcc332ul;
215 
// Parameter list of launch(): launches the function on the specified CUDA
// backend's stream with the given grid/block dimensions, kernel argument
// pointer array, and dynamic shared-memory byte count (smem).
// NOTE(review): the opening line "virtual NeuralResult launch(" (original
// line 229) was dropped by this extract.
230  INetworkBackendCuda* pBackend,
231  std::size_t gridSizeX,
232  std::size_t gridSizeY,
233  std::size_t gridSizeZ,
234  std::size_t blockSizeX,
235  std::size_t blockSizeY,
236  std::size_t blockSizeZ,
237  void** ppArguments,
238  std::uint32_t smem) const noexcept = 0;
239 
// Returns the CUmodule containing this function object.
244  virtual CUmodule module() const noexcept = 0;
245 
// Returns the CUfunction handle for this compiled function object.
250  virtual CUfunction function() const noexcept = 0;
251 
// Returns the size in bytes of the compiled binary image.
256  virtual std::size_t compiledBinarySize() const noexcept = 0;
257 
// Returns a pointer to the compiled binary image; pair with
// compiledBinarySize() for the valid byte range.
262  virtual const void* compiledBinary() const noexcept = 0;
263 };
264 
270 {
271 public:
// Interface TypeId for InterfaceOf purposes.
273  static const IRefObject::TypeId typeID = 0xe39c2816f916d342ul;
274 
// Params struct describing a compilation request.
// NOTE(review): the "struct CompilationDetails" opener (original line ~276)
// was dropped by this extract; only the body is visible here.
277  {
// Name identifying the module being compiled.
279  const char* pModuleName = nullptr;
// Source code of the module — presumably CUDA C++ text fed to the runtime
// compiler; confirm against the full header.
281  const char* pModuleSource = nullptr;
// Name of the entry-point function within the module source.
283  const char* pModuleEntryPoint = nullptr;
// Names of headers available to the compilation; presumably parallel with
// pHeaderContents (same count/order) — confirm.
286  IStringList* pHeaderNames = nullptr;
// Contents of the headers named in pHeaderNames.
291  IStringList* pHeaderContents = nullptr;
// Extra options passed through to the compiler.
294  IStringList* pAdditionalCompilerOptions = nullptr;
295  };
296 
// Returns the current target GPU architecture for compilation.
300  virtual const char* targetArchitecture() const noexcept = 0;
301 
// Sets the target GPU architecture used for subsequent compilations.
317  virtual NeuralResult setTargetArchitecture(const char* pTargetArch) noexcept = 0;
318 
// Compiles the request described by compilationDetails and returns the
// resulting function object through ppCompiledFunctionOut.
326  virtual NeuralResult compile(ICudaCompiledFunction** ppCompiledFunctionOut, const CompilationDetails& compilationDetails) noexcept = 0;
327 
// Loads an already-compiled binary (pCode, codeSize bytes) and returns a
// function object for the named entry point.
334  virtual NeuralResult loadCubin(ICudaCompiledFunction** ppCompiledFunctionOut, std::uint8_t* pCode, std::size_t codeSize, const char* pEntryPoint) noexcept = 0;
335 };
336 
346 {
347 public:
// Interface TypeId for InterfaceOf purposes.
349  static const IRefObject::TypeId typeID = 0x61f19c57a3032f9ul;
350 
// Returns the CUDA memory allocator interface.
352  virtual ICudaMemoryAllocator* getAllocator() const noexcept = 0;
353 
// Replaces the backend's CUDA memory allocator with pAllocator.
357  virtual NeuralResult setAllocator(ICudaMemoryAllocator* pAllocator) noexcept = 0;
358 
// Returns the backend's CUcontext.
360  virtual CUcontext getCudaContext() const noexcept = 0;
361 
// Returns the backend's CUdevice.
363  virtual CUdevice getCudaDevice() const noexcept = 0;
364 
// Returns the CUstream used by this backend.
369  virtual CUstream getCudaStream() const noexcept = 0;
370 
// Returns the runtime compiler interface associated with this backend.
372  virtual ICudaRuntimeCompiler* runtimeCompiler() const noexcept = 0;
373 };
374 
375 } // namespace nvneural
376 
377 #endif // NVNEURAL_CUDATYPES_H
Fundamental NvNeural data types are declared here.
MemorySemantic
Describes the intended purpose of allocated GPU memory.
Definition: CoreTypes.h:631
MemoryHandle__type * MemoryHandle
Opaque typedef used to represent INetworkBackend memory handles.
Definition: CoreTypes.h:626
NeuralResult
NeuralResult is a generic success/failure result type similar to COM HRESULT.
Definition: CoreTypes.h:275
Represents a runtime-compiled function object from ICudaRuntimeCompiler.
Definition: CudaTypes.h:211
virtual CUmodule module() const noexcept=0
Returns the CUmodule containing this function object.
virtual NeuralResult launch(INetworkBackendCuda *pBackend, std::size_t gridSizeX, std::size_t gridSizeY, std::size_t gridSizeZ, std::size_t blockSizeX, std::size_t blockSizeY, std::size_t blockSizeZ, void **ppArguments, std::uint32_t smem) const noexcept=0
Launches the function on the specified CUDA backend's stream.
Generic interface for CUDA device memory allocation with per-key memory usage tracking.
Definition: CudaTypes.h:133
virtual NeuralResult allocateMemoryBlock(MemoryHandle *pHandleOut, size_t byteCount, MemorySemantic semantic, const char *pTrackingKey, const char *pTrackingSubkey) noexcept=0
Allocates a memory block of the requested size and allows tracking of the memory block using a given ...
virtual NeuralResult getMemoryTrackingSubkeys(const char *pTrackingKey, IStringList **ppKeysOut) noexcept=0
Returns an IStringList of the subkeys of the given tracking key.
virtual NeuralResult getMemoryTrackingKeys(IStringList **ppKeysOut) noexcept=0
Returns an IStringList of the currently tracked keys.
virtual NeuralResult setMemoryTrackingKey(const char *pTrackingKey, const char *pTrackingSubkey) noexcept=0
Sets a potential tracking key.
virtual const MemoryTrackingData * getMemoryTrackingData(const char *pTrackingKey, const char *pTrackingSubkey) const noexcept=0
Gathers and returns memory tracking data for the given key and subkey.
Generic interface for CUDA device memory allocation.
Definition: CudaTypes.h:52
static const IRefObject::TypeId typeID
Interface TypeId for InterfaceOf purposes.
Definition: CudaTypes.h:55
virtual NeuralResult unlockMemoryBlock(MemoryHandle handle) noexcept=0
Removes a lock from a preexisting memory block.
virtual std::size_t getSizeForMemoryBlock(MemoryHandle handle) const noexcept=0
Returns the buffer size associated with a memory handle.
virtual NeuralResult compactMemory() noexcept=0
Signals the allocator to release unused memory blocks back to the system.
virtual void * getAddressForMemoryBlock(MemoryHandle handle) const noexcept=0
Converts a memory handle to a GPU virtual address.
virtual NeuralResult lockMemoryBlock(MemoryHandle handle) noexcept=0
Adds a lock to a preexisting memory block.
virtual NeuralResult freeMemoryBlock(MemoryHandle handle) noexcept=0
Frees a memory block.
virtual NeuralResult allocateMemoryBlock(MemoryHandle *pHandleOut, std::size_t byteCount, MemorySemantic semantic) noexcept=0
Allocates a new memory block and returns a handle to it.
Represents a runtime compiler that can transform CUDA source code into compiled functions.
Definition: CudaTypes.h:270
virtual const char * targetArchitecture() const noexcept=0
Returns the current target GPU architecture for compilation.
INetworkBackend companion interface with CUDA-specific functionality.
Definition: CudaTypes.h:346
virtual ICudaMemoryAllocator * getAllocator() const noexcept=0
Returns the CUDA memory allocator interface.
Base class for all objects, similar to COM's IUnknown.
Definition: CoreTypes.h:343
std::uint64_t TypeId
Every interface must define a unique TypeId. This should be randomized.
Definition: CoreTypes.h:349
IStringList represents an immutable collection of strings.
Definition: CoreTypes.h:1079
Params struct describing a compilation request.
Definition: CudaTypes.h:277
Structure describing details of an object's memory allocation behavior.
Definition: CoreTypes.h:931