NVIDIA NvNeural SDK 2022.2
GPU inference framework for NVIDIA Nsight Deep Learning Designer
nvneural::INetworkBackend Member List

This is the complete list of members for nvneural::INetworkBackend, including all inherited members.

addRef() const noexcept=0  (nvneural::IRefObject, pure virtual)
allocateMemoryBlock(MemoryHandle *pHandle, size_t byteCount) noexcept=0  (nvneural::INetworkBackend, pure virtual)
bindCurrentThread() noexcept=0  (nvneural::INetworkBackend, pure virtual)
clearLoadedWeights() noexcept=0  (nvneural::INetworkBackend, pure virtual)
copyMemoryD2D(void *pDeviceDestination, const void *pDeviceSource, std::size_t byteCount) noexcept=0  (nvneural::INetworkBackend, pure virtual)
copyMemoryD2H(void *pHostDestination, const void *pDeviceSource, std::size_t byteCount) noexcept=0  (nvneural::INetworkBackend, pure virtual)
copyMemoryH2D(void *pDeviceDestination, const void *pHostSource, std::size_t byteCount) noexcept=0  (nvneural::INetworkBackend, pure virtual)
deviceIdentifier() const noexcept=0  (nvneural::INetworkBackend, pure virtual)
freeMemoryBlock(MemoryHandle handle) noexcept=0  (nvneural::INetworkBackend, pure virtual)
getAddressForMemoryBlock(MemoryHandle handle) noexcept=0  (nvneural::INetworkBackend, pure virtual)
getAddressForWeightsData(const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, TensorFormat format) const noexcept=0  (nvneural::INetworkBackend, pure virtual)
getDimensionsForWeightsData(TensorDimension *pDimensionOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, TensorFormat format) const noexcept=0  (nvneural::INetworkBackend, pure virtual)
getLibraryContext(ILibraryContext::LibraryId libraryId) noexcept=0  (nvneural::INetworkBackend, pure virtual)
getLibraryContext(ILibraryContext::LibraryId libraryId) const noexcept=0  (nvneural::INetworkBackend, pure virtual)
getSizeForMemoryBlock(MemoryHandle handle) noexcept=0  (nvneural::INetworkBackend, pure virtual)
getWeightsNamesForLayer(IStringList **ppListOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader) const noexcept=0  (nvneural::INetworkBackend, pure virtual)
id() const noexcept=0  (nvneural::INetworkBackend, pure virtual)
initializeFromDeviceIdentifier(const IBackendDeviceIdentifier *pDeviceIdentifier) noexcept=0  (nvneural::INetworkBackend, pure virtual)
initializeFromDeviceOrdinal(std::uint32_t deviceOrdinal) noexcept=0  (nvneural::INetworkBackend, pure virtual)
lockMemoryBlock(MemoryHandle handle) noexcept=0  (nvneural::INetworkBackend, pure virtual)
OptimizationCapability enum name  (nvneural::INetworkBackend)
queryInterface(TypeId interface) noexcept=0  (nvneural::IRefObject, pure virtual)
queryInterface(TypeId interface) const noexcept=0  (nvneural::IRefObject, pure virtual)
RefCount typedef  (nvneural::IRefObject)
registerLibraryContext(ILibraryContext *pLibraryContext) noexcept=0  (nvneural::INetworkBackend, pure virtual)
release() const noexcept=0  (nvneural::IRefObject, pure virtual)
saveImage(const ILayer *pLayer, const INetworkRuntime *pNetwork, IImage *pImage, ImageSpace imageSpace, size_t channels) noexcept=0  (nvneural::INetworkBackend, pure virtual)
setDeviceMemory(void *pDeviceDestination, std::uint8_t value, std::size_t byteCount) noexcept=0  (nvneural::INetworkBackend, pure virtual)
supportsOptimization(OptimizationCapability optimization) const noexcept=0  (nvneural::INetworkBackend, pure virtual)
synchronize() noexcept=0  (nvneural::INetworkBackend, pure virtual)
transformTensor(void *pDeviceDestination, TensorFormat destinationFormat, TensorDimension destinationSize, const void *pDeviceSource, TensorFormat sourceFormat, TensorDimension sourceSize) noexcept=0  (nvneural::INetworkBackend, pure virtual)
typeID  (nvneural::INetworkBackend, static)
TypeId typedef  (nvneural::IRefObject)
unlockMemoryBlock(MemoryHandle handle) noexcept=0  (nvneural::INetworkBackend, pure virtual)
updateTensor(const ILayer *pLayer, INetworkRuntime *pNetwork, TensorFormat format, MemoryHandle hOriginal, TensorDimension stepping, TensorDimension internalDimensions) noexcept=0  (nvneural::INetworkBackend, pure virtual)
uploadWeights(const void **ppUploadedWeightsOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, const void *pWeightsData, std::size_t weightsDataSize, TensorDimension weightsDim, TensorFormat format, bool memManagedExternally) noexcept=0  (nvneural::INetworkBackend, pure virtual)
~IRefObject()=default  (nvneural::IRefObject, protected virtual)
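
The members above fall into a few functional groups: reference counting inherited from nvneural::IRefObject (addRef, release, queryInterface), device setup (initializeFromDeviceOrdinal, initializeFromDeviceIdentifier, bindCurrentThread), raw device-memory management (allocateMemoryBlock, getAddressForMemoryBlock, copyMemoryH2D/D2H/D2D, setDeviceMemory, freeMemoryBlock, synchronize), and weights/tensor handling (uploadWeights, transformTensor, updateTensor, and the weights-query members). The sketch below is a minimal illustration of how the memory-management members might be chained together. It is not part of the documented API surface: the SDK header to include, the origin of the backend pointer, and the treatment of return values (assumed here to be status codes that real code should check) are assumptions.

// Illustrative sketch only. The include path, the origin of pBackend, and the
// status-code handling are assumptions; consult the SDK headers for the actual
// return types before relying on this.
#include <cstddef>
#include <cstdint>
#include <vector>
// #include the NvNeural header that declares nvneural::INetworkBackend here.

void roundTripThroughBackend(nvneural::INetworkBackend* pBackend,
                             const std::vector<float>& hostInput)
{
    // Select GPU 0 and attach the backend's device context to this thread.
    pBackend->initializeFromDeviceOrdinal(0);
    pBackend->bindCurrentThread();

    const std::size_t byteCount = hostInput.size() * sizeof(float);

    // Reserve a device-side block tracked by the backend's allocator.
    nvneural::MemoryHandle hBlock{};
    pBackend->allocateMemoryBlock(&hBlock, byteCount);

    // Resolve the handle to a raw device address (assumed to be a void*)
    // so it can be passed to the copy routines.
    void* const pDevice = pBackend->getAddressForMemoryBlock(hBlock);

    // Upload host data, then read it back to confirm the round trip.
    pBackend->copyMemoryH2D(pDevice, hostInput.data(), byteCount);

    std::vector<float> readback(hostInput.size());
    pBackend->copyMemoryD2H(readback.data(), pDevice, byteCount);

    // Backend copies may be asynchronous; block until device work completes.
    pBackend->synchronize();

    // Return the block to the backend's allocator.
    pBackend->freeMemoryBlock(hBlock);
}

In real integration code each call's status result would be checked, and the weights-oriented members (uploadWeights, clearLoadedWeights, getWeightsNamesForLayer) would come into play once the backend is attached to a network runtime.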