NVIDIA NvNeural SDK 2022.2
GPU inference framework for NVIDIA Nsight Deep Learning Designer
nvneural::INetworkBackend2 Member List
This is the complete list of members for nvneural::INetworkBackend2, including all inherited members.
Member | Class | Attributes |
addRef() const noexcept=0 | nvneural::IRefObject | pure virtual |
allocateMemoryBlock(MemoryHandle *pHandle, size_t byteCount, const char *pTrackingKey) noexcept=0 | nvneural::INetworkBackend2 | pure virtual |
allocateMemoryBlock(MemoryHandle *pHandle, size_t byteCount) noexcept=0 | nvneural::INetworkBackend2 | |
nvneural::INetworkBackend::allocateMemoryBlock(MemoryHandle *pHandle, size_t byteCount) noexcept=0 | nvneural::INetworkBackend | pure virtual |
bindCurrentThread() noexcept=0 | nvneural::INetworkBackend | pure virtual |
clearLoadedWeights() noexcept=0 | nvneural::INetworkBackend | pure virtual |
copyMemoryD2D(void *pDeviceDestination, const void *pDeviceSource, std::size_t byteCount) noexcept=0 | nvneural::INetworkBackend | pure virtual |
copyMemoryD2H(void *pHostDestination, const void *pDeviceSource, std::size_t byteCount) noexcept=0 | nvneural::INetworkBackend | pure virtual |
copyMemoryH2D(void *pDeviceDestination, const void *pHostSource, std::size_t byteCount) noexcept=0 | nvneural::INetworkBackend | pure virtual |
deviceIdentifier() const noexcept=0 | nvneural::INetworkBackend | pure virtual |
freeMemoryBlock(MemoryHandle handle) noexcept=0 | nvneural::INetworkBackend | pure virtual |
getAddressForMemoryBlock(MemoryHandle handle) noexcept=0 | nvneural::INetworkBackend | pure virtual |
getAddressForWeightsData(const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, TensorFormat format) const noexcept=0 | nvneural::INetworkBackend | pure virtual |
getDimensionsForWeightsData(TensorDimension *pDimensionOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, TensorFormat format) const noexcept=0 | nvneural::INetworkBackend | pure virtual |
getLibraryContext(ILibraryContext::LibraryId libraryId) noexcept=0 | nvneural::INetworkBackend | pure virtual |
getLibraryContext(ILibraryContext::LibraryId libraryId) const noexcept=0 | nvneural::INetworkBackend | pure virtual |
getMemoryTrackingData(const char *pTrackingKey, const char *pTrackingSubkey) const noexcept=0 | nvneural::INetworkBackend2 | pure virtual |
getMemoryTrackingKeys(IStringList **ppKeysOut) noexcept=0 | nvneural::INetworkBackend2 | pure virtual |
getMemoryTrackingSubkeys(const char *pTrackingKey, IStringList **ppKeysOut) noexcept=0 | nvneural::INetworkBackend2 | pure virtual |
getSizeForMemoryBlock(MemoryHandle handle) noexcept=0 | nvneural::INetworkBackend | pure virtual |
getWeightsNamesForLayer(IStringList **ppListOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader) const noexcept=0 | nvneural::INetworkBackend | pure virtual |
id() const noexcept=0 | nvneural::INetworkBackend | pure virtual |
initializeFromDeviceIdentifier(const IBackendDeviceIdentifier *pDeviceIdentifier) noexcept=0 | nvneural::INetworkBackend | pure virtual |
initializeFromDeviceOrdinal(std::uint32_t deviceOrdinal) noexcept=0 | nvneural::INetworkBackend | pure virtual |
lockMemoryBlock(MemoryHandle handle) noexcept=0 | nvneural::INetworkBackend | pure virtual |
OptimizationCapability enum name | nvneural::INetworkBackend | |
queryInterface(TypeId interface) noexcept=0 | nvneural::IRefObject | pure virtual |
queryInterface(TypeId interface) const noexcept=0 | nvneural::IRefObject | pure virtual |
RefCount typedef | nvneural::IRefObject | |
registerLibraryContext(ILibraryContext *pLibraryContext) noexcept=0 | nvneural::INetworkBackend | pure virtual |
release() const noexcept=0 | nvneural::IRefObject | pure virtual |
saveImage(const ILayer *pLayer, const INetworkRuntime *pNetwork, IImage *pImage, ImageSpace imageSpace, size_t channels) noexcept=0 | nvneural::INetworkBackend | pure virtual |
setDeviceMemory(void *pDeviceDestination, std::uint8_t value, std::size_t byteCount) noexcept=0 | nvneural::INetworkBackend | pure virtual |
setMemoryTrackingKey(const char *pTrackingKey, const char *pTrackingSubkey) noexcept=0 | nvneural::INetworkBackend2 | pure virtual |
supportsOptimization(OptimizationCapability optimization) const noexcept=0 | nvneural::INetworkBackend | pure virtual |
synchronize() noexcept=0 | nvneural::INetworkBackend | pure virtual |
transformTensor(void *pDeviceDestination, TensorFormat destinationFormat, TensorDimension destinationSize, const void *pDeviceSource, TensorFormat sourceFormat, TensorDimension sourceSize) noexcept=0 | nvneural::INetworkBackend | pure virtual |
TypeId typedef | nvneural::IRefObject | |
typeID | nvneural::INetworkBackend2 | static |
unlockMemoryBlock(MemoryHandle handle) noexcept=0 | nvneural::INetworkBackend | pure virtual |
updateTensor(const ILayer *pLayer, INetworkRuntime *pNetwork, TensorFormat format, MemoryHandle hOriginal, TensorDimension stepping, TensorDimension internalDimensions) noexcept=0 | nvneural::INetworkBackend | pure virtual |
uploadWeights(const void **ppUploadedWeightsOut, const ILayer *pLayer, const IWeightsLoader *pOriginWeightLoader, const char *pName, const void *pWeightsData, std::size_t weightsDataSize, TensorDimension weightsDim, TensorFormat format, bool memManagedExternally) noexcept=0 | nvneural::INetworkBackend | pure virtual |
~IRefObject()=default | nvneural::IRefObject | protected virtual |
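
INetworkBackend2 extends INetworkBackend with keyed memory tracking: the three-argument allocateMemoryBlock overload tags a device allocation with a tracking key, setMemoryTrackingKey labels allocations that are not otherwise attributed, and the getMemoryTracking* members enumerate and query the recorded keys. The sketch below is not taken from the SDK documentation; it simply strings these members together under stated assumptions (header names, return-type handling, MemoryHandle initialization, and IStringList reference counting are guesses to verify against the shipped headers).

```cpp
// Minimal usage sketch (an assumption-laden illustration, not SDK sample code).
// Assumed headers; verify the exact names against your NvNeural SDK layout.
#include <nvneural/CoreTypes.h>
#include <nvneural/NetworkTypes.h>

#include <cstddef>

// Allocates a tracked device block, uploads host data into it, and lists the
// tracking keys recorded so far. Error handling is omitted; most of these
// methods return status codes that real code should check.
void sketchTrackedAllocation(nvneural::INetworkBackend2* pBackend,
                             const void* pHostData, std::size_t byteCount)
{
    // Label allocations that are not otherwise attributed (INetworkBackend2).
    pBackend->setMemoryTrackingKey("inference", "scratch");

    // Allocate a device block under an explicit tracking key; the three-argument
    // overload is the INetworkBackend2 addition over the inherited two-argument one.
    nvneural::MemoryHandle hBlock{}; // value-initialized; the handle representation is opaque
    pBackend->allocateMemoryBlock(&hBlock, byteCount, "inference");

    // Inherited INetworkBackend members: resolve the device address for the block
    // and copy host data into it (assumes the address lookup yields a raw device pointer).
    void* pDevice = pBackend->getAddressForMemoryBlock(hBlock);
    pBackend->copyMemoryH2D(pDevice, pHostData, byteCount);

    // Enumerate the tracking keys recorded so far (INetworkBackend2).
    nvneural::IStringList* pKeys = nullptr;
    pBackend->getMemoryTrackingKeys(&pKeys);
    if (pKeys)
    {
        // ... inspect the keys, then drop the reference per IRefObject counting.
        pKeys->release();
    }

    pBackend->freeMemoryBlock(hBlock);
}
```

Because the derived interface declares its own allocateMemoryBlock overloads, the list above shows both the INetworkBackend2 entries and the qualified nvneural::INetworkBackend::allocateMemoryBlock entry for the inherited two-argument form; this is how Doxygen renders an overload set shared between a derived interface and its base.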