| Member | Class | Attributes |
|---|---|---|
| createEngineInspector() const noexcept | nvinfer1::ICudaEngine | inline |
| createExecutionContext(ExecutionContextAllocationStrategy strategy=ExecutionContextAllocationStrategy::kSTATIC) noexcept | nvinfer1::ICudaEngine | inline |
| createExecutionContext(IRuntimeConfig *runtimeConfig) noexcept | nvinfer1::ICudaEngine | inline |
| createExecutionContextWithoutDeviceMemory() noexcept | nvinfer1::ICudaEngine | inline |
| createRuntimeConfig() noexcept | nvinfer1::ICudaEngine | inline |
| createSerializationConfig() noexcept | nvinfer1::ICudaEngine | inline |
| getDeviceMemorySize() const noexcept | nvinfer1::ICudaEngine | inline |
| getDeviceMemorySizeForProfile(int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getDeviceMemorySizeForProfileV2(int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getDeviceMemorySizeV2() const noexcept | nvinfer1::ICudaEngine | inline |
| getEngineCapability() const noexcept | nvinfer1::ICudaEngine | inline |
| getErrorRecorder() const noexcept | nvinfer1::ICudaEngine | inline |
| getHardwareCompatibilityLevel() const noexcept | nvinfer1::ICudaEngine | inline |
| getIOTensorName(int32_t index) const noexcept | nvinfer1::ICudaEngine | inline |
| getMinimumWeightStreamingBudget() const noexcept | nvinfer1::ICudaEngine | inline |
| getName() const noexcept | nvinfer1::ICudaEngine | inline |
| getNbAuxStreams() const noexcept | nvinfer1::ICudaEngine | inline |
| getNbIOTensors() const noexcept | nvinfer1::ICudaEngine | inline |
| getNbLayers() const noexcept | nvinfer1::ICudaEngine | inline |
| getNbOptimizationProfiles() const noexcept | nvinfer1::ICudaEngine | inline |
| getProfileShape(char const *tensorName, int32_t profileIndex, OptProfileSelector select) const noexcept | nvinfer1::ICudaEngine | inline |
| getProfileTensorValues(char const *tensorName, int32_t profileIndex, OptProfileSelector select) const noexcept | nvinfer1::ICudaEngine | inline |
| getProfileTensorValuesV2(char const *tensorName, int32_t profileIndex, OptProfileSelector select) const noexcept | nvinfer1::ICudaEngine | inline |
| getProfilingVerbosity() const noexcept | nvinfer1::ICudaEngine | inline |
| getStreamableWeightsSize() const noexcept | nvinfer1::ICudaEngine | inline |
| getTacticSources() const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorBytesPerComponent(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorBytesPerComponent(char const *tensorName, int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorComponentsPerElement(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorComponentsPerElement(char const *tensorName, int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorDataType(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorFormat(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorFormat(char const *tensorName, int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorFormatDesc(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorFormatDesc(char const *tensorName, int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorIOMode(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorLocation(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorShape(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorVectorizedDim(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| getTensorVectorizedDim(char const *tensorName, int32_t profileIndex) const noexcept | nvinfer1::ICudaEngine | inline |
| getWeightStreamingAutomaticBudget() const noexcept | nvinfer1::ICudaEngine | inline |
| getWeightStreamingBudget() const noexcept | nvinfer1::ICudaEngine | inline |
| getWeightStreamingBudgetV2() const noexcept | nvinfer1::ICudaEngine | inline |
| getWeightStreamingScratchMemorySize() const noexcept | nvinfer1::ICudaEngine | inline |
| hasImplicitBatchDimension() const noexcept | nvinfer1::ICudaEngine | inline |
| INoCopy()=default | nvinfer1::INoCopy | protected |
| INoCopy(INoCopy const &other)=delete | nvinfer1::INoCopy | protected |
| INoCopy(INoCopy &&other)=delete | nvinfer1::INoCopy | protected |
| isDebugTensor(char const *name) const noexcept | nvinfer1::ICudaEngine | inline |
| isRefittable() const noexcept | nvinfer1::ICudaEngine | inline |
| isShapeInferenceIO(char const *tensorName) const noexcept | nvinfer1::ICudaEngine | inline |
| mImpl | nvinfer1::ICudaEngine | protected |
| operator=(INoCopy const &other)=delete | nvinfer1::INoCopy | protected |
| operator=(INoCopy &&other)=delete | nvinfer1::INoCopy | protected |
| serialize() const noexcept | nvinfer1::ICudaEngine | inline |
| serializeWithConfig(ISerializationConfig &config) const noexcept | nvinfer1::ICudaEngine | inline |
| setErrorRecorder(IErrorRecorder *recorder) noexcept | nvinfer1::ICudaEngine | inline |
| setWeightStreamingBudget(int64_t gpuMemoryBudget) noexcept | nvinfer1::ICudaEngine | inline |
| setWeightStreamingBudgetV2(int64_t gpuMemoryBudget) noexcept | nvinfer1::ICudaEngine | inline |
| ~ICudaEngine() noexcept=default | nvinfer1::ICudaEngine | virtual |
| ~INoCopy()=default | nvinfer1::INoCopy | protected virtual |
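
The members above cover engine introspection (tensor names, shapes, formats, optimization profiles) and execution-context creation. As a minimal usage sketch, not a definitive implementation: the following assumes TensorRT 10.x headers and a serialized engine blob produced elsewhere; `MyLogger`, `inspectEngine`, and `engineBlob` are illustrative names, not part of the API listed here.

```cpp
#include <NvInferRuntime.h>
#include <iostream>
#include <vector>

// Illustrative minimal logger; any nvinfer1::ILogger implementation works.
class MyLogger : public nvinfer1::ILogger
{
    void log(Severity severity, char const* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
            std::cerr << msg << std::endl;
    }
};

// Assumes `engineBlob` holds a serialized engine, e.g. the result of
// IBuilder::buildSerializedNetwork() or ICudaEngine::serialize().
void inspectEngine(std::vector<char> const& engineBlob)
{
    MyLogger logger;
    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
    nvinfer1::ICudaEngine* engine =
        runtime->deserializeCudaEngine(engineBlob.data(), engineBlob.size());

    // Enumerate I/O tensors with the name-based API from the member list above.
    for (int32_t i = 0; i < engine->getNbIOTensors(); ++i)
    {
        char const* name = engine->getIOTensorName(i);
        nvinfer1::Dims shape = engine->getTensorShape(name);
        bool const isInput =
            engine->getTensorIOMode(name) == nvinfer1::TensorIOMode::kINPUT;
        std::cout << (isInput ? "input  " : "output ") << name
                  << " rank=" << shape.nbDims << std::endl;
    }

    // Uses the default kSTATIC allocation strategy shown in the list above.
    nvinfer1::IExecutionContext* context = engine->createExecutionContext();

    // TensorRT 10.x objects are released with delete (destroy() was removed).
    delete context;
    delete engine;
    delete runtime;
}
```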