| Member | Defined in | Attributes |
| --- | --- | --- |
| allInputDimensionsSpecified() const noexcept | nvinfer1::IExecutionContext | inline |
| allInputShapesSpecified() const noexcept | nvinfer1::IExecutionContext | inline |
| enqueueV3(cudaStream_t stream) noexcept | nvinfer1::IExecutionContext | inline |
| executeV2(void *const *bindings) noexcept | nvinfer1::IExecutionContext | inline |
| getDebugListener() noexcept | nvinfer1::IExecutionContext | inline |
| getDebugState(char const *name) const noexcept | nvinfer1::IExecutionContext | inline |
| getDebugSync() const noexcept | nvinfer1::IExecutionContext | inline |
| getEngine() const noexcept | nvinfer1::IExecutionContext | inline |
| getEnqueueEmitsProfile() const noexcept | nvinfer1::IExecutionContext | inline |
| getErrorRecorder() const noexcept | nvinfer1::IExecutionContext | inline |
| getInputConsumedEvent() const noexcept | nvinfer1::IExecutionContext | inline |
| getMaxOutputSize(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getName() const noexcept | nvinfer1::IExecutionContext | inline |
| getNvtxVerbosity() const noexcept | nvinfer1::IExecutionContext | inline |
| getOptimizationProfile() const noexcept | nvinfer1::IExecutionContext | inline |
| getOutputAllocator(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getOutputTensorAddress(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getPersistentCacheLimit() const noexcept | nvinfer1::IExecutionContext | inline |
| getProfiler() const noexcept | nvinfer1::IExecutionContext | inline |
| getRuntimeConfig() const noexcept | nvinfer1::IExecutionContext | inline |
| getTemporaryStorageAllocator() const noexcept | nvinfer1::IExecutionContext | inline |
| getTensorAddress(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getTensorShape(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getTensorStrides(char const *tensorName) const noexcept | nvinfer1::IExecutionContext | inline |
| getUnfusedTensorsDebugState() const noexcept | nvinfer1::IExecutionContext | inline |
| inferShapes(int32_t nbMaxNames, char const **tensorNames) noexcept | nvinfer1::IExecutionContext | inline |
| INoCopy()=default | nvinfer1::INoCopy | protected |
| INoCopy(INoCopy const &other)=delete | nvinfer1::INoCopy | protected |
| INoCopy(INoCopy &&other)=delete | nvinfer1::INoCopy | protected |
| mImpl | nvinfer1::IExecutionContext | protected |
| operator=(INoCopy const &other)=delete | nvinfer1::INoCopy | protected |
| operator=(INoCopy &&other)=delete | nvinfer1::INoCopy | protected |
| reportToProfiler() const noexcept | nvinfer1::IExecutionContext | inline |
| setAllTensorsDebugState(bool flag) noexcept | nvinfer1::IExecutionContext | inline |
| setAuxStreams(cudaStream_t *auxStreams, int32_t nbStreams) noexcept | nvinfer1::IExecutionContext | inline |
| setDebugListener(IDebugListener *listener) noexcept | nvinfer1::IExecutionContext | inline |
| setDebugSync(bool sync) noexcept | nvinfer1::IExecutionContext | inline |
| setDeviceMemory(void *memory) noexcept | nvinfer1::IExecutionContext | inline |
| setDeviceMemoryV2(void *memory, int64_t size) noexcept | nvinfer1::IExecutionContext | inline |
| setEnqueueEmitsProfile(bool enqueueEmitsProfile) noexcept | nvinfer1::IExecutionContext | inline |
| setErrorRecorder(IErrorRecorder *recorder) noexcept | nvinfer1::IExecutionContext | inline |
| setInputConsumedEvent(cudaEvent_t event) noexcept | nvinfer1::IExecutionContext | inline |
| setInputShape(char const *tensorName, Dims const &dims) noexcept | nvinfer1::IExecutionContext | inline |
| setInputTensorAddress(char const *tensorName, void const *data) noexcept | nvinfer1::IExecutionContext | inline |
| setName(char const *name) noexcept | nvinfer1::IExecutionContext | inline |
| setNvtxVerbosity(ProfilingVerbosity verbosity) noexcept | nvinfer1::IExecutionContext | inline |
| setOptimizationProfileAsync(int32_t profileIndex, cudaStream_t stream) noexcept | nvinfer1::IExecutionContext | inline |
| setOutputAllocator(char const *tensorName, IOutputAllocator *outputAllocator) noexcept | nvinfer1::IExecutionContext | inline |
| setOutputTensorAddress(char const *tensorName, void *data) noexcept | nvinfer1::IExecutionContext | inline |
| setPersistentCacheLimit(size_t size) noexcept | nvinfer1::IExecutionContext | inline |
| setProfiler(IProfiler *profiler) noexcept | nvinfer1::IExecutionContext | inline |
| setTemporaryStorageAllocator(IGpuAllocator *allocator) noexcept | nvinfer1::IExecutionContext | inline |
| setTensorAddress(char const *tensorName, void *data) noexcept | nvinfer1::IExecutionContext | inline |
| setTensorDebugState(char const *name, bool flag) noexcept | nvinfer1::IExecutionContext | inline |
| setUnfusedTensorsDebugState(bool flag) noexcept | nvinfer1::IExecutionContext | inline |
| updateDeviceMemorySizeForShapes() noexcept | nvinfer1::IExecutionContext | inline |
| ~IExecutionContext() noexcept=default | nvinfer1::IExecutionContext | virtual |
| ~INoCopy()=default | nvinfer1::INoCopy | protected virtual |
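
The sketch below shows how a few of these members are typically combined for a single inference call on the name-based `enqueueV3` path. It is a minimal, hedged example rather than authoritative usage: engine creation, CUDA error checking, and buffer sizing are elided, and the tensor names `"input"`/`"output"` plus the 1x3x224x224 shape are hypothetical placeholders.

```cpp
// Minimal sketch: one synchronous round-trip through IExecutionContext.
// Assumes the engine was built/deserialized elsewhere and that inputDev/outputDev
// are device buffers already sized for the (hypothetical) "input"/"output" tensors.
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#include <memory>

bool runInference(nvinfer1::ICudaEngine& engine, void* inputDev, void* outputDev)
{
    // Each IExecutionContext carries per-invocation state (shapes, tensor addresses, profiler).
    std::unique_ptr<nvinfer1::IExecutionContext> context{engine.createExecutionContext()};
    if (!context)
    {
        return false;
    }

    // For engines with dynamic shapes, every runtime input dimension must be set before enqueue.
    if (!context->setInputShape("input", nvinfer1::Dims4{1, 3, 224, 224}))
    {
        return false;
    }

    // Bind device addresses by tensor name (the name-based counterpart of executeV2's bindings array).
    context->setTensorAddress("input", inputDev);
    context->setTensorAddress("output", outputDev);

    // allInputDimensionsSpecified() / inferShapes() can be used here to verify nothing was missed.
    cudaStream_t stream{};
    cudaStreamCreate(&stream);

    // Enqueue the whole network asynchronously on the stream, then wait for completion.
    bool const ok = context->enqueueV3(stream);
    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    return ok;
}
```

In a fully asynchronous pipeline, `setInputConsumedEvent()` can signal when the input buffers may be reused, and `setOptimizationProfileAsync()` selects among multiple optimization profiles before the shapes are set.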