Go to the documentation of this file.
50 #ifndef NV_INFER_RUNTIME_H
51 #define NV_INFER_RUNTIME_H
65 class IExecutionContext;
129 static constexpr int32_t kVALUE = 3;
171 return mImpl->data();
175 std::size_t
size() const noexcept
177 return mImpl->size();
183 return mImpl->type();
247 static constexpr int32_t kVALUE = 2;
269 return mImpl->isConstant();
276 return mImpl->getConstantValue();
307 return mImpl->constant(value);
315 return mImpl->operation(op, first, second);
406 static constexpr int32_t kFORMAT_COMBINATION_LIMIT = 100;
440 virtual bool supportsFormatCombination(
441 int32_t pos,
const PluginTensorDesc* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept
496 int32_t nbOutputs)
const noexcept
512 const void*
const* inputs,
void*
const* outputs,
void* workspace,
cudaStream_t stream) noexcept
534 bool const*,
PluginFormat, int32_t) noexcept
override final
543 Dims getOutputDimensions(int32_t,
Dims const*, int32_t) noexcept
override final
548 bool isOutputBroadcastAcrossBatch(int32_t,
bool const*, int32_t)
const noexcept
override final
553 bool canBroadcastInputAcrossBatch(int32_t)
const noexcept
override final
558 size_t getWorkspaceSize(int32_t)
const noexcept
override final
563 int32_t enqueue(int32_t,
const void*
const*,
void*
const*,
void*,
cudaStream_t) noexcept
override final
588 virtual void reportLayerTime(
const char* layerName,
float ms) noexcept = 0;
644 virtual ~
IRuntime() noexcept =
default;
662 const void* blob, std::size_t size, IPluginFactory* pluginFactory) noexcept
664 return mImpl->deserializeCudaEngine(blob, size,
nullptr);
676 mImpl->setDLACore(dlaCore);
687 return mImpl->getDLACore();
695 return mImpl->getNbDLACores();
721 mImpl->setGpuAllocator(allocator);
740 mImpl->setErrorRecorder(recorder);
755 return mImpl->getErrorRecorder();
768 return mImpl->deserializeCudaEngine(blob, size,
nullptr);
799 return mImpl->setWeights(layerName, role, weights);
809 return mImpl->refitCudaEngine();
830 return mImpl->getMissing(size, layerNames, roles);
847 return mImpl->getAll(size, layerNames, roles);
874 return mImpl->setDynamicRange(tensorName, min, max);
886 return mImpl->getDynamicRangeMin(tensorName);
898 return mImpl->getDynamicRangeMax(tensorName);
914 return mImpl->getTensorsWithDynamicRange(size, tensorNames);
933 mImpl->setErrorRecorder(recorder);
948 return mImpl->getErrorRecorder();
966 return mImpl->setNamedWeights(name, weights);
986 return mImpl->getMissingWeights(size, weightsNames);
1002 return mImpl->getAllWeights(size, weightsNames);
1028 constexpr
inline int32_t EnumMax<OptProfileSelector>() noexcept
1085 return mImpl->setDimensions(inputName, select, dims);
1095 return mImpl->getDimensions(inputName, select);
1122 const char* inputName,
OptProfileSelector select,
const int32_t* values, int32_t nbValues) noexcept
1124 return mImpl->setShapeValues(inputName, select, values, nbValues);
1135 return mImpl->getNbShapeValues(inputName);
1145 return mImpl->getShapeValues(inputName, select);
1163 return mImpl->setExtraMemoryTarget(target);
1171 return mImpl->getExtraMemoryTarget();
1187 return mImpl->isValid();
1248 return mImpl->getNbBindings();
1270 return mImpl->getBindingIndex(name);
1290 return mImpl->getBindingName(bindingIndex);
1303 return mImpl->bindingIsInput(bindingIndex);
1328 return mImpl->getBindingDimensions(bindingIndex);
1341 return mImpl->getBindingDataType(bindingIndex);
1353 return mImpl->getMaxBatchSize();
1367 return mImpl->getNbLayers();
1381 return mImpl->serialize();
1397 return mImpl->createExecutionContext();
1424 return mImpl->getLocation(bindingIndex);
1433 return mImpl->createExecutionContextWithoutDeviceMemory();
1443 return mImpl->getDeviceMemorySize();
1453 return mImpl->isRefittable();
1467 return mImpl->getBindingBytesPerComponent(bindingIndex);
1481 return mImpl->getBindingComponentsPerElement(bindingIndex);
1491 return mImpl->getBindingFormat(bindingIndex);
1510 return mImpl->getBindingFormatDesc(bindingIndex);
1522 return mImpl->getBindingVectorizedDim(bindingIndex);
1537 return mImpl->getName();
1548 return mImpl->getNbOptimizationProfiles();
1575 return mImpl->getProfileDimensions(bindingIndex, profileIndex, select);
1602 return mImpl->getProfileShapeValues(profileIndex, inputIndex, select);
1638 return mImpl->isShapeBinding(bindingIndex);
1652 return mImpl->isExecutionBinding(bindingIndex);
1667 return mImpl->getEngineCapability();
1685 return mImpl->setErrorRecorder(recorder);
1700 return mImpl->getErrorRecorder();
1719 return mImpl->hasImplicitBatchDimension();
1728 return mImpl->getTacticSources();
1767 bool execute(int32_t batchSize,
void*
const* bindings) noexcept
1769 return mImpl->execute(batchSize, bindings);
1798 return mImpl->enqueue(batchSize, bindings, stream, inputConsumed);
1811 mImpl->setDebugSync(sync);
1821 return mImpl->getDebugSync();
1831 mImpl->setProfiler(profiler);
1841 return mImpl->getProfiler();
1851 return mImpl->getEngine();
1875 mImpl->setName(name);
1885 return mImpl->getName();
1903 mImpl->setDeviceMemory(memory);
1924 return mImpl->getStrides(bindingIndex);
1966 return mImpl->setOptimizationProfile(profileIndex);
1978 return mImpl->getOptimizationProfile();
2008 return mImpl->setBindingDimensions(bindingIndex, dimensions);
2038 return mImpl->getBindingDimensions(bindingIndex);
2062 return mImpl->setInputShapeBinding(bindingIndex, data);
2084 return mImpl->getShapeBinding(bindingIndex, data);
2099 return mImpl->allInputDimensionsSpecified();
2114 return mImpl->allInputShapesSpecified();
2133 mImpl->setErrorRecorder(recorder);
2148 return mImpl->getErrorRecorder();
2165 return mImpl->executeV2(bindings);
2193 return mImpl->enqueueV2(bindings, stream, inputConsumed);
2241 return mImpl->setOptimizationProfileAsync(profileIndex, stream);
2253 extern "C" TENSORRTAPI
void* createInferRuntime_INTERNAL(
void* logger, int32_t version) noexcept;
2259 extern "C" TENSORRTAPI
void* createInferRefitter_INTERNAL(
void* engine,
void* logger, int32_t version) noexcept;
2278 return static_cast<IRuntime*>(createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
2288 return static_cast<IRefitter*>(createInferRefitter_INTERNAL(&engine, &logger, NV_TENSORRT_VERSION));
2304 template <
typename T>
2320 #define REGISTER_TENSORRT_PLUGIN(name) \
2321 static nvinfer1::PluginRegistrar<name> pluginRegistrar##name {}
2322 #endif // NV_INFER_RUNTIME_H
int32_t getNbShapeValues(const char *inputName) const noexcept
Get the number of values for an input shape tensor.
Definition: NvInferRuntime.h:1133
bool allInputDimensionsSpecified() const noexcept
Whether all dynamic dimensions of input tensors have been specified.
Definition: NvInferRuntime.h:2097
int32_t getConstantValue() const noexcept
Definition: NvInferRuntime.h:274
Structure to define the dimensions of a tensor.
Definition: NvInferRuntime.h:340
IErrorRecorder * getErrorRecorder() const noexcept
Get the ErrorRecorder assigned to this interface.
Definition: NvInferRuntime.h:753
DimensionOperation
An operation on two IDimensionExpr, which represent integer expressions used in dimension computation...
Definition: NvInferRuntime.h:211
Minimum of the two elements.
TRT_DEPRECATED void setDeviceMemory(void *memory) noexcept
Set the device memory for use by this execution context.
Definition: NvInferRuntime.h:1901
bool getShapeBinding(int32_t bindingIndex, int32_t *data) const noexcept
Get values of an input tensor required for shape calculations or an output tensor produced by shape c...
Definition: NvInferRuntime.h:2082
void setName(const char *name) noexcept
Set the name of the execution context.
Definition: NvInferRuntime.h:1873
int32_t getTensorRTVersion() const noexcept override
Return the API version with which this plugin was built. The upper byte reserved by TensorRT and is u...
Definition: NvInferRuntime.h:523
An array of weights used as a layer parameter.
Definition: NvInferRuntime.h:145
size_t getDeviceMemorySize() const noexcept
Return the amount of device memory required by an execution context.
Definition: NvInferRuntime.h:1441
Register the plugin creator to the registry The static registry object will be instantiated when the ...
Definition: NvInferRuntime.h:2305
nvinfer1::IPluginRegistry * getPluginRegistry() noexcept
Return the plugin registry.
bool isConstant() const noexcept
Return true if expression is a build-time constant.
Definition: NvInferRuntime.h:267
Class to handle library allocated memory that is accessible to the user.
Definition: NvInferRuntime.h:163
void * data() const noexcept
A pointer to the raw data that is owned by the library.
Definition: NvInferRuntime.h:169
TRT_DEPRECATED void destroy() noexcept
Definition: NvInferRuntime.h:192
DeviceType
The device that this layer/network will execute on.
Definition: NvInferRuntime.h:621
float getDynamicRangeMin(const char *tensorName) const noexcept
Get minimum of dynamic range.
Definition: NvInferRuntime.h:884
IErrorRecorder * getErrorRecorder() const noexcept
Get the ErrorRecorder assigned to this interface.
Definition: NvInferRuntime.h:2146
float getDynamicRangeMax(const char *tensorName) const noexcept
Get maximum of dynamic range.
Definition: NvInferRuntime.h:896
IHostMemory * serialize() const noexcept
Serialize the network to a stream.
Definition: NvInferRuntime.h:1379
Declaration of EnumMaxImpl struct to store maximum number of elements in an enumeration type.
Definition: NvInferRuntimeCommon.h:136
constexpr int32_t EnumMax< WeightsRole >() noexcept
Maximum number of elements in WeightsRole enum.
Definition: NvInferRuntime.h:611
Dims getBindingDimensions(int32_t bindingIndex) const noexcept
Get the dimensions of a binding.
Definition: NvInferRuntime.h:1326
Updates weights in an engine.
Definition: NvInferRuntime.h:782
Definition: NvInferRuntimeCommon.h:189
TacticSource
List of tactic sources for TensorRT.
Definition: NvInferRuntime.h:1202
int32_t const * getShapeValues(const char *inputName, OptProfileSelector select) const noexcept
Get the minimum / optimum / maximum values for an input shape tensor.
Definition: NvInferRuntime.h:1143
kernel for IConvolutionLayer, IDeconvolutionLayer, or IFullyConnectedLayer
Fail with error when the coordinates are out of bounds. This is the default.
Dims getProfileDimensions(int32_t bindingIndex, int32_t profileIndex, OptProfileSelector select) const noexcept
Get the minimum / optimum / maximum dimensions for a particular binding under an optimization profile...
Definition: NvInferRuntime.h:1573
TensorLocation getLocation(int32_t bindingIndex) const noexcept
Get location of binding.
Definition: NvInferRuntime.h:1422
const int32_t * getProfileShapeValues(int32_t profileIndex, int32_t inputIndex, OptProfileSelector select) const noexcept
Get minimum / optimum / maximum values for an input shape binding under an optimization profile.
Definition: NvInferRuntime.h:1599
TRT_DEPRECATED void destroy() noexcept
Destroy this object.
Definition: NvInferRuntime.h:1407
bool setDimensions(const char *inputName, OptProfileSelector select, Dims dims) noexcept
Set the minimum / optimum / maximum dimensions for a dynamic input tensor.
Definition: NvInferRuntime.h:1083
bool setDynamicRange(const char *tensorName, float min, float max) noexcept
Definition: NvInferRuntime.h:872
TensorFormat
Format of the input/output tensors.
Definition: NvInferRuntimeCommon.h:220
TRT_DEPRECATED void destroy() noexcept
Destroy this object.
Definition: NvInferRuntime.h:705
int64_t count
The number of weights in the array.
Definition: NvInferRuntime.h:150
Single registration point for all plugins in an application. It is used to find plugin implementation...
Definition: NvInferRuntimeCommon.h:997
bool enqueueV2(void *const *bindings, cudaStream_t stream, cudaEvent_t *inputConsumed) noexcept
Asynchronously execute inference.
Definition: NvInferRuntime.h:2191
Application-implemented logging interface for the builder, engine and runtime.
Definition: NvInferRuntimeCommon.h:1175
Definition: NvInferImpl.h:252
An engine for executing inference on a built network, with functionally unsafe features.
Definition: NvInferRuntime.h:1231
int32_t getBindingComponentsPerElement(int32_t bindingIndex) const noexcept
Return the number of components included in one element.
Definition: NvInferRuntime.h:1479
DataType type() const noexcept
The type of the memory that was allocated.
Definition: NvInferRuntime.h:181
bool refitCudaEngine() noexcept
Updates associated engine. Return true if successful.
Definition: NvInferRuntime.h:807
IRuntime * createInferRuntime(ILogger &logger) noexcept
Create an instance of an IRuntime class.
Definition: NvInferRuntime.h:2276
nvinfer1::ICudaEngine * deserializeCudaEngine(const void *blob, std::size_t size) noexcept
Deserialize an engine from a stream.
Definition: NvInferRuntime.h:766
Definition: NvInferImpl.h:196
int32_t uint32_t TacticSources
Represents a collection of one or more TacticSource values combined using bitwise-OR operations.
Definition: NvInferImpl.h:158
bool isValid() const noexcept
Check whether the optimization profile can be passed to an IBuilderConfig object.
Definition: NvInferRuntime.h:1185
bool enqueue(int32_t batchSize, void *const *bindings, cudaStream_t stream, cudaEvent_t *inputConsumed) noexcept
Asynchronously execute inference on a batch.
Definition: NvInferRuntime.h:1796
bool getDebugSync() const noexcept
Get the debug sync flag.
Definition: NvInferRuntime.h:1819
int32_t getAll(int32_t size, const char **layerNames, WeightsRole *roles) noexcept
Get description of all weights that could be refit.
Definition: NvInferRuntime.h:845
bool execute(int32_t batchSize, void *const *bindings) noexcept
Synchronously execute inference on a batch.
Definition: NvInferRuntime.h:1767
TRT_DEPRECATED nvinfer1::ICudaEngine * deserializeCudaEngine(const void *blob, std::size_t size, IPluginFactory *pluginFactory) noexcept
Deserialize an engine from a stream.
Definition: NvInferRuntime.h:661
void setErrorRecorder(IErrorRecorder *recorder) noexcept
Set the ErrorRecorder for this interface.
Definition: NvInferRuntime.h:1683
std::size_t size() const noexcept
The size in bytes of the data that was allocated.
Definition: NvInferRuntime.h:175
constexpr int32_t EnumMax< TacticSource >() noexcept
Maximum number of tactic sources in TacticSource enum.
Definition: NvInferRuntime.h:1211
int32_t nbDims
The number of dimensions.
Definition: NvInferRuntime.h:331
bool isExecutionBinding(int32_t bindingIndex) const noexcept
True if pointer to tensor data is required for execution phase, false if nullptr can be supplied.
Definition: NvInferRuntime.h:1650
The TensorRT API version 1 namespace.
bool setShapeValues(const char *inputName, OptProfileSelector select, const int32_t *values, int32_t nbValues) noexcept
Set the minimum / optimum / maximum values for an input shape tensor.
Definition: NvInferRuntime.h:1121
constexpr int32_t EnumMax< DeviceType >() noexcept
Maximum number of elements in DeviceType enum.
Definition: NvInferRuntime.h:629
constexpr int32_t EnumMax< DimensionOperation >() noexcept
Maximum number of elements in DimensionOperation enum.
Definition: NvInferRuntime.h:226
bool executeV2(void *const *bindings) noexcept
Synchronously execute inference on a network.
Definition: NvInferRuntime.h:2163
int32_t getTensorsWithDynamicRange(int32_t size, const char **tensorNames) const noexcept
Get names of all tensors that have refittable dynamic ranges.
Definition: NvInferRuntime.h:912
Definition: NvInferRuntime.h:370
void setGpuAllocator(IGpuAllocator *allocator) noexcept
Set the GPU allocator.
Definition: NvInferRuntime.h:719
int32_t getNbDLACores() const noexcept
Returns number of DLA hardware cores accessible.
Definition: NvInferRuntime.h:693
float getExtraMemoryTarget() const noexcept
Get the extra memory target that has been defined for this profile.
Definition: NvInferRuntime.h:1169
int32_t getNbOptimizationProfiles() const noexcept
Get the number of optimization profiles defined for this engine.
Definition: NvInferRuntime.h:1546
This is used to set or get the value that is used in the optimization (kernel selection).
const IDimensionExpr * constant(int32_t value) noexcept
Return pointer to IDimensionExp for given value.
Definition: NvInferRuntime.h:305
IErrorRecorder * getErrorRecorder() const noexcept
Get the ErrorRecorder assigned to this interface.
Definition: NvInferRuntime.h:1698
DataType getBindingDataType(int32_t bindingIndex) const noexcept
Determine the required data type for a buffer from its binding index.
Definition: NvInferRuntime.h:1339
Dims max
Upper bounds on tensor's dimensions.
Definition: NvInferRuntime.h:349
Check if two elements are equal.
struct CUstream_st * cudaStream_t
Forward declaration of cudaStream_t.
Definition: NvInferRuntimeCommon.h:107
void setErrorRecorder(IErrorRecorder *recorder) noexcept
Set the ErrorRecorder for this interface.
Definition: NvInferRuntime.h:2131
TRT_DEPRECATED bool setOptimizationProfile(int32_t profileIndex) noexcept
Select an optimization profile for the current context.
Definition: NvInferRuntime.h:1964
void setProfiler(IProfiler *profiler) noexcept
Set the profiler.
Definition: NvInferRuntime.h:1829
int32_t getNbLayers() const noexcept
Get the number of layers in the network.
Definition: NvInferRuntime.h:1365
DataType type
The type of the weights.
Definition: NvInferRuntime.h:148
bias for IConvolutionLayer, IDeconvolutionLayer, or IFullyConnectedLayer
int32_t getBindingBytesPerComponent(int32_t bindingIndex) const noexcept
Return the number of bytes per component of an element.
Definition: NvInferRuntime.h:1465
Definition: NvInferImpl.h:205
struct CUevent_st * cudaEvent_t
Forward declaration of cudaEvent_t.
Definition: NvInferRuntimeCommon.h:110
Dims min
Lower bounds on tensor's dimensions.
Definition: NvInferRuntime.h:346
int32_t getBindingIndex(const char *name) const noexcept
Retrieve the binding index for a named tensor.
Definition: NvInferRuntime.h:1268
int32_t getNbBindings() const noexcept
Get the number of binding indices.
Definition: NvInferRuntime.h:1246
void setDLACore(int32_t dlaCore) noexcept
Set the DLA core that the deserialized engine must execute on.
Definition: NvInferRuntime.h:674
const ICudaEngine & getEngine() const noexcept
Get the associated engine.
Definition: NvInferRuntime.h:1849
IErrorRecorder * getErrorRecorder() const noexcept
Get the ErrorRecorder assigned to this interface.
Definition: NvInferRuntime.h:946
shift part of IScaleLayer
Optimization profile for dynamic input dimensions and shape tensors.
Definition: NvInferRuntime.h:1055
IExecutionContext * createExecutionContextWithoutDeviceMemory() noexcept
Create an execution context without any device memory allocated.
Definition: NvInferRuntime.h:1431
Definition: NvInferImpl.h:237
DataType
The type of weights and tensors.
Definition: NvInferRuntimeCommon.h:150
TacticSources getTacticSources() const noexcept
Return the tactic sources required by this engine.
Definition: NvInferRuntime.h:1726
int32_t getBindingVectorizedDim(int32_t bindingIndex) const noexcept
Return the dimension index that the buffer is vectorized.
Definition: NvInferRuntime.h:1520
Definition: NvInferRuntime.h:328
virtual bool registerCreator(IPluginCreator &creator, AsciiChar const *pluginNamespace) noexcept=0
Register a plugin creator. Returns false if one with same type is already registered.
bool setOptimizationProfileAsync(int32_t profileIndex, cudaStream_t stream) noexcept
Select an optimization profile for the current context with async semantics.
Definition: NvInferRuntime.h:2239
Floor division of the first element by the second.
PluginTensorDesc desc
Information required to interpret a pointer to tensor data, except that desc.dims has -1 in place of ...
Definition: NvInferRuntime.h:343
Definition: NvInferImpl.h:181
Subtract the second element from the first.
Forward declaration of IPluginFactory for use by other interfaces.
Definition: NvInferRuntime.h:78
const char * getName() const noexcept
Returns the name of the network associated with the engine.
Definition: NvInferRuntime.h:1535
TensorFormat getBindingFormat(int32_t bindingIndex) const noexcept
Return the binding format.
Definition: NvInferRuntime.h:1489
Reference counted application-implemented error reporting interface for TensorRT objects.
Definition: NvInferRuntimeCommon.h:1353
IProfiler * getProfiler() const noexcept
Get the profiler.
Definition: NvInferRuntime.h:1839
int32_t getAllWeights(int32_t size, const char **weightsNames) noexcept
Get names of all weights that could be refit.
Definition: NvInferRuntime.h:1000
OptProfileSelector
When setting or querying optimization profile parameters (such as shape tensor inputs or dynamic dime...
Definition: NvInferRuntime.h:1019
WeightsRole
How a layer uses particular Weights.
Definition: NvInferRuntime.h:599
bool isShapeBinding(int32_t bindingIndex) const noexcept
True if tensor is required as input for shape calculations or output from them.
Definition: NvInferRuntime.h:1636
int32_t getMaxBatchSize() const noexcept
Get the maximum batch size which can be used for inference.
Definition: NvInferRuntime.h:1351
const char * getBindingFormatDesc(int32_t bindingIndex) const noexcept
Return the human readable description of the tensor format.
Definition: NvInferRuntime.h:1508
void setErrorRecorder(IErrorRecorder *recorder) noexcept
Set the ErrorRecorder for this interface.
Definition: NvInferRuntime.h:738
Plugin class for user-implemented layers.
Definition: NvInferRuntimeCommon.h:599
bool setInputShapeBinding(int32_t bindingIndex, int32_t const *data) noexcept
Set values of input tensor required by shape calculations.
Definition: NvInferRuntime.h:2060
static constexpr int32_t MAX_DIMS
The maximum number of dimensions supported for a tensor.
Definition: NvInferRuntimeCommon.h:193
Definition: NvInferRuntime.h:263
Dims getBindingDimensions(int32_t bindingIndex) const noexcept
Get the dynamic dimensions of a binding.
Definition: NvInferRuntime.h:2036
Product of the two elements.
EngineCapability
List of supported engine capability flows.
Definition: NvInferRuntime.h:103
bool hasImplicitBatchDimension() const noexcept
Query whether the engine was built with an implicit batch dimension.
Definition: NvInferRuntime.h:1717
Application-implemented interface for profiling.
Definition: NvInferRuntime.h:579
Check if element in first tensor is less than corresponding element in second tensor.
bool bindingIsInput(int32_t bindingIndex) const noexcept
Determine whether a binding is an input binding.
Definition: NvInferRuntime.h:1301
bool setExtraMemoryTarget(float target) noexcept
Set a target for extra GPU memory that may be used by this profile.
Definition: NvInferRuntime.h:1161
bool allInputShapesSpecified() const noexcept
Whether all input shape bindings have been specified.
Definition: NvInferRuntime.h:2111
Fields that a plugin might see for an input or output.
Definition: NvInferRuntimeCommon.h:368
TRT_DEPRECATED void destroy() noexcept
Definition: NvInferRuntime.h:855
const void * values
The weight values, in a contiguous array.
Definition: NvInferRuntime.h:149
Definition: NvInferImpl.h:189
TensorFormat PluginFormat
PluginFormat is reserved for backward compatibility.
Definition: NvInferRuntimeCommon.h:345
int32_t getDLACore() const noexcept
Get the DLA core that the engine executes on.
Definition: NvInferRuntime.h:685
Definition: NvInferImpl.h:291
const char * getBindingName(int32_t bindingIndex) const noexcept
Retrieve the name corresponding to a binding index.
Definition: NvInferRuntime.h:1288
bool setNamedWeights(const char *name, Weights weights) noexcept
Specify new weights of given name.
Definition: NvInferRuntime.h:964
const char * getName() const noexcept
Return the name of the execution context.
Definition: NvInferRuntime.h:1883
Application-implemented class for controlling allocation on the GPU.
Definition: NvInferRuntimeCommon.h:1093
Dims getDimensions(const char *inputName, OptProfileSelector select) const noexcept
Get the minimum / optimum / maximum dimensions for a dynamic input tensor.
Definition: NvInferRuntime.h:1093
IRefitter * createInferRefitter(ICudaEngine &engine, ILogger &logger) noexcept
Create an instance of an IRefitter class.
Definition: NvInferRuntime.h:2286
bool isRefittable() const noexcept
Return true if an engine can be refit.
Definition: NvInferRuntime.h:1451
int32_t getMissing(int32_t size, const char **layerNames, WeightsRole *roles) noexcept
Get description of missing weights.
Definition: NvInferRuntime.h:828
Allows a serialized functionally unsafe engine to be deserialized.
Definition: NvInferRuntime.h:641
void setDebugSync(bool sync) noexcept
Set the debug sync flag.
Definition: NvInferRuntime.h:1809
EngineCapability getEngineCapability() const noexcept
Determine what execution capability this engine has.
Definition: NvInferRuntime.h:1665
Definition: NvInferImpl.h:219
TensorLocation
The location for tensor data storage, device or host.
Definition: NvInferRuntime.h:235
Context for executing inference using an engine, with functionally unsafe features.
Definition: NvInferRuntime.h:1745
int32_t getMissingWeights(int32_t size, const char **weightsNames) noexcept
Get names of missing weights.
Definition: NvInferRuntime.h:984
TRT_DEPRECATED void destroy() noexcept
Destroy this object.
Definition: NvInferRuntime.h:1861
const IDimensionExpr * operation(DimensionOperation op, const IDimensionExpr &first, const IDimensionExpr &second) noexcept
Definition: NvInferRuntime.h:312
Dims getStrides(int32_t bindingIndex) const noexcept
Return the strides of the buffer for the given binding.
Definition: NvInferRuntime.h:1922
#define TRT_DEPRECATED
< Items that are marked as deprecated will be removed in a future release.
Definition: NvInferRuntimeCommon.h:76
bool setWeights(const char *layerName, WeightsRole role, Weights weights) noexcept
Specify new weights for a layer of given name. Returns true on success, or false if new weights are r...
Definition: NvInferRuntime.h:797
int32_t getOptimizationProfile() const noexcept
Get the index of the currently selected optimization profile.
Definition: NvInferRuntime.h:1976
void setErrorRecorder(IErrorRecorder *recorder) noexcept
Set the ErrorRecorder for this interface.
Definition: NvInferRuntime.h:931
Definition: NvInferRuntime.h:301
IExecutionContext * createExecutionContext() noexcept
Create an execution context.
Definition: NvInferRuntime.h:1395
bool setBindingDimensions(int32_t bindingIndex, Dims dimensions) noexcept
Set the dynamic dimensions of a binding.
Definition: NvInferRuntime.h:2006