NVIDIA TensorRT Inference Server
0.8.0 -266768c
Index
A
async_run() (tensorrtserver.api.InferContext method)
C
close() (tensorrtserver.api.InferContext method), (tensorrtserver.api.ServerHealthContext method), (tensorrtserver.api.ServerStatusContext method)
G
get_async_run_results() (tensorrtserver.api.InferContext method)
get_last_request_id() (tensorrtserver.api.InferContext method), (tensorrtserver.api.ServerHealthContext method), (tensorrtserver.api.ServerStatusContext method)
get_last_request_model_name() (tensorrtserver.api.InferContext method)
get_last_request_model_version() (tensorrtserver.api.InferContext method)
get_ready_async_request() (tensorrtserver.api.InferContext method)
get_server_status() (tensorrtserver.api.ServerStatusContext method)
I
InferContext (class in tensorrtserver.api)
InferContext.ResultFormat (class in tensorrtserver.api)
InferenceServerException
is_live() (tensorrtserver.api.ServerHealthContext method)
is_ready() (tensorrtserver.api.ServerHealthContext method)
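The Python client entries indexed here (ServerHealthContext, ServerStatusContext, InferContext and its run() method) combine into a short synchronous workflow. The sketch below is illustrative only: the endpoint localhost:8000, the model name "mymodel", and the tensor names "INPUT"/"OUTPUT" are hypothetical placeholders, and exact signatures should be checked against the Client API section.

    import numpy as np
    from tensorrtserver.api import (
        InferContext, ProtocolType, ServerHealthContext, ServerStatusContext)

    url = "localhost:8000"                    # hypothetical endpoint
    protocol = ProtocolType.from_str("http")  # "http" or "grpc"

    # Liveness/readiness checks (is_live, is_ready indexed above).
    health = ServerHealthContext(url, protocol)
    print("live:", health.is_live(), "ready:", health.is_ready())

    # Server and per-model status (get_server_status indexed above).
    status = ServerStatusContext(url, protocol)
    print(status.get_server_status())

    # Synchronous inference: run() takes {input name: list of per-batch
    # numpy arrays}, {output name: result format}, and a batch size.
    ctx = InferContext(url, protocol, "mymodel")
    result = ctx.run(
        {"INPUT": [np.zeros((3, 224, 224), dtype=np.float32)]},
        {"OUTPUT": (InferContext.ResultFormat.CLASS, 3)},  # top-3 classes
        1)
    print(result)

Requesting raw tensor output instead of classifications substitutes InferContext.ResultFormat.RAW for the CLASS tuple.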
M
message() (tensorrtserver.api.InferenceServerException method)
N
nvidia::inferenceserver::client::Error (C++ class)
nvidia::inferenceserver::client::Error::Code (C++ function)
nvidia::inferenceserver::client::Error::Error (C++ function), [1], [2]
nvidia::inferenceserver::client::Error::IsOk (C++ function)
nvidia::inferenceserver::client::Error::Message (C++ function)
nvidia::inferenceserver::client::Error::RequestId (C++ function)
nvidia::inferenceserver::client::Error::ServerId (C++ function)
nvidia::inferenceserver::client::Error::Success (C++ member)
nvidia::inferenceserver::client::InferContext (C++ class)
nvidia::inferenceserver::client::InferContext::async_request_id_ (C++ member)
nvidia::inferenceserver::client::InferContext::AsyncReqMap (C++ type)
nvidia::inferenceserver::client::InferContext::AsyncRun (C++ function)
nvidia::inferenceserver::client::InferContext::AsyncTransfer (C++ function)
nvidia::inferenceserver::client::InferContext::batch_size_ (C++ member)
nvidia::inferenceserver::client::InferContext::CLASS (C++ enumerator)
nvidia::inferenceserver::client::InferContext::context_stat_ (C++ member)
nvidia::inferenceserver::client::InferContext::cv_ (C++ member)
nvidia::inferenceserver::client::InferContext::exiting_ (C++ member)
nvidia::inferenceserver::client::InferContext::GetAsyncRunResults (C++ function)
nvidia::inferenceserver::client::InferContext::GetInput (C++ function)
nvidia::inferenceserver::client::InferContext::GetOutput (C++ function)
nvidia::inferenceserver::client::InferContext::GetReadyAsyncRequest (C++ function)
nvidia::inferenceserver::client::InferContext::GetStat (C++ function)
nvidia::inferenceserver::client::InferContext::infer_request_ (C++ member)
nvidia::inferenceserver::client::InferContext::InferContext (C++ function)
nvidia::inferenceserver::client::InferContext::Input (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Input::ByteSize (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::Dims (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::DType (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::Format (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::Name (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::Reset (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Input::SetRaw (C++ function), [1], [2], [3]
nvidia::inferenceserver::client::InferContext::Input::~Input (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Inputs (C++ function)
nvidia::inferenceserver::client::InferContext::inputs_ (C++ member)
nvidia::inferenceserver::client::InferContext::IsRequestReady (C++ function)
nvidia::inferenceserver::client::InferContext::Kind (C++ type)
nvidia::inferenceserver::client::InferContext::max_batch_size_ (C++ member)
nvidia::inferenceserver::client::InferContext::MaxBatchSize (C++ function)
nvidia::inferenceserver::client::InferContext::model_name_ (C++ member)
nvidia::inferenceserver::client::InferContext::model_version_ (C++ member)
nvidia::inferenceserver::client::InferContext::ModelName (C++ function)
nvidia::inferenceserver::client::InferContext::ModelVersion (C++ function)
nvidia::inferenceserver::client::InferContext::mutex_ (C++ member)
nvidia::inferenceserver::client::InferContext::ongoing_async_requests_ (C++ member)
nvidia::inferenceserver::client::InferContext::Options (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Options::AddClassResult (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Options::AddRawResult (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Options::BatchSize (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Options::Create (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Options::SetBatchSize (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Options::~Options (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Output (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Output::ByteSize (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Output::Dims (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Output::DType (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Output::Name (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Output::~Output (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Outputs (C++ function)
nvidia::inferenceserver::client::InferContext::outputs_ (C++ member)
nvidia::inferenceserver::client::InferContext::PreRunProcessing (C++ function)
nvidia::inferenceserver::client::InferContext::RAW (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RECEIVE_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RECEIVE_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::Request (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Request::Id (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Request::~Request (C++ function), [1]
nvidia::inferenceserver::client::InferContext::REQUEST_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::REQUEST_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::requested_outputs_ (C++ member)
nvidia::inferenceserver::client::InferContext::RequestTimers (C++ class), [1]
nvidia::inferenceserver::client::InferContext::RequestTimers::Kind (C++ type)
nvidia::inferenceserver::client::InferContext::RequestTimers::RECEIVE_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RequestTimers::RECEIVE_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RequestTimers::Record (C++ function), [1]
nvidia::inferenceserver::client::InferContext::RequestTimers::REQUEST_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RequestTimers::REQUEST_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RequestTimers::RequestTimers (C++ function), [1]
nvidia::inferenceserver::client::InferContext::RequestTimers::Reset (C++ function), [1]
nvidia::inferenceserver::client::InferContext::RequestTimers::SEND_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::RequestTimers::SEND_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::Result (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Result::CLASS (C++ enumerator)
nvidia::inferenceserver::client::InferContext::Result::ClassResult (C++ class), [1], [2]
nvidia::inferenceserver::client::InferContext::Result::ClassResult::idx (C++ member), [1], [2]
nvidia::inferenceserver::client::InferContext::Result::ClassResult::label (C++ member), [1], [2]
nvidia::inferenceserver::client::InferContext::Result::ClassResult::value (C++ member), [1], [2]
nvidia::inferenceserver::client::InferContext::Result::GetClassAtCursor (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::GetClassCount (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::GetOutput (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::GetRaw (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::GetRawAtCursor (C++ function), [1], [2], [3]
nvidia::inferenceserver::client::InferContext::Result::ModelName (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::ModelVersion (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::RAW (C++ enumerator)
nvidia::inferenceserver::client::InferContext::Result::ResetCursor (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::ResetCursors (C++ function), [1]
nvidia::inferenceserver::client::InferContext::Result::ResultFormat (C++ type)
nvidia::inferenceserver::client::InferContext::Result::~Result (C++ function), [1]
nvidia::inferenceserver::client::InferContext::ResultFormat (C++ type)
nvidia::inferenceserver::client::InferContext::Run (C++ function)
nvidia::inferenceserver::client::InferContext::SEND_END (C++ enumerator)
nvidia::inferenceserver::client::InferContext::SEND_START (C++ enumerator)
nvidia::inferenceserver::client::InferContext::SetRunOptions (C++ function)
nvidia::inferenceserver::client::InferContext::Stat (C++ class), [1]
nvidia::inferenceserver::client::InferContext::Stat::completed_request_count (C++ member), [1]
nvidia::inferenceserver::client::InferContext::Stat::cumulative_receive_time_ns (C++ member), [1]
nvidia::inferenceserver::client::InferContext::Stat::cumulative_send_time_ns (C++ member), [1]
nvidia::inferenceserver::client::InferContext::Stat::cumulative_total_request_time_ns (C++ member), [1]
nvidia::inferenceserver::client::InferContext::Stat::Stat (C++ function), [1]
nvidia::inferenceserver::client::InferContext::sync_request_ (C++ member)
nvidia::inferenceserver::client::InferContext::total_input_byte_size_ (C++ member)
nvidia::inferenceserver::client::InferContext::UpdateStat (C++ function)
nvidia::inferenceserver::client::InferContext::verbose_ (C++ member)
nvidia::inferenceserver::client::InferContext::worker_ (C++ member)
nvidia::inferenceserver::client::InferContext::~InferContext (C++ function)
nvidia::inferenceserver::client::InferGrpcContext (C++ class)
nvidia::inferenceserver::client::InferGrpcContext::AsyncRun (C++ function)
nvidia::inferenceserver::client::InferGrpcContext::Create (C++ function)
nvidia::inferenceserver::client::InferGrpcContext::GetAsyncRunResults (C++ function)
nvidia::inferenceserver::client::InferGrpcContext::Run (C++ function)
nvidia::inferenceserver::client::InferGrpcContext::~InferGrpcContext (C++ function)
nvidia::inferenceserver::client::InferHttpContext (C++ class)
nvidia::inferenceserver::client::InferHttpContext::AsyncRun (C++ function)
nvidia::inferenceserver::client::InferHttpContext::Create (C++ function)
nvidia::inferenceserver::client::InferHttpContext::GetAsyncRunResults (C++ function)
nvidia::inferenceserver::client::InferHttpContext::Run (C++ function)
nvidia::inferenceserver::client::InferHttpContext::~InferHttpContext (C++ function)
nvidia::inferenceserver::client::operator<< (C++ function)
nvidia::inferenceserver::client::ProfileContext (C++ class)
nvidia::inferenceserver::client::ProfileContext::ProfileContext (C++ function)
nvidia::inferenceserver::client::ProfileContext::SendCommand (C++ function)
nvidia::inferenceserver::client::ProfileContext::StartProfile (C++ function)
nvidia::inferenceserver::client::ProfileContext::StopProfile (C++ function)
nvidia::inferenceserver::client::ProfileContext::verbose_ (C++ member)
nvidia::inferenceserver::client::ProfileGrpcContext (C++ class)
nvidia::inferenceserver::client::ProfileGrpcContext::Create (C++ function)
nvidia::inferenceserver::client::ProfileHttpContext (C++ class)
nvidia::inferenceserver::client::ProfileHttpContext::Create (C++ function)
nvidia::inferenceserver::client::ServerHealthContext (C++ class)
nvidia::inferenceserver::client::ServerHealthContext::GetLive (C++ function)
nvidia::inferenceserver::client::ServerHealthContext::GetReady (C++ function)
nvidia::inferenceserver::client::ServerHealthContext::ServerHealthContext (C++ function)
nvidia::inferenceserver::client::ServerHealthContext::verbose_ (C++ member)
nvidia::inferenceserver::client::ServerHealthGrpcContext (C++ class)
nvidia::inferenceserver::client::ServerHealthGrpcContext::Create (C++ function)
nvidia::inferenceserver::client::ServerHealthGrpcContext::GetLive (C++ function)
nvidia::inferenceserver::client::ServerHealthGrpcContext::GetReady (C++ function)
nvidia::inferenceserver::client::ServerHealthHttpContext (C++ class)
nvidia::inferenceserver::client::ServerHealthHttpContext::Create (C++ function)
nvidia::inferenceserver::client::ServerHealthHttpContext::GetLive (C++ function)
nvidia::inferenceserver::client::ServerHealthHttpContext::GetReady (C++ function)
nvidia::inferenceserver::client::ServerStatusContext (C++ class)
nvidia::inferenceserver::client::ServerStatusContext::GetServerStatus (C++ function)
nvidia::inferenceserver::client::ServerStatusContext::ServerStatusContext (C++ function)
nvidia::inferenceserver::client::ServerStatusContext::verbose_ (C++ member)
nvidia::inferenceserver::client::ServerStatusGrpcContext (C++ class)
nvidia::inferenceserver::client::ServerStatusGrpcContext::Create (C++ function), [1]
nvidia::inferenceserver::client::ServerStatusGrpcContext::GetServerStatus (C++ function)
nvidia::inferenceserver::client::ServerStatusHttpContext (C++ class)
nvidia::inferenceserver::client::ServerStatusHttpContext::Create (C++ function), [1]
nvidia::inferenceserver::client::ServerStatusHttpContext::GetServerStatus (C++ function)
nvidia::inferenceserver::DataType (C++ enum)
nvidia::inferenceserver::DataType::DataType::BOOL (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::FP16 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::FP32 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::FP64 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::INT16 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::INT32 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::INT64 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::INT8 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::INVALID (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::UINT16 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::UINT32 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::UINT64 (C++ enumerator)
nvidia::inferenceserver::DataType::DataType::UINT8 (C++ enumerator)
nvidia::inferenceserver::GRPCService (C++ member)
nvidia::inferenceserver::HealthRequest (C++ member)
nvidia::inferenceserver::HealthRequest::mode (C++ member)
nvidia::inferenceserver::HealthRequestStats (C++ member)
nvidia::inferenceserver::HealthRequestStats::success (C++ member)
nvidia::inferenceserver::HealthResponse (C++ member)
nvidia::inferenceserver::HealthResponse::health (C++ member)
nvidia::inferenceserver::HealthResponse::request_status (C++ member)
nvidia::inferenceserver::InferRequest (C++ member)
nvidia::inferenceserver::InferRequest::meta_data (C++ member)
nvidia::inferenceserver::InferRequest::model_name (C++ member)
nvidia::inferenceserver::InferRequest::raw_input (C++ member)
nvidia::inferenceserver::InferRequest::version (C++ member)
nvidia::inferenceserver::InferRequestHeader (C++ member)
nvidia::inferenceserver::InferRequestHeader::batch_size (C++ member)
nvidia::inferenceserver::InferRequestHeader::Input (C++ member)
nvidia::inferenceserver::InferRequestHeader::input (C++ member)
nvidia::inferenceserver::InferRequestHeader::Input::byte_size (C++ member)
nvidia::inferenceserver::InferRequestHeader::Input::name (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output (C++ member)
nvidia::inferenceserver::InferRequestHeader::output (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output::byte_size (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output::Class (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output::Class::count (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output::cls (C++ member)
nvidia::inferenceserver::InferRequestHeader::Output::name (C++ member)
nvidia::inferenceserver::InferRequestStats (C++ member)
nvidia::inferenceserver::InferRequestStats::compute (C++ member)
nvidia::inferenceserver::InferRequestStats::failed (C++ member)
nvidia::inferenceserver::InferRequestStats::queue (C++ member)
nvidia::inferenceserver::InferRequestStats::success (C++ member)
nvidia::inferenceserver::InferResponse (C++ member)
nvidia::inferenceserver::InferResponse::meta_data (C++ member)
nvidia::inferenceserver::InferResponse::raw_output (C++ member)
nvidia::inferenceserver::InferResponse::request_status (C++ member)
nvidia::inferenceserver::InferResponseHeader (C++ member)
nvidia::inferenceserver::InferResponseHeader::batch_size (C++ member)
nvidia::inferenceserver::InferResponseHeader::model_name (C++ member)
nvidia::inferenceserver::InferResponseHeader::model_version (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output (C++ member)
nvidia::inferenceserver::InferResponseHeader::output (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::batch_classes (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Class (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Class::idx (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Class::label (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Class::value (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Classes (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Classes::cls (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::name (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Raw (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::raw (C++ member)
nvidia::inferenceserver::InferResponseHeader::Output::Raw::byte_size (C++ member)
nvidia::inferenceserver::ModelConfig (C++ member)
nvidia::inferenceserver::ModelConfig::cc_model_filenames (C++ member)
nvidia::inferenceserver::ModelConfig::default_model_filename (C++ member)
nvidia::inferenceserver::ModelConfig::dynamic_batching (C++ member)
nvidia::inferenceserver::ModelConfig::input (C++ member)
nvidia::inferenceserver::ModelConfig::instance_group (C++ member)
nvidia::inferenceserver::ModelConfig::max_batch_size (C++ member)
nvidia::inferenceserver::ModelConfig::name (C++ member)
nvidia::inferenceserver::ModelConfig::optimization (C++ member)
nvidia::inferenceserver::ModelConfig::output (C++ member)
nvidia::inferenceserver::ModelConfig::platform (C++ member)
nvidia::inferenceserver::ModelConfig::tags (C++ member)
nvidia::inferenceserver::ModelConfig::version_policy (C++ member)
nvidia::inferenceserver::ModelConfigList (C++ member)
nvidia::inferenceserver::ModelDynamicBatching (C++ member)
nvidia::inferenceserver::ModelDynamicBatching::max_queue_delay_microseconds (C++ member)
nvidia::inferenceserver::ModelDynamicBatching::preferred_batch_size (C++ member)
nvidia::inferenceserver::ModelInput (C++ member)
nvidia::inferenceserver::ModelInput::data_type (C++ member)
nvidia::inferenceserver::ModelInput::dims (C++ member)
nvidia::inferenceserver::ModelInput::Format (C++ enum)
nvidia::inferenceserver::ModelInput::format (C++ member)
nvidia::inferenceserver::ModelInput::Format::Format::FORMAT_NCHW (C++ enumerator)
nvidia::inferenceserver::ModelInput::Format::Format::FORMAT_NHWC (C++ enumerator)
nvidia::inferenceserver::ModelInput::Format::Format::FORMAT_NONE (C++ enumerator)
nvidia::inferenceserver::ModelInput::name (C++ member)
nvidia::inferenceserver::ModelInstanceGroup (C++ member)
nvidia::inferenceserver::ModelInstanceGroup::count (C++ member)
nvidia::inferenceserver::ModelInstanceGroup::gpus (C++ member)
nvidia::inferenceserver::ModelInstanceGroup::Kind (C++ enum)
nvidia::inferenceserver::ModelInstanceGroup::kind (C++ member)
nvidia::inferenceserver::ModelInstanceGroup::Kind::Kind::KIND_AUTO (C++ enumerator)
nvidia::inferenceserver::ModelInstanceGroup::Kind::Kind::KIND_CPU (C++ enumerator)
nvidia::inferenceserver::ModelInstanceGroup::Kind::Kind::KIND_GPU (C++ enumerator)
nvidia::inferenceserver::ModelInstanceGroup::name (C++ member)
nvidia::inferenceserver::ModelOptimizationPolicy (C++ member)
nvidia::inferenceserver::ModelOptimizationPolicy::Graph (C++ member)
nvidia::inferenceserver::ModelOptimizationPolicy::graph (C++ member)
nvidia::inferenceserver::ModelOptimizationPolicy::Graph::level (C++ member)
nvidia::inferenceserver::ModelOutput (C++ member)
nvidia::inferenceserver::ModelOutput::data_type (C++ member)
nvidia::inferenceserver::ModelOutput::dims (C++ member)
nvidia::inferenceserver::ModelOutput::label_filename (C++ member)
nvidia::inferenceserver::ModelOutput::name (C++ member)
nvidia::inferenceserver::ModelReadyState (C++ enum)
nvidia::inferenceserver::ModelReadyState::ModelReadyState::MODEL_LOADING (C++ enumerator)
nvidia::inferenceserver::ModelReadyState::ModelReadyState::MODEL_READY (C++ enumerator)
nvidia::inferenceserver::ModelReadyState::ModelReadyState::MODEL_UNAVAILABLE (C++ enumerator)
nvidia::inferenceserver::ModelReadyState::ModelReadyState::MODEL_UNKNOWN (C++ enumerator)
nvidia::inferenceserver::ModelReadyState::ModelReadyState::MODEL_UNLOADING (C++ enumerator)
nvidia::inferenceserver::ModelStatus (C++ member)
nvidia::inferenceserver::ModelStatus::config (C++ member)
nvidia::inferenceserver::ModelStatus::version_status (C++ member)
nvidia::inferenceserver::ModelVersionPolicy (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::All (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Latest (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Latest::num_versions (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Specific (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Specific::all (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Specific::latest (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Specific::specific (C++ member)
nvidia::inferenceserver::ModelVersionPolicy::Specific::versions (C++ member)
nvidia::inferenceserver::ModelVersionStatus (C++ member)
nvidia::inferenceserver::ModelVersionStatus::infer_stats (C++ member)
nvidia::inferenceserver::ModelVersionStatus::model_execution_count (C++ member)
nvidia::inferenceserver::ModelVersionStatus::model_inference_count (C++ member)
nvidia::inferenceserver::ModelVersionStatus::ready_statue (C++ member)
nvidia::inferenceserver::ProfileRequest (C++ member)
nvidia::inferenceserver::ProfileRequest::cmd (C++ member)
nvidia::inferenceserver::ProfileRequestStats (C++ member)
nvidia::inferenceserver::ProfileRequestStats::success (C++ member)
nvidia::inferenceserver::ProfileResponse (C++ member)
nvidia::inferenceserver::ProfileResponse::request_status (C++ member)
nvidia::inferenceserver::RequestStatus (C++ member)
nvidia::inferenceserver::RequestStatus::code (C++ member)
nvidia::inferenceserver::RequestStatus::msg (C++ member)
nvidia::inferenceserver::RequestStatus::request_id (C++ member)
nvidia::inferenceserver::RequestStatus::server_id (C++ member)
nvidia::inferenceserver::RequestStatusCode (C++ enum)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::INTERNAL (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::INVALID (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::INVALID_ARG (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::NOT_FOUND (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::SUCCESS (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::UNAVAILABLE (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::UNKNOWN (C++ enumerator)
nvidia::inferenceserver::RequestStatusCode::RequestStatusCode::UNSUPPORTED (C++ enumerator)
nvidia::inferenceserver::ServerReadyState (C++ enum)
nvidia::inferenceserver::ServerReadyState::ServerReadyState::SERVER_EXITING (C++ enumerator)
nvidia::inferenceserver::ServerReadyState::ServerReadyState::SERVER_FAILED_TO_INITIALIZE (C++ enumerator)
nvidia::inferenceserver::ServerReadyState::ServerReadyState::SERVER_INITIALIZING (C++ enumerator)
nvidia::inferenceserver::ServerReadyState::ServerReadyState::SERVER_INVALID (C++ enumerator)
nvidia::inferenceserver::ServerReadyState::ServerReadyState::SERVER_READY (C++ enumerator)
nvidia::inferenceserver::ServerStatus (C++ member)
nvidia::inferenceserver::ServerStatus::health_stats (C++ member)
nvidia::inferenceserver::ServerStatus::id (C++ member)
nvidia::inferenceserver::ServerStatus::model_status (C++ member)
nvidia::inferenceserver::ServerStatus::profile_stats (C++ member)
nvidia::inferenceserver::ServerStatus::ready_state (C++ member)
nvidia::inferenceserver::ServerStatus::status_stats (C++ member)
nvidia::inferenceserver::ServerStatus::uptime_ns (C++ member)
nvidia::inferenceserver::ServerStatus::version (C++ member)
nvidia::inferenceserver::StatDuration (C++ member)
nvidia::inferenceserver::StatDuration::count (C++ member)
nvidia::inferenceserver::StatDuration::total_time_ns (C++ member)
nvidia::inferenceserver::StatusRequest (C++ member)
nvidia::inferenceserver::StatusRequest::model_name (C++ member)
nvidia::inferenceserver::StatusRequestStats (C++ member)
nvidia::inferenceserver::StatusRequestStats::success (C++ member)
nvidia::inferenceserver::StatusResponse (C++ member)
nvidia::inferenceserver::StatusResponse::request_status (C++ member)
nvidia::inferenceserver::StatusResponse::server_status (C++ member)
P
ProtocolType (class in tensorrtserver.api)
R
request_id() (tensorrtserver.api.InferenceServerException method)
run() (tensorrtserver.api.InferContext method)
S
server_id() (tensorrtserver.api.InferenceServerException method)
ServerHealthContext (class in tensorrtserver.api)
ServerStatusContext (class in tensorrtserver.api)
T
tensorrtserver.api (module)
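For the asynchronous entries above (async_run(), get_async_run_results(), get_ready_async_request()) and InferenceServerException, a correspondingly hedged sketch: the endpoint, model name, and tensor names remain hypothetical placeholders, and argument order should be verified against the Client API section.

    import numpy as np
    from tensorrtserver.api import (
        InferContext, InferenceServerException, ProtocolType)

    ctx = InferContext("localhost:8000", ProtocolType.from_str("http"), "mymodel")
    try:
        # async_run() queues the request and returns a request id
        # immediately instead of blocking like run().
        request_id = ctx.async_run(
            {"INPUT": [np.zeros((3, 224, 224), dtype=np.float32)]},
            {"OUTPUT": InferContext.ResultFormat.RAW},
            1)
        # Wait for that request to complete and collect its results.
        result = ctx.get_async_run_results(request_id, True)
        print(ctx.get_last_request_id(), ctx.get_last_request_model_name())
    except InferenceServerException as ex:
        # message(), request_id(), and server_id() are the accessors
        # indexed above for inspecting a failed request.
        print("inference failed:", ex.message())
    finally:
        ctx.close()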