tritonclient.http.aio

Classes

InferenceServerClient(url[, verbose, ...]) – This feature is currently in beta and may be subject to change.

class tritonclient.http.aio.InferenceServerClient(url, verbose=False, conn_limit=100, conn_timeout=60.0, ssl=False, ssl_context=None)

This feature is currently in beta and may be subject to change.

An asyncio counterpart of tritonclient.http.InferenceServerClient that exposes the same operations with async/await syntax. The object is intended to be used by a single thread; calling its methods concurrently from different threads is not supported and can cause undefined behavior.
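A minimal construction-and-teardown sketch, assuming a Triton server listening on localhost:8000 (the default HTTP port); the URL and server location are assumptions for illustration:

import asyncio

import tritonclient.http.aio as aiohttpclient


async def main():
    # All methods are coroutines and must be awaited from the single
    # thread running the event loop.
    client = aiohttpclient.InferenceServerClient(url="localhost:8000", verbose=False)
    try:
        print(await client.is_server_live())
    finally:
        # Release the underlying HTTP session; see close() below.
        await client.close()


asyncio.run(main())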

_fix_header(headers)

Returns headers that are valid for aiohttp.

Parameters:

headers (dict (or None)) – HTTP headers to fix before processing the request.

async _get(request_uri, headers, query_params)

Issues a GET request to the server.

Parameters:
  • request_uri (str) – The request URI to be used in the GET request.

  • headers (dict) – Additional HTTP headers to include in the request.

  • query_params (dict) – Optional URL query parameters to use in the network transaction.

Returns:

The response from the server.

Return type:

aiohttp.ClientResponse

async _post(request_uri, request_body, headers, query_params)

Issues a POST request to the server.

Parameters:
  • request_uri (str) – The request URI to be used in the POST request.

  • request_body (str) – The body of the request.

  • headers (dict) – Additional HTTP headers to include in the request.

  • query_params (dict) – Optional URL query parameters to use in the network transaction.

Returns:

The response from the server.

Return type:

aiohttp.ClientResponse

_validate_headers(headers)

Checks for any unsupported HTTP headers before processing a request.

Parameters:

headers (dict) – HTTP headers to validate before processing the request.

Raises:

InferenceServerException – If an unsupported HTTP header is included in a request.

async close()

Close the client. Any future calls to the server will result in an error.
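The client is not documented here as an async context manager, so a small wrapper (a hypothetical helper, not part of tritonclient) can guarantee close() runs even when the body raises:

import contextlib

import tritonclient.http.aio as aiohttpclient


@contextlib.asynccontextmanager
async def triton_client(url="localhost:8000"):
    client = aiohttpclient.InferenceServerClient(url=url)
    try:
        yield client
    finally:
        # After this point, any further call on the client will fail.
        await client.close()

Usage is then async with triton_client() as client: ... inside a coroutine.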

static generate_request_body(inputs, outputs=None, request_id='', sequence_id=0, sequence_start=False, sequence_end=False, priority=0, timeout=None, parameters=None)

Refer to tritonclient.http.InferenceServerClient.generate_request_body()
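Because generate_request_body() is static, a request body can be assembled without a live client, for example to precompute payloads; a sketch in which the tensor name, shape, and datatype are assumptions that must match the target model:

import numpy as np

import tritonclient.http.aio as aiohttpclient
from tritonclient.http import InferInput

data = np.zeros((1, 16), dtype=np.float32)
infer_input = InferInput("INPUT0", [1, 16], "FP32")
infer_input.set_data_from_numpy(data)

# header_length is the JSON header size when binary tensor data is appended
# to the body; it is None when the body is pure JSON.
request_body, header_length = aiohttpclient.InferenceServerClient.generate_request_body(
    [infer_input]
)

The resulting body pairs with parse_response_body() below for offline encoding and decoding of the inference protocol.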

async get_cuda_shared_memory_status(region_name='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_cuda_shared_memory_status()

async get_inference_statistics(model_name='', model_version='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_inference_statistics()

async get_log_settings(headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_log_settings()

async get_model_config(model_name, model_version='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_model_config()

async get_model_metadata(model_name, model_version='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_model_metadata()

async get_model_repository_index(headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_model_repository_index()

async get_server_metadata(headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_server_metadata()
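The metadata and status getters all follow the same await pattern; a brief sketch in which the model name "simple" is an assumption:

async def inspect(client):
    # Server-wide metadata: name, version, and supported extensions.
    server_meta = await client.get_server_metadata()
    print(server_meta["version"], server_meta["extensions"])

    # Per-model metadata and configuration.
    model_meta = await client.get_model_metadata("simple")
    model_config = await client.get_model_config("simple")
    print(model_meta["inputs"], model_config["max_batch_size"])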

async get_system_shared_memory_status(region_name='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_system_shared_memory_status()

async get_trace_settings(model_name=None, headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.get_trace_settings()

async infer(model_name, inputs, model_version='', outputs=None, request_id='', sequence_id=0, sequence_start=False, sequence_end=False, priority=0, timeout=None, headers=None, query_params=None, request_compression_algorithm=None, response_compression_algorithm=None, parameters=None)

Refer to tritonclient.http.InferenceServerClient.infer()
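An end-to-end inference sketch, assuming a model named "simple" with one FP32 input INPUT0 of shape [1, 16] and one output OUTPUT0 (all of these names are assumptions):

import asyncio

import numpy as np

import tritonclient.http.aio as aiohttpclient
from tritonclient.http import InferInput, InferRequestedOutput


async def main():
    client = aiohttpclient.InferenceServerClient(url="localhost:8000")
    try:
        data = np.random.rand(1, 16).astype(np.float32)
        infer_input = InferInput("INPUT0", [1, 16], "FP32")
        infer_input.set_data_from_numpy(data)

        result = await client.infer(
            "simple",
            inputs=[infer_input],
            outputs=[InferRequestedOutput("OUTPUT0")],
        )
        print(result.as_numpy("OUTPUT0"))
    finally:
        await client.close()


asyncio.run(main())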

async is_model_ready(model_name, model_version='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.is_model_ready()

async is_server_live(headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.is_server_live()

async is_server_ready(headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.is_server_ready()
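The three readiness probes compose into a simple polling loop; a sketch with a hypothetical model name:

import asyncio


async def wait_until_ready(client, model_name="simple", retries=10):
    # Check liveness and readiness before sending inference requests.
    for _ in range(retries):
        if (
            await client.is_server_live()
            and await client.is_server_ready()
            and await client.is_model_ready(model_name)
        ):
            return True
        await asyncio.sleep(1.0)
    return False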

async load_model(model_name, headers=None, query_params=None, config=None, files=None)

Refer to tritonclient.http.InferenceServerClient.load_model()

static parse_response_body(response_body, verbose=False, header_length=None, content_encoding=None)

Refer to tritonclient.http.InferenceServerClient.parse_response_body()

async register_cuda_shared_memory(name, raw_handle, device_id, byte_size, headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.register_cuda_shared_memory()

async register_system_shared_memory(name, key, byte_size, offset=0, headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.register_system_shared_memory()
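Registration pairs with the region helpers in tritonclient.utils.shared_memory (a separate utility that requires Triton's shared-memory support on the client host); a hedged sketch in which the region name and key are assumptions:

import tritonclient.utils.shared_memory as shm


async def with_shared_region(client, byte_size=64):
    # Create a system shared-memory region locally, then register it
    # with the server under the same name, key, and size.
    handle = shm.create_shared_memory_region("input_data", "/input_simple", byte_size)
    await client.register_system_shared_memory("input_data", "/input_simple", byte_size)
    try:
        print(await client.get_system_shared_memory_status())
    finally:
        await client.unregister_system_shared_memory("input_data")
        shm.destroy_shared_memory_region(handle)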

async unload_model(model_name, headers=None, query_params=None, unload_dependents=False)

Refer to tritonclient.http.InferenceServerClient.unload_model()
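load_model() and unload_model() take effect only when the server runs with explicit model control (started with --model-control-mode=explicit); a sketch with a hypothetical model name:

async def reload_model(client, model_name="simple"):
    await client.load_model(model_name)
    assert await client.is_model_ready(model_name)

    # ... serve traffic ...

    # unload_dependents also unloads models that depend on this one.
    await client.unload_model(model_name, unload_dependents=True)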

async unregister_cuda_shared_memory(name='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.unregister_cuda_shared_memory()

async unregister_system_shared_memory(name='', headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.unregister_system_shared_memory()

async update_log_settings(settings, headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.update_log_settings()

async update_trace_settings(model_name=None, settings={}, headers=None, query_params=None)

Refer to tritonclient.http.InferenceServerClient.update_trace_settings()
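Both update calls take a plain dict of settings; the keys below follow Triton's log and trace protocols but should be treated as assumptions, since the valid set depends on the server version:

async def tune_observability(client):
    # Raise server log verbosity.
    await client.update_log_settings({"log_verbose_level": 1})

    # Enable timestamp tracing for one model (name and keys are assumptions).
    await client.update_trace_settings(
        model_name="simple",
        settings={"trace_level": ["TIMESTAMPS"], "trace_rate": "1"},
    )
    print(await client.get_trace_settings(model_name="simple"))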

async tritonclient.http.aio._get_error(response)

Returns an InferenceServerException object if the response indicates an error; returns None otherwise.

async tritonclient.http.aio._raise_if_error(response)

Raises InferenceServerException if a non-Success response is received from the server.

Modules

tritonclient.http.aio.auth