core.inference.inference_request#

Module Contents#

Classes#

Status

Enum for the status of an inference request

InferenceRequest

Class for one inference request

DynamicInferenceEventType

Dynamic inference event type.

DynamicInferenceEvent

A lifecycle event for a dynamic inference request.

DynamicInferenceRequest

Class for one inference request

DynamicInferenceRequestRecord

History of DynamicInferenceRequest objects over multiple suspend and resume cycles.

VLMInferenceRequest

Class for a VLM inference request

Functions#

serialize_tensor

Serialize tensor to bytes.

deserialize_tensor

Deserialize tensor from bytes.

API#

core.inference.inference_request.serialize_tensor(tensor: torch.Tensor) → bytes#

Serialize tensor to bytes.

Parameters:

tensor (Tensor) – The tensor to serialize.

Returns:

(bytes) Byte representation of tensor.

core.inference.inference_request.deserialize_tensor(tensor_bytes: bytes) → torch.Tensor#

Deserialize tensor from bytes.

Parameters:

tensor_bytes (bytes) – Byte representation of tensor.

Returns:

(Tensor) The deserialized tensor.
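
As an illustrative round-trip sketch (not part of the library), the two functions above can be paired to move a tensor through a byte representation; the import path below is assumed from the fully qualified names used elsewhere on this page.

```python
# Hedged sketch: serialize a tensor to bytes and reconstruct it.
import torch

from megatron.core.inference.inference_request import (
    deserialize_tensor,
    serialize_tensor,
)

logits = torch.randn(2, 4)
blob = serialize_tensor(logits)        # bytes suitable for transport or storage
restored = deserialize_tensor(blob)    # back to a torch.Tensor

assert torch.equal(logits.cpu(), restored.cpu())
```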

class core.inference.inference_request.Status(*args, **kwds)#

Bases: enum.Enum

Enum for the status of an inference request

Initialization

WAITING_IN_QUEUE#

1

ACTIVE_AND_GENERATING_TOKENS#

2

ACTIVE_BUT_NOT_GENERATING_TOKENS#

3

COMPLETED#

4

FAILED#

5
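
A minimal usage sketch (illustrative only): the enum members can be compared directly, for example to decide whether a request has reached a terminal state.

```python
from megatron.core.inference.inference_request import Status

def is_terminal(status: Status) -> bool:
    # A request is terminal once it has completed or failed; the other three
    # members describe requests that are still queued or active.
    return status in (Status.COMPLETED, Status.FAILED)

print(is_terminal(Status.WAITING_IN_QUEUE))  # False
print(is_terminal(Status.COMPLETED))         # True
```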

class core.inference.inference_request.InferenceRequest#

Class for one inference request

Contains the relevant data for an inference request

request_id: int#

None

prompt: str#

None

sampling_params: Optional[megatron.core.inference.sampling_params.SamplingParams]#

None

inference_parameters: Optional[megatron.core.inference.sampling_params.SamplingParams]#

None

prompt_tokens: Optional[List[int]]#

None

arrival_time: Optional[float]#

None

status: Optional[core.inference.inference_request.Status]#

None

encoder_prompt: Optional[str]#

None

generated_text: Optional[str]#

None

segments: Optional[List[str]]#

None

generated_segments: Optional[List[str]]#

None

generated_sequence_lengths: Optional[List[int]]#

None

generated_tokens: Optional[torch.Tensor]#

None

prompt_log_probs: Optional[torch.Tensor]#

None

generated_log_probs: Optional[torch.Tensor]#

None

prompt_top_n_logprobs: Optional[List[Dict[str, float]]]#

None

generated_top_n_logprobs: Optional[List[Dict[str, float]]]#

None

generated_length: Optional[int]#

None

tpot: Optional[List[int]]#

None

__post_init__()#
serialize() → dict#

Converts the instance into a serializable dictionary.

Returns:

(dict) A dictionary representation of the instance suitable for serialization.

classmethod deserialize(
obj: dict,
) → core.inference.inference_request.InferenceRequest#

Deserialize request.

Parameters:

obj (dict) – Serialized request data.

Returns:

(InferenceRequest) Deserialized request.
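
A hedged construction and round-trip sketch. It assumes the dataclass accepts its documented fields as keyword arguments and that the Optional fields default to None; only the serialize/deserialize calls are taken directly from the API above.

```python
from megatron.core.inference.inference_request import InferenceRequest, Status

# Assumption: request_id and prompt suffice to construct a request; the
# remaining Optional fields are left at their None defaults.
request = InferenceRequest(
    request_id=0,
    prompt="What is the capital of France?",
    prompt_tokens=[101, 2054, 2003, 102],   # illustrative token ids
    status=Status.WAITING_IN_QUEUE,
)

payload = request.serialize()                     # plain dict suitable for transport
restored = InferenceRequest.deserialize(payload)  # rebuild the request object
print(restored.request_id, restored.status)
```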

class core.inference.inference_request.DynamicInferenceEventType(*args, **kwds)#

Bases: enum.Enum

Dynamic inference event type.

Initialization

ADD#

‘auto(…)’

PAUSE#

‘auto(…)’

FINISH#

‘auto(…)’

FAIL#

‘auto(…)’

ERROR_TRANSIENT#

‘auto(…)’

ERROR_NONTRANSIENT#

‘auto(…)’

class core.inference.inference_request.DynamicInferenceEvent#

A lifecycle event for a dynamic inference request.

An event is currently one of the following:

  • request added

  • request paused

  • request finished

  • request failed

  • request error (transient)

  • request error (non-transient, i.e. fatal)

timestamp: Optional[float]#

None

type: core.inference.inference_request.DynamicInferenceEventType#

None

payload: Optional[Any]#

None

__post_init__()#
__str__()#
serialize() → dict#

Converts the instance into a serializable dictionary.

Returns:

(dict) A dictionary representation of the instance suitable for serialization.

classmethod deserialize(
obj: dict,
) → core.inference.inference_request.DynamicInferenceEvent#

Deserialize event.

Parameters:

obj (dict) – Serialized event data.

Returns:

(DynamicInferenceEvent) Deserialized event.
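
A hedged sketch of creating and round-tripping an event for logging or IPC. The keyword-only construction is an assumption about the dataclass; the serialize/deserialize and __str__ calls follow the API above.

```python
from megatron.core.inference.inference_request import (
    DynamicInferenceEvent,
    DynamicInferenceEventType,
)

# Assumption: the dataclass accepts its documented fields as keyword arguments,
# with timestamp and payload left at their defaults.
event = DynamicInferenceEvent(type=DynamicInferenceEventType.FINISH)

payload = event.serialize()                         # plain dict representation
restored = DynamicInferenceEvent.deserialize(payload)
print(str(restored))                                # human-readable form via __str__
print(restored.type is DynamicInferenceEventType.FINISH)
```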

class core.inference.inference_request.DynamicInferenceRequest#

Bases: core.inference.inference_request.InferenceRequest

Class for one inference request

Contains the relevant data for a dynamic inference request

request_id: int#

None

generated_tokens: List[int]#

‘field(…)’

prompt: Optional[str]#

None

prompt_tokens: Optional[torch.Tensor]#

None

remaining_prompt_tokens: Optional[torch.Tensor]#

None

latency: Optional[float]#

None

finished_chunk_token_count#

0

__post_init__()#
property remaining_prompt_length#

Get the length of the remaining prompt tokens.

events: List[core.inference.inference_request.DynamicInferenceEvent]#

‘field(…)’

__str__()#
serialize()#

Converts the instance into a serializable dictionary.

Returns:

(dict) A dictionary representation of the instance suitable for serialization.

classmethod deserialize(
obj: dict,
) → core.inference.inference_request.DynamicInferenceRequest#

Deserialize request.

Parameters:

obj (dict) – Serialized request data.

Returns:

(DynamicInferenceRequest) Deserialized request.

property tracked_metadata: List[Any]#

Obtain an ordered list of all request metadata to be tracked by the context.

This consists of metadata that is used to inform text generation. The values of such fields are tensorized and kept aligned with the current active batch.

Note that while the general request object is mutable, this metadata is inherently assumed to remain immutable once the request becomes active.

static get_metadata_types() → List[Tuple[str, torch.dtype, bool]]#

Returns the names, dtypes, and target devices of all tracked request metadata.

Returns:

A list of tuples, one per tracked metadata field, each containing: name (str) – the name of the metadata field; dtype (torch.dtype) – the datatype of the metadata; on_device (bool) – whether the metadata lives on GPU (True) or CPU (False).

Return type:

List[Tuple[str, torch.dtype, bool]]
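
A short inspection sketch: it only reads the static schema and prints where the tensorized copy of each metadata field is expected to live.

```python
from megatron.core.inference.inference_request import DynamicInferenceRequest

# Each entry describes one tracked metadata field: its name, its dtype, and
# whether it is kept on GPU (True) or CPU (False).
for name, dtype, on_device in DynamicInferenceRequest.get_metadata_types():
    placement = "GPU" if on_device else "CPU"
    print(f"{name}: {dtype} ({placement})")
```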

add_event(
type: core.inference.inference_request.DynamicInferenceEventType,
payload: Optional[Any] = None,
) → None#

Add event.

add_event_add()#

Add ‘add’ event.

add_event_pause()#

Add ‘pause’ event.

add_event_finish()#

Add ‘finish’ event.

add_event_fail()#

Add ‘fail’ event.

add_event_error_transient(error: Exception)#

Add transient error event.

add_event_error_nontransient(error: Exception)#

Add non-transient error event.

succeeded() → bool#

Request experienced no non-transient errors.

failed() → bool#

Request experienced non-transient error.
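
A lifecycle sketch tying the event helpers together. The construction arguments are assumptions (the engine normally creates these requests), but the event and status-check calls follow the methods documented above.

```python
import torch

from megatron.core.inference.inference_request import DynamicInferenceRequest

# Assumption: a request can be built from an id, a prompt, and its tokens;
# the values shown are illustrative.
request = DynamicInferenceRequest(
    request_id=42,
    prompt="Summarize the meeting notes.",
    prompt_tokens=torch.tensor([101, 7680, 2033, 102]),
)

request.add_event_add()        # request entered the scheduler
request.add_event_pause()      # temporarily paused (e.g. preempted)
request.add_event_finish()     # generation completed

print(request.succeeded())     # True: no non-transient errors were recorded
print(request.failed())        # False
```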

class core.inference.inference_request.DynamicInferenceRequestRecord#

History of DynamicInferenceRequest objects over multiple suspend and resume cycles.

requests: list[core.inference.inference_request.DynamicInferenceRequest]#

‘field(…)’

latency: Optional[float]#

None

classmethod from_request(
request: core.inference.inference_request.DynamicInferenceRequest,
) → core.inference.inference_request.DynamicInferenceRequestRecord#

Initialize record from a single request.

Parameters:

request (DynamicInferenceRequest) – Initial request.

Returns:

(DynamicInferenceRequestRecord) A record.

__getitem__(
idx: int,
) → core.inference.inference_request.DynamicInferenceRequest#

Get request by index.

Parameters:

idx (int) – Request index.

Returns:

(DynamicInferenceRequest) Request object.

property request_id: int#

Get request id.

Returns:

(int) Request id.

suspend(
tokenizer: megatron.core.tokenizers.MegatronTokenizer | None = None,
)#

Suspend request by storing references to previous prompt, generations, and sampling params.

Parameters:

tokenizer (MegatronTokenizer | None) – (Deprecated) Tokenizer.

merge(
tokenizer: megatron.core.tokenizers.MegatronTokenizer | None = None,
) → core.inference.inference_request.DynamicInferenceRequest#

Merge requests into a single suspend-agnostic request object.

Parameters:

tokenizer (MegatronTokenizer | None) – (Deprecated) Tokenizer.

Returns:

(DynamicInferenceRequest) Merged request.

serialize() → dict#

Converts the instance into a serializable dictionary.

Returns:

(dict) A dictionary representation of the instance suitable for serialization.

classmethod deserialize(
obj: dict,
) → core.inference.inference_request.DynamicInferenceRequestRecord#

Deserialize record.

Parameters:

obj (dict) – Serialized record data.

Returns:

(DynamicInferenceRequestRecord) Deserialized record.
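
A hedged sketch of how a record might track one request across suspend/resume cycles; the engine normally drives these calls, and the request construction below is an assumption.

```python
from megatron.core.inference.inference_request import (
    DynamicInferenceRequest,
    DynamicInferenceRequestRecord,
)

# Assumption: a minimal request with only an id and a prompt is constructible.
request = DynamicInferenceRequest(request_id=7, prompt="Continue the story.")
record = DynamicInferenceRequestRecord.from_request(request)

record.suspend()               # snapshot prompt, generations, and sampling params
first = record[0]              # index into the per-suspend history
merged = record.merge()        # collapse the history into one request object

print(record.request_id == merged.request_id)
```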

class core.inference.inference_request.VLMInferenceRequest#

Bases: core.inference.inference_request.InferenceRequest

Class for a VLM inference request

num_img_embeddings_per_tile: int#

None

imgs: torch.Tensor#

None

num_tiles: torch.Tensor#

None

decoder_seq_length: int#

None
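
A hedged construction sketch combining the inherited text fields with the VLM-specific fields above; the required arguments, tensor shapes, and counts are illustrative assumptions, not values prescribed by the API.

```python
import torch

from megatron.core.inference.inference_request import VLMInferenceRequest

# All values below are illustrative; the expected tensor shapes and embedding
# counts are defined by the model, not by this sketch.
vlm_request = VLMInferenceRequest(
    request_id=3,
    prompt="<image> Describe the scene.",
    num_img_embeddings_per_tile=576,
    imgs=torch.zeros(1, 3, 336, 336),   # one image in CHW layout (example shape)
    num_tiles=torch.tensor([1]),        # tiles per image
    decoder_seq_length=1024,
)
print(vlm_request.request_id)
```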