nat.plugins.eval.runtime_evaluator.register#

Classes#

AverageLLMLatencyConfig

Mean time difference between paired LLM_START and LLM_END events (matched by UUID).

AverageWorkflowRuntimeConfig

Average workflow runtime per item (max timestamp - min timestamp).

AverageNumberOfLLMCallsConfig

Average number of LLM calls per item (count of LLM_END).

AverageTokensPerLLMEndConfig

Average total tokens per LLM_END event (prompt + completion tokens, when available).

Functions#

Module Contents#

class AverageLLMLatencyConfig#

Bases: nat.data_models.evaluator.EvaluatorBaseConfig

Mean time difference between paired LLM_START and LLM_END events (matched by UUID).

max_concurrency: int = None#
class AverageWorkflowRuntimeConfig#

Bases: nat.data_models.evaluator.EvaluatorBaseConfig

Average workflow runtime per item (max timestamp - min timestamp).

max_concurrency: int = None#
class AverageNumberOfLLMCallsConfig#

Bases: nat.data_models.evaluator.EvaluatorBaseConfig

Average number of LLM calls per item (count of LLM_END).

max_concurrency: int = None#
class AverageTokensPerLLMEndConfig#

Bases: nat.data_models.evaluator.EvaluatorBaseConfig

Average total tokens per LLM_END event (prompt + completion tokens, when available).

max_concurrency: int = None#
async register_avg_llm_latency_evaluator(
config: AverageLLMLatencyConfig,
builder: nat.builder.builder.EvalBuilder,
)#
async register_avg_workflow_runtime_evaluator(
config: AverageWorkflowRuntimeConfig,
builder: nat.builder.builder.EvalBuilder,
)#
async register_avg_num_llm_calls_evaluator(
config: AverageNumberOfLLMCallsConfig,
builder: nat.builder.builder.EvalBuilder,
)#
async register_avg_tokens_per_llm_end_evaluator(
config: AverageTokensPerLLMEndConfig,
builder: nat.builder.builder.EvalBuilder,
)#