nat.eval.rag_evaluator.evaluate#

Attributes#

Classes#

Module Contents#

logger#
class RAGEvaluator(
evaluator_llm: ragas.llms.LangchainLLMWrapper,
metrics: collections.abc.Sequence[ragas.metrics.Metric],
max_concurrency=8,
input_obj_field: str | None = None,
)#
evaluator_llm#
metrics#
max_concurrency = 8#
input_obj_field = None#
extract_input_obj(
item: nat.eval.evaluator.evaluator_model.EvalInputItem,
) → str#

Extracts the input object from EvalInputItem based on the configured input_obj_field.

eval_input_to_ragas(
eval_input: nat.eval.evaluator.evaluator_model.EvalInput,
) → ragas.EvaluationDataset#

Converts EvalInput into a Ragas-compatible EvaluationDataset.

ragas_to_eval_output(
eval_input: nat.eval.evaluator.evaluator_model.EvalInput,
results_dataset: ragas.dataset_schema.EvaluationResult | None,
) → nat.eval.evaluator.evaluator_model.EvalOutput#

Converts the Ragas EvaluationResult to a nat EvalOutput.

async evaluate(
eval_input: nat.eval.evaluator.evaluator_model.EvalInput,
) → nat.eval.evaluator.evaluator_model.EvalOutput#

Run Ragas metrics evaluation on the provided EvalInput.