nat.plugins.config_optimizer.prompts.ga_prompt_optimizer#

GA prompt optimizer: genetic-algorithm implementation for evolving prompts.

Attributes#

Classes#

GAPromptOptimizer

Genetic-algorithm prompt optimizer.

Functions#

_on_prompt_trial_end(→ None)

Build TrialResults for each individual in a GA generation and fire on_trial_end.

_on_prompt_study_end(→ None)

Fire on_study_end for a completed prompt GA optimization study.

optimize_prompts(→ None)

Entry point: run GA prompt optimizer (thin wrapper around GAPromptOptimizer).

Module Contents#

logger#
_on_prompt_trial_end(
callback_manager: nat.profiler.parameter_optimization.optimizer_callbacks.OptimizerCallbackManager | None,
population: collections.abc.Sequence[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
eval_metrics: list[str],
frozen_params: dict[str, Any] | None,
prompt_format_map: dict[str, str | None],
best: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
) → None#

Build TrialResults for each individual in a GA generation and fire on_trial_end.

_on_prompt_study_end(
callback_manager: nat.profiler.parameter_optimization.optimizer_callbacks.OptimizerCallbackManager | None,
best: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
frozen_params: dict[str, Any] | None,
prompt_format_map: dict[str, str | None],
trial_number_offset: int,
generations: int,
pop_size: int,
) → None#

Fire on_study_end for a completed prompt GA optimization study.

async optimize_prompts(
*,
base_cfg: nat.data_models.config.Config,
full_space: dict[str, nat.data_models.optimizable.SearchSpace],
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
opt_run_config: nat.data_models.optimizer.OptimizerRunConfig,
callback_manager: nat.profiler.parameter_optimization.optimizer_callbacks.OptimizerCallbackManager | None = None,
trial_number_offset: int = 0,
frozen_params: dict[str, Any] | None = None,
) → None#

Entry point: run GA prompt optimizer (thin wrapper around GAPromptOptimizer).

class GAPromptOptimizer#

Bases: nat.plugins.config_optimizer.prompts.base.BasePromptOptimizer

Genetic-algorithm prompt optimizer.

async _evaluate_single_given_trial(
ind: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
cfg_trial: nat.data_models.config.Config,
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
opt_run_config: nat.data_models.optimizer.OptimizerRunConfig,
oracle_feedback_worst_n: int,
) → None#

Run EvaluationRun for an already-built trial config; fill ind.metrics.

async _post_evaluate_single(
ind: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
all_results: list[list[tuple[str, Any]]],
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
opt_run_config: nat.data_models.optimizer.OptimizerRunConfig,
oracle_feedback_worst_n: int,
) → None#

Extract worst_items_reasoning for oracle feedback.

async _evaluate_population(
population: list[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
base_cfg: nat.data_models.config.Config,
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
opt_run_config: nat.data_models.optimizer.OptimizerRunConfig,
max_concurrency: int = 8,
callback_manager: nat.profiler.parameter_optimization.optimizer_callbacks.OptimizerCallbackManager | None = None,
oracle_feedback_worst_n: int = 5,
) → None#

Evaluate all individuals (concurrently).

static _normalize_generation(
individuals: collections.abc.Sequence[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
metric_names: collections.abc.Sequence[str],
directions: collections.abc.Sequence[str],
eps: float = 1e-12,
) → list[dict[str, float]]#

Return per-individual dict of normalized scores in [0,1] where higher is better.

static _scalarize(
norm_scores: dict[str, float],
*,
mode: str,
weights: collections.abc.Sequence[float] | None,
) → float#

Collapse normalized scores to a single scalar (higher is better).

static _apply_diversity_penalty(
individuals: collections.abc.Sequence[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
diversity_lambda: float,
) → list[float]#

Per-individual diversity penalty (exact-string key).

_compute_fitness(
population: list[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
diversity_lambda: float = 0.0,
) → None#

Set scalar_fitness on each individual (normalize, scalarize, diversity penalty).

static _persist_checkpoint(
gen: int,
best: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
prompt_space: dict[str, tuple[str, str]],
out_dir: pathlib.Path,
) → None#

Write generation checkpoint JSON.

static _persist_final(
best: nat.plugins.config_optimizer.prompts.ga_individual.Individual,
history_rows: list[dict[str, Any]],
prompt_space: dict[str, tuple[str, str]],
out_dir: pathlib.Path,
) → None#

Write final optimized_prompts.json and ga_history_prompts.csv.

static _tournament_select(
pop: collections.abc.Sequence[nat.plugins.config_optimizer.prompts.ga_individual.Individual],
k: int,
) → nat.plugins.config_optimizer.prompts.ga_individual.Individual#

Select one individual by tournament (max fitness).

async run(
*,
base_cfg: nat.data_models.config.Config,
full_space: dict[str, nat.data_models.optimizable.SearchSpace],
optimizer_config: nat.data_models.optimizer.OptimizerConfig,
opt_run_config: nat.data_models.optimizer.OptimizerRunConfig,
callback_manager: nat.profiler.parameter_optimization.optimizer_callbacks.OptimizerCallbackManager | None = None,
trial_number_offset: int = 0,
frozen_params: dict[str, Any] | None = None,
) → None#