nemo_microservices.types.shared.config_data_param#

Module Contents#

Classes#

API#

class nemo_microservices.types.shared.config_data_param.ConfigDataParam(/, **data: typing.Any)#

Bases: nemo_microservices._models.BaseModel

actions_server_url: Optional[str]#

None

The URL of the actions server that should be used for the rails.

colang_version: Optional[str]#

None

The Colang version to use.

custom_data: Optional[Dict[str, object]]#

None

Any custom configuration data that might be needed.

enable_multi_step_generation: Optional[bool]#

None

Whether to enable multi-step generation for the LLM.

enable_rails_exceptions: Optional[bool]#

None

If set, the pre-defined guardrails raise exceptions instead of returning pre-defined messages.

instructions: Optional[List[nemo_microservices.types.shared.instruction.Instruction]]#

None

List of instructions in natural language that the LLM should use.

lowest_temperature: Optional[float]#

None

The lowest temperature that should be used for the LLM.

models: List[nemo_microservices.types.shared.guardrail_model.GuardrailModel]#

None

The list of models used by the rails configuration.

passthrough: Optional[bool]#

None

Whether the original prompt should pass through the guardrails configuration as is. This means it will not be altered in any way.

prompting_mode: Optional[str]#

None

Allows choosing between different prompting strategies.

prompts: Optional[List[nemo_microservices.types.shared.task_prompt.TaskPrompt]]#

None

The prompts that should be used for the various LLM tasks.

rails: Optional[nemo_microservices.types.shared.rails_param.RailsParam]#

None

Configuration of specific rails.

sample_conversation: Optional[str]#

None

The sample conversation that should be used inside the prompts.

tracing: Optional[nemo_microservices.types.shared.tracing_config.TracingConfig]#

None

Configuration for tracing.