nat.agent.react_agent.agent#

Attributes#

Classes#

ReActGraphState

State schema for the ReAct Agent Graph

ReActAgentGraph

Configurable LangGraph ReAct Agent. A ReAct Agent performs reasoning in between tool calls, and utilizes the tool

Functions#

create_react_agent_prompt(...)

Create a ReAct Agent prompt from the config.

Module Contents#

logger#
class ReActGraphState(/, **data: Any)#

Bases: pydantic.BaseModel

State schema for the ReAct Agent Graph

Create a new model by parsing and validating input data from keyword arguments.

Raises [ValidationError][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

self is explicitly positional-only to allow self as a field name.

messages: list[langchain_core.messages.base.BaseMessage] = None#
agent_scratchpad: list[langchain_core.agents.AgentAction] = None#
tool_responses: list[langchain_core.messages.base.BaseMessage] = None#
final_answer: str | None = None#
class ReActAgentGraph(
llm: langchain_core.language_models.BaseChatModel,
prompt: langchain_core.prompts.ChatPromptTemplate,
tools: list[langchain_core.tools.BaseTool],
use_tool_schema: bool = True,
callbacks: list[langchain_core.callbacks.base.AsyncCallbackHandler] | None = None,
detailed_logs: bool = False,
log_response_max_chars: int = 1000,
retry_agent_response_parsing_errors: bool = True,
parse_agent_response_max_retries: int = 1,
tool_call_max_retries: int = 1,
pass_tool_call_errors_to_agent: bool = True,
normalize_tool_input_quotes: bool = True,
)#

Bases: nat.agent.dual_node.DualNodeAgent

Configurable LangGraph ReAct Agent. A ReAct Agent performs reasoning in between tool calls, and utilizes the tool names and descriptions to select the optimal tool. Supports retrying on output parsing errors. Argument "detailed_logs" toggles logging of inputs, outputs, and intermediate steps.

parse_agent_response_max_retries = 1#
tool_call_max_retries = 1#
pass_tool_call_errors_to_agent = True#
normalize_tool_input_quotes = True#
agent#
tools_dict#
_maybe_bind_llm_and_yield() langchain_core.runnables.Runnable[langchain_core.language_models.LanguageModelInput, langchain_core.messages.base.BaseMessage]#

Bind additional parameters to the LLM if needed - if the LLM is a smart model, no need to bind any additional parameters - if the LLM is a non-smart model, bind a stop sequence to the LLM

Returns:

Runnable[LanguageModelInput, BaseMessage]: The LLM with any additional parameters bound.

_get_tool(tool_name: str)#
async agent_node(state: ReActGraphState)#
async conditional_edge(state: ReActGraphState)#
async tool_node(state: ReActGraphState)#
async build_graph()#
static validate_system_prompt(system_prompt: str) bool#
create_react_agent_prompt(
config: nat.agent.react_agent.register.ReActAgentWorkflowConfig,
) langchain_core.prompts.ChatPromptTemplate#

Create a ReAct Agent prompt from the config.

Args:

config (ReActAgentWorkflowConfig): The config to use for the prompt.

Returns:

ChatPromptTemplate: The ReAct Agent prompt.