[docs]@deprecated("0.1.0",message=AGENT_DEPRECATION_WARNING,removal="1.0",)classChatAgent(Agent):"""Chat Agent."""output_parser:AgentOutputParser=Field(default_factory=ChatOutputParser)"""Output parser for the agent."""@propertydefobservation_prefix(self)->str:"""Prefix to append the observation with."""return"Observation: "@propertydefllm_prefix(self)->str:"""Prefix to append the llm call with."""return"Thought:"def_construct_scratchpad(self,intermediate_steps:List[Tuple[AgentAction,str]])->str:agent_scratchpad=super()._construct_scratchpad(intermediate_steps)ifnotisinstance(agent_scratchpad,str):raiseValueError("agent_scratchpad should be of type string.")ifagent_scratchpad:return(f"This was your previous work "f"(but I haven't seen any of it! I only see what "f"you return as final answer):\n{agent_scratchpad}")else:returnagent_scratchpad@classmethoddef_get_default_output_parser(cls,**kwargs:Any)->AgentOutputParser:returnChatOutputParser()@classmethoddef_validate_tools(cls,tools:Sequence[BaseTool])->None:super()._validate_tools(tools)validate_tools_single_input(class_name=cls.__name__,tools=tools)@propertydef_stop(self)->List[str]:return["Observation:"]
    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> BasePromptTemplate:
        """Create a prompt from a list of tools.

        Args:
            tools: A list of tools.
            system_message_prefix: The system message prefix.
                Default is SYSTEM_MESSAGE_PREFIX.
            system_message_suffix: The system message suffix.
                Default is SYSTEM_MESSAGE_SUFFIX.
            human_message: The human message. Default is HUMAN_MESSAGE.
            format_instructions: The format instructions.
                Default is FORMAT_INSTRUCTIONS.
            input_variables: The input variables. Default is None.

        Returns:
            A prompt template.
        """
        tool_strings = "\n".join(
            [f"{tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join(
            [
                system_message_prefix,
                tool_strings,
                format_instructions,
                system_message_suffix,
            ]
        )
        messages = [
            SystemMessagePromptTemplate.from_template(template),
            HumanMessagePromptTemplate.from_template(human_message),
        ]
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: The language model.
            tools: A list of tools.
            callback_manager: The callback manager. Default is None.
            output_parser: The output parser. Default is None.
            system_message_prefix: The system message prefix.
                Default is SYSTEM_MESSAGE_PREFIX.
            system_message_suffix: The system message suffix.
                Default is SYSTEM_MESSAGE_SUFFIX.
            human_message: The human message. Default is HUMAN_MESSAGE.
            format_instructions: The format instructions.
                Default is FORMAT_INSTRUCTIONS.
            input_variables: The input variables. Default is None.
            kwargs: Additional keyword arguments.

        Returns:
            An agent.
        """
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            system_message_prefix=system_message_prefix,
            system_message_suffix=system_message_suffix,
            human_message=human_message,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
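
# --- Usage sketch (illustrative, not part of the original module) ----------
# A hedged example of how this class is typically wired together: build the
# agent from an LLM and tools via `from_llm_and_tools` (which calls
# `create_prompt` internally), then run it inside an `AgentExecutor`.
# `ChatOpenAI` (from the separate `langchain_openai` package, requiring an
# OPENAI_API_KEY) and the `word_length` tool are assumptions made for the
# example; any BaseLanguageModel and BaseTool implementations work. ChatAgent
# is deprecated (see AGENT_DEPRECATION_WARNING), so this only shows the
# legacy wiring.
if __name__ == "__main__":
    from langchain.agents import AgentExecutor, Tool
    from langchain_openai import ChatOpenAI

    def word_length(word: str) -> str:
        """Return the number of characters in a word."""
        return str(len(word))

    demo_tools = [
        Tool(
            name="word_length",
            func=word_length,
            description="Returns the number of characters in a word.",
        )
    ]
    agent = ChatAgent.from_llm_and_tools(
        llm=ChatOpenAI(temperature=0), tools=demo_tools
    )
    executor = AgentExecutor(agent=agent, tools=demo_tools, verbose=True)
    print(executor.invoke({"input": "How many letters are in 'LangChain'?"}))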