# Source code for langchain_cohere.react_multi_hop.agent
"""
Cohere multi-hop agent enables multiple tools to be used in sequence to complete a
task.

This agent uses a multi hop prompt by Cohere, which is experimental and subject
to change. The latest prompt can be used by upgrading the langchain-cohere package.
"""
from typing import Any, Dict, List, MutableMapping, Optional, Sequence, Union

from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import (
    Runnable,
    RunnableConfig,
    RunnableParallel,
    RunnablePassthrough,
)
from langchain_core.tools import BaseTool

from langchain_cohere.react_multi_hop.parsing import (
    GROUNDED_ANSWER_KEY,
    OUTPUT_KEY,
    CohereToolsReactAgentOutputParser,
    parse_citations,
)
from langchain_cohere.react_multi_hop.prompt import (
    convert_to_documents,
    multi_hop_prompt,
)
def create_cohere_react_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
) -> Runnable:
    """
    Create an agent that enables multiple tools to be used in sequence to complete
    a task.

    Args:
        llm: The ChatCohere LLM instance to use.
        tools: Tools this agent has access to.
        prompt: The prompt to use.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the same
        input variables as the prompt passed in does and returns a List[AgentAction]
        or a single AgentFinish.

        The AgentFinish will have two fields:
            * output: str - The output string generated by the model
            * citations: List[CohereCitation] - A list of citations that refer to the
              output and observations made by the agent. If there are no citations
              this list will be empty.

    Example:
        .. code-block:: python

            from langchain.agents import AgentExecutor
            from langchain.prompts import ChatPromptTemplate
            from langchain_cohere import ChatCohere, create_cohere_react_agent

            prompt = ChatPromptTemplate.from_template("{input}")
            tools = []  # Populate this with a list of tools you would like to use.

            llm = ChatCohere()

            agent = create_cohere_react_agent(
                llm,
                tools,
                prompt
            )
            agent_executor = AgentExecutor(agent=agent, tools=tools)

            agent_executor.invoke({
                "input": "In what year was the company that was founded as Sound of Music added to the S&P 500?",
            })
    """  # noqa: E501
    # Creates a prompt, invokes the model, and produces a
    # "Union[List[AgentAction], AgentFinish]"
    generate_agent_steps = (
        multi_hop_prompt(tools=tools, prompt=prompt)
        | llm.bind(stop=["\nObservation:"], raw_prompting=True)
        | CohereToolsReactAgentOutputParser()
    )

    agent = (
        RunnablePassthrough.assign(
            # agent_scratchpad isn't used in this chain, but added here for
            # interoperability with other chains that may require it.
            agent_scratchpad=lambda _: [],
        )
        | RunnableParallel(
            chain_input=RunnablePassthrough(), agent_steps=generate_agent_steps
        )
        | _AddCitations()
    )
    return agent
class _AddCitations(Runnable):
    """
    Adds a list of citations to the output of the Cohere multi hop chain when the
    last step is an AgentFinish. Citations are generated from the observations
    (made in previous agent steps) and the grounded answer (made in the last step).
    """

    def invoke(
        self,
        input: Dict[str, Any],
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> Union[List[AgentAction], AgentFinish]:
        """Attach citations to an AgentFinish; pass other agent steps through.

        Args:
            input: Mapping with "agent_steps" (the parser output) and
                "chain_input" (the original chain input, including any
                intermediate_steps).
            config: Unused runnable configuration, kept for interface parity.

        Returns:
            The agent steps unchanged when they are not an AgentFinish, otherwise
            the AgentFinish with "output" rewritten and a "citations" list added
            to its return_values.
        """
        agent_steps = input.get("agent_steps", [])
        if not agent_steps:
            # The input wasn't as expected.
            return []

        if not isinstance(agent_steps, AgentFinish):
            # We're not on the AgentFinish step.
            return agent_steps
        agent_finish = agent_steps

        # Build a list of documents from the intermediate_steps used in this chain.
        intermediate_steps = input.get("chain_input", {}).get(
            "intermediate_steps", []
        )
        documents: List[MutableMapping] = []
        for _, observation in intermediate_steps:
            documents.extend(convert_to_documents(observation))

        # Build a list of citations, if any, from the documents + grounded answer.
        # The grounded answer is removed from return_values so only the parsed
        # output and citations are surfaced to the caller.
        grounded_answer = agent_finish.return_values.pop(GROUNDED_ANSWER_KEY, "")
        output, citations = parse_citations(
            grounded_answer=grounded_answer, documents=documents
        )
        agent_finish.return_values[OUTPUT_KEY] = output
        agent_finish.return_values["citations"] = citations

        return agent_finish