# Source code for langchain_cohere.react_multi_hop.prompt
from__future__importannotationsfromdatetimeimportdatetimefromtypingimport(Any,Callable,Dict,List,Mapping,MutableMapping,Optional,Sequence,Tuple,Type,Union,)fromlangchain_core.agentsimportAgentAction,AgentActionMessageLogfromlangchain_core.messagesimportAIMessage,BaseMessage,SystemMessagefromlangchain_core.promptsimport(BasePromptTemplate,ChatPromptTemplate,PromptTemplate,)fromlangchain_core.toolsimportBaseToolfrompydanticimportBaseModelfromlangchain_cohere.react_multi_hop.default_prompt_constantsimport(_SpecialToken,default_basic_rules,default_multi_hop_instruction,default_safety_rules,default_style_guide,default_task_context,)fromlangchain_cohere.utilsimport(JSON_TO_PYTHON_TYPES,_remove_signature_from_tool_description,)multi_hop_prompt_partial=PromptTemplate.from_template("""{structured_preamble}## Available ToolsHere is a list of tools that you have available to you:{tools}{end_turn}{history}{user_prompt}{start_turn}{system_role}{multi_hop_instruction}{end_turn}{steps}""").partial(start_turn=_SpecialToken.start_turn.value,end_turn=_SpecialToken.end_turn.value,system_role=_SpecialToken.role_system.value,multi_hop_instruction=default_multi_hop_instruction,)
def render_structured_preamble(
    preamble: Optional[str] = None,
) -> str:
    """Renders the structured preamble part of the prompt content."""
    if preamble is None:
        # No caller-supplied preamble: fall back to the default task context
        # (stamped with the current local time) and the default style guide.
        default_preamble = """## Task And Context
{task_and_context}

## Style Guide
{style_guide}"""
        now_str = datetime.now().strftime("%A, %B %d, %Y %H:%M:%S")
        preamble = default_preamble.format(
            task_and_context=default_task_context.format(now=now_str),
            style_guide=default_style_guide,
        )

    # BOS + an opened system turn precedes the preamble sections.
    prompt_start = (
        f"{_SpecialToken.bos.value}"
        f"{_SpecialToken.start_turn.value}"
        f"{_SpecialToken.role_system.value}"
    )
    structured_preamble_template = """{prompt_start}# Safety Preamble
{safety_rules}

# System Preamble
## Basic Rules
{basic_rules}

# User Preamble
{preamble}"""
    return structured_preamble_template.format(
        prompt_start=prompt_start,
        safety_rules=default_safety_rules,
        basic_rules=default_basic_rules,
        preamble=preamble,
    )
def render_tool(
    tool: Optional[BaseTool] = None,
    json_schema: Optional[Dict] = None,
) -> str:
    """Renders a tool into prompt content.

    Either a BaseTool instance, or, a JSON schema must be provided.

    Args:
        tool: An instance of a BaseTool.
        json_schema: A dictionary containing the JSON schema representation of
            a tool.

    Returns:
        A string of prompt content.

    Example:
        .. code-block:: python

            from langchain_cohere.react_multi_hop.prompt import render_tool

            json_schema = {
                "name": "example_tool",
                "description": "A description of example_tool",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "foo": {"type": "string", "description": "A description of foo"},
                        "bar": {"type": "integer", "description": "A description of bar"},
                    },
                    "required": ["foo"],
                },
            }
            print(render_tool(json_schema=json_schema))

            tool = MyTool()
            print(render_tool(tool=tool))
    """
    # Rendered as a Python function stub whose docstring carries the tool's
    # description and argument documentation.
    template = """```python
{tool_signature}
    \"\"\"{tool_description}{tool_args}
    \"\"\"
    pass
```"""

    assert (
        tool is not None or json_schema is not None
    ), "Either a BaseTool instance or JSON schema must be provided."

    if tool is not None:
        tool_name = tool.name
        tool_description = tool.description
        tool_args = tool.args
        # A parameter with no default value is considered required.
        required_parameters = [
            name
            for name, definition in tool_args.items()
            if "default" not in definition
        ]
    else:
        assert json_schema is not None  # for type checkers
        parameters = json_schema.get("parameters", {})
        tool_name = json_schema.get("name", "")
        tool_description = json_schema.get("description", "")
        tool_args = parameters.get("properties", {})
        required_parameters = parameters.get("required", [])

    return template.format(
        tool_signature=_render_tool_signature(
            tool_name=tool_name,
            tool_args=tool_args,
            required_parameters=required_parameters,
        ),
        tool_description=_remove_signature_from_tool_description(
            name=tool_name, description=tool_description
        ),
        tool_args=_render_tool_args(
            tool_args=tool_args, required_parameters=required_parameters
        ),
    )
def render_observations(
    observations: Union[List[Mapping[str, str]], List[str], Mapping[str, str], str],
    index: int,
) -> Tuple[BaseMessage, int]:
    """Renders the 'output' part of an Agent's intermediate step into prompt content."""
    document_prompt = """Document: {index}
{fields}"""
    rendered_documents: List[str] = []
    for doc in convert_to_documents(observations):
        # Render each document's fields as "Key: value" lines.
        field_lines: List[str] = []
        for key, value in doc.items():
            # 'url' is a special key rendered upper case; all other keys are
            # transformed into title case.
            label = "URL" if key.lower() == "url" else key.title()
            field_lines.append(f"{label}: {value}")
        rendered_documents.append(
            document_prompt.format(index=index, fields="\n".join(field_lines))
        )
        # Document numbering continues across calls via the returned index.
        index += 1

    prompt_content = "<results>\n" + "\n\n".join(rendered_documents) + "\n</results>"
    return SystemMessage(content=prompt_content), index
def convert_to_documents(
    observations: Any,
) -> List[MutableMapping]:
    """Converts observations into a 'document' dict"""
    # First normalise the input into a list of candidate documents.
    if isinstance(observations, str):
        # A bare string becomes a single document under the 'output' key.
        candidates: Any = [{"output": observations}]
    elif isinstance(observations, Mapping):
        # A single mapping is wrapped in a list.
        candidates = [observations]
    elif isinstance(observations, Sequence):
        candidates = observations
    else:
        # Any other scalar becomes a single 'output' document.
        candidates = [{"output": observations}]

    # Non-mapping elements are also wrapped under the 'output' key.
    return [
        doc if isinstance(doc, Mapping) else {"output": doc} for doc in candidates
    ]
def render_intermediate_steps(
    intermediate_steps: List[Tuple[AgentAction, Any]],
) -> str:
    """Renders an agent's intermediate steps into prompt content."""
    if any(
        not isinstance(action, AgentActionMessageLog)
        for action, _ in intermediate_steps
    ):
        raise ValueError("all AgentAction steps must implement AgentActionMessageLog")

    parts: List[str] = []
    observation_index = 0
    for action, observation in intermediate_steps:
        parts.append(render_messages(action.messages))
        if observation:
            parts.append("\n")
            observation_message, observation_index = render_observations(
                observation, observation_index
            )
            parts.append(render_messages([observation_message]))
    # Always add an 'open' chatbot turn because prompts for the current turn
    # always end with an open turn.
    parts.append(f"{_SpecialToken.start_turn.value}{_SpecialToken.role_chatbot.value}")
    return "".join(parts)
def multi_hop_prompt(
    tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Callable[[Dict], BasePromptTemplate]:
    """The returned function produces a BasePromptTemplate suitable for multi-hop."""
    # The directly_answer tool is used internally by the model, but never
    # produces an AgentAction, so it only needs to appear in the prompt.
    all_tools = [create_directly_answer_tool(), *tools]

    def inner(x: Dict) -> BasePromptTemplate:
        # Fill the per-invocation slots of the shared partial template.
        return multi_hop_prompt_partial.partial(
            structured_preamble=render_structured_preamble(
                preamble=x.get("preamble", None)
            ),
            tools="\n\n".join(render_tool(t) for t in all_tools),
            user_prompt=render_messages(prompt.invoke(x).to_messages()),
            steps=render_intermediate_steps(x["intermediate_steps"]),
            history=render_messages(x.get("chat_history", [])),
        )

    return inner
def _render_type(type_: str, is_optional: bool) -> str:
    """
    Renders a tool's type into prompt content.

    Types should be Python types, but JSON schema type names are allowed and
    converted via JSON_TO_PYTHON_TYPES; unknown names pass through unchanged.
    """
    python_type = JSON_TO_PYTHON_TYPES.get(type_, type_)
    return f"Optional[{python_type}]" if is_optional else python_type


def _render_tool_signature(
    tool_name: str, tool_args: Dict, required_parameters: List
) -> str:
    """Renders the signature of a tool into prompt content."""
    rendered_params = [
        f"{name}: "
        + _render_type(
            type_=definition.get("type"),
            is_optional=name not in required_parameters,
        )
        for name, definition in tool_args.items()
    ]
    return f"def {tool_name}({', '.join(rendered_params)}) -> List[Dict]:"


def _render_tool_args(tool_args: Dict, required_parameters: List[str]) -> str:
    """Renders the 'Args' section of a tool's prompt content."""
    if not tool_args:
        # Tools without arguments get no Args section at all.
        return ""

    indent = " "
    entries = []
    for name, definition in tool_args.items():
        type_ = _render_type(
            type_=definition.get("type"),
            is_optional=name not in required_parameters,
        )
        entries.append(f"{name} ({type_}): {definition.get('description', '')}")
    separator = f"\n{indent * 8}"
    return f"\n\n{indent * 4}Args:{separator}" + separator.join(entries)
def create_directly_answer_tool() -> BaseTool:
    """
    directly_answer is a special tool that's always presented to the model as an
    available tool. The model only ever invokes this whilst answering and no
    AgentAction is produced, so it only needs to be added to the prompt.
    """

    class DirectlyAnswerTool(BaseTool):
        # The tool accepts no input at all.
        class InputSchema(BaseModel):
            pass

        name: str = "directly_answer"
        description: str = "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history"  # noqa: E501
        args_schema: Type[BaseModel] = InputSchema

        @property
        def args(self) -> dict:
            return {}

        def _run(self, *args: Any, **kwargs: Any) -> Any:
            # Never executed: the model handles this tool internally.
            raise NotImplementedError()

    return DirectlyAnswerTool()
def render_role(message: BaseMessage) -> str:
    """Renders the role of a message into prompt content."""
    if isinstance(message, AIMessage):
        return _SpecialToken.role_chatbot.value
    if isinstance(message, SystemMessage):
        return _SpecialToken.role_system.value
    # Any other message type (human, tool, generic) is rendered as the user.
    return _SpecialToken.role_user.value
def render_messages(messages: Sequence[BaseMessage]) -> str:
    """Renders one or more BaseMessage implementations into prompt content."""
    # Each message becomes: start-of-turn token, role token, content, end-of-turn.
    rendered_turns = [
        f"{_SpecialToken.start_turn.value}{render_role(message)}"
        f"{message.content}{_SpecialToken.end_turn.value}"
        for message in messages
    ]
    return "".join(rendered_turns)