# Source code for langchain.chains.router.multi_prompt
"""Use a single chain to route an input to one of multiple llm chains."""

from __future__ import annotations

from typing import Any, Dict, List, Optional

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate

from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
@deprecated(
    since="0.2.12",
    removal="1.0",
    message=(
        "Use RunnableLambda to select from multiple prompt templates. See example "
        "in API reference: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html"  # noqa: E501
    ),
)
class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts.

    This class is deprecated. See below for a replacement, which offers several
    benefits, including streaming and batch support. Below is an example
    implementation:

        .. code-block:: python

            from operator import itemgetter
            from typing import Literal
            from typing_extensions import TypedDict

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnableLambda, RunnablePassthrough
            from langchain_openai import ChatOpenAI

            llm = ChatOpenAI(model="gpt-4o-mini")

            prompt_1 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on animals."),
                    ("human", "{query}"),
                ]
            )
            prompt_2 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on vegetables."),
                    ("human", "{query}"),
                ]
            )

            chain_1 = prompt_1 | llm | StrOutputParser()
            chain_2 = prompt_2 | llm | StrOutputParser()

            route_system = "Route the user's query to either the animal or vegetable expert."
            route_prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", route_system),
                    ("human", "{query}"),
                ]
            )


            class RouteQuery(TypedDict):
                \"\"\"Route query to destination.\"\"\"

                destination: Literal["animal", "vegetable"]


            route_chain = (
                route_prompt
                | llm.with_structured_output(RouteQuery)
                | itemgetter("destination")
            )

            chain = {
                "destination": route_chain,  # "animal" or "vegetable"
                "query": lambda x: x["query"],  # pass through input query
            } | RunnableLambda(
                # if animal, chain_1. otherwise, chain_2.
                lambda x: chain_1 if x["destination"] == "animal" else chain_2,
            )

            chain.invoke({"query": "what color are carrots"})
    """  # noqa: E501

    @property
    def output_keys(self) -> List[str]:
        # The routed destination chains and the default ConversationChain are
        # all constructed with output_key="text", so the multi-route chain
        # exposes a single "text" output.
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: List[Dict[str, str]],
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts.

        Args:
            llm: Language model used both for the router and for every
                destination chain.
            prompt_infos: One dict per destination with keys "name",
                "description" (shown to the router so it can choose), and
                "prompt_template" (must contain an "{input}" variable).
            default_chain: Chain invoked when the router picks no destination.
                Defaults to a ConversationChain over ``llm`` emitting "text".
            **kwargs: Extra fields forwarded to the MultiPromptChain
                constructor.

        Returns:
            A MultiPromptChain wired with one LLMChain per prompt info.
        """
        # Build the router prompt: the template lists each destination as
        # "name: description" so the LLM can pick one by name.
        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        # One LLMChain per destination, keyed by the router-visible name.
        destination_chains = {}
        for p_info in prompt_infos:
            name = p_info["name"]
            prompt_template = p_info["prompt_template"]
            prompt = PromptTemplate(template=prompt_template, input_variables=["input"])
            chain = LLMChain(llm=llm, prompt=prompt)
            destination_chains[name] = chain
        # Fall back to a plain conversation chain when the caller supplies
        # no default; output_key="text" keeps it consistent with output_keys.
        _default_chain = default_chain or ConversationChain(llm=llm, output_key="text")
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )