"""Chain that just formats a prompt and calls an LLM."""from__future__importannotationsimportwarningsfromtypingimportAny,Dict,List,Optional,Sequence,Tuple,Union,castfromlangchain_core._apiimportdeprecatedfromlangchain_core.callbacksimport(AsyncCallbackManager,AsyncCallbackManagerForChainRun,CallbackManager,CallbackManagerForChainRun,Callbacks,)fromlangchain_core.language_modelsimport(BaseLanguageModel,LanguageModelInput,)fromlangchain_core.messagesimportBaseMessagefromlangchain_core.output_parsersimportBaseLLMOutputParser,StrOutputParserfromlangchain_core.outputsimportChatGeneration,Generation,LLMResultfromlangchain_core.prompt_valuesimportPromptValuefromlangchain_core.promptsimportBasePromptTemplate,PromptTemplatefromlangchain_core.runnablesimport(Runnable,RunnableBinding,RunnableBranch,RunnableWithFallbacks,)fromlangchain_core.runnables.configurableimportDynamicRunnablefromlangchain_core.utils.inputimportget_colored_textfrompydanticimportConfigDict,Fieldfromlangchain.chains.baseimportChain
@deprecated(
    since="0.1.17",
    alternative="RunnableSequence, e.g., `prompt | llm`",
    removal="1.0",
)
class LLMChain(Chain):
    """Chain to run queries against LLMs.

    This class is deprecated. See below for an example implementation using
    LangChain runnables:

        .. code-block:: python

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import PromptTemplate
            from langchain_openai import OpenAI

            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = OpenAI()
            chain = prompt | llm | StrOutputParser()

            chain.invoke("your adjective here")

    Example:
        .. code-block:: python

            from langchain.chains import LLMChain
            from langchain_community.llms import OpenAI
            from langchain_core.prompts import PromptTemplate

            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    @classmethod
    def is_lc_serializable(self) -> bool:
        return True

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: Union[
        Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
    ]
    """Language model to call."""
    output_key: str = "text"  #: :meta private:
    output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
    """Output parser to use.
    Defaults to one that takes the most likely string but does not change it
    otherwise."""
    return_final_only: bool = True
    """Whether to return only the final parsed result. Defaults to True.
    If false, will return a bunch of extra information about the generation."""
    llm_kwargs: dict = Field(default_factory=dict)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.
        :meta private:
        """
        if self.return_final_only:
            return [self.output_key]
        else:
            return [self.output_key, "full_generation"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = self.generate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def generate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return self.llm.generate_prompt(
                prompts,
                stop,
                callbacks=callbacks,
                **self.llm_kwargs,
            )
        else:
            results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)

    async def agenerate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return await self.llm.agenerate_prompt(
                prompts,
                stop,
                callbacks=callbacks,
                **self.llm_kwargs,
            )
        else:
            results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)
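
    # Editorial note (not part of the original module): `generate` returns an
    # LLMResult with one list of Generation objects per input dict. A minimal,
    # hypothetical usage sketch; the `chain`, `prompt`, and `OpenAI` names are
    # assumptions, not defined here:
    #
    #     chain = LLMChain(llm=OpenAI(), prompt=prompt)
    #     result = chain.generate([{"adjective": "funny"}, {"adjective": "sad"}])
    #     first_text = result.generations[0][0].text  # top generation for input 0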

    def prep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop
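
    # Editorial note (not part of the original module): `prep_prompts` reads `stop`
    # from the first input and requires every other input to carry an identical
    # value. A hypothetical sketch of a valid batch (values are assumptions):
    #
    #     chain.generate([
    #         {"adjective": "funny", "stop": ["\n"]},
    #         {"adjective": "sad", "stop": ["\n"]},  # must match the first `stop`
    #     ])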

    async def aprep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    def apply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = callback_manager.on_chain_start(
            None,
            {"input_list": input_list},
            name=self.get_name(),
        )
        try:
            response = self.generate(input_list, run_manager=run_manager)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        run_manager.on_chain_end({"outputs": outputs})
        return outputs

    async def aapply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = await callback_manager.on_chain_start(
            None,
            {"input_list": input_list},
            name=self.get_name(),
        )
        try:
            response = await self.agenerate(input_list, run_manager=run_manager)
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        await run_manager.on_chain_end({"outputs": outputs})
        return outputs

    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
        """Create outputs from response."""
        result = [
            # Get the text of the top generated string.
            {
                self.output_key: self.output_parser.parse_result(generation),
                "full_generation": generation,
            }
            for generation in llm_result.generations
        ]
        if self.return_final_only:
            result = [{self.output_key: r[self.output_key]} for r in result]
        return result
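
    # Editorial note (not part of the original module): with the default
    # `return_final_only=True` each output dict is just {"text": <parsed string>};
    # with `return_final_only=False` each output also carries the raw generations,
    # e.g. {"text": <parsed string>, "full_generation": [Generation(...)]}.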

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = await self.agenerate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs, callbacks=callbacks)[self.output_key]

    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

    def predict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, Any]]:
        """Call predict and then parse the results."""
        warnings.warn(
            "The predict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.predict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    async def apredict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        warnings.warn(
            "The apredict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.apredict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    def apply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The apply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.apply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call aapply and then parse the results."""
        warnings.warn(
            "The aapply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.aapply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)
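
    # Editorial note (not part of the original module): a minimal usage sketch for
    # `from_string`; `OpenAI` is an assumed model class from langchain_openai:
    #
    #     from langchain_openai import OpenAI
    #
    #     chain = LLMChain.from_string(OpenAI(), "Tell me a {adjective} joke")
    #     chain.predict(adjective="funny")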


def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
    if isinstance(llm_like, BaseLanguageModel):
        return llm_like
    elif isinstance(llm_like, RunnableBinding):
        return _get_language_model(llm_like.bound)
    elif isinstance(llm_like, RunnableWithFallbacks):
        return _get_language_model(llm_like.runnable)
    elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
        return _get_language_model(llm_like.default)
    else:
        raise ValueError(
            f"Unable to extract BaseLanguageModel from llm_like object of type "
            f"{type(llm_like)}"
        )
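

# Editorial note (not part of the original module): `_get_language_model` unwraps
# common Runnable wrappers until it reaches the underlying model; anything it does
# not recognise raises a ValueError. A hypothetical sketch (`llm` and `other_llm`
# are assumed BaseLanguageModel instances):
#
#     _get_language_model(llm.bind(stop=["\n"]))            # -> llm
#     _get_language_model(llm.with_fallbacks([other_llm]))  # -> llm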