Source code for langchain_experimental.smart_llm.base
"""Chain for applying self-critique using the SmartGPT workflow."""fromtypingimportAny,Dict,List,Optional,Tuple,Typefromlangchain.base_languageimportBaseLanguageModelfromlangchain.chains.baseimportChainfromlangchain.inputimportget_colored_textfromlangchain.schemaimportLLMResult,PromptValuefromlangchain_core.callbacks.managerimportCallbackManagerForChainRunfromlangchain_core.prompts.baseimportBasePromptTemplatefromlangchain_core.prompts.chatimport(AIMessagePromptTemplate,BaseMessagePromptTemplate,ChatPromptTemplate,HumanMessagePromptTemplate,)frompydanticimportConfigDict,model_validator
class SmartLLMChain(Chain):
    """Chain for applying self-critique using the SmartGPT workflow.

    See details at https://youtu.be/wVzuvf9D9BU

    A SmartLLMChain is an LLMChain that, instead of simply passing the prompt to
    the LLM, performs these 3 steps:
    1. Ideate: Pass the user prompt to an ideation LLM n_ideas times;
       each result is an "idea".
    2. Critique: Pass the ideas to a critique LLM, which looks for flaws in the
       ideas and picks the best one.
    3. Resolve: Pass the critique to a resolver LLM, which improves upon the best
       idea and outputs only the (improved version of) the best answer.

    In total, a SmartLLMChain pass will use n_ideas + 2 LLM calls.

    Note that SmartLLMChain will only improve results (compared to a basic
    LLMChain) when the underlying models have the capability for reflection,
    which smaller models often don't.

    Finally, a SmartLLMChain assumes that each underlying LLM outputs exactly
    1 result.
    """
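    # The SmartLLMChainHistory helper used by the `history` field and by the
    # _ideate/_critique/_resolve methods below is not shown in this listing.
    # The following is a minimal sketch, reconstructed from how it is used in
    # this class; the exact field names and method bodies are assumptions,
    # not the verbatim source.
    class SmartLLMChainHistory:
        question: str = ""
        ideas: List[str] = []
        critique: str = ""

        def ideation_prompt_inputs(self) -> Dict[str, Any]:
            # The ideation prompt only needs the original question.
            return {"question": self.question}

        def critique_prompt_inputs(self) -> Dict[str, Any]:
            # The critique prompt needs the question plus idea_1 ... idea_n.
            return {
                "question": self.question,
                **{f"idea_{i + 1}": idea for i, idea in enumerate(self.ideas)},
            }

        def resolve_prompt_inputs(self) -> Dict[str, Any]:
            # The resolve prompt additionally needs the critique text.
            return {
                "question": self.question,
                **{f"idea_{i + 1}": idea for i, idea in enumerate(self.ideas)},
                "critique": self.critique,
            }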
    prompt: BasePromptTemplate
    """Prompt object to use."""
    output_key: str = "resolution"
    ideation_llm: Optional[BaseLanguageModel] = None
    """LLM to use in ideation step. If None given, 'llm' will be used."""
    critique_llm: Optional[BaseLanguageModel] = None
    """LLM to use in critique step. If None given, 'llm' will be used."""
    resolver_llm: Optional[BaseLanguageModel] = None
    """LLM to use in resolve step. If None given, 'llm' will be used."""
    llm: Optional[BaseLanguageModel] = None
    """LLM to use for each step, if no specific llm for that step is given."""
    n_ideas: int = 3
    """Number of ideas to generate in idea step"""
    return_intermediate_steps: bool = False
    """Whether to return ideas and critique, in addition to resolution."""
    history: SmartLLMChainHistory = SmartLLMChainHistory()

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_inputs(cls, values: Dict[str, Any]) -> Any:
        """Ensure we have an LLM for each step."""
        llm = values.get("llm")
        ideation_llm = values.get("ideation_llm")
        critique_llm = values.get("critique_llm")
        resolver_llm = values.get("resolver_llm")

        if not llm and not ideation_llm:
            raise ValueError(
                "Either ideation_llm or llm needs to be given. Pass llm, "
                "if you want to use the same llm for all steps, or pass "
                "ideation_llm, critique_llm and resolver_llm if you want "
                "to use different llms for each step."
            )
        if not llm and not critique_llm:
            raise ValueError(
                "Either critique_llm or llm needs to be given. Pass llm, "
                "if you want to use the same llm for all steps, or pass "
                "ideation_llm, critique_llm and resolver_llm if you want "
                "to use different llms for each step."
            )
        if not llm and not resolver_llm:
            raise ValueError(
                "Either resolver_llm or llm needs to be given. Pass llm, "
                "if you want to use the same llm for all steps, or pass "
                "ideation_llm, critique_llm and resolver_llm if you want "
                "to use different llms for each step."
            )
        if llm and ideation_llm and critique_llm and resolver_llm:
            raise ValueError(
                "LLMs are given for each step (ideation_llm, critique_llm,"
                " resolver_llm), but backup LLM (llm) is also given, which"
                " would not be used."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Defines the input keys."""
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Defines the output keys."""
        if self.return_intermediate_steps:
            return ["ideas", "critique", self.output_key]
        return [self.output_key]
    def prep_prompts(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[PromptValue, Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in inputs:
            stop = inputs["stop"]
        selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
        prompt = self.prompt.format_prompt(**selected_inputs)
        _colored_text = get_colored_text(prompt.to_string(), "green")
        _text = "Prompt after formatting:\n" + _colored_text
        if run_manager:
            run_manager.on_text(_text, end="\n", verbose=self.verbose)
        if "stop" in inputs and inputs["stop"] != stop:
            raise ValueError(
                "If `stop` is present in any inputs, should be present in all."
            )
        return prompt, stop
    def _call(
        self,
        input_list: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        prompt, stop = self.prep_prompts(input_list, run_manager=run_manager)
        self.history.question = prompt.to_string()
        ideas = self._ideate(stop, run_manager)
        self.history.ideas = ideas
        critique = self._critique(stop, run_manager)
        self.history.critique = critique
        resolution = self._resolve(stop, run_manager)
        if self.return_intermediate_steps:
            return {"ideas": ideas, "critique": critique, self.output_key: resolution}
        return {self.output_key: resolution}

    def _get_text_from_llm_result(self, result: LLMResult, step: str) -> str:
        """Between steps, only the LLM result text is passed, not the LLMResult
        object. This function extracts the text from an LLMResult."""
        if len(result.generations) != 1:
            raise ValueError(
                f"In SmartLLM the LLM result in step {step} is not "
                "exactly 1 element. This should never happen"
            )
        if len(result.generations[0]) != 1:
            raise ValueError(
                f"In SmartLLM the LLM in step {step} returned more than "
                "1 output. SmartLLM only works with LLMs returning "
                "exactly 1 output."
            )
        return result.generations[0][0].text
    def get_prompt_strings(
        self, stage: str
    ) -> List[Tuple[Type[BaseMessagePromptTemplate], str]]:
        role_strings: List[Tuple[Type[BaseMessagePromptTemplate], str]] = []
        role_strings.append(
            (
                HumanMessagePromptTemplate,
                "Question: {question}\nAnswer: Let's work this out in a step by "
                "step way to be sure we have the right answer:",
            )
        )
        if stage == "ideation":
            return role_strings
        role_strings.extend(
            [
                *[
                    (
                        AIMessagePromptTemplate,
                        "Idea " + str(i + 1) + ": {idea_" + str(i + 1) + "}",
                    )
                    for i in range(self.n_ideas)
                ],
                (
                    HumanMessagePromptTemplate,
                    "You are a researcher tasked with investigating the "
                    f"{self.n_ideas} response options provided. List the flaws and "
                    "faulty logic of each answer option. Let's work this out in a step"
                    " by step way to be sure we have all the errors:",
                ),
            ]
        )
        if stage == "critique":
            return role_strings
        role_strings.extend(
            [
                (AIMessagePromptTemplate, "Critique: {critique}"),
                (
                    HumanMessagePromptTemplate,
                    "You are a resolver tasked with 1) finding which of "
                    f"the {self.n_ideas} answer options the researcher thought was "
                    "best, 2) improving that answer and 3) printing the answer in "
                    "full. Don't output anything for step 1 or 2, only the full "
                    "answer in 3. Let's work this out in a step by step way to "
                    "be sure we have the right answer:",
                ),
            ]
        )
        if stage == "resolve":
            return role_strings
        raise ValueError(
            "stage should be either 'ideation', 'critique' or 'resolve',"
            f" but it is '{stage}'. This should never happen."
        )
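    # The ideation_prompt / critique_prompt / resolve_prompt helpers called in
    # the private step methods below are not shown in this listing. A minimal
    # sketch, assuming they simply wrap get_prompt_strings in a
    # ChatPromptTemplate (the exact bodies are an assumption):
    def ideation_prompt(self) -> ChatPromptTemplate:
        # Build a chat prompt from the (role, template) pairs for the ideation stage.
        messages = [
            role.from_template(tmpl)
            for role, tmpl in self.get_prompt_strings("ideation")
        ]
        return ChatPromptTemplate.from_messages(messages)

    def critique_prompt(self) -> ChatPromptTemplate:
        # Same as above, but including the ideas and the critique instruction.
        messages = [
            role.from_template(tmpl)
            for role, tmpl in self.get_prompt_strings("critique")
        ]
        return ChatPromptTemplate.from_messages(messages)

    def resolve_prompt(self) -> ChatPromptTemplate:
        # Same as above, but including the critique and the resolver instruction.
        messages = [
            role.from_template(tmpl)
            for role, tmpl in self.get_prompt_strings("resolve")
        ]
        return ChatPromptTemplate.from_messages(messages)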
    def _ideate(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> List[str]:
        """Generate n_ideas ideas as response to user prompt."""
        llm = self.ideation_llm if self.ideation_llm else self.llm
        prompt = self.ideation_prompt().format_prompt(
            **self.history.ideation_prompt_inputs()
        )
        callbacks = run_manager.get_child() if run_manager else None
        if llm:
            ideas = [
                self._get_text_from_llm_result(
                    llm.generate_prompt([prompt], stop, callbacks),
                    step="ideate",
                )
                for _ in range(self.n_ideas)
            ]
            for i, idea in enumerate(ideas):
                _colored_text = get_colored_text(idea, "blue")
                _text = f"Idea {i + 1}:\n" + _colored_text
                if run_manager:
                    run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return ideas
        else:
            raise ValueError("llm is none, which should never happen")

    def _critique(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> str:
        """Critique each of the ideas from ideation stage & select best one."""
        llm = self.critique_llm if self.critique_llm else self.llm
        prompt = self.critique_prompt().format_prompt(
            **self.history.critique_prompt_inputs()
        )
        callbacks = run_manager.handlers if run_manager else None
        if llm:
            critique = self._get_text_from_llm_result(
                llm.generate_prompt([prompt], stop, callbacks), step="critique"
            )
            _colored_text = get_colored_text(critique, "yellow")
            _text = "Critique:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return critique
        else:
            raise ValueError("llm is none, which should never happen")

    def _resolve(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> str:
        """Improve upon the best idea as chosen in critique step & return it."""
        llm = self.resolver_llm if self.resolver_llm else self.llm
        prompt = self.resolve_prompt().format_prompt(
            **self.history.resolve_prompt_inputs()
        )
        callbacks = run_manager.handlers if run_manager else None
        if llm:
            resolution = self._get_text_from_llm_result(
                llm.generate_prompt([prompt], stop, callbacks), step="resolve"
            )
            _colored_text = get_colored_text(resolution, "green")
            _text = "Resolution:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return resolution
        else:
            raise ValueError("llm is none, which should never happen")
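Usage sketch (not part of the module source above): the snippet below shows how the ideate/critique/resolve flow described in the class docstring is typically driven. The chat model provider and model name are illustrative assumptions; any sufficiently capable chat model can be substituted.

from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # assumed provider; swap in your own model

from langchain_experimental.smart_llm import SmartLLMChain

# The prompt's single variable ("question") becomes the chain's input key.
prompt = PromptTemplate.from_template("{question}")

chain = SmartLLMChain(
    llm=ChatOpenAI(model="gpt-4o", temperature=0.6),  # assumed model name
    prompt=prompt,
    n_ideas=3,  # 3 ideation calls + 1 critique + 1 resolve = 5 LLM calls
    return_intermediate_steps=True,  # also return "ideas" and "critique"
    verbose=True,
)

result = chain.invoke(
    {
        "question": (
            "I have a 12 liter jug and a 6 liter jug. "
            "How can I measure exactly 6 liters?"
        )
    }
)
print(result["resolution"])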