from typing import Any, List, Optional

from langchain.chains import LLMChain
from langchain_core.callbacks.manager import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate


class ResponseGenerationChain(LLMChain):
    """Chain to execute tasks."""
    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Create a ResponseGenerationChain from a language model."""
        execution_template = (
            "The AI assistant has parsed the user input into several tasks "
            "and executed them. The results are as follows:\n"
            "{task_execution}"
            "\nPlease summarize the results and generate a response."
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["task_execution"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
class ResponseGenerator:
    """Generates a response based on the input."""

    def __init__(self, llm_chain: LLMChain, stop: Optional[List] = None):
        self.llm_chain = llm_chain
        self.stop = stop
    def generate(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> str:
        """Given input, decide what to do."""
        llm_response = self.llm_chain.run(
            **inputs, stop=self.stop, callbacks=callbacks
        )
        return llm_response
def load_response_generator(llm: BaseLanguageModel) -> ResponseGenerator:
    """Load the ResponseGenerator."""
    llm_chain = ResponseGenerationChain.from_llm(llm)
    return ResponseGenerator(llm_chain=llm_chain)
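

# Usage sketch, not part of the module: load_response_generator accepts any
# BaseLanguageModel, so the ChatOpenAI model and the task_execution text below
# are illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    generator = load_response_generator(ChatOpenAI(temperature=0))
    # The chain expects a "task_execution" value describing the already-run tasks.
    summary = generator.generate(
        {
            "task_execution": (
                "Task 1 (image-classification): label='golden retriever'\n"
                "Task 2 (image-captioning): 'a dog playing in the park'"
            )
        }
    )
    print(summary)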