class FakeListLLM(LLM):
    """Fake LLM for testing purposes.

    Returns the strings in ``responses`` in order, one per invocation,
    wrapping back to the first response after the last one is served.
    """

    responses: list[str]
    """List of responses to return in order."""

    # NOTE: only used by sub-classes (e.g. a streaming variant); kept here
    # so the public field set of FakeListLLM stays backward compatible.
    sleep: Optional[float] = None
    """Sleep time in seconds between responses.

    Ignored by FakeListLLM, but used by sub-classes.
    """

    i: int = 0
    """Internally incremented after every model invocation.

    Useful primarily for testing purposes.
    """

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "fake-list"

    def _next_response(self) -> str:
        """Return the current response and advance ``i``, wrapping to 0.

        Shared by ``_call`` and ``_acall`` so the cursor logic lives in
        exactly one place.
        """
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        return response

    def _call(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Return next response."""
        return self._next_response()

    async def _acall(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Return next response."""
        return self._next_response()

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters identifying this fake model (its response list)."""
        return {"responses": self.responses}
class FakeListLLMError(Exception):
    """Fake error for testing purposes."""
class FakeStreamingListLLM(FakeListLLM):
    """Fake streaming list LLM for testing purposes.

    An LLM that will return responses from a list in order.

    This model also supports optionally sleeping between successive
    chunks in a streaming implementation.
    """

    error_on_chunk_number: Optional[int] = None
    """If set, will raise an exception on the specified chunk number."""