Source code for langchain_core.language_models.fake_chat_models
"""Fake ChatModel for testing purposes."""importasyncioimportreimporttimefromcollections.abcimportAsyncIterator,IteratorfromtypingimportAny,Optional,Union,castfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_models.chat_modelsimportBaseChatModel,SimpleChatModelfromlangchain_core.messagesimportAIMessage,AIMessageChunk,BaseMessagefromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.runnablesimportRunnableConfig
class FakeMessagesListChatModel(BaseChatModel):
    """Fake ChatModel for testing purposes."""

    responses: list[BaseMessage]
    """List of responses to **cycle** through in order."""
    sleep: Optional[float] = None
    """Sleep time in seconds between responses."""
    i: int = 0
    """Internally incremented after every model invocation."""

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        generation = ChatGeneration(message=response)
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        return "fake-messages-list-chat-model"
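# Illustrative usage sketch (an editorial addition, not part of the original
# module): shows how FakeMessagesListChatModel cycles through its responses.
def _demo_fake_messages_list_chat_model() -> None:
    model = FakeMessagesListChatModel(
        responses=[AIMessage(content="first"), AIMessage(content="second")]
    )
    assert model.invoke("hi").content == "first"
    assert model.invoke("hi").content == "second"
    # After the last response, the counter wraps back to the start.
    assert model.invoke("hi").content == "first"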
class FakeListChatModelError(Exception):
    """Raised by FakeListChatModel when error_on_chunk_number is hit."""


class FakeListChatModel(SimpleChatModel):
    """Fake ChatModel for testing purposes."""

    responses: list[str]
    """List of responses to **cycle** through in order."""
    sleep: Optional[float] = None
    """Sleep time in seconds between streamed characters."""
    i: int = 0
    """Internally incremented after every model invocation."""
    error_on_chunk_number: Optional[int] = None
    """If set, raise an error when streaming the chunk with this index."""

    @property
    def _llm_type(self) -> str:
        return "fake-list-chat-model"

    def _call(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Return the next response, cycling back to the start of the list."""
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        return response

    def _stream(
        self,
        messages: list[BaseMessage],
        stop: Union[list[str], None] = None,
        run_manager: Union[CallbackManagerForLLMRun, None] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        for i_c, c in enumerate(response):
            if self.sleep is not None:
                time.sleep(self.sleep)
            if (
                self.error_on_chunk_number is not None
                and i_c == self.error_on_chunk_number
            ):
                raise FakeListChatModelError
            yield ChatGenerationChunk(message=AIMessageChunk(content=c))

    async def _astream(
        self,
        messages: list[BaseMessage],
        stop: Union[list[str], None] = None,
        run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
        else:
            self.i = 0
        for i_c, c in enumerate(response):
            if self.sleep is not None:
                await asyncio.sleep(self.sleep)
            if (
                self.error_on_chunk_number is not None
                and i_c == self.error_on_chunk_number
            ):
                raise FakeListChatModelError
            yield ChatGenerationChunk(message=AIMessageChunk(content=c))

    @property
    def _identifying_params(self) -> dict[str, Any]:
        return {"responses": self.responses}

    # Manually override batch to preserve batch ordering with no concurrency.
    async def abatch(
        self,
        inputs: list[Any],
        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any,
    ) -> list[BaseMessage]:
        if isinstance(config, list):
            # Do not use an async iterator here because we need explicit ordering.
            return [
                await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)
            ]
        # Do not use an async iterator here because we need explicit ordering.
        return [await self.ainvoke(m, config, **kwargs) for m in inputs]
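# Illustrative usage sketch (an editorial addition, not part of the original
# module): FakeListChatModel streams each response one character at a time, and
# error_on_chunk_number forces a mid-stream failure for error-handling tests.
def _demo_fake_list_chat_model() -> None:
    model = FakeListChatModel(responses=["ab"])
    assert [chunk.content for chunk in model.stream("hi")] == ["a", "b"]

    failing = FakeListChatModel(responses=["abc"], error_on_chunk_number=1)
    try:
        list(failing.stream("hi"))
    except FakeListChatModelError:
        pass  # raised on the second character, as configured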
class FakeChatModel(SimpleChatModel):
    """Fake Chat Model wrapper for testing purposes."""

    def _call(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return "fake response"

    async def _agenerate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        output_str = "fake response"
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        return "fake-chat-model"

    @property
    def _identifying_params(self) -> dict[str, Any]:
        return {"key": "fake"}
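# Illustrative usage sketch (an editorial addition, not part of the original
# module): FakeChatModel always answers with the same canned string.
def _demo_fake_chat_model() -> None:
    model = FakeChatModel()
    assert model.invoke("anything").content == "fake response"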
class GenericFakeChatModel(BaseChatModel):
    """Generic fake chat model that can be used to test the chat model interface.

    * Chat model should be usable in both sync and async tests.
    * Invokes on_llm_new_token to allow for testing of callback-related code for
      new tokens.
    * Includes logic to break messages into message chunks to facilitate testing
      of streaming.
    """

    messages: Iterator[Union[AIMessage, str]]
    """An iterator over messages to return, one per invocation.

    This can be expanded to accept other types like Callables / dicts / strings
    to make the interface more generic if needed.

    Note: if you want to pass a list, you can use `iter` to convert it to an
    iterator.
    """

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Top-level call."""
        message = next(self.messages)
        message_ = AIMessage(content=message) if isinstance(message, str) else message
        generation = ChatGeneration(message=message_)
        return ChatResult(generations=[generation])

    def _stream(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the output of the model."""
        chat_result = self._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
        if not isinstance(chat_result, ChatResult):
            msg = (
                f"Expected generate to return a ChatResult, "
                f"but got {type(chat_result)} instead."
            )
            raise ValueError(msg)  # noqa: TRY004

        message = chat_result.generations[0].message

        if not isinstance(message, AIMessage):
            msg = (
                f"Expected invoke to return an AIMessage, "
                f"but got {type(message)} instead."
            )
            raise ValueError(msg)  # noqa: TRY004

        content = message.content

        if content:
            # Use a regular expression to split on whitespace with a capture group
            # so that we can preserve the whitespace in the output.
            if not isinstance(content, str):
                msg = "Expected content to be a string."
                raise ValueError(msg)

            content_chunks = cast("list[str]", re.split(r"(\s)", content))

            for token in content_chunks:
                chunk = ChatGenerationChunk(
                    message=AIMessageChunk(content=token, id=message.id)
                )
                if run_manager:
                    run_manager.on_llm_new_token(token, chunk=chunk)
                yield chunk

        if message.additional_kwargs:
            for key, value in message.additional_kwargs.items():
                # We should further break down the additional kwargs into chunks.
                # Special case for function call.
                if key == "function_call":
                    for fkey, fvalue in value.items():
                        if isinstance(fvalue, str):
                            # Break the function call apart on `,`.
                            fvalue_chunks = cast(
                                "list[str]", re.split(r"(,)", fvalue)
                            )
                            for fvalue_chunk in fvalue_chunks:
                                chunk = ChatGenerationChunk(
                                    message=AIMessageChunk(
                                        id=message.id,
                                        content="",
                                        additional_kwargs={
                                            "function_call": {fkey: fvalue_chunk}
                                        },
                                    )
                                )
                                if run_manager:
                                    run_manager.on_llm_new_token(
                                        "",
                                        chunk=chunk,  # No token for function call
                                    )
                                yield chunk
                        else:
                            chunk = ChatGenerationChunk(
                                message=AIMessageChunk(
                                    id=message.id,
                                    content="",
                                    additional_kwargs={
                                        "function_call": {fkey: fvalue}
                                    },
                                )
                            )
                            if run_manager:
                                run_manager.on_llm_new_token(
                                    "",
                                    chunk=chunk,  # No token for function call
                                )
                            yield chunk
                else:
                    chunk = ChatGenerationChunk(
                        message=AIMessageChunk(
                            id=message.id, content="", additional_kwargs={key: value}
                        )
                    )
                    if run_manager:
                        run_manager.on_llm_new_token(
                            "",
                            chunk=chunk,  # No token for function call
                        )
                    yield chunk

    @property
    def _llm_type(self) -> str:
        return "generic-fake-chat-model"
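# Illustrative usage sketch (an editorial addition, not part of the original
# module): GenericFakeChatModel consumes an iterator of messages and streams
# each one token by token, splitting on (and preserving) whitespace.
def _demo_generic_fake_chat_model() -> None:
    model = GenericFakeChatModel(
        messages=iter(["hello world", AIMessage(content="!")])
    )
    assert [chunk.content for chunk in model.stream("hi")] == ["hello", " ", "world"]
    assert model.invoke("hi").content == "!"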
class ParrotFakeChatModel(BaseChatModel):
    """Fake chat model that echoes back the last input message.

    * Chat model should be usable in both sync and async tests.
    """

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Top-level call."""
        return ChatResult(generations=[ChatGeneration(message=messages[-1])])

    @property
    def _llm_type(self) -> str:
        return "parrot-fake-chat-model"
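# Illustrative usage sketch (an editorial addition, not part of the original
# module): ParrotFakeChatModel returns the last input message verbatim.
def _demo_parrot_fake_chat_model() -> None:
    model = ParrotFakeChatModel()
    assert model.invoke("echo me").content == "echo me"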