Source code for langchain_community.chat_models.ollama
import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union, cast

from langchain_core._api import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult

from langchain_community.llms.ollama import OllamaEndpointNotFoundError, _OllamaCommon


@deprecated("0.0.3", alternative="_chat_stream_response_to_chat_generation_chunk")
def _stream_response_to_chat_generation_chunk(
    stream_response: str,
) -> ChatGenerationChunk:
    """Convert a stream response to a generation chunk."""
    parsed_response = json.loads(stream_response)
    generation_info = parsed_response if parsed_response.get("done") is True else None
    return ChatGenerationChunk(
        message=AIMessageChunk(content=parsed_response.get("response", "")),
        generation_info=generation_info,
    )


def _chat_stream_response_to_chat_generation_chunk(
    stream_response: str,
) -> ChatGenerationChunk:
    """Convert a stream response to a generation chunk."""
    parsed_response = json.loads(stream_response)
    generation_info = parsed_response if parsed_response.get("done") is True else None
    return ChatGenerationChunk(
        message=AIMessageChunk(
            content=parsed_response.get("message", {}).get("content", "")
        ),
        generation_info=generation_info,
    )
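# Illustrative sketch (not part of the original module): each line of Ollama's
# streaming /api/chat response is a standalone JSON object, roughly of the form
#
#     {"model": "llama2",
#      "created_at": "...",
#      "message": {"role": "assistant", "content": "Hel"},
#      "done": false}
#
# with a final line carrying "done": true plus aggregate stats (which is why
# `generation_info` above is only populated when "done" is true). The helper
# would turn one such line into a ChatGenerationChunk, e.g.:
#
#     chunk = _chat_stream_response_to_chat_generation_chunk(
#         '{"message": {"role": "assistant", "content": "Hel"}, "done": false}'
#     )
#     assert chunk.message.content == "Hel"
#     assert chunk.generation_info is None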
@deprecated(
    since="0.3.1",
    removal="1.0.0",
    alternative_import="langchain_ollama.ChatOllama",
)
class ChatOllama(BaseChatModel, _OllamaCommon):
    """Ollama locally runs large language models.

    To use, follow the instructions at https://ollama.ai/.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatOllama

            ollama = ChatOllama(model="llama2")
    """

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "ollama-chat"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    def _get_ls_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
        ls_params = LangSmithParams(
            ls_provider="ollama",
            ls_model_name=self.model,
            ls_model_type="chat",
            ls_temperature=params.get("temperature", self.temperature),
        )
        if ls_max_tokens := params.get("num_predict", self.num_predict):
            ls_params["ls_max_tokens"] = ls_max_tokens
        if ls_stop := stop or params.get("stop", None) or self.stop:
            ls_params["ls_stop"] = ls_stop
        return ls_params

    @deprecated("0.0.3", alternative="_convert_messages_to_ollama_messages")
    def _format_message_as_text(self, message: BaseMessage) -> str:
        if isinstance(message, ChatMessage):
            message_text = f"\n\n{message.role.capitalize()}: {message.content}"
        elif isinstance(message, HumanMessage):
            if isinstance(message.content, List):
                first_content = cast(List[Dict], message.content)[0]
                content_type = first_content.get("type")
                if content_type == "text":
                    message_text = f"[INST] {first_content['text']} [/INST]"
                elif content_type == "image_url":
                    message_text = first_content["image_url"]["url"]
            else:
                message_text = f"[INST] {message.content} [/INST]"
        elif isinstance(message, AIMessage):
            message_text = f"{message.content}"
        elif isinstance(message, SystemMessage):
            message_text = f"<<SYS>> {message.content} <</SYS>>"
        else:
            raise ValueError(f"Got unknown type {message}")
        return message_text

    def _format_messages_as_text(self, messages: List[BaseMessage]) -> str:
        return "\n".join(
            [self._format_message_as_text(message) for message in messages]
        )

    def _convert_messages_to_ollama_messages(
        self, messages: List[BaseMessage]
    ) -> List[Dict[str, Union[str, List[str]]]]:
        ollama_messages: List = []
        for message in messages:
            role = ""
            if isinstance(message, HumanMessage):
                role = "user"
            elif isinstance(message, AIMessage):
                role = "assistant"
            elif isinstance(message, SystemMessage):
                role = "system"
            else:
                raise ValueError("Received unsupported message type for Ollama.")

            content = ""
            images = []
            if isinstance(message.content, str):
                content = message.content
            else:
                for content_part in cast(List[Dict], message.content):
                    if content_part.get("type") == "text":
                        content += f"\n{content_part['text']}"
                    elif content_part.get("type") == "image_url":
                        image_url = None
                        temp_image_url = content_part.get("image_url")
                        if isinstance(temp_image_url, str):
                            image_url = content_part["image_url"]
                        elif (
                            isinstance(temp_image_url, dict)
                            and "url" in temp_image_url
                        ):
                            image_url = temp_image_url["url"]
                        else:
                            raise ValueError(
                                "Only string image_url or dict with string 'url' "
                                "inside content parts are supported."
                            )

                        image_url_components = image_url.split(",")
                        # Support data:image/jpeg;base64,<image> format
                        # and base64 strings
                        if len(image_url_components) > 1:
                            images.append(image_url_components[1])
                        else:
                            images.append(image_url_components[0])
                    else:
                        raise ValueError(
                            "Unsupported message content type. "
""Must either have type 'text' or type 'image_url' ""with a string 'image_url' field.")ollama_messages.append({"role":role,"content":content,"images":images,})returnollama_messagesdef_create_chat_stream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,**kwargs:Any,)->Iterator[str]:payload={"model":self.model,"messages":self._convert_messages_to_ollama_messages(messages),}yield fromself._create_stream(payload=payload,stop=stop,api_url=f"{self.base_url}/api/chat",**kwargs)asyncdef_acreate_chat_stream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,**kwargs:Any,)->AsyncIterator[str]:payload={"model":self.model,"messages":self._convert_messages_to_ollama_messages(messages),}asyncforstream_respinself._acreate_stream(payload=payload,stop=stop,api_url=f"{self.base_url}/api/chat",**kwargs):yieldstream_respdef_chat_stream_with_aggregation(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,verbose:bool=False,**kwargs:Any,)->ChatGenerationChunk:final_chunk:Optional[ChatGenerationChunk]=Noneforstream_respinself._create_chat_stream(messages,stop,**kwargs):ifstream_resp:chunk=_chat_stream_response_to_chat_generation_chunk(stream_resp)iffinal_chunkisNone:final_chunk=chunkelse:final_chunk+=chunkifrun_manager:run_manager.on_llm_new_token(chunk.text,chunk=chunk,verbose=verbose,)iffinal_chunkisNone:raiseValueError("No data received from Ollama stream.")returnfinal_chunkasyncdef_achat_stream_with_aggregation(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,verbose:bool=False,**kwargs:Any,)->ChatGenerationChunk:final_chunk:Optional[ChatGenerationChunk]=Noneasyncforstream_respinself._acreate_chat_stream(messages,stop,**kwargs):ifstream_resp:chunk=_chat_stream_response_to_chat_generation_chunk(stream_resp)iffinal_chunkisNone:final_chunk=chunkelse:final_chunk+=chunkifrun_manager:awaitrun_manager.on_llm_new_token(chunk.text,chunk=chunk,verbose=verbose,)iffinal_chunkisNone:raiseValueError("No data received from Ollama stream.")returnfinal_chunkdef_generate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,**kwargs:Any,)->ChatResult:"""Call out to Ollama's generate endpoint. Args: messages: The list of base messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: Chat generations from the model Example: .. code-block:: python response = ollama([ HumanMessage(content="Tell me about the history of AI") ]) """final_chunk=self._chat_stream_with_aggregation(messages,stop=stop,run_manager=run_manager,verbose=self.verbose,**kwargs,)chat_generation=ChatGeneration(message=AIMessage(content=final_chunk.text),generation_info=final_chunk.generation_info,)returnChatResult(generations=[chat_generation])asyncdef_agenerate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,**kwargs:Any,)->ChatResult:"""Call out to Ollama's generate endpoint. Args: messages: The list of base messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: Chat generations from the model Example: .. 
            .. code-block:: python

                response = ollama([
                    HumanMessage(content="Tell me about the history of AI")
                ])
        """
        final_chunk = await self._achat_stream_with_aggregation(
            messages,
            stop=stop,
            run_manager=run_manager,
            verbose=self.verbose,
            **kwargs,
        )
        chat_generation = ChatGeneration(
            message=AIMessage(content=final_chunk.text),
            generation_info=final_chunk.generation_info,
        )
        return ChatResult(generations=[chat_generation])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        try:
            for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
                if stream_resp:
                    chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
                    if run_manager:
                        run_manager.on_llm_new_token(
                            chunk.text,
                            chunk=chunk,
                            verbose=self.verbose,
                        )
                    yield chunk
        except OllamaEndpointNotFoundError:
            yield from self._legacy_stream(messages, stop, **kwargs)

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
            if stream_resp:
                chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
                if run_manager:
                    await run_manager.on_llm_new_token(
                        chunk.text,
                        chunk=chunk,
                        verbose=self.verbose,
                    )
                yield chunk

    @deprecated("0.0.3", alternative="_stream")
    def _legacy_stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        prompt = self._format_messages_as_text(messages)
        for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
            if stream_resp:
                chunk = _stream_response_to_chat_generation_chunk(stream_resp)
                if run_manager:
                    run_manager.on_llm_new_token(
                        chunk.text,
                        chunk=chunk,
                        verbose=self.verbose,
                    )
                yield chunk
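# Usage sketch (illustrative, not part of the original module; assumes a local
# Ollama server is reachable at the default base_url):
#
#     from langchain_core.messages import HumanMessage
#     from langchain_community.chat_models import ChatOllama
#
#     chat = ChatOllama(model="llama2")
#
#     # Plain text: `.stream()` yields message chunks produced by `_stream` above.
#     for chunk in chat.stream([HumanMessage(content="Why is the sky blue?")]):
#         print(chunk.content, end="", flush=True)
#
#     # Multimodal content parts: `_convert_messages_to_ollama_messages` extracts
#     # the base64 payload from a data URL into the `images` field, so with a
#     # vision-capable model (model name here is an example) one could send:
#     message = HumanMessage(
#         content=[
#             {"type": "text", "text": "What is in this picture?"},
#             {
#                 "type": "image_url",
#                 "image_url": {"url": "data:image/jpeg;base64,<base64-data>"},
#             },
#         ]
#     )
#     response = ChatOllama(model="llava").invoke([message])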