Source code for langchain_community.chat_models.vertexai
"""Wrapper around Google VertexAI chat-based models."""from__future__importannotationsimportbase64importloggingimportrefromdataclassesimportdataclass,fieldfromtypingimportTYPE_CHECKING,Any,Dict,Iterator,List,Optional,Union,castfromurllib.parseimporturlparseimportrequestsfromlangchain_core._api.deprecationimportdeprecatedfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_models.chat_modelsimport(BaseChatModel,generate_from_stream,)fromlangchain_core.messagesimport(AIMessage,AIMessageChunk,BaseMessage,HumanMessage,SystemMessage,)fromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.utilsimportpre_initfromlangchain_community.llms.vertexaiimport(_VertexAICommon,is_codey_model,is_gemini_model,)fromlangchain_community.utilities.vertexaiimport(load_image_from_gcs,raise_vertex_import_error,)ifTYPE_CHECKING:fromvertexai.language_modelsimport(ChatMessage,ChatSession,CodeChatSession,InputOutputTextPair,)fromvertexai.preview.generative_modelsimportContentlogger=logging.getLogger(__name__)@dataclassclass_ChatHistory:"""Represents a context and a history of messages."""history:List["ChatMessage"]=field(default_factory=list)context:Optional[str]=Nonedef_parse_chat_history(history:List[BaseMessage])->_ChatHistory:"""Parse a sequence of messages into history. Args: history: The list of messages to re-create the history of the chat. Returns: A parsed chat history. Raises: ValueError: If a sequence of message has a SystemMessage not at the first place. """fromvertexai.language_modelsimportChatMessagevertex_messages,context=[],Nonefori,messageinenumerate(history):content=cast(str,message.content)ifi==0andisinstance(message,SystemMessage):context=contentelifisinstance(message,AIMessage):vertex_message=ChatMessage(content=message.content,author="bot")vertex_messages.append(vertex_message)elifisinstance(message,HumanMessage):vertex_message=ChatMessage(content=message.content,author="user")vertex_messages.append(vertex_message)else:raiseValueError(f"Unexpected message with type {type(message)} at the position {i}.")chat_history=_ChatHistory(context=context,history=vertex_messages)returnchat_historydef_is_url(s:str)->bool:try:result=urlparse(s)returnall([result.scheme,result.netloc])exceptExceptionase:logger.debug(f"Unable to parse URL: {e}")returnFalsedef_parse_chat_history_gemini(history:List[BaseMessage],project:Optional[str])->List["Content"]:fromvertexai.preview.generative_modelsimportContent,Image,Partdef_convert_to_prompt(part:Union[str,Dict])->Part:ifisinstance(part,str):returnPart.from_text(part)ifnotisinstance(part,Dict):raiseValueError(f"Message's content is expected to be a dict, got {type(part)}!")ifpart["type"]=="text":returnPart.from_text(part["text"])elifpart["type"]=="image_url":path=part["image_url"]["url"]ifpath.startswith("gs://"):image=load_image_from_gcs(path=path,project=project)elifpath.startswith("data:image/"):# extract base64 component from image uriencoded:Any=re.search(r"data:image/\w{2,4};base64,(.*)",path)ifencoded:encoded=encoded.group(1)else:raiseValueError("Invalid image uri. 
It should be in the format ""data:image/<image_type>;base64,<base64_encoded_image>.")image=Image.from_bytes(base64.b64decode(encoded))elif_is_url(path):response=requests.get(path)response.raise_for_status()image=Image.from_bytes(response.content)else:image=Image.load_from_file(path)else:raiseValueError("Only text and image_url types are supported!")returnPart.from_image(image)vertex_messages=[]fori,messageinenumerate(history):ifi==0andisinstance(message,SystemMessage):raiseValueError("SystemMessages are not yet supported!")elifisinstance(message,AIMessage):role="model"elifisinstance(message,HumanMessage):role="user"else:raiseValueError(f"Unexpected message with type {type(message)} at the position {i}.")raw_content=message.contentifisinstance(raw_content,str):raw_content=[raw_content]parts=[_convert_to_prompt(part)forpartinraw_content]vertex_message=Content(role=role,parts=parts)vertex_messages.append(vertex_message)returnvertex_messagesdef_parse_examples(examples:List[BaseMessage])->List["InputOutputTextPair"]:fromvertexai.language_modelsimportInputOutputTextPairiflen(examples)%2!=0:raiseValueError(f"Expect examples to have an even amount of messages, got {len(examples)}.")example_pairs=[]input_text=Nonefori,exampleinenumerate(examples):ifi%2==0:ifnotisinstance(example,HumanMessage):raiseValueError(f"Expected the first message in a part to be from human, got "f"{type(example)} for the {i}th message.")input_text=example.contentifi%2==1:ifnotisinstance(example,AIMessage):raiseValueError(f"Expected the second message in a part to be from AI, got "f"{type(example)} for the {i}th message.")pair=InputOutputTextPair(input_text=input_text,output_text=example.content)example_pairs.append(pair)returnexample_pairsdef_get_question(messages:List[BaseMessage])->HumanMessage:"""Get the human message at the end of a list of input messages to a chat model."""ifnotmessages:raiseValueError("You should provide at least one message to start the chat!")question=messages[-1]ifnotisinstance(question,HumanMessage):raiseValueError(f"Last message in the list should be from human, got {question.type}.")returnquestion
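

# Example: how the helpers above reshape a LangChain conversation (a minimal
# sketch, kept as a comment; running it requires the `vertexai` SDK, since
# _parse_chat_history imports ChatMessage lazily):
#
#     from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
#
#     parsed = _parse_chat_history(
#         [
#             SystemMessage(content="You are a helpful bot."),  # -> context
#             HumanMessage(content="Hi!"),                      # -> author="user"
#             AIMessage(content="Hello!"),                      # -> author="bot"
#         ]
#     )
#     assert parsed.context == "You are a helpful bot."
#     assert [m.author for m in parsed.history] == ["user", "bot"]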


@deprecated(
    since="0.0.12",
    removal="1.0",
    alternative_import="langchain_google_vertexai.ChatVertexAI",
)
class ChatVertexAI(_VertexAICommon, BaseChatModel):  # type: ignore[override]
    """`Vertex AI` Chat large language models API."""

    model_name: str = "chat-bison"
    "Underlying model name."
    examples: Optional[List[BaseMessage]] = None

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "vertexai"]
    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in the environment."""
        is_gemini = is_gemini_model(values["model_name"])
        cls._try_init_vertexai(values)
        try:
            from vertexai.language_models import ChatModel, CodeChatModel

            if is_gemini:
                from vertexai.preview.generative_models import (
                    GenerativeModel,
                )
        except ImportError:
            raise_vertex_import_error()
        if is_gemini:
            values["client"] = GenerativeModel(model_name=values["model_name"])
        else:
            if is_codey_model(values["model_name"]):
                model_cls = CodeChatModel
            else:
                model_cls = ChatModel
            values["client"] = model_cls.from_pretrained(values["model_name"])
        return values
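
    # For reference, the routing above resolves roughly as follows (model ids
    # are illustrative; is_gemini_model/is_codey_model do simple name checks):
    #   "gemini-pro"     -> vertexai.preview.generative_models.GenerativeModel
    #   "codechat-bison" -> vertexai.language_models.CodeChatModel
    #   "chat-bison"     -> vertexai.language_models.ChatModel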
def_generate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,stream:Optional[bool]=None,**kwargs:Any,)->ChatResult:"""Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. stream: Whether to use the streaming endpoint. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """should_stream=streamifstreamisnotNoneelseself.streamingifshould_stream:stream_iter=self._stream(messages,stop=stop,run_manager=run_manager,**kwargs)returngenerate_from_stream(stream_iter)question=_get_question(messages)params=self._prepare_params(stop=stop,stream=False,**kwargs)msg_params={}if"candidate_count"inparams:msg_params["candidate_count"]=params.pop("candidate_count")ifself._is_gemini_model:history_gemini=_parse_chat_history_gemini(messages,project=self.project)message=history_gemini.pop()chat=self.client.start_chat(history=history_gemini)response=chat.send_message(message,generation_config=params)else:history=_parse_chat_history(messages[:-1])examples=kwargs.get("examples")orself.examplesifexamples:params["examples"]=_parse_examples(examples)chat=self._start_chat(history,**params)response=chat.send_message(question.content,**msg_params)generations=[ChatGeneration(message=AIMessage(content=r.text))forrinresponse.candidates]returnChatResult(generations=generations)asyncdef_agenerate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,**kwargs:Any,)->ChatResult:"""Asynchronously generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. 
"""if"stream"inkwargs:kwargs.pop("stream")logger.warning("ChatVertexAI does not currently support async streaming.")params=self._prepare_params(stop=stop,**kwargs)msg_params={}if"candidate_count"inparams:msg_params["candidate_count"]=params.pop("candidate_count")ifself._is_gemini_model:history_gemini=_parse_chat_history_gemini(messages,project=self.project)message=history_gemini.pop()chat=self.client.start_chat(history=history_gemini)response=awaitchat.send_message_async(message,generation_config=params)else:question=_get_question(messages)history=_parse_chat_history(messages[:-1])examples=kwargs.get("examples",None)ifexamples:params["examples"]=_parse_examples(examples)chat=self._start_chat(history,**params)response=awaitchat.send_message_async(question.content,**msg_params)generations=[ChatGeneration(message=AIMessage(content=r.text))forrinresponse.candidates]returnChatResult(generations=generations)def_stream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,**kwargs:Any,)->Iterator[ChatGenerationChunk]:params=self._prepare_params(stop=stop,stream=True,**kwargs)ifself._is_gemini_model:history_gemini=_parse_chat_history_gemini(messages,project=self.project)message=history_gemini.pop()chat=self.client.start_chat(history=history_gemini)responses=chat.send_message(message,stream=True,generation_config=params)else:question=_get_question(messages)history=_parse_chat_history(messages[:-1])examples=kwargs.get("examples",None)ifexamples:params["examples"]=_parse_examples(examples)chat=self._start_chat(history,**params)responses=chat.send_message_streaming(question.content,**params)forresponseinresponses:chunk=ChatGenerationChunk(message=AIMessageChunk(content=response.text))ifrun_manager:run_manager.on_llm_new_token(response.text,chunk=chunk)yieldchunkdef_start_chat(self,history:_ChatHistory,**kwargs:Any)->Union[ChatSession,CodeChatSession]:ifnotself.is_codey_model:returnself.client.start_chat(context=history.context,message_history=history.history,**kwargs)else:returnself.client.start_chat(message_history=history.history,**kwargs)