# Source code for langchain_community.chat_models.bedrock
import re
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from pydantic import ConfigDict

from langchain_community.chat_models.anthropic import (
    convert_messages_to_prompt_anthropic,
)
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
from langchain_community.llms.bedrock import BedrockBase
from langchain_community.utilities.anthropic import (
    get_num_tokens_anthropic,
    get_token_ids_anthropic,
)


def _convert_one_message_to_text_mistral(message: BaseMessage) -> str:
    """Render a single LangChain message in Mistral's instruction format.

    Each concrete message type maps to a distinct prompt wrapper:
    ``[INST] ... [/INST]`` for human turns, ``<<SYS>> ... <</SYS>>`` for
    system text, bare content for AI turns, and a role-prefixed line for
    generic ``ChatMessage``s.

    Raises:
        ValueError: if the message is not one of the known message types.
    """
    # ChatMessage is checked first so role-tagged messages keep their role
    # label rather than falling through to any other formatting.
    if isinstance(message, ChatMessage):
        rendered = f"\n\n{message.role.capitalize()}: {message.content}"
    elif isinstance(message, HumanMessage):
        rendered = f"[INST] {message.content} [/INST]"
    elif isinstance(message, AIMessage):
        rendered = f"{message.content}"
    elif isinstance(message, SystemMessage):
        rendered = f"<<SYS>> {message.content} <</SYS>>"
    else:
        raise ValueError(f"Got unknown type {message}")
    return rendered
def convert_messages_to_prompt_mistral(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for mistral."""
    # Render each message individually, then glue the turns together with
    # newlines to form a single prompt string.
    rendered_turns = [
        _convert_one_message_to_text_mistral(msg) for msg in messages
    ]
    return "\n".join(rendered_turns)
def_format_image(image_url:str)->Dict:""" Formats an image of format data:image/jpeg;base64,{b64_string} to a dict for anthropic api { "type": "base64", "media_type": "image/jpeg", "data": "/9j/4AAQSkZJRg...", } And throws an error if it's not a b64 image """regex=r"^data:(?P<media_type>image/.+);base64,(?P<data>.+)$"match=re.match(regex,image_url)ifmatchisNone:raiseValueError("Anthropic only supports base64-encoded images currently."" Example: data:image/png;base64,'/9j/4AAQSk'...")return{"type":"base64","media_type":match.group("media_type"),"data":match.group("data"),}def_format_anthropic_messages(messages:List[BaseMessage],)->Tuple[Optional[str],List[Dict]]:"""Format messages for anthropic."""""" [ { "role": _message_type_lookups[m.type], "content": [_AnthropicMessageContent(text=m.content).dict()], } for m in messages ] """system:Optional[str]=Noneformatted_messages:List[Dict]=[]fori,messageinenumerate(messages):ifmessage.type=="system":ifi!=0:raiseValueError("System message must be at beginning of message list.")ifnotisinstance(message.content,str):raiseValueError("System message must be a string, "f"instead was: {type(message.content)}")system=message.contentcontinuerole=_message_type_lookups[message.type]content:Union[str,List[Dict]]ifnotisinstance(message.content,str):# parse as dictassertisinstance(message.content,list),("Anthropic message content must be str or list of dicts")# populate contentcontent=[]foriteminmessage.content:ifisinstance(item,str):content.append({"type":"text","text":item,})elifisinstance(item,dict):if"type"notinitem:raiseValueError("Dict content item must have a type key")ifitem["type"]=="image_url":# convert formatsource=_format_image(item["image_url"]["url"])content.append({"type":"image","source":source,})else:content.append(item)else:raiseValueError(f"Content items must be str or dict, instead was: {type(item)}")else:content=message.contentformatted_messages.append({"role":role,"content":content,})returnsystem,formatted_messages
class ChatPromptAdapter:
    """Adapter class to prepare the inputs from Langchain to prompt format
    that Chat model expects.
    """

    @classmethod
    def convert_messages_to_prompt(
        cls, provider: str, messages: List[BaseMessage]
    ) -> str:
        """Render messages as a single text prompt for the given provider.

        Raises:
            NotImplementedError: if the provider has no chat prompt format.
        """
        # Branches are mutually exclusive string matches, so ordering is
        # purely cosmetic; each provider gets its own renderer.
        if provider == "amazon":
            # Amazon Titan reuses the anthropic renderer with its own
            # turn markers.
            return convert_messages_to_prompt_anthropic(
                messages=messages,
                human_prompt="\n\nUser:",
                ai_prompt="\n\nBot:",
            )
        if provider == "anthropic":
            return convert_messages_to_prompt_anthropic(messages=messages)
        if provider == "meta":
            return convert_messages_to_prompt_llama(messages=messages)
        if provider == "mistral":
            return convert_messages_to_prompt_mistral(messages=messages)
        raise NotImplementedError(
            f"Provider {provider} model does not support chat."
        )

    @classmethod
    def format_messages(
        cls, provider: str, messages: List[BaseMessage]
    ) -> Tuple[Optional[str], List[Dict]]:
        """Format messages into structured (system, messages) form.

        Only anthropic supports the structured message API here.

        Raises:
            NotImplementedError: for any other provider.
        """
        if provider == "anthropic":
            return _format_anthropic_messages(messages)

        raise NotImplementedError(
            f"Provider {provider} not supported for format_messages"
        )
@deprecated(
    since="0.0.34", removal="1.0", alternative_import="langchain_aws.ChatBedrock"
)
class BedrockChat(BaseChatModel, BedrockBase):
    """Chat model that uses the Bedrock API."""

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "amazon_bedrock_chat"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "bedrock"]

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        # Only include region_name in the serialized attributes when it is
        # actually set (falsy/None regions are omitted).
        attributes: Dict[str, Any] = {}

        if self.region_name:
            attributes["region_name"] = self.region_name

        return attributes

    # Reject unknown fields at construction time.
    model_config = ConfigDict(
        extra="forbid",
    )

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the model response as ChatGenerationChunks.

        Anthropic uses the structured (system, messages) API; every other
        provider gets a single flattened text prompt. Exactly one of
        ``prompt`` or ``system``/``formatted_messages`` is populated; the
        other stays None and is passed through as such.
        """
        provider = self._get_provider()
        prompt, system, formatted_messages = None, None, None

        if provider == "anthropic":
            system, formatted_messages = ChatPromptAdapter.format_messages(
                provider, messages
            )
        else:
            prompt = ChatPromptAdapter.convert_messages_to_prompt(
                provider=provider, messages=messages
            )

        # NOTE(review): assumes chunks yielded by
        # _prepare_input_and_invoke_stream (defined on BedrockBase, not in
        # view) expose a .text attribute — confirm against BedrockBase.
        for chunk in self._prepare_input_and_invoke_stream(
            prompt=prompt,
            system=system,
            messages=formatted_messages,
            stop=stop,
            run_manager=run_manager,
            **kwargs,
        ):
            delta = chunk.text
            yield ChatGenerationChunk(message=AIMessageChunk(content=delta))

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a single chat completion, streaming internally if enabled.

        NOTE(review): the streaming branch concatenates chunk text but never
        sets llm_output["usage"]; token usage is only reported on the
        non-streaming path.
        """
        completion = ""
        llm_output: Dict[str, Any] = {"model_id": self.model_id}

        if self.streaming:
            for chunk in self._stream(messages, stop, run_manager, **kwargs):
                completion += chunk.text
        else:
            provider = self._get_provider()
            prompt, system, formatted_messages = None, None, None
            params: Dict[str, Any] = {**kwargs}

            # Same prompt-vs-structured-messages split as _stream.
            if provider == "anthropic":
                system, formatted_messages = ChatPromptAdapter.format_messages(
                    provider, messages
                )
            else:
                prompt = ChatPromptAdapter.convert_messages_to_prompt(
                    provider=provider, messages=messages
                )

            if stop:
                params["stop_sequences"] = stop

            completion, usage_info = self._prepare_input_and_invoke(
                prompt=prompt,
                stop=stop,
                run_manager=run_manager,
                system=system,
                messages=formatted_messages,
                **params,
            )

            llm_output["usage"] = usage_info

        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=completion))],
            llm_output=llm_output,
        )

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        """Merge per-call llm_output dicts, summing token usage counts.

        Non-usage keys are merged with last-writer-wins semantics via
        dict.update; the aggregated usage always overwrites any single
        call's "usage" entry at the end.
        """
        # defaultdict(int) lets unseen token types start at zero.
        final_usage: Dict[str, int] = defaultdict(int)
        final_output = {}
        for output in llm_outputs:
            output = output or {}
            usage = output.get("usage", {})
            for token_type, token_count in usage.items():
                final_usage[token_type] += token_count
            final_output.update(output)
        # NOTE(review): final_usage is returned as a defaultdict, not a plain
        # dict — callers that serialize it should be unaffected, but identity
        # checks on type would differ.
        final_output["usage"] = final_usage
        return final_output