import logging
import re
import warnings
from collections import defaultdict
from operator import itemgetter
from typing import (
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Union,
    cast,
)

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import (
    BaseChatModel,
    LangSmithParams,
    LanguageModelInput,
)
from langchain_core.language_models.chat_models import generate_from_stream
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.tool import ToolCall, ToolMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
from pydantic import BaseModel, ConfigDict, Field, model_validator

from langchain_aws.chat_models.bedrock_converse import ChatBedrockConverse
from langchain_aws.function_calling import (
    ToolsOutputParser,
    _lc_tool_calls_to_anthropic_tool_use_blocks,
    convert_to_anthropic_tool,
    get_system_message,
)
from langchain_aws.llms.bedrock import (
    BedrockBase,
    _combine_generation_info_for_llm_result,
)
from langchain_aws.utils import (
    anthropic_tokens_supported,
    get_num_tokens_anthropic,
    get_token_ids_anthropic,
)

logger = logging.getLogger(__name__)


def _convert_one_message_to_text_llama(message: BaseMessage) -> str:
    if isinstance(message, ChatMessage):
        message_text = f"\n\n{message.role.capitalize()}: {message.content}"
    elif isinstance(message, HumanMessage):
        message_text = f"[INST] {message.content} [/INST]"
    elif isinstance(message, AIMessage):
        message_text = f"{message.content}"
    elif isinstance(message, SystemMessage):
        message_text = f"<<SYS>> {message.content} <</SYS>>"
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
def convert_messages_to_prompt_llama(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for llama."""
    return "\n".join(
        [_convert_one_message_to_text_llama(message) for message in messages]
    )
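# Illustrative doctest-style sketch (hand-traced from the helpers above, not
# captured from a live run) of the Llama 2 chat prompt this produces:
#
#   >>> from langchain_core.messages import HumanMessage, SystemMessage
#   >>> convert_messages_to_prompt_llama(
#   ...     [SystemMessage(content="Be brief."), HumanMessage(content="Hi")]
#   ... )
#   '<<SYS>> Be brief. <</SYS>>\n[INST] Hi [/INST]'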
def _convert_one_message_to_text_llama3(message: BaseMessage) -> str:
    if isinstance(message, ChatMessage):
        message_text = (
            f"<|start_header_id|>{message.role}"
            f"<|end_header_id|>{message.content}<|eot_id|>"
        )
    elif isinstance(message, HumanMessage):
        message_text = (
            f"<|start_header_id|>user"
            f"<|end_header_id|>{message.content}<|eot_id|>"
        )
    elif isinstance(message, AIMessage):
        message_text = (
            f"<|start_header_id|>assistant"
            f"<|end_header_id|>{message.content}<|eot_id|>"
        )
    elif isinstance(message, SystemMessage):
        message_text = (
            f"<|start_header_id|>system"
            f"<|end_header_id|>{message.content}<|eot_id|>"
        )
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
def convert_messages_to_prompt_llama3(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for llama 3."""
    return "\n".join(
        ["<|begin_of_text|>"]
        + [_convert_one_message_to_text_llama3(message) for message in messages]
        + ["<|start_header_id|>assistant<|end_header_id|>\n\n"]
    )
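# Hand-traced sketch of the Llama 3 template: a begin-of-text marker, one
# header-delimited block per message, and a trailing assistant header that
# cues the model to respond:
#
#   >>> convert_messages_to_prompt_llama3([HumanMessage(content="Hi")])
#   '<|begin_of_text|>\n<|start_header_id|>user<|end_header_id|>Hi<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n'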
def _convert_one_message_to_text_anthropic(
    message: BaseMessage,
    human_prompt: str,
    ai_prompt: str,
) -> str:
    content = cast(str, message.content)
    if isinstance(message, ChatMessage):
        message_text = f"\n\n{message.role.capitalize()}: {content}"
    elif isinstance(message, HumanMessage):
        message_text = f"{human_prompt} {content}"
    elif isinstance(message, AIMessage):
        message_text = f"{ai_prompt} {content}"
    elif isinstance(message, SystemMessage):
        message_text = content
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
def convert_messages_to_prompt_anthropic(
    messages: List[BaseMessage],
    *,
    human_prompt: str = "\n\nHuman:",
    ai_prompt: str = "\n\nAssistant:",
) -> str:
    """Format a list of messages into a full prompt for the Anthropic model

    Args:
        messages (List[BaseMessage]): List of BaseMessage to combine.
        human_prompt (str, optional): Human prompt tag. Defaults to "\n\nHuman:".
        ai_prompt (str, optional): AI prompt tag. Defaults to "\n\nAssistant:".

    Returns:
        str: Combined string with necessary human_prompt and ai_prompt tags.
    """
    messages = messages.copy()  # don't mutate the original list
    if not isinstance(messages[-1], AIMessage):
        messages.append(AIMessage(content=""))

    text = "".join(
        _convert_one_message_to_text_anthropic(message, human_prompt, ai_prompt)
        for message in messages
    )

    # trim off the trailing ' ' that might come from the "Assistant: "
    return text.rstrip()
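# Hand-traced sketch: an empty AIMessage is appended when the conversation
# does not already end with one, so the prompt always closes with the
# "\n\nAssistant:" cue (trailing whitespace stripped):
#
#   >>> convert_messages_to_prompt_anthropic([HumanMessage(content="Hi")])
#   '\n\nHuman: Hi\n\nAssistant:'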
def _convert_one_message_to_text_mistral(message: BaseMessage) -> str:
    if isinstance(message, ChatMessage):
        message_text = f"\n\n{message.role.capitalize()}: {message.content}"
    elif isinstance(message, HumanMessage):
        message_text = f"[INST] {message.content} [/INST]"
    elif isinstance(message, AIMessage):
        message_text = f"{message.content}"
    elif isinstance(message, SystemMessage):
        message_text = f"<<SYS>> {message.content} <</SYS>>"
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
def convert_messages_to_prompt_mistral(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for mistral."""
    return "\n".join(
        [_convert_one_message_to_text_mistral(message) for message in messages]
    )
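# Hand-traced sketch of the Mistral instruction format, which wraps human
# turns in [INST] tags and leaves assistant turns bare:
#
#   >>> convert_messages_to_prompt_mistral(
#   ...     [HumanMessage(content="Hi"), AIMessage(content="Hello!")]
#   ... )
#   '[INST] Hi [/INST]\nHello!'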
def _convert_one_message_to_text_deepseek(message: BaseMessage) -> str:
    if isinstance(message, ChatMessage):
        message_text = f"<|{message.role}|>{message.content}"
    elif isinstance(message, HumanMessage):
        message_text = f"<|User|>{message.content}"
    elif isinstance(message, AIMessage):
        message_text = f"<|Assistant|>{message.content}"
    elif isinstance(message, SystemMessage):
        message_text = f"<|System|>{message.content}"
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
def convert_messages_to_prompt_deepseek(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for DeepSeek-R1."""
    prompt = "\n<|begin_of_sentence|>"
    for message in messages:
        prompt += _convert_one_message_to_text_deepseek(message)
    prompt += "<|Assistant|>\n\n"
    return prompt
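# Hand-traced sketch of the DeepSeek-R1 format: role tags are concatenated
# without separators and the prompt ends with an assistant tag to cue the
# model's reply:
#
#   >>> convert_messages_to_prompt_deepseek([HumanMessage(content="Hi")])
#   '\n<|begin_of_sentence|><|User|>Hi<|Assistant|>\n\n'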
def _format_image(image_url: str) -> Dict:
    """
    Formats an image of format data:image/jpeg;base64,{b64_string}
    to a dict for anthropic api

    {
      "type": "base64",
      "media_type": "image/jpeg",
      "data": "/9j/4AAQSkZJRg...",
    }

    And throws an error if it's not a b64 image
    """
    regex = r"^data:(?P<media_type>image/.+);base64,(?P<data>.+)$"
    match = re.match(regex, image_url)
    if match is None:
        raise ValueError(
            "Anthropic only supports base64-encoded images currently."
            " Example: data:image/png;base64,'/9j/4AAQSk'..."
        )
    return {
        "type": "base64",
        "media_type": match.group("media_type"),
        "data": match.group("data"),
    }


# Maps LangChain message types to Anthropic API roles; referenced by
# _format_anthropic_messages below (minimal assumed mapping).
_message_type_lookups = {
    "human": "user",
    "ai": "assistant",
}


def _merge_messages(
    messages: Sequence[BaseMessage],
) -> List[Union[SystemMessage, AIMessage, HumanMessage]]:
    """Merge runs of human/tool messages into single human messages with content blocks."""  # noqa: E501
    merged: list = []
    for curr in messages:
        curr = curr.model_copy(deep=True)
        if isinstance(curr, ToolMessage):
            if isinstance(curr.content, list) and all(
                isinstance(block, dict) and block.get("type") == "tool_result"
                for block in curr.content
            ):
                curr = HumanMessage(curr.content)  # type: ignore[misc]
            else:
                curr = HumanMessage(  # type: ignore[misc]
                    [
                        {
                            "type": "tool_result",
                            "content": curr.content,
                            "tool_use_id": curr.tool_call_id,
                        }
                    ]
                )
        last = merged[-1] if merged else None
        if isinstance(last, HumanMessage) and isinstance(curr, HumanMessage):
            if isinstance(last.content, str):
                new_content: List = [{"type": "text", "text": last.content}]
            else:
                new_content = last.content
            if isinstance(curr.content, str):
                new_content.append({"type": "text", "text": curr.content})
            else:
                new_content.extend(curr.content)
            last.content = new_content
        else:
            merged.append(curr)
    return merged


def _format_anthropic_messages(
    messages: List[BaseMessage],
) -> Tuple[Optional[str], List[Dict]]:
    """Format messages for anthropic."""
    system: Optional[str] = None
    formatted_messages: List[Dict] = []

    merged_messages = _merge_messages(messages)
    for i, message in enumerate(merged_messages):
        if message.type == "system":
            if i != 0:
                raise ValueError("System message must be at beginning of message list.")
            if isinstance(message.content, str):
                system = message.content
            elif isinstance(message.content, list):
                text_chunks = []
                for item in message.content:
                    if isinstance(item, str):
                        text_chunks.append(item)
                    elif isinstance(item, dict):
                        if item.get("type") != "text":
                            raise ValueError(
                                "System message content item must be type 'text'"
                            )
                        if "text" not in item:
                            raise ValueError(
                                "System message content item must have a 'text' key"
                            )
                        text_chunks.append(item["text"])
                    else:
                        raise ValueError(
                            "System message content list must be a string or dict, "
                            f"instead was: {type(item)}"
                        )
                system = "".join(text_chunks)
            else:
                raise ValueError(
                    "System message content must be a string or list, "
                    f"instead was: {type(message.content)}"
                )
            continue

        role = _message_type_lookups[message.type]
        content: Union[str, List]

        if not isinstance(message.content, str):
            # parse as dict
            assert isinstance(
                message.content, list
            ), "Anthropic message content must be str or list of dicts"

            # populate content
            content = []
            thinking_blocks = []
            text_blocks = []
            tool_blocks = []

            # First collect all blocks by type
            for item in message.content:
                if isinstance(item, str):
                    text_blocks.append({"type": "text", "text": item})
                elif isinstance(item, dict):
                    if "type" not in item:
                        raise ValueError("Dict content item must have a type key")
                    elif item["type"] == "image_url":
                        # convert format
                        source = _format_image(item["image_url"]["url"])
                        tool_blocks.append({"type": "image", "source": source})
                    elif item["type"] == "tool_use":
                        # If a tool_call with the same id as a tool_use content block
                        # exists, the tool_call is preferred.
                        if isinstance(message, AIMessage) and item["id"] in [
                            tc["id"] for tc in message.tool_calls
                        ]:
                            overlapping = [
                                tc
                                for tc in message.tool_calls
                                if tc["id"] == item["id"]
                            ]
                            tool_blocks.extend(
                                _lc_tool_calls_to_anthropic_tool_use_blocks(overlapping)
                            )
                        else:
                            item.pop("text", None)
                            tool_blocks.append(item)
                    elif item["type"] in ["thinking", "redacted_thinking"]:
                        # Store thinking blocks separately
                        thinking_blocks.append(item)
                    elif item["type"] == "text":
                        text = item.get("text", "")
                        # Only add non-empty strings for now as empty ones are not
                        # accepted.
                        # https://github.com/anthropics/anthropic-sdk-python/issues/461
                        if text.strip():
                            content_item = {"type": "text", "text": text}
                            if item.get("cache_control"):
                                content_item["cache_control"] = {"type": "ephemeral"}
                            text_blocks.append(content_item)
                    else:
                        tool_blocks.append(item)
                else:
                    raise ValueError(
                        f"Content items must be str or dict, instead was: {type(item)}"
                    )

            # For assistant messages, when thinking blocks exist, ensure they come first
            if role == "assistant":
                content = text_blocks + tool_blocks
                if thinking_blocks:
                    content = thinking_blocks + content
            elif role == "user" and tool_blocks and text_blocks:
                content = tool_blocks + text_blocks  # tool result must precede text
                if thinking_blocks:
                    content = thinking_blocks + content
            else:
                # combine all blocks in standard order
                content = text_blocks + tool_blocks
                # Only include thinking blocks if they exist
                if thinking_blocks:
                    content = thinking_blocks + content
        elif isinstance(message, AIMessage):
            # For string content, create appropriate structure
            content_list = []

            # Add thinking blocks from additional_kwargs if present
            if message.additional_kwargs and "thinking" in message.additional_kwargs:
                thinking_data = message.additional_kwargs["thinking"]
                if thinking_data and isinstance(thinking_data, dict):
                    if "text" in thinking_data and "signature" in thinking_data:
                        content_list.append(
                            {
                                "type": "thinking",
                                "thinking": thinking_data["text"],
                                "signature": thinking_data["signature"],
                            }
                        )

            # Add base content as text block
            if message.content:
                content_list.append({"type": "text", "text": message.content})

            # Add tool calls if present
            if message.tool_calls:
                content_list.extend(
                    _lc_tool_calls_to_anthropic_tool_use_blocks(message.tool_calls)
                )

            # For assistant messages with thinking blocks, ensure they come first
            if role == "assistant" and any(
                block.get("type") in ["thinking", "redacted_thinking"]
                for block in content_list
                if isinstance(block, dict)
            ):
                # Separate thinking blocks and non-thinking blocks
                thinking_blocks = [
                    block
                    for block in content_list
                    if isinstance(block, dict)
                    and block.get("type") in ["thinking", "redacted_thinking"]
                ]
                other_blocks = [
                    block
                    for block in content_list
                    if not (
                        isinstance(block, dict)
                        and block.get("type") in ["thinking", "redacted_thinking"]
                    )
                ]
                # Combine with thinking first
                content = thinking_blocks + other_blocks
            else:
                # No thinking blocks or not an assistant message
                content = content_list
        else:
            # Simple string content
            content = message.content

        formatted_messages.append({"role": role, "content": content})
    return system, formatted_messages
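# Hand-traced sketch of a tool round-trip (not from a live run): a ToolMessage
# with plain-string content is rewritten by _merge_messages into a user-role
# tool_result block, which _format_anthropic_messages then passes through:
#
#   >>> msgs = [
#   ...     HumanMessage(content="What is 2+2?"),
#   ...     AIMessage(
#   ...         content="",
#   ...         tool_calls=[{"name": "add", "args": {"a": 2, "b": 2}, "id": "t1"}],
#   ...     ),
#   ...     ToolMessage(content="4", tool_call_id="t1"),
#   ... ]
#   >>> system, formatted = _format_anthropic_messages(msgs)
#   >>> formatted[-1]
#   {'role': 'user', 'content': [{'type': 'tool_result', 'content': '4', 'tool_use_id': 't1'}]}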
class ChatPromptAdapter:
    """Adapter class to prepare the inputs from LangChain to the prompt format
    that the Chat model expects.
    """
    @classmethod
    def convert_messages_to_prompt(
        cls, provider: str, messages: List[BaseMessage], model: str
    ) -> str:
        if provider == "anthropic":
            prompt = convert_messages_to_prompt_anthropic(messages=messages)
        elif provider == "deepseek":
            prompt = convert_messages_to_prompt_deepseek(messages=messages)
        elif provider == "meta":
            if "llama3" in model:
                prompt = convert_messages_to_prompt_llama3(messages=messages)
            else:
                prompt = convert_messages_to_prompt_llama(messages=messages)
        elif provider == "mistral":
            prompt = convert_messages_to_prompt_mistral(messages=messages)
        elif provider == "amazon":
            prompt = convert_messages_to_prompt_anthropic(
                messages=messages,
                human_prompt="\n\nUser:",
                ai_prompt="\n\nBot:",
            )
        else:
            raise NotImplementedError(
                f"Provider {provider} model does not support chat."
            )
        return prompt
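    # Hand-traced dispatch sketch: the provider selects the template, and for
    # "meta" the model id further selects between Llama 2 and Llama 3:
    #
    #   >>> ChatPromptAdapter.convert_messages_to_prompt(
    #   ...     provider="meta",
    #   ...     messages=[HumanMessage(content="Hi")],
    #   ...     model="meta.llama3-8b-instruct-v1:0",
    #   ... )
    #   '<|begin_of_text|>\n<|start_header_id|>user<|end_header_id|>Hi<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n'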
    @classmethod
    def format_messages(
        cls, provider: str, messages: List[BaseMessage]
    ) -> Tuple[Optional[str], List[Dict]]:
        if provider == "anthropic":
            return _format_anthropic_messages(messages)

        raise NotImplementedError(
            f"Provider {provider} not supported for format_messages"
        )
class ChatBedrock(BaseChatModel, BedrockBase):
    """A chat model that uses the Bedrock API."""

    system_prompt_with_tools: str = ""
    beta_use_converse_api: bool = False
    """Use the new Bedrock ``converse`` API which provides a standardized interface to
    all Bedrock models. Support still in beta. See ChatBedrockConverse docs for more."""

    stop_sequences: Optional[List[str]] = Field(default=None, alias="stop")
    """Stop sequence inference parameter from new Bedrock ``converse`` API providing
    a sequence of characters that causes a model to stop generating a response. See
    https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_InferenceConfiguration.html
    for more."""

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "amazon_bedrock_chat"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "bedrock"]

    @model_validator(mode="before")
    @classmethod
    def set_beta_use_converse_api(cls, values: Dict) -> Any:
        model_id = values.get("model_id", values.get("model"))
        if model_id and "beta_use_converse_api" not in values:
            values["beta_use_converse_api"] = "nova" in model_id
        return values

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        attributes: Dict[str, Any] = {}

        if self.region_name:
            attributes["region_name"] = self.region_name

        return attributes

    model_config = ConfigDict(
        extra="forbid",
        populate_by_name=True,
    )

    def _get_ls_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
        ls_params = LangSmithParams(
            ls_provider="amazon_bedrock",
            ls_model_name=self.model_id,
            ls_model_type="chat",
        )
        if ls_temperature := params.get("temperature", self.temperature):
            ls_params["ls_temperature"] = ls_temperature
        if ls_max_tokens := params.get("max_tokens", self.max_tokens):
            ls_params["ls_max_tokens"] = ls_max_tokens
        if ls_stop := stop or params.get("stop", None):
            ls_params["ls_stop"] = ls_stop
        return ls_params

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        if self.beta_use_converse_api:
            yield from self._as_converse._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return
        provider = self._get_provider()
        prompt, system, formatted_messages = None, None, None

        if provider == "anthropic":
            system, formatted_messages = ChatPromptAdapter.format_messages(
                provider, messages
            )
            if self.system_prompt_with_tools:
                if system:
                    system = self.system_prompt_with_tools + f"\n{system}"
                else:
                    system = self.system_prompt_with_tools
        else:
            prompt = ChatPromptAdapter.convert_messages_to_prompt(
                provider=provider, messages=messages, model=self._get_model()
            )

        added_model_name = False
        for chunk in self._prepare_input_and_invoke_stream(
            prompt=prompt,
            system=system,
            messages=formatted_messages,
            stop=stop,
            run_manager=run_manager,
            **kwargs,
        ):
            if isinstance(chunk, AIMessageChunk):
                generation_chunk = ChatGenerationChunk(message=chunk)
                if run_manager:
                    run_manager.on_llm_new_token(
                        generation_chunk.text, chunk=generation_chunk
                    )
                yield generation_chunk
            else:
                delta = chunk.text
                response_metadata = None
                if generation_info := chunk.generation_info:
                    usage_metadata = generation_info.pop("usage_metadata", None)
                    response_metadata = generation_info
                    if not added_model_name:
                        response_metadata["model_name"] = self.model_id
                        added_model_name = True
                else:
                    usage_metadata = None
                generation_chunk = ChatGenerationChunk(
                    message=AIMessageChunk(
                        content=delta,
                        response_metadata=response_metadata,
                        usage_metadata=usage_metadata,
                    )
                    if response_metadata is not None
                    else AIMessageChunk(content=delta)
                )
                if run_manager:
                    run_manager.on_llm_new_token(
                        generation_chunk.text, chunk=generation_chunk
                    )
                yield generation_chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.beta_use_converse_api:
            if not self.streaming:
                return self._as_converse._generate(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                )
            else:
                stream_iter = self._as_converse._stream(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                )
                return generate_from_stream(stream_iter)
        completion = ""
        llm_output: Dict[str, Any] = {}
        tool_calls: List[ToolCall] = []
        provider_stop_reason_code = self.provider_stop_reason_key_map.get(
            self._get_provider(), "stop_reason"
        )
        provider = self._get_provider()
        if self.streaming:
            if provider == "anthropic":
                stream_iter = self._stream(messages, stop, run_manager, **kwargs)
                return generate_from_stream(stream_iter)

            response_metadata: List[Dict[str, Any]] = []
            for chunk in self._stream(messages, stop, run_manager, **kwargs):
                completion += chunk.text
                response_metadata.append(chunk.message.response_metadata)
                if "tool_calls" in chunk.message.additional_kwargs.keys():
                    tool_calls = chunk.message.additional_kwargs["tool_calls"]
            llm_output = _combine_generation_info_for_llm_result(
                response_metadata, provider_stop_reason_code
            )
        else:
            prompt, system, formatted_messages = None, None, None
            params: Dict[str, Any] = {**kwargs}

            if provider == "anthropic":
                system, formatted_messages = ChatPromptAdapter.format_messages(
                    provider, messages
                )
                # use tools the new way with claude 3
                if self.system_prompt_with_tools:
                    if system:
                        system = self.system_prompt_with_tools + f"\n{system}"
                    else:
                        system = self.system_prompt_with_tools
            else:
                prompt = ChatPromptAdapter.convert_messages_to_prompt(
                    provider=provider, messages=messages, model=self._get_model()
                )

            if stop:
                params["stop_sequences"] = stop

            completion, tool_calls, llm_output = self._prepare_input_and_invoke(
                prompt=prompt,
                stop=stop,
                run_manager=run_manager,
                system=system,
                messages=formatted_messages,
                **params,
            )
        # usage metadata
        if usage := llm_output.get("usage"):
            input_tokens = usage.get("prompt_tokens", 0)
            output_tokens = usage.get("completion_tokens", 0)
            usage_metadata = UsageMetadata(
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                total_tokens=usage.get("total_tokens", input_tokens + output_tokens),
            )
        else:
            usage_metadata = None

        logger.debug(f"The message received from Bedrock: {completion}")
        llm_output["model_id"] = self.model_id  # backward-compatibility
        llm_output["model_name"] = self.model_id
        msg = AIMessage(
            content=completion,
            additional_kwargs=llm_output,
            tool_calls=cast(List[ToolCall], tool_calls),
            usage_metadata=usage_metadata,
        )
        return ChatResult(
            generations=[
                ChatGeneration(
                    message=msg,
                )
            ],
            llm_output=llm_output,
        )

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        final_usage: Dict[str, int] = defaultdict(int)
        final_output = {}
        for output in llm_outputs:
            output = output or {}
            usage = output.get("usage", {})
            for token_type, token_count in usage.items():
                final_usage[token_type] += token_count
            final_output.update(output)
        final_output["usage"] = final_usage
        return final_output
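    # Hand-traced sketch of the usage aggregation above (the ChatBedrock
    # constructor assumes AWS credentials are configured): per-output "usage"
    # counters are summed key-by-key, while other keys take the last value seen:
    #
    #   >>> llm = ChatBedrock(model_id="anthropic.claude-3-sonnet-20240229-v1:0")
    #   >>> llm._combine_llm_outputs([
    #   ...     {"usage": {"prompt_tokens": 10, "completion_tokens": 5}},
    #   ...     {"usage": {"prompt_tokens": 7, "completion_tokens": 3}},
    #   ... ])["usage"]
    #   defaultdict(<class 'int'>, {'prompt_tokens': 17, 'completion_tokens': 8})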
    def get_token_ids(self, text: str) -> List[int]:
        if self._model_is_anthropic and not self.custom_get_token_ids:
            if anthropic_tokens_supported():
                return get_token_ids_anthropic(text)
            else:
                warnings.warn(
                    "Falling back to default token method due to missing or "
                    "incompatible `anthropic` installation "
                    "(needs <=0.38.0).\n\nIf using `anthropic>0.38.0`, "
                    "it is recommended to provide the model class with a "
                    "custom_get_token_ids method implementing a more accurate "
                    "tokenizer for Anthropic. For get_num_tokens, as another "
                    "alternative, you can implement your own token counter method "
                    "using the ChatAnthropic or AnthropicLLM classes."
                )
        return super().get_token_ids(text)
    def set_system_prompt_with_tools(self, xml_tools_system_prompt: str) -> None:
        """Workaround for bind_tools: sets the system prompt that carries the
        tool definitions."""
        self.system_prompt_with_tools = xml_tools_system_prompt
    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], TypeBaseModel, Callable, BaseTool]],
        *,
        tool_choice: Optional[Union[dict, str, Literal["auto", "none"], bool]] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Assumes model has a tool calling API.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
                models, callables, and BaseTools will be automatically converted to
                their schema dictionary representation.
            tool_choice: Which tool to require the model to call.
                Must be the name of the single provided function or
                "auto" to automatically determine which function to call
                (if any), or a dict of the form:
                {"type": "function", "function": {"name": <<tool_name>>}}.
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain.runnable.Runnable` constructor.
        """
        if self.beta_use_converse_api:
            if isinstance(tool_choice, bool):
                tool_choice = "any" if tool_choice else None
            return self._as_converse.bind_tools(
                tools, tool_choice=tool_choice, **kwargs
            )
        if self._get_provider() == "anthropic":
            formatted_tools = [convert_to_anthropic_tool(tool) for tool in tools]

            # true if the model is a claude 3 model
            if "claude-3" in self._get_model():
                if not tool_choice:
                    pass
                elif isinstance(tool_choice, dict):
                    kwargs["tool_choice"] = tool_choice
                elif isinstance(tool_choice, str) and tool_choice in ("any", "auto"):
                    kwargs["tool_choice"] = {"type": tool_choice}
                elif isinstance(tool_choice, str):
                    kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
                else:
                    raise ValueError(
                        f"Unrecognized 'tool_choice' type {tool_choice=}. "
                        f"Expected dict, str, or None."
                    )
                return self.bind(tools=formatted_tools, **kwargs)
            else:
                # add tools to the system prompt, the old way
                system_formatted_tools = get_system_message(formatted_tools)
                self.set_system_prompt_with_tools(system_formatted_tools)
        return self
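    # Minimal bind_tools sketch for a Claude 3 model (assumes AWS credentials
    # are configured; GetWeather is an illustrative schema, not part of this
    # module). A string tool_choice other than "any"/"auto" is translated to
    # {"type": "tool", "name": <tool_choice>}:
    #
    #   >>> from pydantic import BaseModel, Field
    #   >>> class GetWeather(BaseModel):
    #   ...     '''Get the current weather for a location.'''
    #   ...     location: str = Field(..., description="City name")
    #   >>> llm = ChatBedrock(model_id="anthropic.claude-3-sonnet-20240229-v1:0")
    #   >>> llm_with_tools = llm.bind_tools([GetWeather], tool_choice="GetWeather")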
    def with_structured_output(
        self,
        schema: Union[Dict, TypeBaseModel],
        *,
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema as a dict or a Pydantic class. If a Pydantic
                class then the model output will be an object of that class. If a
                dict then the model output will be a dict. With a Pydantic class the
                returned attributes will be validated, whereas with a dict they will
                not be.
            include_raw: If False then only the parsed structured output is returned.
                If an error occurs during model output parsing it will be raised.
                If True then both the raw model response (a BaseMessage) and the
                parsed model response will be returned. If an error occurs during
                output parsing it will be caught and returned as well. The final
                output is always a dict with keys "raw", "parsed", and
                "parsing_error".

        Returns:
            A Runnable that takes any ChatModel input. The output type depends on
            include_raw and schema.

            If include_raw is True then output is a dict with keys:
                raw: BaseMessage,
                parsed: Optional[_DictOrPydantic],
                parsing_error: Optional[BaseException],

            If include_raw is False and schema is a Dict then the runnable outputs
            a Dict. If include_raw is False and schema is a Type[BaseModel] then
            the runnable outputs a BaseModel.

        Example: Pydantic schema (include_raw=False):
            .. code-block:: python

                from langchain_aws.chat_models.bedrock import ChatBedrock
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''
                    answer: str
                    justification: str

                llm = ChatBedrock(
                    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
                    model_kwargs={"temperature": 0.001},
                )  # type: ignore[call-arg]
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        Example: Pydantic schema (include_raw=True):
            .. code-block:: python

                from langchain_aws.chat_models.bedrock import ChatBedrock
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''
                    answer: str
                    justification: str

                llm = ChatBedrock(
                    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
                    model_kwargs={"temperature": 0.001},
                )  # type: ignore[call-arg]
                structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        Example: Dict schema (include_raw=False):
            .. code-block:: python

                from langchain_aws.chat_models.bedrock import ChatBedrock

                schema = {
                    "name": "AnswerWithJustification",
                    "description": "An answer to the user question along with justification for the answer.",
                    "input_schema": {
                        "type": "object",
                        "properties": {
                            "answer": {"type": "string"},
                            "justification": {"type": "string"},
                        },
                        "required": ["answer", "justification"]
                    }
                }
                llm = ChatBedrock(
                    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
                    model_kwargs={"temperature": 0.001},
                )  # type: ignore[call-arg]
                structured_llm = llm.with_structured_output(schema)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }
        """  # noqa: E501
        if self.beta_use_converse_api:
            return self._as_converse.with_structured_output(
                schema, include_raw=include_raw, **kwargs
            )
        if "claude-3" not in self._get_model():
            raise ValueError(
                f"Structured output is not supported for model {self._get_model()}"
            )

        tool_name = convert_to_anthropic_tool(schema)["name"]
        llm = self.bind_tools(
            [schema],
            tool_choice=tool_name,
            ls_structured_output_format={
                "kwargs": {"method": "function_calling"},
                "schema": convert_to_openai_tool(schema),
            },
        )
        if isinstance(schema, type) and is_basemodel_subclass(schema):
            output_parser = ToolsOutputParser(
                first_tool_only=True, pydantic_schemas=[schema]
            )
        else:
            output_parser = ToolsOutputParser(first_tool_only=True, args_only=True)

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser