class InputTokenDetails(TypedDict, total=False):
    """Breakdown of input token counts.

    Does *not* need to sum to full input token count. Does *not* need to have all keys.

    Example:

        .. code-block:: python

            {
                "audio": 10,
                "cache_creation": 200,
                "cache_read": 100,
            }

    .. versionadded:: 0.3.9
    """

    audio: int
    """Audio input tokens."""
    cache_creation: int
    """Input tokens that were cached and there was a cache miss.

    Since there was a cache miss, the cache was created from these tokens.
    """
    cache_read: int
    """Input tokens that were cached and there was a cache hit.

    Since there was a cache hit, the tokens were read from the cache. More precisely,
    the model state given these tokens was read from the cache.
    """

class OutputTokenDetails(TypedDict, total=False):
    """Breakdown of output token counts.

    Does *not* need to sum to full output token count. Does *not* need to have all keys.

    Example:

        .. code-block:: python

            {
                "audio": 10,
                "reasoning": 200,
            }

    .. versionadded:: 0.3.9
    """

    audio: int
    """Audio output tokens."""
    reasoning: int
    """Reasoning output tokens.

    Tokens generated by the model in a chain-of-thought process (e.g., by OpenAI's o1
    models) that are not returned as part of model output.
    """

class UsageMetadata(TypedDict):
    """Usage metadata for a message, such as token counts.

    This is a standard representation of token usage that is consistent across models.

    Example:

        .. code-block:: python

            {
                "input_tokens": 350,
                "output_tokens": 240,
                "total_tokens": 590,
                "input_token_details": {
                    "audio": 10,
                    "cache_creation": 200,
                    "cache_read": 100,
                },
                "output_token_details": {
                    "audio": 10,
                    "reasoning": 200,
                },
            }

    .. versionchanged:: 0.3.9

        Added ``input_token_details`` and ``output_token_details``.
    """

    input_tokens: int
    """Count of input (or prompt) tokens. Sum of all input token types."""
    output_tokens: int
    """Count of output (or completion) tokens. Sum of all output token types."""
    total_tokens: int
    """Total token count. Sum of ``input_tokens`` + ``output_tokens``."""
    input_token_details: NotRequired[InputTokenDetails]
    """Breakdown of input token counts.

    Does *not* need to sum to full input token count. Does *not* need to have all keys.
    """
    output_token_details: NotRequired[OutputTokenDetails]
    """Breakdown of output token counts.

    Does *not* need to sum to full output token count. Does *not* need to have all keys.
    """

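# Usage sketch (illustrative, not part of the module source): building a
# ``UsageMetadata`` dict by hand and reading its standard fields. The shapes
# follow the TypedDicts above; the numbers are made up.
from langchain_core.messages.ai import InputTokenDetails, UsageMetadata

usage = UsageMetadata(
    input_tokens=350,
    output_tokens=240,
    total_tokens=590,
    input_token_details=InputTokenDetails(cache_read=100),
)
# TypedDicts are plain dicts at runtime, so values are read with normal dict
# access; the optional detail keys may be absent.
cached = usage.get("input_token_details", {}).get("cache_read", 0)
print(usage["total_tokens"], cached)  # 590 100
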
class AIMessage(BaseMessage):
    """Message from an AI.

    AIMessage is returned from a chat model as a response to a prompt.

    This message represents the output of the model and consists of the raw output
    as returned by the model together with standardized fields (e.g., tool calls,
    usage metadata) added by the LangChain framework.
    """

    example: bool = False
    """Use to denote that a message is part of an example conversation.

    At the moment, this is ignored by most models. Usage is discouraged.
    """
    tool_calls: list[ToolCall] = []
    """If provided, tool calls associated with the message."""
    invalid_tool_calls: list[InvalidToolCall] = []
    """If provided, tool calls with parsing errors associated with the message."""
    usage_metadata: Optional[UsageMetadata] = None
    """If provided, usage metadata for a message, such as token counts.

    This is a standard representation of token usage that is consistent across models.
    """
    type: Literal["ai"] = "ai"
    """The type of the message (used for deserialization). Defaults to "ai"."""

    def __init__(
        self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
    ) -> None:
        """Pass in content as positional arg.

        Args:
            content: The content of the message.
            kwargs: Additional arguments to pass to the parent class.
        """
        super().__init__(content=content, **kwargs)

    @property
    def lc_attributes(self) -> dict:
        """Attrs to be serialized even if they are derived from other init args."""
        return {
            "tool_calls": self.tool_calls,
            "invalid_tool_calls": self.invalid_tool_calls,
        }

    @model_validator(mode="before")
    @classmethod
    def _backwards_compat_tool_calls(cls, values: dict) -> Any:
        check_additional_kwargs = not any(
            values.get(k)
            for k in ("tool_calls", "invalid_tool_calls", "tool_call_chunks")
        )
        if check_additional_kwargs and (
            raw_tool_calls := values.get("additional_kwargs", {}).get("tool_calls")
        ):
            try:
                if issubclass(cls, AIMessageChunk):
                    values["tool_call_chunks"] = default_tool_chunk_parser(
                        raw_tool_calls
                    )
                else:
                    parsed_tool_calls, parsed_invalid_tool_calls = default_tool_parser(
                        raw_tool_calls
                    )
                    values["tool_calls"] = parsed_tool_calls
                    values["invalid_tool_calls"] = parsed_invalid_tool_calls
            except Exception:
                logger.debug("Failed to parse tool calls", exc_info=True)

        # Ensure "type" is properly set on all tool call-like dicts.
        if tool_calls := values.get("tool_calls"):
            values["tool_calls"] = [
                create_tool_call(**{k: v for k, v in tc.items() if k != "type"})
                for tc in tool_calls
            ]
        if invalid_tool_calls := values.get("invalid_tool_calls"):
            values["invalid_tool_calls"] = [
                create_invalid_tool_call(**{k: v for k, v in tc.items() if k != "type"})
                for tc in invalid_tool_calls
            ]
        if tool_call_chunks := values.get("tool_call_chunks"):
            values["tool_call_chunks"] = [
                create_tool_call_chunk(**{k: v for k, v in tc.items() if k != "type"})
                for tc in tool_call_chunks
            ]
        return values

    @override
    def pretty_repr(self, html: bool = False) -> str:
        """Return a pretty representation of the message.

        Args:
            html: Whether to return an HTML-formatted string. Defaults to False.

        Returns:
            A pretty representation of the message.
        """
        base = super().pretty_repr(html=html)
        lines = []

        def _format_tool_args(tc: Union[ToolCall, InvalidToolCall]) -> list[str]:
            lines = [
                f"  {tc.get('name', 'Tool')} ({tc.get('id')})",
                f" Call ID: {tc.get('id')}",
            ]
            if tc.get("error"):
                lines.append(f"  Error: {tc.get('error')}")
            lines.append("  Args:")
            args = tc.get("args")
            if isinstance(args, str):
                lines.append(f"    {args}")
            elif isinstance(args, dict):
                for arg, value in args.items():
                    lines.append(f"    {arg}: {value}")
            return lines

        if self.tool_calls:
            lines.append("Tool Calls:")
            for tc in self.tool_calls:
                lines.extend(_format_tool_args(tc))
        if self.invalid_tool_calls:
            lines.append("Invalid Tool Calls:")
            for itc in self.invalid_tool_calls:
                lines.extend(_format_tool_args(itc))
        return (base.strip() + "\n" + "\n".join(lines)).strip()

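# Usage sketch (illustrative, not part of the module source): constructing an
# AIMessage with a tool call and rendering it via ``pretty_repr``. The tool
# name, arguments, and token counts are made up for the example.
from langchain_core.messages import AIMessage

msg = AIMessage(
    "",
    tool_calls=[{"name": "get_weather", "args": {"city": "Paris"}, "id": "call_1"}],
    usage_metadata={"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
)
# pretty_repr renders a "Tool Calls:" section listing each call's name, ID, and args.
print(msg.pretty_repr())
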
class AIMessageChunk(AIMessage, BaseMessageChunk):
    """Message chunk from an AI."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
    """The type of the message (used for deserialization). Defaults to "AIMessageChunk"."""

    tool_call_chunks: list[ToolCallChunk] = []
    """If provided, tool call chunks associated with the message."""

    @property
    def lc_attributes(self) -> dict:
        """Attrs to be serialized even if they are derived from other init args."""
        return {
            "tool_calls": self.tool_calls,
            "invalid_tool_calls": self.invalid_tool_calls,
        }

    @model_validator(mode="after")
    def init_tool_calls(self) -> Self:
        """Initialize tool calls from tool call chunks.

        Returns:
            The instance with ``tool_calls`` and ``invalid_tool_calls`` initialized.

        Raises:
            ValueError: If the tool call chunks are malformed.
        """
        if not self.tool_call_chunks:
            if self.tool_calls:
                self.tool_call_chunks = [
                    create_tool_call_chunk(
                        name=tc["name"],
                        args=json.dumps(tc["args"]),
                        id=tc["id"],
                        index=None,
                    )
                    for tc in self.tool_calls
                ]
            if self.invalid_tool_calls:
                tool_call_chunks = self.tool_call_chunks
                tool_call_chunks.extend(
                    [
                        create_tool_call_chunk(
                            name=tc["name"], args=tc["args"], id=tc["id"], index=None
                        )
                        for tc in self.invalid_tool_calls
                    ]
                )
                self.tool_call_chunks = tool_call_chunks
            return self

        tool_calls = []
        invalid_tool_calls = []

        def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:
            invalid_tool_calls.append(
                create_invalid_tool_call(
                    name=chunk["name"],
                    args=chunk["args"],
                    id=chunk["id"],
                    error=None,
                )
            )

        for chunk in self.tool_call_chunks:
            try:
                args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {}  # type: ignore[arg-type]
                if isinstance(args_, dict):
                    tool_calls.append(
                        create_tool_call(
                            name=chunk["name"] or "",
                            args=args_,
                            id=chunk["id"],
                        )
                    )
                else:
                    add_chunk_to_invalid_tool_calls(chunk)
            except Exception:
                add_chunk_to_invalid_tool_calls(chunk)
        self.tool_calls = tool_calls
        self.invalid_tool_calls = invalid_tool_calls
        return self

    @override
    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]
        if isinstance(other, AIMessageChunk):
            return add_ai_message_chunks(self, other)
        if isinstance(other, (list, tuple)) and all(
            isinstance(o, AIMessageChunk) for o in other
        ):
            return add_ai_message_chunks(self, *other)
        return super().__add__(other)

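# Usage sketch (illustrative, not part of the module source): streamed chunks
# are merged with ``+``, which concatenates content, merges tool call chunks
# that share an index, and re-parses the accumulated arguments into
# ``tool_calls``. The tool name and arguments are made up.
from langchain_core.messages import AIMessageChunk

first = AIMessageChunk(
    content="",
    tool_call_chunks=[
        {"name": "get_weather", "args": '{"city": ', "id": "call_1", "index": 0}
    ],
)
second = AIMessageChunk(
    content="",
    tool_call_chunks=[{"name": None, "args": '"Paris"}', "id": None, "index": 0}],
)
merged = first + second
# The merged chunk's init_tool_calls validator parses the now-complete JSON args.
print(merged.tool_calls)  # [{'name': 'get_weather', 'args': {'city': 'Paris'}, ...}]
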
def add_ai_message_chunks(
    left: AIMessageChunk, *others: AIMessageChunk
) -> AIMessageChunk:
    """Add multiple AIMessageChunks together."""
    if any(left.example != o.example for o in others):
        msg = "Cannot concatenate AIMessageChunks with different example values."
        raise ValueError(msg)

    content = merge_content(left.content, *(o.content for o in others))
    additional_kwargs = merge_dicts(
        left.additional_kwargs, *(o.additional_kwargs for o in others)
    )
    response_metadata = merge_dicts(
        left.response_metadata, *(o.response_metadata for o in others)
    )

    # Merge tool call chunks
    if raw_tool_calls := merge_lists(
        left.tool_call_chunks, *(o.tool_call_chunks for o in others)
    ):
        tool_call_chunks = [
            create_tool_call_chunk(
                name=rtc.get("name"),
                args=rtc.get("args"),
                index=rtc.get("index"),
                id=rtc.get("id"),
            )
            for rtc in raw_tool_calls
        ]
    else:
        tool_call_chunks = []

    # Token usage
    if left.usage_metadata or any(o.usage_metadata is not None for o in others):
        usage_metadata: Optional[UsageMetadata] = left.usage_metadata
        for other in others:
            usage_metadata = add_usage(usage_metadata, other.usage_metadata)
    else:
        usage_metadata = None

    id = None
    candidates = [left.id] + [o.id for o in others]
    # first pass: pick the first non-run-* id
    for id_ in candidates:
        if id_ and not id_.startswith(_LC_ID_PREFIX):
            id = id_
            break
    else:
        # second pass: no provider-assigned id found, just take the first non-null
        for id_ in candidates:
            if id_:
                id = id_
                break

    return left.__class__(
        example=left.example,
        content=content,
        additional_kwargs=additional_kwargs,
        tool_call_chunks=tool_call_chunks,
        response_metadata=response_metadata,
        usage_metadata=usage_metadata,
        id=id,
    )

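# Usage sketch (illustrative, not part of the module source):
# ``add_ai_message_chunks`` accepts any number of chunks, which is what
# ``chunk + [chunk2, chunk3]`` dispatches to; usage metadata is summed with
# ``add_usage``. The contents and token counts are made up.
from langchain_core.messages import AIMessageChunk
from langchain_core.messages.ai import add_ai_message_chunks

chunks = [
    AIMessageChunk(content="Hello, "),
    AIMessageChunk(content="world"),
    AIMessageChunk(
        content="!",
        usage_metadata={"input_tokens": 3, "output_tokens": 7, "total_tokens": 10},
    ),
]
merged = add_ai_message_chunks(chunks[0], *chunks[1:])
print(merged.content)         # Hello, world!
print(merged.usage_metadata)  # {'input_tokens': 3, 'output_tokens': 7, 'total_tokens': 10}
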
def add_usage(
    left: Optional[UsageMetadata], right: Optional[UsageMetadata]
) -> UsageMetadata:
    """Recursively add two UsageMetadata objects.

    Example:

        .. code-block:: python

            from langchain_core.messages.ai import add_usage

            left = UsageMetadata(
                input_tokens=5,
                output_tokens=0,
                total_tokens=5,
                input_token_details=InputTokenDetails(cache_read=3),
            )
            right = UsageMetadata(
                input_tokens=0,
                output_tokens=10,
                total_tokens=10,
                output_token_details=OutputTokenDetails(reasoning=4),
            )

            add_usage(left, right)

        results in

        .. code-block:: python

            UsageMetadata(
                input_tokens=5,
                output_tokens=10,
                total_tokens=15,
                input_token_details=InputTokenDetails(cache_read=3),
                output_token_details=OutputTokenDetails(reasoning=4),
            )
    """
    if not (left or right):
        return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
    if not (left and right):
        return cast("UsageMetadata", left or right)
    return UsageMetadata(
        **cast(
            "UsageMetadata",
            _dict_int_op(
                cast("dict", left),
                cast("dict", right),
                operator.add,
            ),
        )
    )

def subtract_usage(
    left: Optional[UsageMetadata], right: Optional[UsageMetadata]
) -> UsageMetadata:
    """Recursively subtract two UsageMetadata objects.

    Token counts cannot be negative, so the actual operation is max(left - right, 0).

    Example:

        .. code-block:: python

            from langchain_core.messages.ai import subtract_usage

            left = UsageMetadata(
                input_tokens=5,
                output_tokens=10,
                total_tokens=15,
                input_token_details=InputTokenDetails(cache_read=4),
            )
            right = UsageMetadata(
                input_tokens=3,
                output_tokens=8,
                total_tokens=11,
                output_token_details=OutputTokenDetails(reasoning=4),
            )

            subtract_usage(left, right)

        results in

        .. code-block:: python

            UsageMetadata(
                input_tokens=2,
                output_tokens=2,
                total_tokens=4,
                input_token_details=InputTokenDetails(cache_read=4),
                output_token_details=OutputTokenDetails(reasoning=0),
            )
    """
    if not (left or right):
        return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
    if not (left and right):
        return cast("UsageMetadata", left or right)
    return UsageMetadata(
        **cast(
            "UsageMetadata",
            _dict_int_op(
                cast("dict", left),
                cast("dict", right),
                (lambda le, ri: max(le - ri, 0)),
            ),
        )
    )