Source code for langchain_community.chat_models.anthropic
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, cast

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.prompt_values import PromptValue
from pydantic import ConfigDict

from langchain_community.llms.anthropic import _AnthropicCommon


def _convert_one_message_to_text(
    message: BaseMessage,
    human_prompt: str,
    ai_prompt: str,
) -> str:
    """Render a single chat message as Anthropic-style prompt text.

    Args:
        message: The message to render.
        human_prompt: Tag prepended to human turns.
        ai_prompt: Tag prepended to assistant turns.

    Returns:
        The message content with the appropriate role tag prepended.

    Raises:
        ValueError: If the message type is not one of the known subclasses.
    """
    text = cast(str, message.content)
    # ChatMessage is checked first: arbitrary roles get a capitalized role tag.
    if isinstance(message, ChatMessage):
        return f"\n\n{message.role.capitalize()}: {text}"
    if isinstance(message, HumanMessage):
        return f"{human_prompt}{text}"
    if isinstance(message, AIMessage):
        return f"{ai_prompt}{text}"
    if isinstance(message, SystemMessage):
        # System messages carry no role tag at all.
        return text
    raise ValueError(f"Got unknown type {message}")
def convert_messages_to_prompt_anthropic(
    messages: List[BaseMessage],
    *,
    human_prompt: str = "\n\nHuman:",
    ai_prompt: str = "\n\nAssistant:",
) -> str:
    """Format a list of messages into a full prompt for the Anthropic model.

    Args:
        messages (List[BaseMessage]): List of BaseMessage to combine.
        human_prompt (str, optional): Human prompt tag. Defaults to "\\n\\nHuman:".
        ai_prompt (str, optional): AI prompt tag. Defaults to "\\n\\nAssistant:".

    Returns:
        str: Combined string with necessary human_prompt and ai_prompt tags.
    """
    messages = messages.copy()  # don't mutate the original list
    # The prompt must end with an assistant turn; append an empty one when the
    # last message isn't already an AIMessage. Checking for an empty list first
    # avoids an IndexError when no messages were supplied.
    if not messages or not isinstance(messages[-1], AIMessage):
        messages.append(AIMessage(content=""))
    text = "".join(
        _convert_one_message_to_text(message, human_prompt, ai_prompt)
        for message in messages
    )
    # trim off the trailing ' ' that might come from the "Assistant: " tag
    return text.rstrip()
@deprecated(
    since="0.0.28",
    removal="1.0",
    alternative_import="langchain_anthropic.ChatAnthropic",
)
class ChatAnthropic(BaseChatModel, _AnthropicCommon):
    """`Anthropic` chat large language models.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            import anthropic
            from langchain_community.chat_models import ChatAnthropic
            model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
    """

    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
    )

    @property
    def lc_secrets(self) -> Dict[str, str]:
        # Maps the constructor argument to the environment variable that
        # supplies it, so serialization can redact the secret.
        return {"anthropic_api_key": "ANTHROPIC_API_KEY"}

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "anthropic-chat"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "anthropic"]

    def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
        """Format a list of messages into a full prompt for the Anthropic model.

        Args:
            messages (List[BaseMessage]): List of BaseMessage to combine.

        Returns:
            str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
        """
        # Only forward the tags the anthropic client actually provides; the
        # helper's defaults cover any that are missing.
        tags: Dict[str, str] = {}
        if self.HUMAN_PROMPT:
            tags["human_prompt"] = self.HUMAN_PROMPT
        if self.AI_PROMPT:
            tags["ai_prompt"] = self.AI_PROMPT
        return convert_messages_to_prompt_anthropic(messages=messages, **tags)
[docs]defget_num_tokens(self,text:str)->int:"""Calculate number of tokens."""ifnotself.count_tokens:raiseNameError("Please ensure the anthropic package is loaded")returnself.count_tokens(text)