"""Anthropic LLM wrapper. Chat models are in chat_models.py."""from__future__importannotationsimportreimportwarningsfromcollections.abcimportAsyncIterator,Iterator,MappingfromtypingimportAny,Callable,Optionalimportanthropicfromlangchain_core._api.deprecationimportdeprecatedfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_modelsimportBaseLanguageModel,LangSmithParamsfromlangchain_core.language_models.llmsimportLLMfromlangchain_core.outputsimportGenerationChunkfromlangchain_core.prompt_valuesimportPromptValuefromlangchain_core.utilsimportget_pydantic_field_namesfromlangchain_core.utils.utilsimport_build_model_kwargs,from_env,secret_from_envfrompydanticimportConfigDict,Field,SecretStr,model_validatorfromtyping_extensionsimportSelfclass_AnthropicCommon(BaseLanguageModel):client:Any=None#: :meta private:async_client:Any=None#: :meta private:model:str=Field(default="claude-3-5-sonnet-latest",alias="model_name")"""Model name to use."""max_tokens:int=Field(default=1024,alias="max_tokens_to_sample")"""Denotes the number of tokens to predict per generation."""temperature:Optional[float]=None"""A non-negative float that tunes the degree of randomness in generation."""top_k:Optional[int]=None"""Number of most likely tokens to consider at each step."""top_p:Optional[float]=None"""Total probability mass of tokens to consider at each step."""streaming:bool=False"""Whether to stream the results."""default_request_timeout:Optional[float]=None"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""max_retries:int=2"""Number of retries allowed for requests sent to the Anthropic Completion API."""anthropic_api_url:Optional[str]=Field(alias="base_url",default_factory=from_env("ANTHROPIC_API_URL",default="https://api.anthropic.com",),)"""Base URL for API requests. Only specify if using a proxy or service emulator. If a value isn't passed in, will attempt to read the value from ``ANTHROPIC_API_URL``. If not set, the default value ``https://api.anthropic.com`` will be used. 
"""anthropic_api_key:SecretStr=Field(alias="api_key",default_factory=secret_from_env("ANTHROPIC_API_KEY",default=""),)"""Automatically read from env var ``ANTHROPIC_API_KEY`` if not provided."""HUMAN_PROMPT:Optional[str]=NoneAI_PROMPT:Optional[str]=Nonecount_tokens:Optional[Callable[[str],int]]=Nonemodel_kwargs:dict[str,Any]=Field(default_factory=dict)@model_validator(mode="before")@classmethoddefbuild_extra(cls,values:dict)->Any:all_required_field_names=get_pydantic_field_names(cls)return_build_model_kwargs(values,all_required_field_names)@model_validator(mode="after")defvalidate_environment(self)->Self:"""Validate that api key and python package exists in environment."""self.client=anthropic.Anthropic(base_url=self.anthropic_api_url,api_key=self.anthropic_api_key.get_secret_value(),timeout=self.default_request_timeout,max_retries=self.max_retries,)self.async_client=anthropic.AsyncAnthropic(base_url=self.anthropic_api_url,api_key=self.anthropic_api_key.get_secret_value(),timeout=self.default_request_timeout,max_retries=self.max_retries,)# Keep for backward compatibility but not used in Messages APIself.HUMAN_PROMPT=getattr(anthropic,"HUMAN_PROMPT",None)self.AI_PROMPT=getattr(anthropic,"AI_PROMPT",None)returnself@propertydef_default_params(self)->Mapping[str,Any]:"""Get the default parameters for calling Anthropic API."""d={"max_tokens":self.max_tokens,"model":self.model,}ifself.temperatureisnotNone:d["temperature"]=self.temperatureifself.top_kisnotNone:d["top_k"]=self.top_kifself.top_pisnotNone:d["top_p"]=self.top_preturn{**d,**self.model_kwargs}@propertydef_identifying_params(self)->Mapping[str,Any]:"""Get the identifying parameters."""return{**self._default_params}def_get_anthropic_stop(self,stop:Optional[list[str]]=None)->list[str]:ifstopisNone:stop=[]returnstop


class AnthropicLLM(LLM, _AnthropicCommon):
    """Anthropic large language model.

    To use, you should have the environment variable ``ANTHROPIC_API_KEY``
    set with your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_anthropic import AnthropicLLM

            model = AnthropicLLM()

    """

    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
    )

    @model_validator(mode="before")
    @classmethod
    def raise_warning(cls, values: dict) -> Any:
        """Raise a warning that this class is deprecated."""
        warnings.warn(
            "This Anthropic LLM is deprecated. "
            "Please use `from langchain_anthropic import ChatAnthropic` "
            "instead",
            stacklevel=2,
        )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "anthropic-llm"

    @property
    def lc_secrets(self) -> dict[str, str]:
        """Return a mapping of secret keys to environment variables."""
        return {"anthropic_api_key": "ANTHROPIC_API_KEY"}

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Whether this class can be serialized by langchain."""
        return True

    @property
    def _identifying_params(self) -> dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "model_kwargs": self.model_kwargs,
            "streaming": self.streaming,
            "default_request_timeout": self.default_request_timeout,
            "max_retries": self.max_retries,
        }

    def _get_ls_params(
        self,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = super()._get_ls_params(stop=stop, **kwargs)
        identifying_params = self._identifying_params
        if max_tokens := kwargs.get(
            "max_tokens",
            identifying_params.get("max_tokens"),
        ):
            params["ls_max_tokens"] = max_tokens
        return params

    def _format_messages(self, prompt: str) -> list[dict[str, str]]:
        """Convert prompt to Messages API format."""
        messages = []
        # Handle legacy prompts that might have HUMAN_PROMPT/AI_PROMPT markers
        if self.HUMAN_PROMPT and self.HUMAN_PROMPT in prompt:
            # Split on human/assistant turns
            parts = prompt.split(self.HUMAN_PROMPT)
            for part in parts:
                if not part.strip():
                    continue
                if self.AI_PROMPT and self.AI_PROMPT in part:
                    # Split human and assistant parts
                    human_part, assistant_part = part.split(self.AI_PROMPT, 1)
                    if human_part.strip():
                        messages.append(
                            {"role": "user", "content": human_part.strip()}
                        )
                    if assistant_part.strip():
                        messages.append(
                            {"role": "assistant", "content": assistant_part.strip()}
                        )
                # Just human content
                elif part.strip():
                    messages.append({"role": "user", "content": part.strip()})
        else:
            # Handle modern format or plain text
            # Clean prompt for Messages API
            content = re.sub(r"^\n*Human:\s*", "", prompt)
            content = re.sub(r"\n*Assistant:\s*.*$", "", content)
            if content.strip():
                messages.append({"role": "user", "content": content.strip()})
        # Ensure we have at least one message
        if not messages:
            messages = [{"role": "user", "content": prompt.strip() or "Hello"}]
        return messages

    def _call(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        r"""Call out to Anthropic's completion endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager for LLM run.
            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:" response = model.invoke(prompt) """ifself.streaming:completion=""forchunkinself._stream(prompt=prompt,stop=stop,run_manager=run_manager,**kwargs,):completion+=chunk.textreturncompletionstop=self._get_anthropic_stop(stop)params={**self._default_params,**kwargs}# Remove parameters not supported by Messages APIparams={k:vfork,vinparams.items()ifk!="max_tokens_to_sample"}response=self.client.messages.create(messages=self._format_messages(prompt),stop_sequences=stopifstopelseNone,**params,)returnresponse.content[0].text

    def convert_prompt(self, prompt: PromptValue) -> str:
        """Convert a ``PromptValue`` to a string."""
        return prompt.to_string()
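
    # For example, with ``StringPromptValue`` from ``langchain_core.prompt_values``:
    #
    #     from langchain_core.prompt_values import StringPromptValue
    #
    #     llm.convert_prompt(StringPromptValue(text="Hi"))  # -> "Hi"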

    async def _acall(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Anthropic's completion endpoint asynchronously."""
        if self.streaming:
            completion = ""
            async for chunk in self._astream(
                prompt=prompt,
                stop=stop,
                run_manager=run_manager,
                **kwargs,
            ):
                completion += chunk.text
            return completion

        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}
        # Remove parameters not supported by Messages API
        params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}

        response = await self.async_client.messages.create(
            messages=self._format_messages(prompt),
            stop_sequences=stop if stop else None,
            **params,
        )
        return response.content[0].text

    def _stream(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        r"""Call Anthropic completion_stream and return the resulting generator.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager for LLM run.
            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token

        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}
        # Remove parameters not supported by Messages API
        params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}

        with self.client.messages.stream(
            messages=self._format_messages(prompt),
            stop_sequences=stop if stop else None,
            **params,
        ) as stream:
            for event in stream:
                if event.type == "content_block_delta" and hasattr(
                    event.delta, "text"
                ):
                    chunk = GenerationChunk(text=event.delta.text)
                    if run_manager:
                        run_manager.on_llm_new_token(chunk.text, chunk=chunk)
                    yield chunk

    async def _astream(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        r"""Call Anthropic completion_stream and return the resulting generator.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager for LLM run.
            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token

        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}
        # Remove parameters not supported by Messages API
        params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}

        async with self.async_client.messages.stream(
            messages=self._format_messages(prompt),
            stop_sequences=stop if stop else None,
            **params,
        ) as stream:
            async for event in stream:
                if event.type == "content_block_delta" and hasattr(
                    event.delta, "text"
                ):
                    chunk = GenerationChunk(text=event.delta.text)
                    if run_manager:
                        await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
                    yield chunk
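
    # A minimal streaming sketch via the public ``stream`` API, which drives
    # ``_stream`` under the hood (prompt and output are illustrative):
    #
    #     llm = AnthropicLLM()
    #     for token in llm.stream("Write a poem about a stream."):
    #         print(token, end="", flush=True)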

    def get_num_tokens(self, text: str) -> int:
        """Calculate number of tokens."""
        msg = (
            "Anthropic's legacy count_tokens method was removed in anthropic 0.39.0 "
            "and langchain-anthropic 0.3.0. Please use "
            "ChatAnthropic.get_num_tokens_from_messages instead."
        )
        raise NotImplementedError(msg)
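
    # Token counting now lives on the chat model; a sketch of the replacement
    # call (model name and message content are illustrative):
    #
    #     from langchain_anthropic import ChatAnthropic
    #     from langchain_core.messages import HumanMessage
    #
    #     chat = ChatAnthropic(model="claude-3-5-sonnet-latest")
    #     chat.get_num_tokens_from_messages([HumanMessage("What is 2 + 2?")])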


@deprecated(since="0.1.0", removal="1.0.0", alternative="AnthropicLLM")
class Anthropic(AnthropicLLM):
    """Anthropic large language model."""
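

# Migration sketch, per the deprecation notices above (model name and prompt
# are illustrative):
#
#     from langchain_anthropic import ChatAnthropic
#
#     chat = ChatAnthropic(model="claude-3-5-sonnet-latest")
#     chat.invoke("What are the biggest risks facing humanity?")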