Source code for langchain_community.llms.anthropic
import re
import warnings
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
)

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.prompt_values import PromptValue
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
    check_package_version,
    get_from_dict_or_env,
    get_pydantic_field_names,
    pre_init,
)
from langchain_core.utils.utils import build_extra_kwargs, convert_to_secret_str


class _AnthropicCommon(BaseLanguageModel):
    client: Any = None  #: :meta private:
    async_client: Any = None  #: :meta private:
    model: str = Field(default="claude-2", alias="model_name")
    """Model name to use."""

    max_tokens_to_sample: int = Field(default=256, alias="max_tokens")
    """Denotes the number of tokens to predict per generation."""

    temperature: Optional[float] = None
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: Optional[int] = None
    """Number of most likely tokens to consider at each step."""

    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""

    streaming: bool = False
    """Whether to stream the results."""

    default_request_timeout: Optional[float] = None
    """Timeout for requests to the Anthropic Completion API. Default is 600 seconds."""

    max_retries: int = 2
    """Number of retries allowed for requests sent to the Anthropic Completion API."""

    anthropic_api_url: Optional[str] = None

    anthropic_api_key: Optional[SecretStr] = None

    HUMAN_PROMPT: Optional[str] = None
    AI_PROMPT: Optional[str] = None
    count_tokens: Optional[Callable[[str], int]] = None
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    @root_validator(pre=True)
    def build_extra(cls, values: Dict) -> Dict:
        extra = values.get("model_kwargs", {})
        all_required_field_names = get_pydantic_field_names(cls)
        values["model_kwargs"] = build_extra_kwargs(
            extra, values, all_required_field_names
        )
        return values

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        values["anthropic_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "anthropic_api_key", "ANTHROPIC_API_KEY")
        )
        # Get the custom api url from the environment.
        values["anthropic_api_url"] = get_from_dict_or_env(
            values,
            "anthropic_api_url",
            "ANTHROPIC_API_URL",
            default="https://api.anthropic.com",
        )

        try:
            import anthropic

            check_package_version("anthropic", gte_version="0.3")
            values["client"] = anthropic.Anthropic(
                base_url=values["anthropic_api_url"],
                api_key=values["anthropic_api_key"].get_secret_value(),
                timeout=values["default_request_timeout"],
                max_retries=values["max_retries"],
            )
            values["async_client"] = anthropic.AsyncAnthropic(
                base_url=values["anthropic_api_url"],
                api_key=values["anthropic_api_key"].get_secret_value(),
                timeout=values["default_request_timeout"],
                max_retries=values["max_retries"],
            )
            values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
            values["AI_PROMPT"] = anthropic.AI_PROMPT
            values["count_tokens"] = values["client"].count_tokens
        except ImportError:
            raise ImportError(
                "Could not import anthropic python package. "
                "Please install it with `pip install anthropic`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling the Anthropic API."""
        d = {
            "max_tokens_to_sample": self.max_tokens_to_sample,
            "model": self.model,
        }
        if self.temperature is not None:
            d["temperature"] = self.temperature
        if self.top_k is not None:
            d["top_k"] = self.top_k
        if self.top_p is not None:
            d["top_p"] = self.top_p
        return {**d, **self.model_kwargs}

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**self._default_params}

    def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if stop is None:
            stop = []

        # Never want the model to invent new turns of Human / Assistant dialog.
        stop.extend([self.HUMAN_PROMPT])

        return stop
@deprecated(
    since="0.0.28",
    removal="1.0",
    alternative_import="langchain_anthropic.AnthropicLLM",
)
class Anthropic(LLM, _AnthropicCommon):
    """Anthropic large language models.

    To use, you should have the ``anthropic`` python package installed, and
    the environment variable ``ANTHROPIC_API_KEY`` set with your API key, or
    pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            import anthropic
            from langchain_community.llms import Anthropic

            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")

            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
            # and AI_PROMPT.
            response = model.invoke("What are the biggest risks facing humanity?")

            # Or if you want to use the chat mode, build a few-shot prompt, or
            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
            raw_prompt = "What are the biggest risks facing humanity?"
            prompt = f"{anthropic.HUMAN_PROMPT} {raw_prompt}{anthropic.AI_PROMPT}"
            response = model.invoke(prompt)
    """

    class Config:
        allow_population_by_field_name = True
        arbitrary_types_allowed = True

    @pre_init
    def raise_warning(cls, values: Dict) -> Dict:
        """Raise a warning that this class is deprecated."""
        warnings.warn(
            "This Anthropic LLM is deprecated. "
            "Please use `from langchain_community.chat_models import ChatAnthropic` "
            "instead"
        )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "anthropic-llm"

    def _wrap_prompt(self, prompt: str) -> str:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if prompt.startswith(self.HUMAN_PROMPT):
            return prompt  # Already wrapped.

        # Guard against common errors in specifying the wrong number of newlines.
        corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
        if n_subs == 1:
            return corrected_prompt

        # As a last resort, wrap the prompt ourselves to emulate instruct-style.
        return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        r"""Call out to Anthropic's completion endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "What are the biggest risks facing humanity?"
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                response = model.invoke(prompt)
        """
        if self.streaming:
            completion = ""
            for chunk in self._stream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                completion += chunk.text
            return completion

        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}
        response = self.client.completions.create(
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **params,
        )
        return response.completion
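
    # A hedged usage sketch for the blocking path above (illustrative only, not
    # part of the original module; assumes ANTHROPIC_API_KEY is set and network
    # access is available, and the model name is an assumption):
    #
    #     model = Anthropic(model="claude-2", max_tokens_to_sample=512)
    #     # Extra stop sequences are merged with HUMAN_PROMPT by
    #     # `_get_anthropic_stop`, so the model cannot invent a new Human turn.
    #     text = model.invoke(
    #         "What are the biggest risks facing humanity?",
    #         stop=["<END>"],  # hypothetical extra stop sequence
    #     )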
    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Anthropic's completion endpoint asynchronously."""
        if self.streaming:
            completion = ""
            async for chunk in self._astream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                completion += chunk.text
            return completion

        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}

        response = await self.async_client.completions.create(
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **params,
        )
        return response.completion

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        r"""Call Anthropic completion_stream and return the resulting generator.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                for token in model.stream(prompt):
                    print(token, end="")
        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}

        for token in self.client.completions.create(
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            stream=True,
            **params,
        ):
            chunk = GenerationChunk(text=token.completion)

            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        r"""Call Anthropic completion_stream and return the resulting generator.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                async for token in model.astream(prompt):
                    print(token, end="")
        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}

        async for token in await self.async_client.completions.create(
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            stream=True,
            **params,
        ):
            chunk = GenerationChunk(text=token.completion)
            if run_manager:
                await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk
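
    # A hedged streaming sketch for the generators above (illustrative only;
    # assumes a configured API key). The public `stream`/`astream` methods on
    # an LLM yield plain strings, while the private methods above yield
    # `GenerationChunk`s:
    #
    #     model = Anthropic(model="claude-2")
    #     for token in model.stream("Write a poem about a stream."):
    #         print(token, end="", flush=True)
    #
    #     # Async variant:
    #     import asyncio
    #
    #     async def main() -> None:
    #         async for token in model.astream("Write a poem about a stream."):
    #             print(token, end="", flush=True)
    #
    #     asyncio.run(main())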
    def get_num_tokens(self, text: str) -> int:
        """Calculate number of tokens."""
        if not self.count_tokens:
            raise NameError("Please ensure the anthropic package is loaded")
        return self.count_tokens(text)
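
    # A hedged sketch of token counting (illustrative only; `count_tokens` is
    # injected from the `anthropic` SDK client during `validate_environment`,
    # so the package and an API key must be available):
    #
    #     model = Anthropic()
    #     n = model.get_num_tokens("How many tokens is this sentence?")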