Source code for langchain_community.chat_models.anyscale
"""Anyscale Endpoints chat wrapper. Relies heavily on ChatOpenAI."""from__future__importannotationsimportloggingimportosimportsysimportwarningsfromtypingimport(TYPE_CHECKING,Any,Callable,Dict,Optional,Sequence,Set,Type,Union,)importrequestsfromlangchain_core.messagesimportBaseMessagefromlangchain_core.toolsimportBaseToolfromlangchain_core.utilsimportconvert_to_secret_str,get_from_dict_or_envfrompydanticimportField,SecretStr,model_validatorfromlangchain_community.adapters.openaiimportconvert_message_to_dictfromlangchain_community.chat_models.openaiimport(ChatOpenAI,_import_tiktoken,)fromlangchain_community.utils.openaiimportis_openai_v1ifTYPE_CHECKING:importtiktokenlogger=logging.getLogger(__name__)DEFAULT_API_BASE="https://api.endpoints.anyscale.com/v1"DEFAULT_MODEL="meta-llama/Meta-Llama-3-8B-Instruct"
class ChatAnyscale(ChatOpenAI):
    """`Anyscale` Chat large language models.

    See https://www.anyscale.com/ for information about Anyscale.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``ANYSCALE_API_KEY`` set with your API key.
    Alternatively, you can use the anyscale_api_key keyword argument.

    Any parameters that are valid to be passed to the `openai.create` call
    can be passed in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatAnyscale

            chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
    """

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "anyscale-chat"

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"anyscale_api_key": "ANYSCALE_API_KEY"}

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False

    anyscale_api_key: SecretStr = Field(default=SecretStr(""))
    """AnyScale Endpoints API keys."""
    model_name: str = Field(default=DEFAULT_MODEL, alias="model")
    """Model name to use."""
    anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
    """Base URL path for API requests,
    leave blank if not using a proxy or service emulator."""
    anyscale_proxy: Optional[str] = None
    """To support explicit proxy for Anyscale."""
    available_models: Optional[Set[str]] = None
    """Available models from Anyscale API."""
    @staticmethod
    def get_available_models(
        anyscale_api_key: Optional[str] = None,
        anyscale_api_base: str = DEFAULT_API_BASE,
    ) -> Set[str]:
        """Get available models from Anyscale API."""
        try:
            anyscale_api_key = anyscale_api_key or os.environ["ANYSCALE_API_KEY"]
        except KeyError as e:
            raise ValueError(
                "Anyscale API key must be passed as keyword argument or "
                "set in environment variable ANYSCALE_API_KEY.",
            ) from e

        models_url = f"{anyscale_api_base}/models"
        models_response = requests.get(
            models_url,
            headers={
                "Authorization": f"Bearer {anyscale_api_key}",
            },
        )

        if models_response.status_code != 200:
            raise ValueError(
                f"Error getting models from {models_url}: "
                f"{models_response.status_code}",
            )

        return {model["id"] for model in models_response.json()["data"]}
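    # Usage sketch, assuming a valid ANYSCALE_API_KEY is set in the
    # environment (the model IDs shown in the comment are illustrative only):
    #
    #     from langchain_community.chat_models import ChatAnyscale
    #
    #     models = ChatAnyscale.get_available_models()
    #     print(models)  # e.g. {"meta-llama/Meta-Llama-3-8B-Instruct", ...}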
@model_validator(mode="before")@classmethoddefvalidate_environment(cls,values:dict)->Any:"""Validate that api key and python package exists in environment."""values["anyscale_api_key"]=convert_to_secret_str(get_from_dict_or_env(values,"anyscale_api_key","ANYSCALE_API_KEY",))values["anyscale_api_base"]=get_from_dict_or_env(values,"anyscale_api_base","ANYSCALE_API_BASE",default=DEFAULT_API_BASE,)values["openai_proxy"]=get_from_dict_or_env(values,"anyscale_proxy","ANYSCALE_PROXY",default="",)try:importopenaiexceptImportErrorase:raiseImportError("Could not import openai python package. ""Please install it with `pip install openai`.",)frometry:ifis_openai_v1():client_params={"api_key":values["anyscale_api_key"].get_secret_value(),"base_url":values["anyscale_api_base"],# To do: future support# "organization": values["openai_organization"],# "timeout": values["request_timeout"],# "max_retries": values["max_retries"],# "default_headers": values["default_headers"],# "default_query": values["default_query"],# "http_client": values["http_client"],}ifnotvalues.get("client"):values["client"]=openai.OpenAI(**client_params).chat.completionsifnotvalues.get("async_client"):values["async_client"]=openai.AsyncOpenAI(**client_params).chat.completionselse:values["openai_api_base"]=values["anyscale_api_base"]values["openai_api_key"]=values["anyscale_api_key"].get_secret_value()values["client"]=openai.ChatCompletion# type: ignore[attr-defined]exceptAttributeErrorasexc:raiseValueError("`openai` has no `ChatCompletion` attribute, this is likely ""due to an old version of the openai package. Try upgrading it ""with `pip install --upgrade openai`.",)fromexcif"model_name"notinvalues.keys():values["model_name"]=DEFAULT_MODELmodel_name=values["model_name"]available_models=cls.get_available_models(values["anyscale_api_key"].get_secret_value(),values["anyscale_api_base"],)ifmodel_namenotinavailable_models:raiseValueError(f"Model name {model_name} not found in available models: "f"{available_models}.",)values["available_models"]=available_modelsreturnvaluesdef_get_encoding_model(self)->tuple[str,tiktoken.Encoding]:tiktoken_=_import_tiktoken()ifself.tiktoken_model_nameisnotNone:model=self.tiktoken_model_nameelse:model=self.model_name# Returns the number of tokens used by a list of messages.try:encoding=tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")exceptKeyError:logger.warning("Warning: model not found. Using cl100k_base encoding.")model="cl100k_base"encoding=tiktoken_.get_encoding(model)returnmodel,encoding
    def get_num_tokens_from_messages(
        self,
        messages: list[BaseMessage],
        tools: Optional[
            Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]
        ] = None,
    ) -> int:
        """Calculate num tokens with tiktoken package.

        Official documentation:
        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
        """
        if tools is not None:
            warnings.warn(
                "Counting tokens in tool schemas is not yet supported. "
                "Ignoring tools."
            )
        if sys.version_info[1] <= 7:
            return super().get_num_tokens_from_messages(messages)
        model, encoding = self._get_encoding_model()
        tokens_per_message = 3
        tokens_per_name = 1
        num_tokens = 0
        messages_dict = [convert_message_to_dict(m) for m in messages]
        for message in messages_dict:
            num_tokens += tokens_per_message
            for key, value in message.items():
                # Cast str(value) in case the message value is not a string
                # This occurs with function messages
                num_tokens += len(encoding.encode(str(value)))
                if key == "name":
                    num_tokens += tokens_per_name
        # every reply is primed with <im_start>assistant
        num_tokens += 3
        return num_tokens
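# Token-counting sketch: get_num_tokens_from_messages applies the fixed
# per-message and per-name overheads above, so counts are estimates tied to
# the tiktoken encoding chosen by _get_encoding_model. A minimal example,
# assuming a configured ChatAnyscale instance:
#
#     from langchain_core.messages import HumanMessage, SystemMessage
#
#     chat = ChatAnyscale()
#     messages = [
#         SystemMessage(content="You are a helpful assistant."),
#         HumanMessage(content="Hello!"),
#     ]
#     print(chat.get_num_tokens_from_messages(messages))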