Source code for langchain_community.embeddings.localai

from __future__ import annotations

import logging
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

from langchain_core.embeddings import Embeddings
from langchain_core.utils import (
    get_from_dict_or_env,
    get_pydantic_field_names,
    pre_init,
)
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]):
        import openai

        raise openai.error.APIError("LocalAI API returned an empty embedding")  # type: ignore[attr-defined]
    return response
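

# --- Illustrative sketch (not part of the module) ---------------------------
# The same tenacity policy in isolation: waits grow exponentially but are
# clamped to the 4-10 second window, and ``reraise=True`` surfaces the last
# exception once all attempts are exhausted. `_flaky_request` is a
# hypothetical stand-in for a transiently failing API call.
@retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _flaky_request() -> dict:
    raise ConnectionError("simulated transient failure")


# _flaky_request()  # retries up to 6 times (~4-10s apart), then re-raises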


def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        response = embeddings.client.create(**kwargs)
        return _check_response(response)

    return _embed_with_retry(**kwargs)


async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""

    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        response = await embeddings.client.acreate(**kwargs)
        return _check_response(response)

    return await _async_embed_with_retry(**kwargs)
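

# --- Illustrative sketch (not part of the module) ---------------------------
# The retry loop behind `_async_retry_decorator`: each pass through the
# ``async for`` is one attempt. The canonical tenacity form wraps the body in
# ``with attempt:`` so a raised exception is recorded and triggers the next
# attempt, while returning from inside the loop ends it. The attempt counter
# is hypothetical demo state.
async def _async_retry_demo() -> str:
    attempts = 0
    async for attempt in AsyncRetrying(reraise=True, stop=stop_after_attempt(3)):
        with attempt:
            attempts += 1
            if attempts < 3:
                raise ConnectionError("simulated transient failure")
            return f"succeeded on attempt {attempts}"
    raise AssertionError("this is unreachable")


# asyncio.run(_async_retry_demo())  # -> "succeeded on attempt 3"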


class LocalAIEmbeddings(BaseModel, Embeddings):
    """LocalAI embedding models.

    Since LocalAI and OpenAI have 1:1 compatibility between APIs, this class
    uses the ``openai`` Python package's ``openai.Embedding`` as its client.
    Thus, you should have the ``openai`` Python package installed, and set the
    environment variable ``OPENAI_API_KEY`` to a random string. You also need
    to specify ``OPENAI_API_BASE`` to point to your LocalAI service endpoint.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LocalAIEmbeddings

            openai = LocalAIEmbeddings(
                openai_api_key="random-string",
                openai_api_base="http://localhost:8080"
            )

    """

    client: Any = None  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model
    openai_api_version: Optional[str] = None
    openai_api_base: Optional[str] = None
    # to support explicit proxy for LocalAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the LocalAI request."""
    headers: Any = None
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values
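
    # --- Illustrative sketch (not part of the class) ------------------------
    # `build_extra` routes any constructor kwarg that is not a declared field
    # into ``model_kwargs``, with a warning. Assumes the ``openai`` package is
    # installed; the endpoint URL and the ``user`` parameter are hypothetical:
    #
    #     emb = LocalAIEmbeddings(
    #         openai_api_key="random-string",
    #         openai_api_base="http://localhost:8080",
    #         user="demo",  # unknown field -> warned, moved to model_kwargs
    #     )
    #     assert emb.model_kwargs == {"user": "demo"}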

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            values["client"] = openai.Embedding  # type: ignore[attr-defined]
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values
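
    # --- Illustrative sketch (not part of the class) ------------------------
    # `validate_environment` falls back to environment variables, so the
    # client can be configured with no constructor arguments at all. LocalAI
    # ignores the key's value, but one must be set; the URL is hypothetical:
    #
    #     import os
    #     os.environ["OPENAI_API_KEY"] = "random-string"
    #     os.environ["OPENAI_API_BASE"] = "http://localhost:8080"
    #     embeddings = LocalAIEmbeddings()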

    @property
    def _invocation_params(self) -> Dict:
        openai_args = {
            "model": self.model,
            "request_timeout": self.request_timeout,
            "headers": self.headers,
            "api_key": self.openai_api_key,
            "organization": self.openai_organization,
            "api_base": self.openai_api_base,
            "api_version": self.openai_api_version,
            **self.model_kwargs,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {  # type: ignore[attr-defined]
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]
        return openai_args

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return embed_with_retry(
            self,
            input=[text],
            **self._invocation_params,
        )["data"][0]["embedding"]

    async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return (
            await async_embed_with_retry(
                self,
                input=[text],
                **self._invocation_params,
            )
        )["data"][0]["embedding"]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # call _embedding_func for each text
        return [self._embedding_func(text, engine=self.deployment) for text in texts]
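
    # --- Illustrative sketch (not part of the class) ------------------------
    # Each input text is embedded with its own request, yielding one vector
    # per text; ``embeddings`` refers to a configured instance as above:
    #
    #     vectors = embeddings.embed_documents(
    #         ["LocalAI serves models locally.", "It mirrors the OpenAI API."]
    #     )
    #     assert len(vectors) == 2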

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = []
        for text in texts:
            response = await self._aembedding_func(text, engine=self.deployment)
            embeddings.append(response)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = await self._aembedding_func(text, engine=self.deployment)
        return embedding
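

# --- Illustrative sketch (not part of the module) ---------------------------
# End-to-end usage against a LocalAI instance. The endpoint URL is
# hypothetical, and the ``openai`` package must be installed.
if __name__ == "__main__":
    import asyncio

    embeddings = LocalAIEmbeddings(
        openai_api_key="random-string",
        openai_api_base="http://localhost:8080",
    )
    query_vector = embeddings.embed_query("What is LocalAI?")
    doc_vectors = asyncio.run(
        embeddings.aembed_documents(["LocalAI serves models locally."])
    )
    print(len(query_vector), len(doc_vectors))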