import importlib.util
from typing import Any, Dict, List, Optional

from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict


class QuantizedBgeEmbeddings(BaseModel, Embeddings):
    """Leverage Itrex runtime to unlock the performance of compressed NLP models.

    Please ensure that you have installed intel-extension-for-transformers.

    Input:
        model_name: str = Model name.
        max_seq_len: int = The maximum sequence length for tokenization. (default 512)
        pooling_strategy: str =
            "mean" or "cls", pooling strategy for the final layer. (default "mean")
        query_instruction: Optional[str] =
            An instruction to add to the query before embedding. (default None)
        document_instruction: Optional[str] =
            An instruction to add to each document before embedding. (default None)
        padding: bool =
            Whether to add padding during tokenization or not. (default True)
        model_kwargs: Optional[Dict] =
            Parameters to add to the model during initialization. (default {})
        encode_kwargs: Optional[Dict] =
            Parameters to add during the embedding forward pass. (default {})
        onnx_file_name: Optional[str] =
            File name of the onnx-optimized model exported by itrex.
            (default "int8-model.onnx")

    Example:
        .. code-block:: python

            from langchain_community.embeddings import QuantizedBgeEmbeddings

            model_name = "Intel/bge-small-en-v1.5-sts-int8-static-inc"
            encode_kwargs = {'normalize_embeddings': True}
            hf = QuantizedBgeEmbeddings(
                model_name,
                encode_kwargs=encode_kwargs,
                query_instruction="Represent this sentence for searching relevant passages: "
            )
    """  # noqa: E501

    def __init__(
        self,
        model_name: str,
        *,
        max_seq_len: int = 512,
        pooling_strategy: str = "mean",  # "mean" or "cls"
        query_instruction: Optional[str] = None,
        document_instruction: Optional[str] = None,
        padding: bool = True,
        model_kwargs: Optional[Dict] = None,
        encode_kwargs: Optional[Dict] = None,
        onnx_file_name: Optional[str] = "int8-model.onnx",
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        # check intel_extension_for_transformers python package
        if importlib.util.find_spec("intel_extension_for_transformers") is None:
            raise ImportError(
                "Could not import intel_extension_for_transformers python package. "
                "Please install it with "
                "`pip install -U intel-extension-for-transformers`."
            )
        # check torch python package
        if importlib.util.find_spec("torch") is None:
            raise ImportError(
                "Could not import torch python package. "
                "Please install it with `pip install -U torch`."
            )
        # check onnx python package
        if importlib.util.find_spec("onnx") is None:
            raise ImportError(
                "Could not import onnx python package. "
                "Please install it with `pip install -U onnx`."
            )
        self.model_name_or_path = model_name
        self.max_seq_len = max_seq_len
        self.pooling = pooling_strategy
        self.padding = padding
        self.encode_kwargs = encode_kwargs or {}
        self.model_kwargs = model_kwargs or {}

        self.normalize = self.encode_kwargs.get("normalize_embeddings", False)
        self.batch_size = self.encode_kwargs.get("batch_size", 32)

        self.query_instruction = query_instruction
        self.document_instruction = document_instruction
        self.onnx_file_name = onnx_file_name
        self.load_model()
    model_config = ConfigDict(
        extra="allow",
        protected_namespaces=(),
    )

    def _embed(self, inputs: Any) -> Any:
        import torch

        engine_input = list(inputs.values())
        outputs = self.transformer_model.generate(engine_input)
        if "last_hidden_state:0" in outputs:
            last_hidden_state = outputs["last_hidden_state:0"]
        else:
            last_hidden_state = next(iter(outputs.values()))
        last_hidden_state = torch.tensor(last_hidden_state).reshape(
            inputs["input_ids"].shape[0],
            inputs["input_ids"].shape[1],
            self.hidden_size,
        )
        if self.pooling == "mean":
            emb = self._mean_pooling(last_hidden_state, inputs["attention_mask"])
        elif self.pooling == "cls":
            emb = self._cls_pooling(last_hidden_state)
        else:
            raise ValueError("pooling method not supported")

        if self.normalize:
            emb = torch.nn.functional.normalize(emb, p=2, dim=1)
        return emb

    @staticmethod
    def _cls_pooling(last_hidden_state: Any) -> Any:
        # Use the first ([CLS]) token embedding as the sentence embedding.
        return last_hidden_state[:, 0]

    @staticmethod
    def _mean_pooling(last_hidden_state: Any, attention_mask: Any) -> Any:
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        # Average the token embeddings, masking out padding positions.
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
        )
        sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return sum_embeddings / sum_mask

    def _embed_text(self, texts: List[str]) -> List[List[float]]:
        inputs = self.transformer_tokenizer(
            texts,
            max_length=self.max_seq_len,
            truncation=True,
            padding=self.padding,
            return_tensors="pt",
        )
        return self._embed(inputs).tolist()
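    # A minimal sketch of what `_mean_pooling` above computes, assuming a toy
    # 1 x 3 x 2 hidden state where the third token is padding (the tensor
    # values and shapes here are illustrative only):
    #
    #   import torch
    #   hidden = torch.tensor([[[1.0, 2.0], [3.0, 4.0], [9.0, 9.0]]])
    #   mask = torch.tensor([[1, 1, 0]])
    #   expanded = mask.unsqueeze(-1).expand(hidden.size()).float()
    #   pooled = (hidden * expanded).sum(1) / expanded.sum(1).clamp(min=1e-9)
    #   # pooled == tensor([[2.0, 3.0]]): the mean of the two unmasked tokens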
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of text documents using the Optimized Embedder model.

        Input:
            texts: List[str] = List of text documents to embed.
        Output:
            List[List[float]] = The embeddings of each text document.
        """
        try:
            import pandas as pd
        except ImportError as e:
            raise ImportError(
                "Unable to import pandas, please install with `pip install -U pandas`."
            ) from e
        docs = [
            self.document_instruction + d if self.document_instruction else d
            for d in texts
        ]

        # group into batches
        text_list_df = pd.DataFrame(docs, columns=["texts"]).reset_index()

        # assign each example with its batch
        text_list_df["batch_index"] = text_list_df["index"] // self.batch_size

        # create groups
        batches = list(text_list_df.groupby(["batch_index"])["texts"].apply(list))

        vectors = []
        for batch in batches:
            vectors += self._embed_text(batch)
        return vectors
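    # A minimal usage sketch for the batching above (the model name mirrors the
    # class docstring; the batch_size value and sample texts are illustrative):
    #
    #   embedder = QuantizedBgeEmbeddings(
    #       "Intel/bge-small-en-v1.5-sts-int8-static-inc",
    #       encode_kwargs={"normalize_embeddings": True, "batch_size": 2},
    #   )
    #   vectors = embedder.embed_documents(["doc one", "doc two", "doc three"])
    #   # returns three embeddings, computed in two batches (sizes 2 and 1)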