Source code for langchain_community.chat_models.jinachat
"""JinaChat wrapper."""from__future__importannotationsimportloggingfromtypingimport(Any,AsyncIterator,Callable,Dict,Iterator,List,Mapping,Optional,Tuple,Type,Union,)fromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_models.chat_modelsimport(BaseChatModel,agenerate_from_stream,generate_from_stream,)fromlangchain_core.messagesimport(AIMessage,AIMessageChunk,BaseMessage,BaseMessageChunk,ChatMessage,ChatMessageChunk,FunctionMessage,HumanMessage,HumanMessageChunk,SystemMessage,SystemMessageChunk,)fromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.utilsimport(convert_to_secret_str,get_from_dict_or_env,get_pydantic_field_names,pre_init,)frompydanticimportConfigDict,Field,SecretStr,model_validatorfromtenacityimport(before_sleep_log,retry,retry_if_exception_type,stop_after_attempt,wait_exponential,)logger=logging.getLogger(__name__)def_create_retry_decorator(llm:JinaChat)->Callable[[Any],Any]:importopenaimin_seconds=1max_seconds=60# Wait 2^x * 1 second between each retry starting with# 4 seconds, then up to 10 seconds, then 10 seconds afterwardsreturnretry(reraise=True,stop=stop_after_attempt(llm.max_retries),wait=wait_exponential(multiplier=1,min=min_seconds,max=max_seconds),retry=(retry_if_exception_type(openai.error.Timeout)# type: ignore[attr-defined]|retry_if_exception_type(openai.error.APIError)# type: ignore[attr-defined]|retry_if_exception_type(openai.error.APIConnectionError)# type: ignore[attr-defined]|retry_if_exception_type(openai.error.RateLimitError)# type: ignore[attr-defined]|retry_if_exception_type(openai.error.ServiceUnavailableError)# type: ignore[attr-defined]),before_sleep=before_sleep_log(logger,logging.WARNING),)
async def acompletion_with_retry(llm: JinaChat, **kwargs: Any) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def _completion_with_retry(**kwargs: Any) -> Any:
        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
        return await llm.client.acreate(**kwargs)

    return await _completion_with_retry(**kwargs)
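# Illustrative sketch (not part of the module): awaiting the retry-wrapped
# async completion directly, as `_astream`/`_agenerate` do below. Assumes
# JINACHAT_API_KEY is set in the environment.
#
#     import asyncio
#
#     async def demo() -> None:
#         llm = JinaChat()
#         response = await acompletion_with_retry(
#             llm,
#             messages=[{"role": "user", "content": "Hello!"}],
#             **llm._invocation_params,
#         )
#         print(response["choices"][0]["message"]["content"])
#
#     asyncio.run(demo())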
def _convert_delta_to_message_chunk(
    _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
    role = _dict.get("role")
    content = _dict.get("content") or ""

    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    elif role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(content=content)
    elif role == "system" or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    elif role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
    else:
        return default_class(content=content)  # type: ignore[call-arg]


def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    role = _dict["role"]
    if role == "user":
        return HumanMessage(content=_dict["content"])
    elif role == "assistant":
        content = _dict["content"] or ""
        return AIMessage(content=content)
    elif role == "system":
        return SystemMessage(content=_dict["content"])
    else:
        return ChatMessage(content=_dict["content"], role=role)


def _convert_message_to_dict(message: BaseMessage) -> dict:
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, FunctionMessage):
        message_dict = {
            "role": "function",
            "name": message.name,
            "content": message.content,
        }
    else:
        raise ValueError(f"Got unknown type {message}")
    if "name" in message.additional_kwargs:
        message_dict["name"] = message.additional_kwargs["name"]
    return message_dict
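# Illustrative round trips for the converters above:
#
#     _convert_message_to_dict(HumanMessage(content="Hi"))
#     # {"role": "user", "content": "Hi"}
#
#     _convert_dict_to_message({"role": "assistant", "content": "Hello!"})
#     # AIMessage(content="Hello!")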
class JinaChat(BaseChatModel):
    """`Jina AI` Chat models API.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``JINACHAT_API_KEY`` set to your API key, which you
    can generate at https://chat.jina.ai/api.

    Any parameters that are valid to be passed to the openai.create call can
    be passed in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import JinaChat
            chat = JinaChat()
    """

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"jinachat_api_key": "JINACHAT_API_KEY"}

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    client: Any = None  #: :meta private:
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    jinachat_api_key: Optional[SecretStr] = None
    """JinaChat API key. If not passed in, it is read from the
    ``JINACHAT_API_KEY`` environment variable."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to JinaChat completion API. Default is 600 seconds."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    model_config = ConfigDict(
        populate_by_name=True,
    )

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                logger.warning(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values
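    # Illustrative sketch (not part of the module): `build_extra` routes any
    # constructor kwarg that is not a declared field into `model_kwargs`,
    # logging a warning. The `top_p` value here is a hypothetical example.
    #
    #     chat = JinaChat(temperature=0.2, top_p=0.9)
    #     chat.temperature   # 0.2 (declared field)
    #     chat.model_kwargs  # {"top_p": 0.9}, merged into the create call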
    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        values["jinachat_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "jinachat_api_key", "JINACHAT_API_KEY")
        )
        try:
            import openai

        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion  # type: ignore[attr-defined]
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        return values
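    # Illustrative sketch: the validator resolves the key from the constructor
    # or the JINACHAT_API_KEY environment variable and wraps it in a SecretStr.
    #
    #     chat = JinaChat(jinachat_api_key="my-key")
    #     chat.jinachat_api_key                     # SecretStr('**********')
    #     chat.jinachat_api_key.get_secret_value()  # "my-key"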
    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling JinaChat API."""
        return {
            "request_timeout": self.request_timeout,
            "max_tokens": self.max_tokens,
            "stream": self.streaming,
            "temperature": self.temperature,
            **self.model_kwargs,
        }

    def _create_retry_decorator(self) -> Callable[[Any], Any]:
        import openai

        min_seconds = 1
        max_seconds = 60
        # Retry with exponential backoff: wait 2^x * 1 second between attempts,
        # starting at 1 second and capped at 60 seconds.
        return retry(
            reraise=True,
            stop=stop_after_attempt(self.max_retries),
            wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
            retry=(
                retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
                | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
                | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
                | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
                | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
            ),
            before_sleep=before_sleep_log(logger, logging.WARNING),
        )
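    # Illustrative sketch: with no model_kwargs set, the defaults above yield
    #
    #     JinaChat(max_tokens=256)._default_params
    #     # {"request_timeout": None, "max_tokens": 256,
    #     #  "stream": False, "temperature": 0.7}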
    def completion_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = self._create_retry_decorator()

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            return self.client.create(**kwargs)

        return _completion_with_retry(**kwargs)
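    # Illustrative sketch, mirroring what `_generate` does internally
    # (requires JINACHAT_API_KEY to be set):
    #
    #     chat = JinaChat()
    #     response = chat.completion_with_retry(
    #         messages=[{"role": "user", "content": "Hello!"}],
    #         **chat._invocation_params,
    #     )
    #     response["choices"][0]["message"]["content"]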
    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        for output in llm_outputs:
            if output is None:
                # Happens in streaming
                continue
            token_usage = output["token_usage"]
            for k, v in token_usage.items():
                if k in overall_token_usage:
                    overall_token_usage[k] += v
                else:
                    overall_token_usage[k] = v
        return {"token_usage": overall_token_usage}

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}

        default_chunk_class = AIMessageChunk
        for chunk in self.completion_with_retry(messages=message_dicts, **params):
            delta = chunk["choices"][0]["delta"]
            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
            default_chunk_class = chunk.__class__
            cg_chunk = ChatGenerationChunk(message=chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
            yield cg_chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._stream(
                messages=messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)

        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        response = self.completion_with_retry(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def _create_message_dicts(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params = dict(self._invocation_params)
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = []
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            gen = ChatGeneration(message=message)
            generations.append(gen)
        llm_output = {"token_usage": response["usage"]}
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}

        default_chunk_class = AIMessageChunk
        async for chunk in await acompletion_with_retry(
            self, messages=message_dicts, **params
        ):
            delta = chunk["choices"][0]["delta"]
            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
            default_chunk_class = chunk.__class__
            cg_chunk = ChatGenerationChunk(message=chunk)
            if run_manager:
                await run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
            yield cg_chunk

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._astream(
                messages=messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)

        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        response = await acompletion_with_retry(self, messages=message_dicts, **params)
        return self._create_chat_result(response)
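    # Illustrative sketch of the public streaming API these methods back
    # (`BaseChatModel.stream` drives `_stream` under the hood):
    #
    #     from langchain_core.messages import HumanMessage
    #
    #     chat = JinaChat(streaming=True)
    #     for chunk in chat.stream([HumanMessage(content="Tell me a joke")]):
    #         print(chunk.content, end="", flush=True)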
model."""jinachat_creds:Dict[str,Any]={"api_key":self.jinachat_api_keyandself.jinachat_api_key.get_secret_value(),"api_base":"https://api.chat.jina.ai/v1","model":"jinachat",}return{**jinachat_creds,**self._default_params}@propertydef_llm_type(self)->str:"""Return type of chat model."""return"jinachat"