# Source code for langchain_community.chat_models.edenai
import json
import warnings
from operator import itemgetter
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
    cast,
)

from aiohttp import ClientSession
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    InvalidToolCall,
    SystemMessage,
    ToolCall,
    ToolMessage,
)
from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
    JsonOutputKeyToolsParser,
    PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import (
    BaseModel,
    Field,
    SecretStr,
)
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass

from langchain_community.utilities.requests import Requests


def _result_to_chunked_message(generated_result: ChatResult) -> ChatGenerationChunk:
    """Convert a fully generated ChatResult into a single ChatGenerationChunk.

    Used by the tool-calling fallback paths: tool use is not supported in
    streaming mode, so the non-streaming result is re-emitted as one chunk.
    """
    message = generated_result.generations[0].message
    # ``AIMessage.tool_calls`` is a list (empty by default), never None, so a
    # truthiness check is used here; the previous ``is not None`` test was
    # always true and routed tool-call-free messages through the chunking path.
    if isinstance(message, AIMessage) and message.tool_calls:
        tool_call_chunks = [
            create_tool_call_chunk(
                name=tool_call["name"],
                args=json.dumps(tool_call["args"]),
                id=tool_call["id"],
                index=idx,
            )
            for idx, tool_call in enumerate(message.tool_calls)
        ]
        message_chunk = AIMessageChunk(
            content=message.content,
            tool_call_chunks=tool_call_chunks,
        )
        return ChatGenerationChunk(message=message_chunk)
    else:
        # ChatGeneration is cast to its chunk counterpart for the caller's
        # benefit; the runtime object is unchanged.
        return cast(ChatGenerationChunk, generated_result.generations[0])


def _message_role(type: str) -> str:
    """Map a LangChain message type to the EdenAI chat role name.

    Raises:
        ValueError: if the message type has no EdenAI equivalent.
    """
    role_mapping = {
        "ai": "assistant",
        "human": "user",
        "chat": "user",
        "AIMessageChunk": "assistant",
    }

    if type in role_mapping:
        return role_mapping[type]
    else:
        raise ValueError(f"Unknown type: {type}")


def _extract_edenai_tool_results_from_messages(
    messages: List[BaseMessage],
) -> Tuple[List[Dict[str, Any]], List[BaseMessage]]:
    """
    Get the last langchain tools messages to transform them into edenai
    tool_results.

    Only the trailing run of ToolMessages is extracted (iteration stops at the
    first non-tool message, scanning from the end).

    Returns tool_results and messages without the extracted tool messages.
    """
    tool_results: List[Dict[str, Any]] = []
    other_messages = messages[:]
    for msg in reversed(messages):
        if isinstance(msg, ToolMessage):
            # Prepend so tool_results keeps the original chronological order.
            tool_results = [
                {"id": msg.tool_call_id, "result": msg.content},
                *tool_results,
            ]
            other_messages.pop()
        else:
            break
    return tool_results, other_messages


def _format_edenai_messages(messages: List[BaseMessage]) -> Dict[str, Any]:
    """Convert LangChain messages into the EdenAI /text/chat request fields.

    Returns a dict with ``text`` (last human message), ``previous_history``,
    ``chatbot_global_action`` (system prompt, if any) and ``tool_results``.

    Raises:
        ValueError: if a SystemMessage appears anywhere but first.
    """
    system = None
    formatted_messages = []

    human_messages = list(filter(lambda msg: isinstance(msg, HumanMessage), messages))
    # Falls back to "" when there is no human message; getattr below then
    # yields "" for the "text" field.
    last_human_message = human_messages[-1] if human_messages else ""

    tool_results, other_messages = _extract_edenai_tool_results_from_messages(messages)
    for i, message in enumerate(other_messages):
        if isinstance(message, SystemMessage):
            if i != 0:
                raise ValueError("System message must be at beginning of message list.")
            system = message.content
        elif isinstance(message, ToolMessage):
            formatted_messages.append({"role": "tool", "message": message.content})
        elif message != last_human_message:
            # The last human message is sent as "text", not as history.
            formatted_messages.append(
                {
                    "role": _message_role(message.type),
                    "message": message.content,
                    "tool_calls": _format_tool_calls_to_edenai_tool_calls(message),
                }
            )

    return {
        "text": getattr(last_human_message, "content", ""),
        "previous_history": formatted_messages,
        "chatbot_global_action": system,
        "tool_results": tool_results,
    }


def _format_tool_calls_to_edenai_tool_calls(message: BaseMessage) -> List:
    """Serialize a message's (valid and invalid) tool calls for EdenAI.

    Valid tool-call args are JSON-encoded; args that cannot be JSON-encoded
    fall back to ``str``. Invalid tool calls keep their raw argument string.
    """
    tool_calls = getattr(message, "tool_calls", [])
    invalid_tool_calls = getattr(message, "invalid_tool_calls", [])
    edenai_tool_calls = []

    for invalid_tool_call in invalid_tool_calls:
        edenai_tool_calls.append(
            {
                "arguments": invalid_tool_call.get("args"),
                "id": invalid_tool_call.get("id"),
                "name": invalid_tool_call.get("name"),
            }
        )

    for tool_call in tool_calls:
        tool_args = tool_call.get("args", {})
        try:
            arguments = json.dumps(tool_args)
        except TypeError:
            arguments = str(tool_args)

        edenai_tool_calls.append(
            {
                "arguments": arguments,
                "id": tool_call["id"],
                "name": tool_call["name"],
            }
        )
    return edenai_tool_calls


def _extract_tool_calls_from_edenai_response(
    provider_response: Dict[str, Any],
) -> Tuple[List[ToolCall], List[InvalidToolCall]]:
    """Parse tool calls out of an EdenAI provider response.

    Calls whose ``arguments`` fail to decode as JSON are returned as
    InvalidToolCall entries instead of raising.
    """
    tool_calls = []
    invalid_tool_calls = []

    # The provider response carries a list of chat messages; index 1 is the
    # assistant turn. Guard the lookup: the previous
    # ``provider_response.get("message", {})[1]`` raised KeyError whenever the
    # key was missing, defeating the purpose of the default.
    raw_messages = provider_response.get("message") or []
    message = raw_messages[1] if len(raw_messages) > 1 else {}

    if raw_tool_calls := message.get("tool_calls"):
        for raw_tool_call in raw_tool_calls:
            try:
                tool_calls.append(
                    create_tool_call(
                        name=raw_tool_call["name"],
                        args=json.loads(raw_tool_call["arguments"]),
                        id=raw_tool_call["id"],
                    )
                )
            except json.JSONDecodeError as exc:
                invalid_tool_calls.append(
                    create_invalid_tool_call(
                        name=raw_tool_call.get("name"),
                        args=raw_tool_call.get("arguments"),
                        id=raw_tool_call.get("id"),
                        error=f"Received JSONDecodeError {exc}",
                    )
                )

    return tool_calls, invalid_tool_calls
class ChatEdenAI(BaseChatModel):
    """`EdenAI` chat large language models.

    `EdenAI` is a versatile platform that allows you to access various language
    models from different providers such as Google, OpenAI, Cohere, Mistral and
    more.

    To get started, make sure you have the environment variable ``EDENAI_API_KEY``
    set with your API key, or pass it as a named parameter to the constructor.

    Additionally, `EdenAI` provides the flexibility to choose from a variety of
    models, including the ones like "gpt-4".

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatEdenAI
            from langchain_core.messages import HumanMessage

            # Initialize `ChatEdenAI` with the desired configuration
            chat = ChatEdenAI(
                provider="openai",
                model="gpt-4",
                max_tokens=256,
                temperature=0.75)

            # Create a list of messages to interact with the model
            messages = [HumanMessage(content="hello")]

            # Invoke the model with the provided messages
            chat.invoke(messages)

    `EdenAI` goes beyond mere model invocation. It empowers you with advanced
    features:

    - **Multiple Providers**: access to a diverse range of LLMs offered by
      various providers, giving you the freedom to choose the best-suited model
      for your use case.

    - **Fallback Mechanism**: set a fallback mechanism to ensure seamless
      operations even if the primary provider is unavailable; you can easily
      switch to an alternative provider.

    - **Usage Statistics**: track usage statistics on a per-project and
      per-API-key basis. This feature allows you to monitor and manage resource
      consumption effectively.

    - **Monitoring and Observability**: `EdenAI` provides comprehensive
      monitoring and observability tools on the platform.

    Example of setting up a fallback mechanism:
        .. code-block:: python

            # Initialize `ChatEdenAI` with a fallback provider
            chat_with_fallback = ChatEdenAI(
                provider="openai",
                model="gpt-4",
                max_tokens=256,
                temperature=0.75,
                fallback_provider="google")

    You can find more details here: https://docs.edenai.co/reference/text_chat_create
    """

    provider: str = "openai"
    """chat provider to use (eg: openai,google etc.)"""

    model: Optional[str] = None
    """
    model name for above provider (eg: 'gpt-4' for openai)
    available models are shown on https://docs.edenai.co/ under 'available providers'
    """

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: Optional[float] = 0
    """A non-negative float that tunes the degree of randomness in generation."""

    streaming: bool = False
    """Whether to stream the results."""

    fallback_providers: Optional[str] = None
    """Providers in this will be used as fallback if the call to provider fails."""

    edenai_api_url: str = "https://api.edenai.run/v2"

    edenai_api_key: Optional[SecretStr] = Field(None, description="EdenAI API Token")

    class Config:
        extra = "forbid"

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["edenai_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "edenai_api_key", "EDENAI_API_KEY")
        )
        return values
@propertydef_llm_type(self)->str:"""Return type of chat model."""return"edenai-chat"@propertydef_api_key(self)->str:ifself.edenai_api_key:returnself.edenai_api_key.get_secret_value()return""def_stream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,**kwargs:Any,)->Iterator[ChatGenerationChunk]:"""Call out to EdenAI's chat endpoint."""if"available_tools"inkwargs:yieldself._stream_with_tools_as_generate(messages,stop=stop,run_manager=run_manager,**kwargs)returnurl=f"{self.edenai_api_url}/text/chat/stream"headers={"Authorization":f"Bearer {self._api_key}","User-Agent":self.get_user_agent(),}formatted_data=_format_edenai_messages(messages=messages)payload:Dict[str,Any]={"providers":self.provider,"max_tokens":self.max_tokens,"temperature":self.temperature,"fallback_providers":self.fallback_providers,**formatted_data,**kwargs,}payload={k:vfork,vinpayload.items()ifvisnotNone}ifself.modelisnotNone:payload["settings"]={self.provider:self.model}request=Requests(headers=headers)response=request.post(url=url,data=payload,stream=True)response.raise_for_status()forchunk_responseinresponse.iter_lines():chunk=json.loads(chunk_response.decode())token=chunk["text"]cg_chunk=ChatGenerationChunk(message=AIMessageChunk(content=token))ifrun_manager:run_manager.on_llm_new_token(token,chunk=cg_chunk)yieldcg_chunkasyncdef_astream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,**kwargs:Any,)->AsyncIterator[ChatGenerationChunk]:if"available_tools"inkwargs:yieldawaitself._astream_with_tools_as_agenerate(messages,stop=stop,run_manager=run_manager,**kwargs)returnurl=f"{self.edenai_api_url}/text/chat/stream"headers={"Authorization":f"Bearer 
{self._api_key}","User-Agent":self.get_user_agent(),}formatted_data=_format_edenai_messages(messages=messages)payload:Dict[str,Any]={"providers":self.provider,"max_tokens":self.max_tokens,"temperature":self.temperature,"fallback_providers":self.fallback_providers,**formatted_data,**kwargs,}payload={k:vfork,vinpayload.items()ifvisnotNone}ifself.modelisnotNone:payload["settings"]={self.provider:self.model}asyncwithClientSession()assession:asyncwithsession.post(url,json=payload,headers=headers)asresponse:response.raise_for_status()asyncforchunk_responseinresponse.content:chunk=json.loads(chunk_response.decode())token=chunk["text"]cg_chunk=ChatGenerationChunk(message=AIMessageChunk(content=token))ifrun_manager:awaitrun_manager.on_llm_new_token(token=chunk["text"],chunk=cg_chunk)yieldcg_chunk
def_generate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,**kwargs:Any,)->ChatResult:"""Call out to EdenAI's chat endpoint."""ifself.streaming:if"available_tools"inkwargs:warnings.warn("stream: Tool use is not yet supported in streaming mode.")else:stream_iter=self._stream(messages,stop=stop,run_manager=run_manager,**kwargs)returngenerate_from_stream(stream_iter)url=f"{self.edenai_api_url}/text/chat"headers={"Authorization":f"Bearer {self._api_key}","User-Agent":self.get_user_agent(),}formatted_data=_format_edenai_messages(messages=messages)payload:Dict[str,Any]={"providers":self.provider,"max_tokens":self.max_tokens,"temperature":self.temperature,"fallback_providers":self.fallback_providers,**formatted_data,**kwargs,}payload={k:vfork,vinpayload.items()ifvisnotNone}ifself.modelisnotNone:payload["settings"]={self.provider:self.model}request=Requests(headers=headers)response=request.post(url=url,data=payload)response.raise_for_status()data=response.json()provider_response=data[self.provider]ifself.fallback_providers:fallback_response=data.get(self.fallback_providers)iffallback_response:provider_response=fallback_responseifprovider_response.get("status")=="fail":err_msg=provider_response.get("error",{}).get("message")raiseException(err_msg)tool_calls,invalid_tool_calls=_extract_tool_calls_from_edenai_response(provider_response)returnChatResult(generations=[ChatGeneration(message=AIMessage(content=provider_response["generated_text"]or"",tool_calls=tool_calls,invalid_tool_calls=invalid_tool_calls,))],llm_output=data,)asyncdef_agenerate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,**kwargs:Any,)->ChatResult:ifself.streaming:if"available_tools"inkwargs:warnings.warn("stream: Tool use is not yet supported in streaming 
mode.")else:stream_iter=self._astream(messages,stop=stop,run_manager=run_manager,**kwargs)returnawaitagenerate_from_stream(stream_iter)url=f"{self.edenai_api_url}/text/chat"headers={"Authorization":f"Bearer {self._api_key}","User-Agent":self.get_user_agent(),}formatted_data=_format_edenai_messages(messages=messages)payload:Dict[str,Any]={"providers":self.provider,"max_tokens":self.max_tokens,"temperature":self.temperature,"fallback_providers":self.fallback_providers,**formatted_data,**kwargs,}payload={k:vfork,vinpayload.items()ifvisnotNone}ifself.modelisnotNone:payload["settings"]={self.provider:self.model}asyncwithClientSession()assession:asyncwithsession.post(url,json=payload,headers=headers)asresponse:response.raise_for_status()data=awaitresponse.json()provider_response=data[self.provider]ifself.fallback_providers:fallback_response=data.get(self.fallback_providers)iffallback_response:provider_response=fallback_responseifprovider_response.get("status")=="fail":err_msg=provider_response.get("error",{}).get("message")raiseException(err_msg)returnChatResult(generations=[ChatGeneration(message=AIMessage(content=provider_response["generated_text"]))],llm_output=data,)def_stream_with_tools_as_generate(self,messages:List[BaseMessage],stop:Optional[List[str]],run_manager:Optional[CallbackManagerForLLMRun],**kwargs:Any,)->ChatGenerationChunk:warnings.warn("stream: Tool use is not yet supported in streaming mode.")result=self._generate(messages,stop=stop,run_manager=run_manager,**kwargs)return_result_to_chunked_message(result)asyncdef_astream_with_tools_as_agenerate(self,messages:List[BaseMessage],stop:Optional[List[str]],run_manager:Optional[AsyncCallbackManagerForLLMRun],**kwargs:Any,)->ChatGenerationChunk:warnings.warn("stream: Tool use is not yet supported in streaming mode.")result=awaitself._agenerate(messages,stop=stop,run_manager=run_manager,**kwargs)return_result_to_chunked_message(result)