Source code for langchain_community.chat_models.fireworks
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Type,
    Union,
)

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessage,
    ChatMessageChunk,
    FunctionMessage,
    FunctionMessageChunk,
    HumanMessage,
    HumanMessageChunk,
    SystemMessage,
    SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str
from langchain_core.utils.env import get_from_dict_or_env
from pydantic import Field, SecretStr, model_validator

from langchain_community.adapters.openai import convert_message_to_dict


def _convert_delta_to_message_chunk(
    _dict: Any, default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
    """Convert a delta response to a message chunk."""
    role = _dict.role
    content = _dict.content or ""
    additional_kwargs: Dict = {}
    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    elif role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
    elif role == "system" or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    elif role == "function" or default_class == FunctionMessageChunk:
        return FunctionMessageChunk(content=content, name=_dict.name)
    elif role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role)
    else:
        return default_class(content=content)  # type: ignore[call-arg]
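# Illustrative sanity check (a sketch, not part of the original module): the
# Fireworks delta objects expose .role/.content attributes, so a SimpleNamespace
# can stand in for one here. The helper name _example_delta_conversion is
# hypothetical.
def _example_delta_conversion() -> None:
    from types import SimpleNamespace

    delta = SimpleNamespace(role="assistant", content="Hel")
    chunk = _convert_delta_to_message_chunk(delta, AIMessageChunk)
    assert isinstance(chunk, AIMessageChunk)
    assert chunk.content == "Hel"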
def convert_dict_to_message(_dict: Any) -> BaseMessage:
    """Convert a dict response to a message."""
    role = _dict.role
    content = _dict.content or ""
    if role == "user":
        return HumanMessage(content=content)
    elif role == "assistant":
        additional_kwargs: Dict = {}
        return AIMessage(content=content, additional_kwargs=additional_kwargs)
    elif role == "system":
        return SystemMessage(content=content)
    elif role == "function":
        return FunctionMessage(content=content, name=_dict.name)
    else:
        return ChatMessage(content=content, role=role)
@deprecated(
    since="0.0.26",
    removal="1.0",
    alternative_import="langchain_fireworks.ChatFireworks",
)
class ChatFireworks(BaseChatModel):
    """Fireworks Chat models."""

    model: str = "accounts/fireworks/models/llama-v2-7b-chat"
    model_kwargs: dict = Field(
        default_factory=lambda: {
            "temperature": 0.7,
            "max_tokens": 512,
            "top_p": 1,
        }.copy()
    )
    fireworks_api_key: Optional[SecretStr] = None
    max_retries: int = 20
    use_retry: bool = True

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"fireworks_api_key": "FIREWORKS_API_KEY"}

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "fireworks"]

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the api key is in the environment."""
        try:
            import fireworks.client
        except ImportError as e:
            raise ImportError(
                "Could not import fireworks-ai python package. "
                "Please install it with `pip install fireworks-ai`."
            ) from e
        fireworks_api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
        )
        fireworks.client.api_key = fireworks_api_key.get_secret_value()
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "fireworks-chat"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts = self._create_message_dicts(messages)
        params = {
            "model": self.model,
            "messages": message_dicts,
            **self.model_kwargs,
            **kwargs,
        }
        response = completion_with_retry(
            self,
            self.use_retry,
            run_manager=run_manager,
            stop=stop,
            **params,
        )
        return self._create_chat_result(response)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts = self._create_message_dicts(messages)
        params = {
            "model": self.model,
            "messages": message_dicts,
            **self.model_kwargs,
            **kwargs,
        }
        response = await acompletion_with_retry(
            self, self.use_retry, run_manager=run_manager, stop=stop, **params
        )
        return self._create_chat_result(response)

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        if llm_outputs[0] is None:
            return {}
        return llm_outputs[0]

    def _create_chat_result(self, response: Any) -> ChatResult:
        generations = []
        for res in response.choices:
            message = convert_dict_to_message(res.message)
            gen = ChatGeneration(
                message=message,
                generation_info=dict(finish_reason=res.finish_reason),
            )
            generations.append(gen)
        llm_output = {"model": self.model}
        return ChatResult(generations=generations, llm_output=llm_output)

    def _create_message_dicts(
        self, messages: List[BaseMessage]
    ) -> List[Dict[str, Any]]:
        message_dicts = [convert_message_to_dict(m) for m in messages]
        return message_dicts

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        message_dicts = self._create_message_dicts(messages)
        default_chunk_class = AIMessageChunk
        params = {
            "model": self.model,
            "messages": message_dicts,
            "stream": True,
            **self.model_kwargs,
            **kwargs,
        }
        for chunk in completion_with_retry(
            self, self.use_retry, run_manager=run_manager, stop=stop, **params
        ):
            choice = chunk.choices[0]
            chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
            finish_reason = choice.finish_reason
            generation_info = (
                dict(finish_reason=finish_reason)
                if finish_reason is not None
                else None
            )
            default_chunk_class = chunk.__class__
            cg_chunk = ChatGenerationChunk(
                message=chunk, generation_info=generation_info
            )
            if run_manager:
                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
            yield cg_chunk

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        message_dicts = self._create_message_dicts(messages)
        default_chunk_class = AIMessageChunk
        params = {
            "model": self.model,
            "messages": message_dicts,
            "stream": True,
            **self.model_kwargs,
            **kwargs,
        }
        async for chunk in await acompletion_with_retry_streaming(
            self, self.use_retry, run_manager=run_manager, stop=stop, **params
        ):
            choice = chunk.choices[0]
            chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
            finish_reason = choice.finish_reason
            generation_info = (
                dict(finish_reason=finish_reason)
                if finish_reason is not None
                else None
            )
            default_chunk_class = chunk.__class__
            cg_chunk = ChatGenerationChunk(
                message=chunk, generation_info=generation_info
            )
            if run_manager:
                await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
            yield cg_chunk
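# Example usage (a minimal sketch, not part of the original module). It assumes
# FIREWORKS_API_KEY is set in the environment and `pip install fireworks-ai`;
# note that this class is deprecated in favor of
# langchain_fireworks.ChatFireworks. The helper name is hypothetical.
def _example_chat_fireworks() -> None:
    chat = ChatFireworks(model_kwargs={"temperature": 0.0, "max_tokens": 64})
    result = chat.invoke([HumanMessage(content="Say hello in one word.")])
    print(result.content)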
def conditional_decorator(
    condition: bool, decorator: Callable[[Any], Any]
) -> Callable[[Any], Any]:
    """Define conditional decorator.

    Args:
        condition: The condition.
        decorator: The decorator.

    Returns:
        The decorated function.
    """

    def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
        if condition:
            return decorator(func)
        return func

    return actual_decorator
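# Illustration (a sketch, not part of the original module): with condition=True
# the decorator is applied; with condition=False the function is returned
# unchanged. The `shout` decorator and helper name are hypothetical.
def _example_conditional_decorator() -> None:
    def shout(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            return str(func(*args, **kwargs)).upper()

        return wrapper

    @conditional_decorator(True, shout)
    def greet(name: str) -> str:
        return f"hello {name}"

    assert greet("fireworks") == "HELLO FIREWORKS"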
def completion_with_retry(
    llm: ChatFireworks,
    use_retry: bool,
    *,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    def _completion_with_retry(**kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        return fireworks.client.ChatCompletion.create(
            **kwargs,
        )

    return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
    llm: ChatFireworks,
    use_retry: bool,
    *,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the async completion call."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    async def _completion_with_retry(**kwargs: Any) -> Any:
        return await fireworks.client.ChatCompletion.acreate(
            **kwargs,
        )

    return await _completion_with_retry(**kwargs)
async def acompletion_with_retry_streaming(
    llm: ChatFireworks,
    use_retry: bool,
    *,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call for streaming."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    async def _completion_with_retry(**kwargs: Any) -> Any:
        # With stream=True, acreate returns an async iterator rather than a
        # coroutine, so it is deliberately not awaited here; the caller
        # consumes it with `async for`.
        return fireworks.client.ChatCompletion.acreate(
            **kwargs,
        )

    return await _completion_with_retry(**kwargs)
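# The retry helpers above call _create_retry_decorator, which is not shown in
# this section. A plausible reconstruction follows (an assumption based on the
# imported create_base_retry_decorator and the max_retries field; the exact
# list of retried fireworks.client.error exceptions may differ in the real
# module):
def _create_retry_decorator(
    llm: ChatFireworks,
    *,
    run_manager: Optional[
        Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
    ] = None,
) -> Callable[[Any], Any]:
    """Define retry mechanism."""
    import fireworks.client

    # Retry only on transient rate-limit / server-side errors (assumed set).
    errors = [
        fireworks.client.error.RateLimitError,
        fireworks.client.error.InternalServerError,
        fireworks.client.error.BadGatewayError,
        fireworks.client.error.ServiceUnavailableError,
    ]
    return create_base_retry_decorator(
        errors=errors, max_retries=llm.max_retries, run_manager=run_manager
    )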