# Source code for langchain_community.llms.fireworks
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Union,
)

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import convert_to_secret_str, pre_init
from langchain_core.utils.env import get_from_dict_or_env
from pydantic import Field, SecretStr


def _stream_response_to_generation_chunk(
    stream_response: Any,
) -> GenerationChunk:
    """Convert a stream response to a generation chunk."""
    # Only the first choice of the streamed delta is surfaced.
    first_choice = stream_response.choices[0]
    return GenerationChunk(
        text=first_choice.text,
        generation_info=dict(
            finish_reason=first_choice.finish_reason,
            logprobs=first_choice.logprobs,
        ),
    )
@deprecated(
    since="0.0.26",
    removal="1.0",
    alternative_import="langchain_fireworks.Fireworks",
)
class Fireworks(BaseLLM):
    """Fireworks models."""

    # Default model served by the Fireworks API.
    model: str = "accounts/fireworks/models/llama-v2-7b-chat"
    # Sampling parameters forwarded verbatim to the completion endpoint.
    model_kwargs: dict = Field(
        default_factory=lambda: {
            "temperature": 0.7,
            "max_tokens": 512,
            "top_p": 1,
        }.copy()
    )
    fireworks_api_key: Optional[SecretStr] = None
    max_retries: int = 20
    # Number of prompts sent to the API per batched request group.
    batch_size: int = 20
    use_retry: bool = True

    @property
    def lc_secrets(self) -> Dict[str, str]:
        """Map the secret field to its environment variable name."""
        return {"fireworks_api_key": "FIREWORKS_API_KEY"}

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "llms", "fireworks"]

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key in environment."""
        try:
            import fireworks.client
        except ImportError as e:
            raise ImportError(
                "Could not import fireworks-ai python package. "
                "Please install it with `pip install fireworks-ai`."
            ) from e
        # Resolve the key from the constructor kwarg or the environment, then
        # install it globally on the fireworks client module.
        api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
        )
        fireworks.client.api_key = api_key.get_secret_value()
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "fireworks"

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to Fireworks endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.
        """
        # NOTE(review): **kwargs is accepted but never forwarded to the API
        # call — only model_kwargs are sent; confirm this is intentional.
        params = {
            "model": self.model,
            **self.model_kwargs,
        }
        all_choices: List[Any] = []
        for batch in self.get_batch_prompts(prompts):
            response = completion_with_retry_batching(
                self,
                self.use_retry,
                prompt=batch,
                run_manager=run_manager,
                stop=stop,
                **params,
            )
            all_choices.extend(response)
        return self.create_llm_result(all_choices, prompts)

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to Fireworks endpoint async with k unique prompts."""
        params = {
            "model": self.model,
            **self.model_kwargs,
        }
        all_choices: List[Any] = []
        for batch in self.get_batch_prompts(prompts):
            response = await acompletion_with_retry_batching(
                self,
                self.use_retry,
                prompt=batch,
                run_manager=run_manager,
                stop=stop,
                **params,
            )
            all_choices.extend(response)
        return self.create_llm_result(all_choices, prompts)

    def get_batch_prompts(
        self,
        prompts: List[str],
    ) -> List[List[str]]:
        """Get the sub prompts for llm call."""
        # Chunk the prompt list into groups of at most batch_size.
        return [
            prompts[start : start + self.batch_size]
            for start in range(0, len(prompts), self.batch_size)
        ]

    def create_llm_result(self, choices: Any, prompts: List[str]) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        # One response object is expected per prompt; the one-element slice
        # degrades gracefully (empty generation list) if a choice is missing.
        for position, _prompt in enumerate(prompts):
            window = choices[position : position + 1]
            generations.append(
                [
                    Generation(
                        text=item.__dict__["choices"][0].text,
                    )
                    for item in window
                ]
            )
        return LLMResult(
            generations=generations,
            llm_output={"model": self.model},
        )
def conditional_decorator(
    condition: bool, decorator: Callable[[Any], Any]
) -> Callable[[Any], Any]:
    """Conditionally apply a decorator.

    Args:
        condition: A boolean indicating whether to apply the decorator.
        decorator: A decorator function.

    Returns:
        A decorator function.
    """

    def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
        # Wrap only when the condition holds; otherwise hand back the
        # function untouched.
        return decorator(func) if condition else func

    return actual_decorator
def completion_with_retry(
    llm: Fireworks,
    use_retry: bool,
    *,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    # Retries are applied only when the caller asked for them.
    @conditional_decorator(use_retry, retry_decorator)
    def _call(**call_kwargs: Any) -> Any:
        return fireworks.client.Completion.create(**call_kwargs)

    return _call(**kwargs)
async def acompletion_with_retry(
    llm: Fireworks,
    use_retry: bool,
    *,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    # Retries are applied only when the caller asked for them.
    @conditional_decorator(use_retry, retry_decorator)
    async def _call(**call_kwargs: Any) -> Any:
        return await fireworks.client.Completion.acreate(**call_kwargs)

    return await _call(**kwargs)
def completion_with_retry_batching(
    llm: Fireworks,
    use_retry: bool,
    *,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call."""
    import fireworks.client

    # "prompt" holds the whole batch; remove it so the remaining kwargs can
    # be re-used per individual prompt.
    prompts = kwargs.pop("prompt")
    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    def _single_completion(prompt: str) -> Any:
        return fireworks.client.Completion.create(**kwargs, prompt=prompt)

    # Fan the batch out across worker threads, one API call per prompt.
    with ThreadPoolExecutor() as executor:
        return list(executor.map(_single_completion, prompts))
async def acompletion_with_retry_batching(
    llm: Fireworks,
    use_retry: bool,
    *,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call."""
    import fireworks.client

    # "prompt" holds the whole batch; remove it so the remaining kwargs can
    # be re-used per individual prompt.
    prompts = kwargs.pop("prompt")
    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    async def _single_completion(prompt: str) -> Any:
        return await fireworks.client.Completion.acreate(**kwargs, prompt=prompt)

    def _drive_in_new_loop(coro_fn: Any, *args: Dict, **inner: Dict) -> Any:
        # Each worker thread needs its own event loop to run the coroutine
        # to completion synchronously.
        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)
            return loop.run_until_complete(coro_fn(*args, **inner))
        finally:
            loop.close()

    # NOTE(review): the thread pool blocks the calling event loop while the
    # batch runs — matches the original behavior; confirm acceptable.
    with ThreadPoolExecutor() as executor:
        return list(
            executor.map(
                _drive_in_new_loop,
                [_single_completion] * len(prompts),
                prompts,
            )
        )
async def acompletion_with_retry_streaming(
    llm: Fireworks,
    use_retry: bool,
    *,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Any:
    """Use tenacity to retry the completion call for streaming."""
    import fireworks.client

    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    async def _stream_call(**call_kwargs: Any) -> Any:
        # NOTE(review): acreate is deliberately NOT awaited here — the
        # streaming variant appears to return an iterator the caller
        # consumes; confirm against the fireworks client docs.
        return fireworks.client.Completion.acreate(**call_kwargs)

    return await _stream_call(**kwargs)