"""Wrapper around Fireworks AI's Completion API."""importloggingfromtypingimportAny,Dict,List,OptionalimportrequestsfromaiohttpimportClientSessionfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_models.llmsimportLLMfromlangchain_core.pydantic_v1importField,SecretStr,root_validatorfromlangchain_core.utilsimport(convert_to_secret_str,get_from_dict_or_env,get_pydantic_field_names,)fromlangchain_core.utils.utilsimportbuild_extra_kwargsfromlangchain_fireworks.versionimport__version__logger=logging.getLogger(__name__)
class Fireworks(LLM):
    """LLM models from `Fireworks`.

    To use, you'll need an API key, which you can find here:
    https://fireworks.ai It can be passed in as the init param
    ``fireworks_api_key`` or set as the environment variable
    ``FIREWORKS_API_KEY``.

    Fireworks AI API reference: https://readme.fireworks.ai/

    Example:
        .. code-block:: python

            from langchain_fireworks import Fireworks

            # The model id below is illustrative; any Fireworks
            # completion model works here.
            fireworks = Fireworks(
                model="accounts/fireworks/models/mixtral-8x7b-instruct"
            )
            response = fireworks.generate(["Tell me a joke."])
    """

    base_url: str = "https://api.fireworks.ai/inference/v1/completions"
    """Base inference API URL."""
    fireworks_api_key: SecretStr = Field(default=None, alias="api_key")
    """Fireworks AI API key. Get it here: https://fireworks.ai"""
    model: str
    """Model name. Available models are listed here:
        https://readme.fireworks.ai/
    """
    temperature: Optional[float] = None
    """Model temperature."""
    top_p: Optional[float] = None
    """Nucleus-sampling parameter: at each step, the model samples only from
        the smallest set of tokens whose cumulative probability reaches
        ``top_p``. Values closer to 0 restrict sampling to the most likely
        tokens and make the output more focused; a value of 1 considers the
        full distribution.
    """
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for the `create` call that are not
        explicitly specified.
    """
    top_k: Optional[int] = None
    """Limits the number of choices for the next predicted word or token:
        only the ``top_k`` most probable tokens are considered at each step.
        This can speed up generation and improve the quality of the generated
        text by focusing on the most likely options.
    """
    max_tokens: Optional[int] = None
    """The maximum number of tokens to generate."""
    repetition_penalty: Optional[float] = None
    """A number that controls the diversity of generated text by reducing the
        likelihood of repeated sequences. Higher values decrease repetition.
    """
    logprobs: Optional[int] = None
    """An integer that specifies how many top token log probabilities are
        included in the response for each token-generation step.
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = "forbid"
        allow_population_by_field_name = True

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        values["model_kwargs"] = build_extra_kwargs(
            extra, values, all_required_field_names
        )
        return values

    @root_validator(pre=False, skip_on_failure=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the API key exists in the environment."""
        values["fireworks_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
        )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of model."""
        return "fireworks"

    def _format_output(self, output: dict) -> str:
        # The completions endpoint returns generations under "choices".
        return output["choices"][0]["text"]
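    # Request flow for the methods below: ``default_params`` collects the
    # declared sampling parameters; ``_call``/``_acall`` merge them with the
    # prompt and any per-call kwargs, drop ``None`` values, and POST the
    # result to ``base_url``.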
    @property
    def default_params(self) -> Dict[str, Any]:
        return {
            "model": self.model,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "max_tokens": self.max_tokens,
            "repetition_penalty": self.repetition_penalty,
        }

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Fireworks' text generation endpoint.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.
        """
        headers = {
            "Authorization": f"Bearer {self.fireworks_api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }
        # The API accepts either a single string or a list of stop sequences.
        stop_to_use = stop[0] if stop and len(stop) == 1 else stop
        payload: Dict[str, Any] = {
            **self.default_params,
            "prompt": prompt,
            "stop": stop_to_use,
            **kwargs,
        }
        # Filter None values so they are not sent in the HTTP payload.
        payload = {k: v for k, v in payload.items() if v is not None}
        response = requests.post(url=self.base_url, json=payload, headers=headers)
        if response.status_code >= 500:
            raise Exception(f"Fireworks Server: Error {response.status_code}")
        elif response.status_code >= 400:
            raise ValueError(f"Fireworks received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"Fireworks returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )
        data = response.json()
        output = self._format_output(data)
        return output

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the Fireworks model to get predictions based on the prompt.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.
        """
        headers = {
            "Authorization": f"Bearer {self.fireworks_api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }
        # The API accepts either a single string or a list of stop sequences.
        stop_to_use = stop[0] if stop and len(stop) == 1 else stop
        payload: Dict[str, Any] = {
            **self.default_params,
            "prompt": prompt,
            "stop": stop_to_use,
            **kwargs,
        }
        # Filter None values so they are not sent in the HTTP payload.
        payload = {k: v for k, v in payload.items() if v is not None}
        async with ClientSession() as session:
            async with session.post(
                self.base_url, json=payload, headers=headers
            ) as response:
                if response.status >= 500:
                    raise Exception(f"Fireworks Server: Error {response.status}")
                elif response.status >= 400:
                    # Unlike requests, aiohttp exposes the body via the async
                    # .text() method, so await it before interpolating.
                    text = await response.text()
                    raise ValueError(f"Fireworks received an invalid payload: {text}")
                elif response.status != 200:
                    text = await response.text()
                    raise Exception(
                        f"Fireworks returned an unexpected response with status "
                        f"{response.status}: {text}"
                    )
                response_json = await response.json()
                output = self._format_output(response_json)
                return output
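

# Minimal usage sketch, not part of the library itself. Assumptions:
# FIREWORKS_API_KEY is set in the environment, and the model id below is
# illustrative rather than prescriptive.
if __name__ == "__main__":
    import asyncio

    llm = Fireworks(
        # Any Fireworks completion-model id works here.
        model="accounts/fireworks/models/mixtral-8x7b-instruct",
        max_tokens=64,
        temperature=0.7,
    )

    # Synchronous path: LLM.invoke() routes through _call above.
    print(llm.invoke("Tell me a joke."))

    # Asynchronous path: LLM.ainvoke() routes through _acall above.
    print(asyncio.run(llm.ainvoke("Tell me a joke.")))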