from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init

from langchain_community.llms.utils import enforce_stop_tokens


class PredictionGuard(LLM):
    """Prediction Guard large language models.

    To use, you should have the ``predictionguard`` python package installed,
    and the environment variable ``PREDICTIONGUARD_TOKEN`` set with your
    access token, or pass it as a named parameter to the constructor. To use
    Prediction Guard's API along with OpenAI models, set the environment
    variable ``OPENAI_API_KEY`` with your OpenAI API key as well.

    Example:
        .. code-block:: python

            pgllm = PredictionGuard(
                model="MPT-7B-Instruct",
                token="my-access-token",
                output={"type": "boolean"},
            )
    """

    client: Any  #: :meta private:

    model: Optional[str] = "MPT-7B-Instruct"
    """Model name to use."""

    output: Optional[Dict[str, Any]] = None
    """The output type or structure for controlling the LLM output."""

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 0.75
    """A non-negative float that tunes the degree of randomness in generation."""

    token: Optional[str] = None
    """Your Prediction Guard access token."""

    stop: Optional[List[str]] = None

    class Config:
        extra = "forbid"

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the access token and python package exist in the environment."""
        token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
        try:
            import predictionguard as pg

            values["client"] = pg.Client(token=token)
        except ImportError:
            raise ImportError(
                "Could not import predictionguard python package. "
                "Please install it with `pip install predictionguard`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Prediction Guard API."""
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "predictionguard"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Prediction Guard's model API.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = pgllm.invoke("Tell me a joke.")
        """
        import predictionguard as pg

        params = self._default_params
        # Stop sequences may come from the constructor or the call, but not both.
        if self.stop is not None and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop is not None:
            params["stop_sequences"] = self.stop
        else:
            params["stop_sequences"] = stop

        response = pg.Completion.create(
            model=self.model,
            prompt=prompt,
            output=self.output,
            temperature=params["temperature"],
            max_tokens=params["max_tokens"],
            **kwargs,
        )
        text = response["choices"][0]["text"]

        # If stop tokens are provided, Prediction Guard's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])

        return text
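
# A minimal usage sketch, assuming the ``predictionguard`` package is
# installed, ``PREDICTIONGUARD_TOKEN`` is set in the environment, and the
# "MPT-7B-Instruct" model is available on the account. The prompts, the stop
# sequence, and the ``output`` structure below are illustrative, not part of
# the class above.
if __name__ == "__main__":
    pgllm = PredictionGuard(
        model="MPT-7B-Instruct",
        max_tokens=128,
        temperature=0.5,
        stop=["\n\n"],  # anything after the first blank line is stripped
    )
    print(pgllm.invoke("Tell me a joke."))

    # Structured output: constrain the completion to a boolean, as in the
    # class docstring's example.
    bool_llm = PredictionGuard(
        model="MPT-7B-Instruct",
        output={"type": "boolean"},
    )
    print(bool_llm.invoke("Is Paris the capital of France?"))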