class OpaquePrompts(LLM):
    """LLM that uses OpaquePrompts to sanitize prompts.

    Wraps another LLM and sanitizes prompts before passing them to the LLM,
    then de-sanitizes the response.

    To use, you should have the ``opaqueprompts`` python package installed,
    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
    your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import OpaquePrompts
            from langchain_community.chat_models import ChatOpenAI

            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    """

    base_llm: BaseLanguageModel
    """The base LLM to use."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the OpaquePrompts API key and the Python package exist.

        Args:
            values: The raw field values passed to the model constructor.

        Returns:
            The unchanged ``values`` dict once the environment checks pass.

        Raises:
            ImportError: If the ``opaqueprompts`` package is not installed.
            ValueError: If the package import is broken or the API key is
                missing from both ``values`` and the environment.
        """
        try:
            import opaqueprompts as op
        except ImportError:
            raise ImportError(
                "Could not import the `opaqueprompts` Python package, "
                "please install it with `pip install opaqueprompts`."
            )
        # Guard against a namespace-package/partial-install shadowing the
        # real library: a healthy import exposes a proper __package__.
        if op.__package__ is None:
            raise ValueError(
                "Could not properly import `opaqueprompts`, "
                "opaqueprompts.__package__ is None."
            )

        # The key itself is only validated for presence here; the
        # opaqueprompts client reads it from the environment at call time.
        api_key = get_from_dict_or_env(
            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
        )
        if not api_key:
            raise ValueError(
                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
                # Fixed missing separator: without the trailing space the two
                # fragments rendered as "...API key.You can get it...".
                "Please set it to your OpaquePrompts API key. "
                "You can get it by creating an account on the OpaquePrompts "
                "website: https://opaqueprompts.opaque.co/ ."
            )

        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call base LLM with sanitization before and de-sanitization after.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop sequences forwarded to the base LLM.
            run_manager: Optional callback manager for the run.

        Returns:
            The string generated by the model, with any sanitized
            placeholders restored to the original sensitive values.

        Example:
            .. code-block:: python

                response = op_llm.invoke("Tell me a joke.")
        """
        import opaqueprompts as op

        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
        # Sanitize the prompt by replacing sensitive information with
        # placeholders before it leaves the trust boundary.
        sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]

        # TODO: Add in callbacks once child runs for LLMs are supported
        # by LangSmith.
        # Call the wrapped LLM with the sanitized prompt and get the response.
        llm_response = self.base_llm.bind(stop=stop).invoke(
            sanitized_prompt_value_str,
        )
        # Chat models return an AIMessage; normalize to a plain string so
        # desanitization always operates on text.
        if isinstance(llm_response, AIMessage):
            llm_response = llm_response.content

        # De-sanitize the response by restoring the original sensitive
        # information using the secure context captured during sanitization.
        desanitize_response: op.DesanitizeResponse = op.desanitize(
            llm_response,
            secure_context=sanitize_response.secure_context,
        )
        return desanitize_response.desanitized_text

    @property
    def _llm_type(self) -> str:
        """Return type of LLM.

        This is an override of the base class method.
        """
        return "opaqueprompts"