import datetime
from typing import Any, List, Optional

from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.outputs import LLMResult

from langchain_community.llms.openai import OpenAI, OpenAIChat


class PromptLayerOpenAI(OpenAI):
    """PromptLayer OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAI LLM can also
    be passed here. The PromptLayerOpenAI LLM adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain_community.llms import PromptLayerOpenAI
            openai = PromptLayerOpenAI(model_name="gpt-3.5-turbo-instruct")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            # Log the request/response pair to PromptLayer.
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAI",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI agenerate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAI.async",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
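
# A minimal usage sketch of the two PromptLayer-specific parameters documented
# above (not part of the module; the tag value and prompt text are made up for
# illustration). Assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set in the
# environment:
#
#     from langchain_community.llms import PromptLayerOpenAI
#
#     llm = PromptLayerOpenAI(
#         model_name="gpt-3.5-turbo-instruct",
#         pl_tags=["langchain", "demo"],  # tags shown in the PromptLayer dashboard
#         return_pl_id=True,
#     )
#     result = llm.generate(["Tell me a joke"])
#     # With return_pl_id=True, the request ID is attached to generation_info:
#     pl_request_id = result.generations[0][0].generation_info["pl_request_id"]
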
class PromptLayerOpenAIChat(OpenAIChat):
    """PromptLayer OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAIChat LLM can also
    be passed here. The PromptLayerOpenAIChat adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain_community.llms import PromptLayerOpenAIChat
            openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            # Log the request/response pair to PromptLayer.
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI agenerate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
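
# A minimal usage sketch for the chat variant (not part of the module; the tag
# and prompt are made up for illustration). The logging flow is identical to
# PromptLayerOpenAI above, only the base class and model differ:
#
#     from langchain_community.llms import PromptLayerOpenAIChat
#
#     chat = PromptLayerOpenAIChat(
#         model_name="gpt-3.5-turbo",
#         pl_tags=["demo"],
#         return_pl_id=True,
#     )
#     result = chat.generate(["What is the capital of France?"])
#     pl_request_id = result.generations[0][0].generation_info["pl_request_id"]
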