import json
import logging
from typing import Any, Dict, Iterator, List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk

logger = logging.getLogger(__name__)


class CloudflareWorkersAI(LLM):
    """Cloudflare Workers AI service.

    To use, you must provide an API token and account ID to access Cloudflare
    Workers AI, and pass them as named parameters to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

            my_account_id = "my_account_id"
            my_api_token = "my_secret_api_token"
            llm_model = "@cf/meta/llama-2-7b-chat-int8"

            cf_ai = CloudflareWorkersAI(
                account_id=my_account_id,
                api_token=my_api_token,
                model=llm_model
            )
    """  # noqa: E501

    account_id: str
    api_token: str
    model: str = "@cf/meta/llama-2-7b-chat-int8"
    base_url: str = "https://api.cloudflare.com/client/v4/accounts"
    streaming: bool = False
    endpoint_url: str = ""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the Cloudflare Workers AI class."""
        super().__init__(**kwargs)

        self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"

    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "cloudflare"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Default parameters"""
        return {}

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Identifying parameters"""
        return {
            "account_id": self.account_id,
            "api_token": self.api_token,
            "model": self.model,
            "base_url": self.base_url,
        }

    def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
        """Call Cloudflare Workers API"""
        headers = {"Authorization": f"Bearer {self.api_token}"}
        data = {"prompt": prompt, "stream": self.streaming, **params}
        response = requests.post(
            self.endpoint_url, headers=headers, json=data, stream=self.streaming
        )
        return response

    def _process_response(self, response: requests.Response) -> str:
        """Process API response"""
        if response.ok:
            data = response.json()
            return data["result"]["response"]
        else:
            raise ValueError(f"Request failed with status {response.status_code}")

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Streaming prediction"""
        original_streaming: bool = self.streaming
        self.streaming = True
        # Responses arrive as server-sent events: lines of the form
        # `data: {...}`, terminated by a `data: [DONE]` sentinel.
        _response_prefix_count = len("data: ")
        _response_stream_end = b"data: [DONE]"
        for chunk in self._call_api(prompt, kwargs).iter_lines():
            if chunk == _response_stream_end:
                break
            if len(chunk) > _response_prefix_count:
                try:
                    # Strip the `data: ` prefix and parse the JSON payload.
                    data = json.loads(chunk[_response_prefix_count:])
                except Exception as e:
                    logger.debug(chunk)
                    raise e
                if data is not None and "response" in data:
                    yield GenerationChunk(text=data["response"])
                    if run_manager:
                        run_manager.on_llm_new_token(data["response"])
        logger.debug("stream end")
        self.streaming = original_streaming

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Regular prediction"""
        if self.streaming:
            return "".join(
                [c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
            )
        else:
            response = self._call_api(prompt, kwargs)
            return self._process_response(response)
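

# Minimal usage sketch for the class above, exercising both call paths via the
# standard LangChain runnable interface (`invoke` / `stream` from the LLM base
# class). The account ID, API token, and prompt are placeholder assumptions and
# must be replaced with real values.
if __name__ == "__main__":
    cf_ai = CloudflareWorkersAI(
        account_id="my_account_id",  # placeholder
        api_token="my_secret_api_token",  # placeholder
        model="@cf/meta/llama-2-7b-chat-int8",
    )

    # Regular prediction: one blocking request, handled by `_call`.
    print(cf_ai.invoke("Why is the sky blue?"))

    # Streaming prediction: `_stream` parses the server-sent events and
    # yields one token at a time.
    for token in cf_ai.stream("Why is the sky blue?"):
        print(token, end="", flush=True)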