class Nebula(LLM):
    """Nebula Service models.

    To use, you should have the environment variable ``NEBULA_SERVICE_URL``,
    ``NEBULA_SERVICE_PATH`` and ``NEBULA_API_KEY`` set with your Nebula
    Service, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import Nebula

            nebula = Nebula(
                nebula_service_url="NEBULA_SERVICE_URL",
                nebula_service_path="NEBULA_SERVICE_PATH",
                nebula_api_key="NEBULA_API_KEY",
            )
    """

    """Key/value arguments to pass to the model. Reserved for future use"""
    model_kwargs: Optional[dict] = None

    """Optional"""
    nebula_service_url: Optional[str] = None
    nebula_service_path: Optional[str] = None
    nebula_api_key: Optional[SecretStr] = None
    model: Optional[str] = None
    max_new_tokens: Optional[int] = 128
    temperature: Optional[float] = 0.6
    top_p: Optional[float] = 0.95
    repetition_penalty: Optional[float] = 1.0
    top_k: Optional[int] = 1
    stop_sequences: Optional[List[str]] = None
    max_retries: Optional[int] = 10

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        nebula_service_url = get_from_dict_or_env(
            values,
            "nebula_service_url",
            "NEBULA_SERVICE_URL",
            DEFAULT_NEBULA_SERVICE_URL,
        )
        nebula_service_path = get_from_dict_or_env(
            values,
            "nebula_service_path",
            "NEBULA_SERVICE_PATH",
            DEFAULT_NEBULA_SERVICE_PATH,
        )
        nebula_api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "nebula_api_key", "NEBULA_API_KEY", None)
        )

        # Normalize so that url + path concatenates with exactly one slash.
        if nebula_service_url.endswith("/"):
            nebula_service_url = nebula_service_url[:-1]
        if not nebula_service_path.startswith("/"):
            nebula_service_path = "/" + nebula_service_path

        values["nebula_service_url"] = nebula_service_url
        values["nebula_service_path"] = nebula_service_path
        values["nebula_api_key"] = nebula_api_key
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Nebula Service."""
        return {
            "max_new_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "repetition_penalty": self.repetition_penalty,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            "nebula_service_url": self.nebula_service_url,
            "nebula_service_path": self.nebula_service_path,
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "nebula"

    def _invocation_params(
        self, stop_sequences: Optional[List[str]], **kwargs: Any
    ) -> dict:
        """Merge default params, stop sequences, and call-time kwargs.

        Raises:
            ValueError: If stop sequences are supplied both on the instance
                and in the call — the two sources would be ambiguous.
        """
        params = self._default_params
        if self.stop_sequences is not None and stop_sequences is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop_sequences is not None:
            params["stop_sequences"] = self.stop_sequences
        else:
            params["stop_sequences"] = stop_sequences
        return {**params, **kwargs}

    @staticmethod
    def _process_response(response: Any, stop: Optional[List[str]]) -> str:
        """Extract the generated text and truncate at any stop token."""
        text = response["output"]["text"]
        if stop:
            text = enforce_stop_tokens(text, stop)
        return text

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Nebula Service endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = nebula("Tell me a joke.")
        """
        params = self._invocation_params(stop, **kwargs)
        prompt = prompt.strip()

        response = completion_with_retry(
            self,
            prompt=prompt,
            params=params,
            url=f"{self.nebula_service_url}{self.nebula_service_path}",
        )
        _stop = params.get("stop_sequences")
        return self._process_response(response, _stop)
def make_request(
    self: Nebula,
    prompt: str,
    url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
    params: Optional[Dict] = None,
) -> Any:
    """Generate text from the model."""
    request_params = params or {}

    # Resolve the secret API key, if one was configured.
    # NOTE(review): when no key is set, the header value is the literal
    # string "None" — presumably the service treats that as unauthenticated;
    # confirm before changing.
    api_key = (
        self.nebula_api_key.get_secret_value()
        if self.nebula_api_key is not None
        else None
    )
    headers = {
        "Content-Type": "application/json",
        "ApiKey": f"{api_key}",
    }

    # Fold any extra generation parameters into the JSON payload.
    body = {"prompt": prompt, **request_params}

    response = requests.post(url, headers=headers, json=body)
    if response.status_code != 200:
        raise Exception(
            f"Request failed with status code {response.status_code}"
            f" and message {response.text}"
        )
    return json.loads(response.text)
def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]:
    """Build a tenacity ``retry`` decorator configured from ``llm``.

    Retries transient HTTP failures with exponential backoff:
    2^x * 1 second between attempts, starting at 4 seconds and
    capped at 10 seconds; the last exception is re-raised.
    """
    attempts = 3 if llm.max_retries is None else llm.max_retries
    backoff = wait_exponential(multiplier=1, min=4, max=10)
    transient_errors = (RequestException, ConnectTimeout, ReadTimeout)
    return retry(
        reraise=True,
        stop=stop_after_attempt(attempts),
        wait=backoff,
        retry=retry_if_exception_type(transient_errors),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""
    decorate = _create_retry_decorator(llm)

    @decorate
    def _attempt(**call_kwargs: Any) -> Any:
        return make_request(llm, **call_kwargs)

    return _attempt(**kwargs)