# Source code for langchain_community.chat_models.yandex
"""Wrapper around YandexGPT chat models."""from__future__importannotationsimportloggingfromtypingimportAny,Callable,Dict,List,Optional,castfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_models.chat_modelsimportBaseChatModelfromlangchain_core.messagesimport(AIMessage,BaseMessage,HumanMessage,SystemMessage,)fromlangchain_core.outputsimportChatGeneration,ChatResultfromtenacityimport(before_sleep_log,retry,retry_if_exception_type,stop_after_attempt,wait_exponential,)fromlangchain_community.llms.utilsimportenforce_stop_tokensfromlangchain_community.llms.yandeximport_BaseYandexGPTlogger=logging.getLogger(__name__)def_parse_message(role:str,text:str)->Dict:return{"role":role,"text":text}def_parse_chat_history(history:List[BaseMessage])->List[Dict[str,str]]:"""Parse a sequence of messages into history. Returns: A list of parsed messages. """chat_history=[]formessageinhistory:content=cast(str,message.content)ifisinstance(message,HumanMessage):chat_history.append(_parse_message("user",content))ifisinstance(message,AIMessage):chat_history.append(_parse_message("assistant",content))ifisinstance(message,SystemMessage):chat_history.append(_parse_message("system",content))returnchat_history
class ChatYandexGPT(_BaseYandexGPT, BaseChatModel):
    """YandexGPT large language models.

    There are two authentication options for the service account
    with the ``ai.languageModels.user`` role:
        - You can specify the token in a constructor parameter
          `iam_token` or in an environment variable `YC_IAM_TOKEN`.
        - You can specify the key in a constructor parameter
          `api_key` or in an environment variable `YC_API_KEY`.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatYandexGPT
            chat_model = ChatYandexGPT(iam_token="t1.9eu...")
    """

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate next turn in the conversation.

        Args:
            messages: The history of the conversation as a list of messages.
            stop: The list of stop words (optional).
            run_manager: The CallbackManager for LLM run, it's not used at the moment.

        Returns:
            The ChatResult that contains outputs generated by the model.

        Raises:
            ValueError: if ``messages`` is empty (raised by the request helper).
        """
        completion = completion_with_retry(self, messages=messages)
        if stop is not None:
            completion = enforce_stop_tokens(completion, stop)
        generation = ChatGeneration(message=AIMessage(content=completion))
        return ChatResult(generations=[generation])

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Async method to generate next turn in the conversation.

        Args:
            messages: The history of the conversation as a list of messages.
            stop: The list of stop words (optional).
            run_manager: The CallbackManager for LLM run, it's not used at the moment.

        Returns:
            The ChatResult that contains outputs generated by the model.

        Raises:
            ValueError: if ``messages`` is empty (raised by the request helper).
        """
        completion = await acompletion_with_retry(self, messages=messages)
        if stop is not None:
            completion = enforce_stop_tokens(completion, stop)
        generation = ChatGeneration(message=AIMessage(content=completion))
        return ChatResult(generations=[generation])
def _make_request(
    self: ChatYandexGPT,
    messages: List[BaseMessage],
) -> str:
    """Send a synchronous Completion RPC to YandexGPT and return the reply text.

    Args:
        self: The chat model carrying connection/config attributes
            (``url``, ``model_uri``, ``temperature``, ``max_tokens``,
            ``grpc_metadata``).
        messages: The conversation history to send.

    Returns:
        The text of the first completion alternative.

    Raises:
        ImportError: if the ``yandexcloud`` SDK is not installed.
        ValueError: if ``messages`` is empty.
    """
    try:
        import grpc
        from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value

        # Newer versions of the SDK moved the protos; fall back to the
        # pre-reorganization module layout when the new one is absent.
        try:
            from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
                CompletionOptions,
                Message,
            )
            from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import (  # noqa: E501
                CompletionRequest,
            )
            from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import (  # noqa: E501
                TextGenerationServiceStub,
            )
        except ModuleNotFoundError:
            from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
                CompletionOptions,
                Message,
            )
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import (  # noqa: E501
                CompletionRequest,
            )
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import (  # noqa: E501
                TextGenerationServiceStub,
            )
    except ImportError as e:
        raise ImportError(
            "Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to recent version."
        ) from e
    if not messages:
        raise ValueError("You should provide at least one message to start the chat!")
    message_history = _parse_chat_history(messages)
    channel_credentials = grpc.ssl_channel_credentials()
    # Bug fix: the channel was previously never closed; grpc.Channel is a
    # context manager, so close it deterministically instead of leaking it.
    with grpc.secure_channel(self.url, channel_credentials) as channel:
        request = CompletionRequest(
            model_uri=self.model_uri,
            completion_options=CompletionOptions(
                temperature=DoubleValue(value=self.temperature),
                max_tokens=Int64Value(value=self.max_tokens),
            ),
            messages=[Message(**message) for message in message_history],
        )
        stub = TextGenerationServiceStub(channel)
        res = stub.Completion(request, metadata=self.grpc_metadata)
        return list(res)[0].alternatives[0].message.text


async def _amake_request(self: ChatYandexGPT, messages: List[BaseMessage]) -> str:
    """Send an async Completion RPC to YandexGPT and return the reply text.

    The async API returns a long-running operation; this helper polls the
    operation service once per second until it completes.

    Args:
        self: The chat model carrying connection/config attributes.
        messages: The conversation history to send.

    Returns:
        The text of the first completion alternative.

    Raises:
        ImportError: if the ``yandexcloud`` SDK is not installed.
        ValueError: if ``messages`` is empty.
    """
    try:
        import asyncio

        import grpc
        from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value

        # Same new-layout/old-layout fallback as in _make_request.
        try:
            from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
                CompletionOptions,
                Message,
            )
            from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import (  # noqa: E501
                CompletionRequest,
                CompletionResponse,
            )
            from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import (  # noqa: E501
                TextGenerationAsyncServiceStub,
            )
        except ModuleNotFoundError:
            from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
                CompletionOptions,
                Message,
            )
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import (  # noqa: E501
                CompletionRequest,
                CompletionResponse,
            )
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import (  # noqa: E501
                TextGenerationAsyncServiceStub,
            )
        from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
        from yandex.cloud.operation.operation_service_pb2_grpc import (
            OperationServiceStub,
        )
    except ImportError as e:
        raise ImportError(
            "Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to recent version."
        ) from e
    if not messages:
        raise ValueError("You should provide at least one message to start the chat!")
    message_history = _parse_chat_history(messages)
    operation_api_url = "operation.api.cloud.yandex.net:443"
    channel_credentials = grpc.ssl_channel_credentials()
    async with grpc.aio.secure_channel(self.url, channel_credentials) as channel:
        request = CompletionRequest(
            model_uri=self.model_uri,
            completion_options=CompletionOptions(
                temperature=DoubleValue(value=self.temperature),
                max_tokens=Int64Value(value=self.max_tokens),
            ),
            messages=[Message(**message) for message in message_history],
        )
        stub = TextGenerationAsyncServiceStub(channel)
        operation = await stub.Completion(request, metadata=self.grpc_metadata)
        # Poll the dedicated operation service until the completion finishes.
        async with grpc.aio.secure_channel(
            operation_api_url, channel_credentials
        ) as operation_channel:
            operation_stub = OperationServiceStub(operation_channel)
            while not operation.done:
                await asyncio.sleep(1)
                operation_request = GetOperationRequest(operation_id=operation.id)
                operation = await operation_stub.Get(
                    operation_request,
                    metadata=self.grpc_metadata,
                )
        completion_response = CompletionResponse()
        # The operation's response is packed in an Any; unpack into the
        # typed CompletionResponse message.
        operation.response.Unpack(completion_response)
        return completion_response.alternatives[0].message.text


def _create_retry_decorator(llm: ChatYandexGPT) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for gRPC calls.

    Retries on any ``RpcError`` with exponential backoff starting at the
    model's ``sleep_interval`` and capped at 60 seconds, for at most
    ``llm.max_retries`` attempts; the last error is re-raised.
    """
    from grpc import RpcError

    min_seconds = llm.sleep_interval
    max_seconds = 60
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=retry_if_exception_type(RpcError),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def completion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""

    # Apply the retry policy directly to a small wrapper that forwards
    # everything to the synchronous request helper.
    @_create_retry_decorator(llm)
    def _invoke(**call_kwargs: Any) -> Any:
        return _make_request(llm, **call_kwargs)

    return _invoke(**kwargs)
async def acompletion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any:
    """Use tenacity to retry the async completion call."""

    # Apply the retry policy directly to a small async wrapper that forwards
    # everything to the asynchronous request helper.
    @_create_retry_decorator(llm)
    async def _ainvoke(**call_kwargs: Any) -> Any:
        return await _amake_request(llm, **call_kwargs)

    return await _ainvoke(**kwargs)