Source code for langchain_community.chat_models.minimax
"""Wrapper around Minimax chat models."""importjsonimportloggingfromcontextlibimportasynccontextmanager,contextmanagerfromoperatorimportitemgetterfromtypingimport(Any,AsyncIterator,Callable,Dict,Iterator,List,Optional,Sequence,Type,Union,)fromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_modelsimportLanguageModelInputfromlangchain_core.language_models.chat_modelsimport(BaseChatModel,agenerate_from_stream,generate_from_stream,)fromlangchain_core.messagesimport(AIMessage,AIMessageChunk,BaseMessage,BaseMessageChunk,ChatMessage,ChatMessageChunk,HumanMessage,SystemMessage,ToolMessage,)fromlangchain_core.output_parsers.baseimportOutputParserLikefromlangchain_core.output_parsers.openai_toolsimport(JsonOutputKeyToolsParser,PydanticToolsParser,)fromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.pydantic_v1importBaseModel,Field,SecretStr,root_validatorfromlangchain_core.runnablesimportRunnable,RunnableMap,RunnablePassthroughfromlangchain_core.toolsimportBaseToolfromlangchain_core.utilsimportconvert_to_secret_str,get_from_dict_or_envfromlangchain_core.utils.function_callingimportconvert_to_openai_toolfromlangchain_core.utils.pydanticimportget_fieldslogger=logging.getLogger(__name__)
@contextmanager
def connect_httpx_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
    """Context manager for connecting to an SSE stream.

    Args:
        client: The httpx client.
        method: The HTTP method.
        url: The URL to connect to.
        kwargs: Additional keyword arguments to pass to the client.

    Yields:
        An EventSource object.
    """
    from httpx_sse import EventSource

    with client.stream(method, url, **kwargs) as response:
        yield EventSource(response)
@asynccontextmanager
async def aconnect_httpx_sse(
    client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
    """Async context manager for connecting to an SSE stream.

    Args:
        client: The httpx client.
        method: The HTTP method.
        url: The URL to connect to.
        kwargs: Additional keyword arguments to pass to the client.

    Yields:
        An EventSource object.
    """
    from httpx_sse import EventSource

    async with client.stream(method, url, **kwargs) as response:
        yield EventSource(response)
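A minimal sketch of how these helpers are driven, mirroring what `_stream` does further down; it assumes the `httpx` and `httpx-sse` packages are installed, and the endpoint URL, API key, and payload shown here are placeholders:

    import httpx

    headers = {"Authorization": "Bearer <api-key>", "Content-Type": "application/json"}
    payload = {"model": "abab6.5-chat", "stream": True, "messages": [{"role": "user", "content": "hi"}]}

    with httpx.Client(headers=headers, timeout=60) as client:
        with connect_httpx_sse(
            client, "POST", "https://api.minimax.chat/v1/text/chatcompletion_v2", json=payload
        ) as event_source:
            for sse in event_source.iter_sse():
                print(sse.data)  # each server-sent event carries one JSON-encoded chunk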
def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
    """Convert a LangChain message to a dict."""
    message_dict: Dict[str, Any]
    if isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {
            "role": "assistant",
            "content": message.content,
            "tool_calls": message.additional_kwargs.get("tool_calls"),
        }
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
            "name": message.name or message.additional_kwargs.get("name"),
        }
    else:
        raise TypeError(f"Got unknown type '{message.__class__.__name__}'.")
    return message_dict


def _convert_dict_to_message(dct: Dict[str, Any]) -> BaseMessage:
    """Convert a dict to a LangChain message."""
    role = dct.get("role")
    content = dct.get("content", "")
    if role == "assistant":
        additional_kwargs = {}
        tool_calls = dct.get("tool_calls", None)
        if tool_calls is not None:
            additional_kwargs["tool_calls"] = tool_calls
        return AIMessage(content=content, additional_kwargs=additional_kwargs)
    return ChatMessage(role=role, content=content)  # type: ignore[arg-type]


def _convert_delta_to_message_chunk(
    dct: Dict[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
    role = dct.get("role")
    content = dct.get("content", "")
    additional_kwargs = {}
    tool_calls = dct.get("tool_call", None)
    if tool_calls is not None:
        additional_kwargs["tool_calls"] = tool_calls

    if role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
    if role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
    return default_class(content=content)  # type: ignore[call-arg]
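A quick round-trip sketch of the converters above (illustrative only):

    from langchain_core.messages import AIMessage, HumanMessage

    assert _convert_message_to_dict(HumanMessage(content="hi")) == {
        "role": "user",
        "content": "hi",
    }
    msg = _convert_dict_to_message({"role": "assistant", "content": "hello"})
    assert isinstance(msg, AIMessage) and msg.content == "hello"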
class MiniMaxChat(BaseChatModel):
    """MiniMax chat model integration.

    Setup:
        To use, you should have the environment variable ``MINIMAX_API_KEY``
        set with your API key.

        .. code-block:: bash

            export MINIMAX_API_KEY="your-api-key"

    Key init args — completion params:
        model: Optional[str]
            Name of MiniMax model to use.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        temperature: Optional[float]
            Sampling temperature.
        top_p: Optional[float]
            Total probability mass of tokens to consider at each step.
        streaming: Optional[bool]
            Whether to stream the results or not.

    Key init args — client params:
        api_key: Optional[str]
            MiniMax API key. If not passed in, will be read from the env var
            MINIMAX_API_KEY.
        base_url: Optional[str]
            Base URL for API requests.

    See the full list of supported init args and their descriptions in the
    params section.

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import MiniMaxChat

            chat = MiniMaxChat(
                api_key=api_key,
                model='abab6.5-chat',
                # temperature=...,
                # other params...
            )

    Invoke:
        .. code-block:: python

            messages = [
                # system: "You are a professional translator; translate the
                # user's Chinese into English."
                ("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
                # human: "I like programming."
                ("human", "我喜欢编程。"),
            ]
            chat.invoke(messages)

        .. code-block:: python

            AIMessage(
                content='I enjoy programming.',
                response_metadata={
                    'token_usage': {'total_tokens': 48},
                    'model_name': 'abab6.5-chat',
                    'finish_reason': 'stop'
                },
                id='run-42d62ba6-5dc1-4e16-98dc-f72708a4162d-0'
            )

    Stream:
        .. code-block:: python

            for chunk in chat.stream(messages):
                print(chunk)

        .. code-block:: python

            content='I' id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'
            content=' enjoy programming.' response_metadata={'finish_reason': 'stop'} id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'

        .. code-block:: python

            stream = chat.stream(messages)
            full = next(stream)
            for chunk in stream:
                full += chunk
            full

        .. code-block:: python

            AIMessageChunk(
                content='I enjoy programming.',
                response_metadata={'finish_reason': 'stop'},
                id='run-01aed0a0-61c4-4709-be22-c6d8b17155d6'
            )

    Async:
        .. code-block:: python

            await chat.ainvoke(messages)

            # stream
            # async for chunk in chat.astream(messages):
            #     print(chunk)

            # batch
            # await chat.abatch([messages])

        .. code-block:: python

            AIMessage(
                content='I enjoy programming.',
                response_metadata={
                    'token_usage': {'total_tokens': 48},
                    'model_name': 'abab6.5-chat',
                    'finish_reason': 'stop'
                },
                id='run-c263b6f1-1736-4ece-a895-055c26b3436f-0'
            )

    Tool calling:
        .. code-block:: python

            from langchain_core.pydantic_v1 import BaseModel, Field

            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )

            class GetPopulation(BaseModel):
                '''Get the current population in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )

            chat_with_tools = chat.bind_tools([GetWeather, GetPopulation])
            ai_msg = chat_with_tools.invoke(
                "Which city is hotter today and which is bigger: LA or NY?"
            )
            ai_msg.tool_calls

        .. code-block:: python

            [
                {
                    'name': 'GetWeather',
                    'args': {'location': 'LA'},
                    'id': 'call_function_2140449382',
                    'type': 'tool_call'
                }
            ]

    Structured output:
        .. code-block:: python

            from typing import Optional

            from langchain_core.pydantic_v1 import BaseModel, Field

            class Joke(BaseModel):
                '''Joke to tell user.'''

                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")
                rating: Optional[int] = Field(
                    description="How funny the joke is, from 1 to 10"
                )

            structured_chat = chat.with_structured_output(Joke)
            structured_chat.invoke("Tell me a joke about cats")

        .. code-block:: python

            Joke(
                setup='Why do cats have nine lives?',
                punchline='Because they are so cute and cuddly!',
                rating=None
            )

    Response metadata:
        .. code-block:: python

            ai_msg = chat.invoke(messages)
            ai_msg.response_metadata

        .. code-block:: python

            {
                'token_usage': {'total_tokens': 48},
                'model_name': 'abab6.5-chat',
                'finish_reason': 'stop'
            }

    """  # noqa: E501

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return the type of llm."""
        return "minimax"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the MiniMax API."""
        return {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            **self.model_kwargs,
        }

    _client: Any
    model: str = "abab6.5-chat"
    """Model name to use."""
    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""
    temperature: float = 0.7
    """A non-negative float that tunes the degree of randomness in generation."""
    top_p: float = 0.95
    """Total probability mass of tokens to consider at each step."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for the `create` call not explicitly
    specified."""
    minimax_api_host: str = Field(
        default="https://api.minimax.chat/v1/text/chatcompletion_v2", alias="base_url"
    )
    minimax_group_id: Optional[str] = Field(default=None, alias="group_id")
    """[DEPRECATED, kept for backward compatibility] Group Id"""
    minimax_api_key: SecretStr = Field(alias="api_key")
    """Minimax API Key"""
    streaming: bool = False
    """Whether to stream the results or not."""

    class Config:
        allow_population_by_field_name = True

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key exists in the environment."""
        values["minimax_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                ["minimax_api_key", "api_key"],
                "MINIMAX_API_KEY",
            )
        )

        default_values = {
            name: field.default
            for name, field in get_fields(cls).items()
            if field.default is not None
        }
        default_values.update(values)

        # Get a custom api url from the environment.
        values["minimax_api_host"] = get_from_dict_or_env(
            values,
            ["minimax_api_host", "base_url"],
            "MINIMAX_API_HOST",
            default_values["minimax_api_host"],
        )
        return values

    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
        generations = []
        if not isinstance(response, dict):
            response = response.dict()
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            generation_info = dict(finish_reason=res.get("finish_reason"))
            generations.append(
                ChatGeneration(message=message, generation_info=generation_info)
            )
        token_usage = response.get("usage", {})
        llm_output = {
            "token_usage": token_usage,
            "model_name": self.model,
        }
        return ChatResult(generations=generations, llm_output=llm_output)

    def _create_payload_parameters(  # type: ignore[no-untyped-def]
        self, messages: List[BaseMessage], is_stream: bool = False, **kwargs
    ) -> Dict[str, Any]:
        """Create API request body parameters."""
parameters."""message_dicts=[_convert_message_to_dict(m)forminmessages]payload=self._default_paramspayload["messages"]=message_dictsself._reformat_function_parameters(kwargs.get("tools",{}))payload.update(**kwargs)ifis_stream:payload["stream"]=Truereturnpayload@staticmethoddef_reformat_function_parameters(tools_arg:Dict[Any,Any])->None:"""Reformat the function parameters to strings."""fortool_argintools_arg:iftool_arg["type"]=="function"andnotisinstance(tool_arg["function"]["parameters"],str):tool_arg["function"]["parameters"]=json.dumps(tool_arg["function"]["parameters"])def_generate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,stream:Optional[bool]=None,**kwargs:Any,)->ChatResult:"""Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. stream: Whether to stream the results or not. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ifnotmessages:raiseValueError("You should provide at least one message to start the chat!")is_stream=streamifstreamisnotNoneelseself.streamingifis_stream:stream_iter=self._stream(messages,stop=stop,run_manager=run_manager,**kwargs)returngenerate_from_stream(stream_iter)payload=self._create_payload_parameters(messages,**kwargs)api_key=""ifself.minimax_api_keyisnotNone:api_key=self.minimax_api_key.get_secret_value()headers={"Authorization":f"Bearer {api_key}","Content-Type":"application/json",}importhttpxwithhttpx.Client(headers=headers,timeout=60)asclient:response=client.post(self.minimax_api_host,json=payload)response.raise_for_status()returnself._create_chat_result(response.json())def_stream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[CallbackManagerForLLMRun]=None,**kwargs:Any,)->Iterator[ChatGenerationChunk]:"""Stream the chat response in chunks."""payload=self._create_payload_parameters(messages,is_stream=True,**kwargs)api_key=""ifself.minimax_api_keyisnotNone:api_key=self.minimax_api_key.get_secret_value()headers={"Authorization":f"Bearer {api_key}","Content-Type":"application/json",}importhttpxwithhttpx.Client(headers=headers,timeout=60)asclient:withconnect_httpx_sse(client,"POST",self.minimax_api_host,json=payload)asevent_source:forsseinevent_source.iter_sse():chunk=json.loads(sse.data)iflen(chunk["choices"])==0:continuechoice=chunk["choices"][0]chunk=_convert_delta_to_message_chunk(choice["delta"],AIMessageChunk)finish_reason=choice.get("finish_reason",None)generation_info=({"finish_reason":finish_reason}iffinish_reasonisnotNoneelseNone)chunk=ChatGenerationChunk(message=chunk,generation_info=generation_info)ifrun_manager:run_manager.on_llm_new_token(chunk.text,chunk=chunk)yieldchunkiffinish_reasonisnotNone:breakasyncdef_agenerate(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,stream:Optional[bool]=None,**kwargs:Any,)->ChatResult:ifnotmessages:raiseValueError("You should provide at least one message to start the 
chat!")is_stream=streamifstreamisnotNoneelseself.streamingifis_stream:stream_iter=self._astream(messages,stop=stop,run_manager=run_manager,**kwargs)returnawaitagenerate_from_stream(stream_iter)payload=self._create_payload_parameters(messages,**kwargs)api_key=""ifself.minimax_api_keyisnotNone:api_key=self.minimax_api_key.get_secret_value()headers={"Authorization":f"Bearer {api_key}","Content-Type":"application/json",}importhttpxasyncwithhttpx.AsyncClient(headers=headers,timeout=60)asclient:response=awaitclient.post(self.minimax_api_host,json=payload)response.raise_for_status()returnself._create_chat_result(response.json())asyncdef_astream(self,messages:List[BaseMessage],stop:Optional[List[str]]=None,run_manager:Optional[AsyncCallbackManagerForLLMRun]=None,**kwargs:Any,)->AsyncIterator[ChatGenerationChunk]:payload=self._create_payload_parameters(messages,is_stream=True,**kwargs)api_key=""ifself.minimax_api_keyisnotNone:api_key=self.minimax_api_key.get_secret_value()headers={"Authorization":f"Bearer {api_key}","Content-Type":"application/json",}importhttpxasyncwithhttpx.AsyncClient(headers=headers,timeout=60)asclient:asyncwithaconnect_httpx_sse(client,"POST",self.minimax_api_host,json=payload)asevent_source:asyncforsseinevent_source.aiter_sse():chunk=json.loads(sse.data)iflen(chunk["choices"])==0:continuechoice=chunk["choices"][0]chunk=_convert_delta_to_message_chunk(choice["delta"],AIMessageChunk)finish_reason=choice.get("finish_reason",None)generation_info=({"finish_reason":finish_reason}iffinish_reasonisnotNoneelseNone)chunk=ChatGenerationChunk(message=chunk,generation_info=generation_info)ifrun_manager:awaitrun_manager.on_llm_new_token(chunk.text,chunk=chunk)yieldchunkiffinish_reasonisnotNone:break
    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Can be a dictionary, pydantic model, callable, or BaseTool.
                Pydantic models, callables, and BaseTools will be automatically
                converted to their schema dictionary representation.
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain.runnable.Runnable` constructor.
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        return super().bind(tools=formatted_tools, **kwargs)
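For reference, a sketch of the shape `convert_to_openai_tool` produces for a pydantic tool; the output comment is abbreviated, not an exact transcript:

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_core.utils.function_calling import convert_to_openai_tool

    class GetWeather(BaseModel):
        '''Get the current weather in a given location'''

        location: str = Field(..., description="The city and state")

    convert_to_openai_tool(GetWeather)
    # -> {'type': 'function',
    #     'function': {'name': 'GetWeather',
    #                  'description': 'Get the current weather in a given location',
    #                  'parameters': {...}}}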
    def with_structured_output(
        self,
        schema: Union[Dict, Type[BaseModel]],
        *,
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema as a dict or a Pydantic class. If a
                Pydantic class then the model output will be an object of that
                class. If a dict then the model output will be a dict. With a
                Pydantic class the returned attributes will be validated,
                whereas with a dict they will not be. If `method` is
                "function_calling" and `schema` is a dict, then the dict must
                match the OpenAI function-calling spec.
            include_raw: If False then only the parsed structured output is
                returned. If an error occurs during model output parsing it
                will be raised. If True then both the raw model response
                (a BaseMessage) and the parsed model response will be returned.
                If an error occurs during output parsing it will be caught and
                returned as well. The final output is always a dict with keys
                "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes any ChatModel input and returns as output:

            If include_raw is True then a dict with keys:
                raw: BaseMessage
                parsed: Optional[_DictOrPydantic]
                parsing_error: Optional[BaseException]

            If include_raw is False then just _DictOrPydantic is returned,
            where _DictOrPydantic depends on the schema:

            If schema is a Pydantic class then _DictOrPydantic is the Pydantic
            class.

            If schema is a dict then _DictOrPydantic is a dict.

        Example: Function-calling, Pydantic schema (method="function_calling", include_raw=False):
            .. code-block:: python

                from langchain_community.chat_models import MiniMaxChat
                from langchain_core.pydantic_v1 import BaseModel

                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str

                llm = MiniMaxChat()
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='A pound of bricks and a pound of feathers weigh the same.',
                #     justification='The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'
                # )

        Example: Function-calling, Pydantic schema (method="function_calling", include_raw=True):
            .. code-block:: python

                from langchain_community.chat_models import MiniMaxChat
                from langchain_core.pydantic_v1 import BaseModel

                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str

                llm = MiniMaxChat()
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_function_8953642285', 'type': 'function', 'function': {'name': 'AnswerWithJustification', 'arguments': '{"answer": "A pound of bricks and a pound of feathers weigh the same.", "justification": "The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same."}'}}]}, response_metadata={'token_usage': {'total_tokens': 257}, 'model_name': 'abab6.5-chat', 'finish_reason': 'tool_calls'}, id='run-d897e037-2796-49f5-847e-f9f69dd390db-0', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'A pound of bricks and a pound of feathers weigh the same.', 'justification': 'The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'}, 'id': 'call_function_8953642285', 'type': 'tool_call'}]),
                #     'parsed': AnswerWithJustification(answer='A pound of bricks and a pound of feathers weigh the same.', justification='The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'),
                #     'parsing_error': None
                # }

        Example: Function-calling, dict schema (method="function_calling", include_raw=False):
            .. code-block:: python

                from langchain_community.chat_models import MiniMaxChat
                from langchain_core.pydantic_v1 import BaseModel
                from langchain_core.utils.function_calling import convert_to_openai_tool

                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str

                dict_schema = convert_to_openai_tool(AnswerWithJustification)
                llm = MiniMaxChat()
                structured_llm = llm.with_structured_output(dict_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'A pound of bricks and a pound of feathers both weigh the same, which is a pound.',
                #     'justification': 'The difference is that bricks are much denser than feathers, so a pound of bricks will take up much less space than a pound of feathers.'
                # }

        """  # noqa: E501
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
        llm = self.bind_tools([schema])
        if is_pydantic_schema:
            output_parser: OutputParserLike = PydanticToolsParser(
                tools=[schema],  # type: ignore[list-item]
                first_tool_only=True,
            )
        else:
            key_name = convert_to_openai_tool(schema)["function"]["name"]
            output_parser = JsonOutputKeyToolsParser(
                key_name=key_name, first_tool_only=True
            )
        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser
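The `include_raw=True` branch above is a general LCEL composition pattern worth seeing in isolation. A minimal, self-contained sketch assuming only langchain-core, with `fake_llm` and `fake_parser` as hypothetical stand-ins for the bound model and tool parser:

    from operator import itemgetter

    from langchain_core.runnables import (
        RunnableLambda,
        RunnableMap,
        RunnablePassthrough,
    )

    fake_llm = RunnableLambda(lambda _: "raw model output")  # stands in for `llm`
    fake_parser = RunnableLambda(lambda msg: {"answer": "42"})  # stands in for `output_parser`

    # On success, `parsed` is filled from `raw` and `parsing_error` is None;
    # if parsing raises, the fallback fills `parsed` with None and the
    # exception lands under the "parsing_error" key.
    parser_assign = RunnablePassthrough.assign(
        parsed=itemgetter("raw") | fake_parser, parsing_error=lambda _: None
    )
    parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
    chain = RunnableMap(raw=fake_llm) | parser_assign.with_fallbacks(
        [parser_none], exception_key="parsing_error"
    )
    chain.invoke("anything")
    # -> {'raw': 'raw model output', 'parsed': {'answer': '42'}, 'parsing_error': None}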