Source code for langchain_community.chat_models.oci_generative_ai
import json
import re
import uuid
from abc import ABC, abstractmethod
from operator import itemgetter
from typing import (
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Type,
    Union,
)

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    SystemMessage,
    ToolCall,
    ToolMessage,
)
from langchain_core.messages.tool import ToolCallChunk
from langchain_core.output_parsers import (
    JsonOutputParser,
    PydanticOutputParser,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
    JsonOutputKeyToolsParser,
    PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from pydantic import BaseModel, ConfigDict

from langchain_community.llms.oci_generative_ai import OCIGenAIBase
from langchain_community.llms.utils import enforce_stop_tokens

CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"

JSON_TO_PYTHON_TYPES = {
    "string": "str",
    "number": "float",
    "boolean": "bool",
    "integer": "int",
    "array": "List",
    "object": "Dict",
    "any": "any",
}


def _is_pydantic_class(obj: Any) -> bool:
    return isinstance(obj, type) and issubclass(obj, BaseModel)


def _remove_signature_from_tool_description(name: str, description: str) -> str:
    """
    Removes the `{name}{signature} - ` prefix and the Args: section from a tool
    description. The signature is usually present for tools created with the
    @tool decorator, whereas the Args: section may be present in function doc
    blocks.
    """
    description = re.sub(rf"^{name}\(.*?\) -(?:> \w+? -)? ", "", description)
    description = re.sub(r"(?s)(?:\n?\n\s*?)?Args:.*$", "", description)
    return description


def _format_oci_tool_calls(
    tool_calls: Optional[List[Any]] = None,
) -> List[Dict]:
    """
    Formats an OCI GenAI API response into the tool call format used in LangChain.
    """
    if not tool_calls:
        return []

    formatted_tool_calls = []
    for tool_call in tool_calls:
        formatted_tool_calls.append(
            {
                "id": uuid.uuid4().hex[:],
                "function": {
                    "name": tool_call.name,
                    "arguments": json.dumps(tool_call.parameters),
                },
                "type": "function",
            }
        )
    return formatted_tool_calls


def _convert_oci_tool_call_to_langchain(tool_call: Any) -> ToolCall:
    """Convert an OCI GenAI tool call into langchain_core.messages.ToolCall."""
    _id = uuid.uuid4().hex[:]
    return ToolCall(name=tool_call.name, args=tool_call.parameters, id=_id)
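
# Editor's example (not part of the original module): a minimal sketch of the
# two helpers above. The tool name, description, and arguments are made up.
if __name__ == "__main__":  # illustrative only; never runs on import
    from types import SimpleNamespace

    # The signature prefix and the Args: section are both stripped.
    _demo = _remove_signature_from_tool_description(
        "search",
        "search(query: str) -> str - Search the web.\n\nArgs:\n    query: text.",
    )
    assert _demo == "Search the web."

    # OCI tool-call objects (anything with .name/.parameters) become
    # OpenAI-style dicts with JSON-encoded arguments.
    _calls = _format_oci_tool_calls(
        [SimpleNamespace(name="search", parameters={"query": "cats"})]
    )
    assert _calls[0]["function"] == {
        "name": "search",
        "arguments": '{"query": "cats"}',
    }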
class CohereProvider(Provider):
    # NOTE: the Provider base class and CohereProvider.__init__, which bind the
    # oci.generative_ai_inference model classes used below (oci_chat_request,
    # oci_chat_message, oci_tool, oci_tool_param, oci_tool_result, ...), are
    # not shown in this listing.

    def chat_generation_info(self, response: Any) -> Dict[str, Any]:
        generation_info: Dict[str, Any] = {
            "documents": response.data.chat_response.documents,
            "citations": response.data.chat_response.citations,
            "search_queries": response.data.chat_response.search_queries,
            "is_search_required": response.data.chat_response.is_search_required,
            "finish_reason": response.data.chat_response.finish_reason,
        }
        # Only populate tool_calls when 1) it is present on the response and
        # 2) it has one or more calls.
        if response.data.chat_response.tool_calls:
            generation_info["tool_calls"] = _format_oci_tool_calls(
                response.data.chat_response.tool_calls
            )
        return generation_info
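    # Editor's note (illustration, not in the original module): when present,
    # generation_info["tool_calls"] holds OpenAI-style dicts, e.g.
    #   [{"id": "<32-char hex>", "type": "function",
    #     "function": {"name": "search", "arguments": '{"query": "cats"}'}}]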
    def get_role(self, message: BaseMessage) -> str:
        if isinstance(message, HumanMessage):
            return "USER"
        elif isinstance(message, AIMessage):
            return "CHATBOT"
        elif isinstance(message, SystemMessage):
            return "SYSTEM"
        elif isinstance(message, ToolMessage):
            return "TOOL"
        else:
            raise ValueError(f"Got unknown type {message}")
    def messages_to_oci_params(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> Dict[str, Any]:
        is_force_single_step = kwargs.get("is_force_single_step") or False
        oci_chat_history = []

        for msg in messages[:-1]:
            if self.get_role(msg) == "USER" or self.get_role(msg) == "SYSTEM":
                oci_chat_history.append(
                    self.oci_chat_message[self.get_role(msg)](message=msg.content)
                )
            elif isinstance(msg, AIMessage):
                if msg.tool_calls and is_force_single_step:
                    continue
                tool_calls = (
                    [
                        self.oci_tool_call(name=tc["name"], parameters=tc["args"])
                        for tc in msg.tool_calls
                    ]
                    if msg.tool_calls
                    else None
                )
                msg_content = msg.content if msg.content else " "
                oci_chat_history.append(
                    self.oci_chat_message[self.get_role(msg)](
                        message=msg_content, tool_calls=tool_calls
                    )
                )

        # Get the messages for the current chat turn
        current_chat_turn_messages = []
        for message in messages[::-1]:
            current_chat_turn_messages.append(message)
            if isinstance(message, HumanMessage):
                break
        current_chat_turn_messages = current_chat_turn_messages[::-1]

        oci_tool_results: Union[List[Any], None] = []
        for message in current_chat_turn_messages:
            if isinstance(message, ToolMessage):
                tool_message = message
                previous_ai_msgs = [
                    message
                    for message in current_chat_turn_messages
                    if isinstance(message, AIMessage) and message.tool_calls
                ]
                if previous_ai_msgs:
                    previous_ai_msg = previous_ai_msgs[-1]
                    for lc_tool_call in previous_ai_msg.tool_calls:
                        if lc_tool_call["id"] == tool_message.tool_call_id:
                            tool_result = self.oci_tool_result()
                            tool_result.call = self.oci_tool_call(
                                name=lc_tool_call["name"],
                                parameters=lc_tool_call["args"],
                            )
                            tool_result.outputs = [{"output": tool_message.content}]
                            oci_tool_results.append(tool_result)

        if not oci_tool_results:
            oci_tool_results = None

        message_str = "" if oci_tool_results else messages[-1].content

        oci_params = {
            "message": message_str,
            "chat_history": oci_chat_history,
            "tool_results": oci_tool_results,
            "api_format": self.chat_api_format,
        }

        return {k: v for k, v in oci_params.items() if v is not None}
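    # Editor's note (illustration, not in the original module): given
    #   [HumanMessage("A"), AIMessage("B"), HumanMessage("C")]
    # the method above walks backwards until it hits HumanMessage("C"), so the
    # current chat turn is just [HumanMessage("C")] and its text becomes the
    # "message" parameter, while "A" and "B" land in chat_history. Any
    # ToolMessages after the last HumanMessage become tool_results instead.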
    def convert_to_oci_tool(
        self,
        tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
    ) -> Dict[str, Any]:
        """
        Convert a BaseTool instance, JSON schema dict, or BaseModel type to an OCI tool.
        """
        if isinstance(tool, BaseTool):
            return self.oci_tool(
                name=tool.name,
                description=_remove_signature_from_tool_description(
                    tool.name, tool.description
                ),
                parameter_definitions={
                    p_name: self.oci_tool_param(
                        description=p_def.get("description")
                        if "description" in p_def
                        else "",
                        type=JSON_TO_PYTHON_TYPES.get(
                            p_def.get("type"), p_def.get("type", "any")
                        ),
                        is_required="default" not in p_def,
                    )
                    for p_name, p_def in tool.args.items()
                },
            )
        elif isinstance(tool, dict):
            if not all(k in tool for k in ("title", "description", "properties")):
                raise ValueError(
                    "Unsupported dict type. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type."  # noqa: E501
                )
            return self.oci_tool(
                name=tool.get("title"),
                description=tool.get("description"),
                parameter_definitions={
                    p_name: self.oci_tool_param(
                        description=p_def.get("description"),
                        type=JSON_TO_PYTHON_TYPES.get(
                            p_def.get("type"), p_def.get("type", "any")
                        ),
                        is_required="default" not in p_def,
                    )
                    for p_name, p_def in tool.get("properties", {}).items()
                },
            )
        elif (isinstance(tool, type) and issubclass(tool, BaseModel)) or callable(tool):
            as_json_schema_function = convert_to_openai_function(tool)
            parameters = as_json_schema_function.get("parameters", {})
            properties = parameters.get("properties", {})
            return self.oci_tool(
                name=as_json_schema_function.get("name"),
                description=as_json_schema_function.get(
                    "description",
                    as_json_schema_function.get("name"),
                ),
                parameter_definitions={
                    p_name: self.oci_tool_param(
                        description=p_def.get("description"),
                        type=JSON_TO_PYTHON_TYPES.get(
                            p_def.get("type"), p_def.get("type", "any")
                        ),
                        is_required=p_name in parameters.get("required", []),
                    )
                    for p_name, p_def in properties.items()
                },
            )
        else:
            raise ValueError(
                f"Unsupported tool type {type(tool)}. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type."  # noqa: E501
            )
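
# Editor's example (not part of the original module): the shape of a JSON
# schema dict that CohereProvider.convert_to_oci_tool accepts. "title",
# "description", and "properties" are all required; a property without a
# "default" is marked as required. The tool itself is hypothetical.
if __name__ == "__main__":  # illustrative only; never runs on import
    _weather_tool_schema = {
        "title": "get_weather",
        "description": "Get the current weather for a city.",
        "properties": {
            "city": {"type": "string", "description": "City name"},
            "unit": {"type": "string", "description": "C or F", "default": "C"},
        },
    }
    assert all(
        k in _weather_tool_schema for k in ("title", "description", "properties")
    )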
class MetaProvider(Provider):
    # NOTE: MetaProvider.__init__, which binds the oci.generative_ai_inference
    # model classes used below (oci_chat_message, oci_chat_message_text_content,
    # oci_chat_message_image_content, ...), is not shown in this listing.

    def get_role(self, message: BaseMessage) -> str:
        # Meta only supports alternating user/assistant roles.
        if isinstance(message, HumanMessage):
            return "USER"
        elif isinstance(message, AIMessage):
            return "ASSISTANT"
        elif isinstance(message, SystemMessage):
            return "SYSTEM"
        else:
            raise ValueError(f"Got unknown type {message}")
    def messages_to_oci_params(
        self, messages: List[BaseMessage], **kwargs: Any
    ) -> Dict[str, Any]:
        """Convert LangChain messages to OCI chat parameters.

        Args:
            messages: List of LangChain BaseMessage objects
            **kwargs: Additional keyword arguments

        Returns:
            Dict containing OCI chat parameters

        Raises:
            ValueError: If message content is invalid
        """
        oci_messages = []
        for message in messages:
            content = self._process_message_content(message.content)
            oci_message = self.oci_chat_message[self.get_role(message)](
                content=content
            )
            oci_messages.append(oci_message)

        return {
            "messages": oci_messages,
            "api_format": self.chat_api_format,
            "top_k": -1,
        }
    def _process_message_content(
        self, content: Union[str, List[Union[str, Dict]]]
    ) -> List[Any]:
        """Process message content into OCI chat content format.

        Args:
            content: Message content as string or list

        Returns:
            List of OCI chat content objects

        Raises:
            ValueError: If content format is invalid
        """
        if isinstance(content, str):
            return [self.oci_chat_message_text_content(text=content)]

        if not isinstance(content, list):
            raise ValueError("Message content must be str or list of items")

        processed_content = []
        for item in content:
            if isinstance(item, str):
                processed_content.append(
                    self.oci_chat_message_text_content(text=item)
                )
                continue
            if not isinstance(item, dict):
                raise ValueError(
                    f"Content items must be str or dict, got: {type(item)}"
                )
            if "type" not in item:
                raise ValueError("Dict content item must have a type key")
            if item["type"] == "image_url":
                processed_content.append(
                    self.oci_chat_message_image_content(
                        image_url=self.oci_chat_message_image_url(
                            url=item["image_url"]["url"]
                        )
                    )
                )
            elif item["type"] == "text":
                processed_content.append(
                    self.oci_chat_message_text_content(text=item["text"])
                )
            else:
                raise ValueError(f"Unsupported content type: {item['type']}")
        return processed_content
    def convert_to_oci_tool(
        self,
        tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
    ) -> Dict[str, Any]:
        raise NotImplementedError("Tools not supported for Meta models")
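
# Editor's example (not part of the original module): the two content item
# shapes MetaProvider._process_message_content accepts, matching the common
# LangChain multimodal message format. The image URL is a placeholder.
if __name__ == "__main__":  # illustrative only; never runs on import
    _multimodal_message = HumanMessage(
        content=[
            {"type": "text", "text": "Describe this image."},
            {
                "type": "image_url",
                "image_url": {"url": "https://example.com/cat.png"},
            },
        ]
    )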
class ChatOCIGenAI(BaseChatModel, OCIGenAIBase):
    """ChatOCIGenAI chat model integration.

    Setup:
      Install ``langchain-community`` and the ``oci`` sdk.

      .. code-block:: bash

          pip install -U langchain-community oci

    Key init args — completion params:
        model_id: str
            Id of the OCIGenAI chat model to use, e.g., cohere.command-r-16k.
        is_stream: bool
            Whether to stream back partial progress.
        model_kwargs: Optional[Dict]
            Keyword arguments to pass to the specific model used, e.g., temperature, max_tokens.

    Key init args — client params:
        service_endpoint: str
            The endpoint URL for the OCIGenAI service, e.g., https://inference.generativeai.us-chicago-1.oci.oraclecloud.com.
        compartment_id: str
            The compartment OCID.
        auth_type: str
            The authentication type to use, e.g., API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL.
        auth_profile: Optional[str]
            The name of the profile in ~/.oci/config. If not specified, DEFAULT will be used.
        auth_file_location: Optional[str]
            Path to the config file. If not specified, ~/.oci/config will be used.
        provider: str
            Provider name of the model. Defaults to None; it is derived from the model_id when possible, otherwise it must be supplied by the user.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import ChatOCIGenAI

            chat = ChatOCIGenAI(
                model_id="cohere.command-r-16k",
                service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
                compartment_id="MY_OCID",
                model_kwargs={"temperature": 0.7, "max_tokens": 500},
            )

    Invoke:
        .. code-block:: python

            messages = [
                SystemMessage(content="You are an AI assistant."),
                AIMessage(content="Hi there human!"),
                HumanMessage(content="Tell me a joke."),
            ]
            response = chat.invoke(messages)

    Stream:
        .. code-block:: python

            for r in chat.stream(messages):
                print(r.content, end="", flush=True)

    Response metadata:
        .. code-block:: python

            response = chat.invoke(messages)
            print(response.response_metadata)

    """  # noqa: E501

    model_config = ConfigDict(
        extra="forbid",
        arbitrary_types_allowed=True,
    )

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "oci_generative_ai_chat"

    @property
    def _provider_map(self) -> Mapping[str, Any]:
        """Get the provider map."""
        return {
            "cohere": CohereProvider(),
            "meta": MetaProvider(),
        }

    @property
    def _provider(self) -> Any:
        """Get the internal provider object."""
        return self._get_provider(provider_map=self._provider_map)

    def _prepare_request(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]],
        stream: bool,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        try:
            from oci.generative_ai_inference import models
        except ImportError as ex:
            raise ModuleNotFoundError(
                "Could not import oci python package. "
                "Please make sure you have the oci package installed."
            ) from ex
        oci_params = self._provider.messages_to_oci_params(messages, **kwargs)
        oci_params["is_stream"] = stream
        _model_kwargs = self.model_kwargs or {}

        if stop is not None:
            _model_kwargs[self._provider.stop_sequence_key] = stop

        chat_params = {**_model_kwargs, **kwargs, **oci_params}

        if not self.model_id:
            raise ValueError("Model ID is required to chat")

        if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
            serving_mode = models.DedicatedServingMode(endpoint_id=self.model_id)
        else:
            serving_mode = models.OnDemandServingMode(model_id=self.model_id)

        request = models.ChatDetails(
            compartment_id=self.compartment_id,
            serving_mode=serving_mode,
            chat_request=self._provider.oci_chat_request(**chat_params),
        )

        return request
    def with_structured_output(
        self,
        schema: Optional[Union[Dict, Type[BaseModel]]] = None,
        *,
        method: Literal["function_calling", "json_mode"] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema as a dict or a Pydantic class. If a Pydantic
                class then the model output will be an object of that class. If a dict
                then the model output will be a dict. With a Pydantic class the returned
                attributes will be validated, whereas with a dict they will not be. If
                `method` is "function_calling" and `schema` is a dict, then the dict
                must match the OCI Generative AI function-calling spec.
            method: The method for steering model generation, either "function_calling"
                or "json_mode". If "function_calling" then the schema will be converted
                to an OCI function and the returned model will make use of the
                function-calling API. If "json_mode" then Cohere's JSON mode will be
                used. Note that if using "json_mode" then you must include instructions
                for formatting the output into the desired schema into the model call.
            include_raw: If False then only the parsed structured output is returned.
                If an error occurs during model output parsing it will be raised. If
                True then both the raw model response (a BaseMessage) and the parsed
                model response will be returned. If an error occurs during output
                parsing it will be caught and returned as well. The final output is
                always a dict with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes any ChatModel input and returns as output:

                If include_raw is True then a dict with keys:
                    raw: BaseMessage
                    parsed: Optional[_DictOrPydantic]
                    parsing_error: Optional[BaseException]

                If include_raw is False then just _DictOrPydantic is returned,
                where _DictOrPydantic depends on the schema:

                    If schema is a Pydantic class then _DictOrPydantic is the Pydantic
                        class.

                    If schema is a dict then _DictOrPydantic is a dict.

        """  # noqa: E501
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is 'function_calling'. "
                    "Received None."
                )
            llm = self.bind_tools([schema], **kwargs)
            tool_name = getattr(self._provider.convert_to_oci_tool(schema), "name")
            if is_pydantic_schema:
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[schema],  # type: ignore[list-item]
                    first_tool_only=True,
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self.bind(response_format={"type": "json_object"})
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[type-var, arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        else:
            raise ValueError(
                f"Unrecognized method argument. "
                f"Expected `function_calling` or `json_mode`. "
                f"Received: `{method}`."
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser
    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Call out to an OCIGenAI chat model.

        Args:
            messages: list of LangChain messages
            stop: Optional list of stop words to use.

        Returns:
            LangChain ChatResult

        Example:
            .. code-block:: python

               messages = [
                   HumanMessage(content="hello!"),
                   AIMessage(content="Hi there human!"),
                   HumanMessage(content="Meow!"),
               ]

               response = llm.invoke(messages)
        """
        if self.is_stream:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)

        request = self._prepare_request(messages, stop=stop, stream=False, **kwargs)
        response = self.client.chat(request)

        content = self._provider.chat_response_to_text(response)

        if stop is not None:
            content = enforce_stop_tokens(content, stop)

        generation_info = self._provider.chat_generation_info(response)

        llm_output = {
            "model_id": response.data.model_id,
            "model_version": response.data.model_version,
            "request_id": response.request_id,
            "content-length": response.headers["content-length"],
        }

        if "tool_calls" in generation_info:
            tool_calls = [
                _convert_oci_tool_call_to_langchain(tool_call)
                for tool_call in response.data.chat_response.tool_calls
            ]
        else:
            tool_calls = []

        message = AIMessage(
            content=content,
            additional_kwargs=generation_info,
            tool_calls=tool_calls,
        )
        return ChatResult(
            generations=[
                ChatGeneration(message=message, generation_info=generation_info)
            ],
            llm_output=llm_output,
        )

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        request = self._prepare_request(messages, stop=stop, stream=True, **kwargs)
        response = self.client.chat(request)

        for event in response.data.events():
            event_data = json.loads(event.data)

            if not self._provider.is_chat_stream_end(event_data):  # still streaming
                delta = self._provider.chat_stream_to_text(event_data)
                chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
                if run_manager:
                    run_manager.on_llm_new_token(delta, chunk=chunk)
                yield chunk
            else:  # stream end
                generation_info = self._provider.chat_stream_generation_info(
                    event_data
                )
                tool_call_chunks = []
                if tool_calls := generation_info.get("tool_calls"):
                    content = self._provider.chat_stream_to_text(event_data)
                    try:
                        tool_call_chunks = [
                            ToolCallChunk(
                                name=tool_call["function"].get("name"),
                                args=tool_call["function"].get("arguments"),
                                id=tool_call.get("id"),
                                index=tool_call.get("index"),
                            )
                            for tool_call in tool_calls
                        ]
                    except KeyError:
                        pass
                else:
                    content = ""
                message = AIMessageChunk(
                    content=content,
                    additional_kwargs=generation_info,
                    tool_call_chunks=tool_call_chunks,
                )
                yield ChatGenerationChunk(
                    message=message,
                    generation_info=generation_info,
                )
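
# Editor's usage sketch (not part of the original module): structured output
# via with_structured_output with a Pydantic schema. The endpoint, compartment
# OCID, and model id are placeholders; running this requires valid OCI
# credentials and network access.
if __name__ == "__main__":  # illustrative only; never runs on import
    from pydantic import Field

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline of the joke")

    chat = ChatOCIGenAI(
        model_id="cohere.command-r-16k",
        service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
        compartment_id="MY_OCID",
    )
    structured_llm = chat.with_structured_output(Joke)
    print(structured_llm.invoke("Tell me a joke about cats"))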