from__future__importannotationsimportwarningsfromimportlibimportutilfromtypingimport(Any,AsyncIterator,Callable,Dict,Iterator,List,Literal,Optional,Sequence,Tuple,Type,Union,cast,overload,)fromlangchain_core.language_modelsimport(BaseChatModel,LanguageModelInput,SimpleChatModel,)fromlangchain_core.language_models.chat_modelsimport(agenerate_from_stream,generate_from_stream,)fromlangchain_core.messagesimportAnyMessage,BaseMessagefromlangchain_core.runnablesimportRunnable,RunnableConfig,ensure_configfromlangchain_core.runnables.schemaimportStreamEventfromlangchain_core.toolsimportBaseToolfromlangchain_core.tracersimportRunLog,RunLogPatchfrompydanticimportBaseModelfromtyping_extensionsimportTypeAlias__all__=["init_chat_model",# For backwards compatibility"BaseChatModel","SimpleChatModel","generate_from_stream","agenerate_from_stream",]@overloaddefinit_chat_model(# type: ignore[overload-overlap]model:str,*,model_provider:Optional[str]=None,configurable_fields:Literal[None]=None,config_prefix:Optional[str]=None,**kwargs:Any,)->BaseChatModel:...@overloaddefinit_chat_model(model:Literal[None]=None,*,model_provider:Optional[str]=None,configurable_fields:Literal[None]=None,config_prefix:Optional[str]=None,**kwargs:Any,)->_ConfigurableModel:...@overloaddefinit_chat_model(model:Optional[str]=None,*,model_provider:Optional[str]=None,configurable_fields:Union[Literal["any"],List[str],Tuple[str,...]]=...,config_prefix:Optional[str]=None,**kwargs:Any,)->_ConfigurableModel:...# FOR CONTRIBUTORS: If adding support for a new provider, please append the provider# name to the supported list in the docstring below. Do *not* change the order of the# existing providers.
[docs]definit_chat_model(model:Optional[str]=None,*,model_provider:Optional[str]=None,configurable_fields:Optional[Union[Literal["any"],List[str],Tuple[str,...]]]=None,config_prefix:Optional[str]=None,**kwargs:Any,)->Union[BaseChatModel,_ConfigurableModel]:"""Initialize a ChatModel from the model name and provider. **Note:** Must have the integration package corresponding to the model provider installed. Args: model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can also specify model and model provider in a single argument using '{model_provider}:{model}' format, e.g. "openai:o1". model_provider: The model provider if not specified as part of model arg (see above). Supported model_provider values and the corresponding integration package are: - 'openai' -> langchain-openai - 'anthropic' -> langchain-anthropic - 'azure_openai' -> langchain-openai - 'azure_ai' -> langchain-azure-ai - 'google_vertexai' -> langchain-google-vertexai - 'google_genai' -> langchain-google-genai - 'bedrock' -> langchain-aws - 'bedrock_converse' -> langchain-aws - 'cohere' -> langchain-cohere - 'fireworks' -> langchain-fireworks - 'together' -> langchain-together - 'mistralai' -> langchain-mistralai - 'huggingface' -> langchain-huggingface - 'groq' -> langchain-groq - 'ollama' -> langchain-ollama - 'google_anthropic_vertex' -> langchain-google-vertexai - 'deepseek' -> langchain-deepseek - 'ibm' -> langchain-ibm - 'nvidia' -> langchain-nvidia-ai-endpoints - 'xai' -> langchain-xai Will attempt to infer model_provider from model if not specified. The following providers will be inferred based on these model prefixes: - 'gpt-3...' | 'gpt-4...' | 'o1...' -> 'openai' - 'claude...' -> 'anthropic' - 'amazon....' -> 'bedrock' - 'gemini...' -> 'google_vertexai' - 'command...' -> 'cohere' - 'accounts/fireworks...' -> 'fireworks' - 'mistral...' -> 'mistralai' - 'deepseek...' -> 'deepseek' - 'grok...' -> 'xai' configurable_fields: Which model parameters are configurable: - None: No configurable fields. - "any": All fields are configurable. *See Security Note below.* - Union[List[str], Tuple[str, ...]]: Specified fields are configurable. Fields are assumed to have config_prefix stripped if there is a config_prefix. If model is specified, then defaults to None. If model is not specified, then defaults to ``("model", "model_provider")``. ***Security Note***: Setting ``configurable_fields="any"`` means fields like api_key, base_url, etc. can be altered at runtime, potentially redirecting model requests to a different service/user. Make sure that if you're accepting untrusted configurations that you enumerate the ``configurable_fields=(...)`` explicitly. config_prefix: If config_prefix is a non-empty string then model will be configurable at runtime via the ``config["configurable"]["{config_prefix}_{param}"]`` keys. If config_prefix is an empty string then model will be configurable via ``config["configurable"]["{param}"]``. temperature: Model temperature. max_tokens: Max output tokens. timeout: The maximum time (in seconds) to wait for a response from the model before canceling the request. max_retries: The maximum number of attempts the system will make to resend a request if it fails due to issues like network timeouts or rate limits. base_url: The URL of the API endpoint where requests are sent. rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding rate limits. kwargs: Additional model-specific keyword args to pass to ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``. Returns: A BaseChatModel corresponding to the model_name and model_provider specified if configurability is inferred to be False. If configurable, a chat model emulator that initializes the underlying model at runtime once a config is passed in. Raises: ValueError: If model_provider cannot be inferred or isn't supported. ImportError: If the model provider integration package is not installed. .. dropdown:: Init non-configurable model :open: .. code-block:: python # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai from langchain.chat_models import init_chat_model o3_mini = init_chat_model("openai:o3-mini", temperature=0) claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0) gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0) o3_mini.invoke("what's your name") claude_sonnet.invoke("what's your name") gemini_2_flash.invoke("what's your name") .. dropdown:: Partially configurable model with no default .. code-block:: python # pip install langchain langchain-openai langchain-anthropic from langchain.chat_models import init_chat_model # We don't need to specify configurable=True if a model isn't specified. configurable_model = init_chat_model(temperature=0) configurable_model.invoke( "what's your name", config={"configurable": {"model": "gpt-4o"}} ) # GPT-4o response configurable_model.invoke( "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}} ) # claude-3.5 sonnet response .. dropdown:: Fully configurable model with a default .. code-block:: python # pip install langchain langchain-openai langchain-anthropic from langchain.chat_models import init_chat_model configurable_model_with_default = init_chat_model( "openai:gpt-4o", configurable_fields="any", # this allows us to configure other params like temperature, max_tokens, etc at runtime. config_prefix="foo", temperature=0 ) configurable_model_with_default.invoke("what's your name") # GPT-4o response with temperature 0 configurable_model_with_default.invoke( "what's your name", config={ "configurable": { "foo_model": "anthropic:claude-3-5-sonnet-20240620", "foo_temperature": 0.6 } } ) # Claude-3.5 sonnet response with temperature 0.6 .. dropdown:: Bind tools to a configurable model You can call any ChatModel declarative methods on a configurable model in the same way that you would with a normal model. .. code-block:: python # pip install langchain langchain-openai langchain-anthropic from langchain.chat_models import init_chat_model from pydantic import BaseModel, Field class GetWeather(BaseModel): '''Get the current weather in a given location''' location: str = Field(..., description="The city and state, e.g. San Francisco, CA") class GetPopulation(BaseModel): '''Get the current population in a given location''' location: str = Field(..., description="The city and state, e.g. San Francisco, CA") configurable_model = init_chat_model( "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0 ) configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation]) configurable_model_with_tools.invoke( "Which city is hotter today and which is bigger: LA or NY?" ) # GPT-4o response with tool calls configurable_model_with_tools.invoke( "Which city is hotter today and which is bigger: LA or NY?", config={"configurable": {"model": "claude-3-5-sonnet-20240620"}} ) # Claude-3.5 sonnet response with tools .. versionadded:: 0.2.7 .. versionchanged:: 0.2.8 Support for ``configurable_fields`` and ``config_prefix`` added. .. versionchanged:: 0.2.12 Support for Ollama via langchain-ollama package added (langchain_ollama.ChatOllama). Previously, the now-deprecated langchain-community version of Ollama was imported (langchain_community.chat_models.ChatOllama). Support for AWS Bedrock models via the Converse API added (model_provider="bedrock_converse"). .. versionchanged:: 0.3.5 Out of beta. .. versionchanged:: 0.3.19 Support for Deepseek, IBM, Nvidia, and xAI models added. """# noqa: E501ifnotmodelandnotconfigurable_fields:configurable_fields=("model","model_provider")config_prefix=config_prefixor""ifconfig_prefixandnotconfigurable_fields:warnings.warn(f"{config_prefix=} has been set but no fields are configurable. Set "f"`configurable_fields=(...)` to specify the model params that are "f"configurable.")ifnotconfigurable_fields:return_init_chat_model_helper(cast(str,model),model_provider=model_provider,**kwargs)else:ifmodel:kwargs["model"]=modelifmodel_provider:kwargs["model_provider"]=model_providerreturn_ConfigurableModel(default_config=kwargs,config_prefix=config_prefix,configurable_fields=configurable_fields,)
def_init_chat_model_helper(model:str,*,model_provider:Optional[str]=None,**kwargs:Any)->BaseChatModel:model,model_provider=_parse_model(model,model_provider)ifmodel_provider=="openai":_check_pkg("langchain_openai")fromlangchain_openaiimportChatOpenAIreturnChatOpenAI(model=model,**kwargs)elifmodel_provider=="anthropic":_check_pkg("langchain_anthropic")fromlangchain_anthropicimportChatAnthropicreturnChatAnthropic(model=model,**kwargs)# type: ignore[call-arg]elifmodel_provider=="azure_openai":_check_pkg("langchain_openai")fromlangchain_openaiimportAzureChatOpenAIreturnAzureChatOpenAI(model=model,**kwargs)elifmodel_provider=="azure_ai":_check_pkg("langchain_azure_ai")fromlangchain_azure_ai.chat_modelsimportAzureAIChatCompletionsModelreturnAzureAIChatCompletionsModel(model=model,**kwargs)elifmodel_provider=="cohere":_check_pkg("langchain_cohere")fromlangchain_cohereimportChatCoherereturnChatCohere(model=model,**kwargs)elifmodel_provider=="google_vertexai":_check_pkg("langchain_google_vertexai")fromlangchain_google_vertexaiimportChatVertexAIreturnChatVertexAI(model=model,**kwargs)elifmodel_provider=="google_genai":_check_pkg("langchain_google_genai")fromlangchain_google_genaiimportChatGoogleGenerativeAIreturnChatGoogleGenerativeAI(model=model,**kwargs)elifmodel_provider=="fireworks":_check_pkg("langchain_fireworks")fromlangchain_fireworksimportChatFireworksreturnChatFireworks(model=model,**kwargs)elifmodel_provider=="ollama":try:_check_pkg("langchain_ollama")fromlangchain_ollamaimportChatOllamaexceptImportError:# For backwards compatibilitytry:_check_pkg("langchain_community")fromlangchain_community.chat_modelsimportChatOllamaexceptImportError:# If both langchain-ollama and langchain-community aren't available,# raise an error related to langchain-ollama_check_pkg("langchain_ollama")returnChatOllama(model=model,**kwargs)elifmodel_provider=="together":_check_pkg("langchain_together")fromlangchain_togetherimportChatTogetherreturnChatTogether(model=model,**kwargs)elifmodel_provider=="mistralai":_check_pkg("langchain_mistralai")fromlangchain_mistralaiimportChatMistralAIreturnChatMistralAI(model=model,**kwargs)# type: ignore[call-arg]elifmodel_provider=="huggingface":_check_pkg("langchain_huggingface")fromlangchain_huggingfaceimportChatHuggingFacereturnChatHuggingFace(model_id=model,**kwargs)elifmodel_provider=="groq":_check_pkg("langchain_groq")fromlangchain_groqimportChatGroqreturnChatGroq(model=model,**kwargs)elifmodel_provider=="bedrock":_check_pkg("langchain_aws")fromlangchain_awsimportChatBedrock# TODO: update to use model= once ChatBedrock supportsreturnChatBedrock(model_id=model,**kwargs)elifmodel_provider=="bedrock_converse":_check_pkg("langchain_aws")fromlangchain_awsimportChatBedrockConversereturnChatBedrockConverse(model=model,**kwargs)elifmodel_provider=="google_anthropic_vertex":_check_pkg("langchain_google_vertexai")fromlangchain_google_vertexai.model_gardenimportChatAnthropicVertexreturnChatAnthropicVertex(model=model,**kwargs)elifmodel_provider=="deepseek":_check_pkg("langchain_deepseek",pkg_kebab="langchain-deepseek")fromlangchain_deepseekimportChatDeepSeekreturnChatDeepSeek(model=model,**kwargs)elifmodel_provider=="nvidia":_check_pkg("langchain_nvidia_ai_endpoints")fromlangchain_nvidia_ai_endpointsimportChatNVIDIAreturnChatNVIDIA(model=model,**kwargs)elifmodel_provider=="ibm":_check_pkg("langchain_ibm")fromlangchain_ibmimportChatWatsonxreturnChatWatsonx(model_id=model,**kwargs)elifmodel_provider=="xai":_check_pkg("langchain_xai")fromlangchain_xaiimportChatXAIreturnChatXAI(model=model,**kwargs)else:supported=", ".join(_SUPPORTED_PROVIDERS)raiseValueError(f"Unsupported {model_provider=}.\n\nSupported model providers are: "f"{supported}")_SUPPORTED_PROVIDERS={"openai","anthropic","azure_openai","azure_ai","cohere","google_vertexai","google_genai","fireworks","ollama","together","mistralai","huggingface","groq","bedrock","bedrock_converse","google_anthropic_vertex","deepseek","ibm","xai",}def_attempt_infer_model_provider(model_name:str)->Optional[str]:ifany(model_name.startswith(pre)forprein("gpt-3","gpt-4","o1","o3")):return"openai"elifmodel_name.startswith("claude"):return"anthropic"elifmodel_name.startswith("command"):return"cohere"elifmodel_name.startswith("accounts/fireworks"):return"fireworks"elifmodel_name.startswith("gemini"):return"google_vertexai"elifmodel_name.startswith("amazon."):return"bedrock"elifmodel_name.startswith("mistral"):return"mistralai"elifmodel_name.startswith("deepseek"):return"deepseek"elifmodel_name.startswith("grok"):return"xai"else:returnNonedef_parse_model(model:str,model_provider:Optional[str])->Tuple[str,str]:if(notmodel_providerand":"inmodelandmodel.split(":")[0]in_SUPPORTED_PROVIDERS):model_provider=model.split(":")[0]model=":".join(model.split(":")[1:])model_provider=model_provideror_attempt_infer_model_provider(model)ifnotmodel_provider:raiseValueError(f"Unable to infer model provider for {model=}, please specify "f"model_provider directly.")model_provider=model_provider.replace("-","_").lower()returnmodel,model_providerdef_check_pkg(pkg:str,*,pkg_kebab:Optional[str]=None)->None:ifnotutil.find_spec(pkg):pkg_kebab=pkg_kebabifpkg_kebabisnotNoneelsepkg.replace("_","-")raiseImportError(f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`")def_remove_prefix(s:str,prefix:str)->str:ifs.startswith(prefix):s=s[len(prefix):]returns_DECLARATIVE_METHODS=("bind_tools","with_structured_output")class_ConfigurableModel(Runnable[LanguageModelInput,Any]):def__init__(self,*,default_config:Optional[dict]=None,configurable_fields:Union[Literal["any"],List[str],Tuple[str,...]]="any",config_prefix:str="",queued_declarative_operations:Sequence[Tuple[str,Tuple,Dict]]=(),)->None:self._default_config:dict=default_configor{}self._configurable_fields:Union[Literal["any"],List[str]]=(configurable_fieldsifconfigurable_fields=="any"elselist(configurable_fields))self._config_prefix=(config_prefix+"_"ifconfig_prefixandnotconfig_prefix.endswith("_")elseconfig_prefix)self._queued_declarative_operations:List[Tuple[str,Tuple,Dict]]=list(queued_declarative_operations)def__getattr__(self,name:str)->Any:ifnamein_DECLARATIVE_METHODS:# Declarative operations that cannot be applied until after an actual model# object is instantiated. So instead of returning the actual operation,# we record the operation and its arguments in a queue. This queue is# then applied in order whenever we actually instantiate the model (in# self._model()).defqueue(*args:Any,**kwargs:Any)->_ConfigurableModel:queued_declarative_operations=list(self._queued_declarative_operations)queued_declarative_operations.append((name,args,kwargs))return_ConfigurableModel(default_config=dict(self._default_config),configurable_fields=list(self._configurable_fields)ifisinstance(self._configurable_fields,list)elseself._configurable_fields,config_prefix=self._config_prefix,queued_declarative_operations=queued_declarative_operations,)returnqueueelifself._default_configand(model:=self._model())andhasattr(model,name):returngetattr(model,name)else:msg=f"{name} is not a BaseChatModel attribute"ifself._default_config:msg+=" and is not implemented on the default model"msg+="."raiseAttributeError(msg)def_model(self,config:Optional[RunnableConfig]=None)->Runnable:params={**self._default_config,**self._model_params(config)}model=_init_chat_model_helper(**params)forname,args,kwargsinself._queued_declarative_operations:model=getattr(model,name)(*args,**kwargs)returnmodeldef_model_params(self,config:Optional[RunnableConfig])->dict:config=ensure_config(config)model_params={_remove_prefix(k,self._config_prefix):vfork,vinconfig.get("configurable",{}).items()ifk.startswith(self._config_prefix)}ifself._configurable_fields!="any":model_params={k:vfork,vinmodel_params.items()ifkinself._configurable_fields}returnmodel_paramsdefwith_config(self,config:Optional[RunnableConfig]=None,**kwargs:Any,)->_ConfigurableModel:"""Bind config to a Runnable, returning a new Runnable."""config=RunnableConfig(**(configor{}),**cast(RunnableConfig,kwargs))model_params=self._model_params(config)remaining_config={k:vfork,vinconfig.items()ifk!="configurable"}remaining_config["configurable"]={k:vfork,vinconfig.get("configurable",{}).items()if_remove_prefix(k,self._config_prefix)notinmodel_params}queued_declarative_operations=list(self._queued_declarative_operations)ifremaining_config:queued_declarative_operations.append(("with_config",(),{"config":remaining_config},))return_ConfigurableModel(default_config={**self._default_config,**model_params},configurable_fields=list(self._configurable_fields)ifisinstance(self._configurable_fields,list)elseself._configurable_fields,config_prefix=self._config_prefix,queued_declarative_operations=queued_declarative_operations,)@propertydefInputType(self)->TypeAlias:"""Get the input type for this runnable."""fromlangchain_core.prompt_valuesimport(ChatPromptValueConcrete,StringPromptValue,)# This is a version of LanguageModelInput which replaces the abstract# base class BaseMessage with a union of its subclasses, which makes# for a much better schema.returnUnion[str,Union[StringPromptValue,ChatPromptValueConcrete],List[AnyMessage],]definvoke(self,input:LanguageModelInput,config:Optional[RunnableConfig]=None,**kwargs:Any,)->Any:returnself._model(config).invoke(input,config=config,**kwargs)asyncdefainvoke(self,input:LanguageModelInput,config:Optional[RunnableConfig]=None,**kwargs:Any,)->Any:returnawaitself._model(config).ainvoke(input,config=config,**kwargs)defstream(self,input:LanguageModelInput,config:Optional[RunnableConfig]=None,**kwargs:Optional[Any],)->Iterator[Any]:yield fromself._model(config).stream(input,config=config,**kwargs)asyncdefastream(self,input:LanguageModelInput,config:Optional[RunnableConfig]=None,**kwargs:Optional[Any],)->AsyncIterator[Any]:asyncforxinself._model(config).astream(input,config=config,**kwargs):yieldxdefbatch(self,inputs:List[LanguageModelInput],config:Optional[Union[RunnableConfig,List[RunnableConfig]]]=None,*,return_exceptions:bool=False,**kwargs:Optional[Any],)->List[Any]:config=configorNone# If <= 1 config use the underlying models batch implementation.ifconfigisNoneorisinstance(config,dict)orlen(config)<=1:ifisinstance(config,list):config=config[0]returnself._model(config).batch(inputs,config=config,return_exceptions=return_exceptions,**kwargs)# If multiple configs default to Runnable.batch which uses executor to invoke# in parallel.else:returnsuper().batch(inputs,config=config,return_exceptions=return_exceptions,**kwargs)asyncdefabatch(self,inputs:List[LanguageModelInput],config:Optional[Union[RunnableConfig,List[RunnableConfig]]]=None,*,return_exceptions:bool=False,**kwargs:Optional[Any],)->List[Any]:config=configorNone# If <= 1 config use the underlying models batch implementation.ifconfigisNoneorisinstance(config,dict)orlen(config)<=1:ifisinstance(config,list):config=config[0]returnawaitself._model(config).abatch(inputs,config=config,return_exceptions=return_exceptions,**kwargs)# If multiple configs default to Runnable.batch which uses executor to invoke# in parallel.else:returnawaitsuper().abatch(inputs,config=config,return_exceptions=return_exceptions,**kwargs)defbatch_as_completed(self,inputs:Sequence[LanguageModelInput],config:Optional[Union[RunnableConfig,Sequence[RunnableConfig]]]=None,*,return_exceptions:bool=False,**kwargs:Any,)->Iterator[Tuple[int,Union[Any,Exception]]]:config=configorNone# If <= 1 config use the underlying models batch implementation.ifconfigisNoneorisinstance(config,dict)orlen(config)<=1:ifisinstance(config,list):config=config[0]yield fromself._model(cast(RunnableConfig,config)).batch_as_completed(# type: ignore[call-overload]inputs,config=config,return_exceptions=return_exceptions,**kwargs)# If multiple configs default to Runnable.batch which uses executor to invoke# in parallel.else:yield fromsuper().batch_as_completed(# type: ignore[call-overload]inputs,config=config,return_exceptions=return_exceptions,**kwargs)asyncdefabatch_as_completed(self,inputs:Sequence[LanguageModelInput],config:Optional[Union[RunnableConfig,Sequence[RunnableConfig]]]=None,*,return_exceptions:bool=False,**kwargs:Any,)->AsyncIterator[Tuple[int,Any]]:config=configorNone# If <= 1 config use the underlying models batch implementation.ifconfigisNoneorisinstance(config,dict)orlen(config)<=1:ifisinstance(config,list):config=config[0]asyncforxinself._model(cast(RunnableConfig,config)).abatch_as_completed(# type: ignore[call-overload]inputs,config=config,return_exceptions=return_exceptions,**kwargs):yieldx# If multiple configs default to Runnable.batch which uses executor to invoke# in parallel.else:asyncforxinsuper().abatch_as_completed(# type: ignore[call-overload]inputs,config=config,return_exceptions=return_exceptions,**kwargs):yieldxdeftransform(self,input:Iterator[LanguageModelInput],config:Optional[RunnableConfig]=None,**kwargs:Optional[Any],)->Iterator[Any]:forxinself._model(config).transform(input,config=config,**kwargs):yieldxasyncdefatransform(self,input:AsyncIterator[LanguageModelInput],config:Optional[RunnableConfig]=None,**kwargs:Optional[Any],)->AsyncIterator[Any]:asyncforxinself._model(config).atransform(input,config=config,**kwargs):yieldx@overloaddefastream_log(self,input:Any,config:Optional[RunnableConfig]=None,*,diff:Literal[True]=True,with_streamed_output_list:bool=True,include_names:Optional[Sequence[str]]=None,include_types:Optional[Sequence[str]]=None,include_tags:Optional[Sequence[str]]=None,exclude_names:Optional[Sequence[str]]=None,exclude_types:Optional[Sequence[str]]=None,exclude_tags:Optional[Sequence[str]]=None,**kwargs:Any,)->AsyncIterator[RunLogPatch]:...@overloaddefastream_log(self,input:Any,config:Optional[RunnableConfig]=None,*,diff:Literal[False],with_streamed_output_list:bool=True,include_names:Optional[Sequence[str]]=None,include_types:Optional[Sequence[str]]=None,include_tags:Optional[Sequence[str]]=None,exclude_names:Optional[Sequence[str]]=None,exclude_types:Optional[Sequence[str]]=None,exclude_tags:Optional[Sequence[str]]=None,**kwargs:Any,)->AsyncIterator[RunLog]:...asyncdefastream_log(self,input:Any,config:Optional[RunnableConfig]=None,*,diff:bool=True,with_streamed_output_list:bool=True,include_names:Optional[Sequence[str]]=None,include_types:Optional[Sequence[str]]=None,include_tags:Optional[Sequence[str]]=None,exclude_names:Optional[Sequence[str]]=None,exclude_types:Optional[Sequence[str]]=None,exclude_tags:Optional[Sequence[str]]=None,**kwargs:Any,)->Union[AsyncIterator[RunLogPatch],AsyncIterator[RunLog]]:asyncforxinself._model(config).astream_log(# type: ignore[call-overload, misc]input,config=config,diff=diff,with_streamed_output_list=with_streamed_output_list,include_names=include_names,include_types=include_types,include_tags=include_tags,exclude_tags=exclude_tags,exclude_types=exclude_types,exclude_names=exclude_names,**kwargs,):yieldxasyncdefastream_events(self,input:Any,config:Optional[RunnableConfig]=None,*,version:Literal["v1","v2"]="v2",include_names:Optional[Sequence[str]]=None,include_types:Optional[Sequence[str]]=None,include_tags:Optional[Sequence[str]]=None,exclude_names:Optional[Sequence[str]]=None,exclude_types:Optional[Sequence[str]]=None,exclude_tags:Optional[Sequence[str]]=None,**kwargs:Any,)->AsyncIterator[StreamEvent]:asyncforxinself._model(config).astream_events(input,config=config,version=version,include_names=include_names,include_types=include_types,include_tags=include_tags,exclude_tags=exclude_tags,exclude_types=exclude_types,exclude_names=exclude_names,**kwargs,):yieldx# Explicitly added to satisfy downstream linters.defbind_tools(self,tools:Sequence[Union[Dict[str,Any],Type[BaseModel],Callable,BaseTool]],**kwargs:Any,)->Runnable[LanguageModelInput,BaseMessage]:returnself.__getattr__("bind_tools")(tools,**kwargs)# Explicitly added to satisfy downstream linters.defwith_structured_output(self,schema:Union[Dict,Type[BaseModel]],**kwargs:Any)->Runnable[LanguageModelInput,Union[Dict,BaseModel]]:returnself.__getattr__("with_structured_output")(schema,**kwargs)