"""Global values and configuration that apply to all of LangChain."""

import warnings
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Imported only for type annotations to avoid an import cycle at runtime.
    from langchain_core.caches import BaseCache

# DO NOT USE THESE VALUES DIRECTLY!
# Use them only via `get_<X>()` and `set_<X>()` below,
# or else your code may behave unexpectedly with other uses of these global settings:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
_verbose: bool = False
_debug: bool = False
_llm_cache: Optional["BaseCache"] = None
def set_verbose(value: bool) -> None:
    """Set a new value for the `verbose` global setting."""
    global _verbose

    import langchain

    # Assigning to the deprecated `langchain.verbose` attribute goes through
    # the module-level `__getattr__` workaround and would emit a deprecation
    # warning. The caller used the supported API, so silence it here.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=(
                "Importing verbose from langchain root module is no longer supported"
            ),
        )
        # Mirror the value onto the legacy attribute so code that still reads
        # `langchain.verbose` observes the update. Remove once that attribute
        # is no longer supported and all users call `set_verbose()`.
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        langchain.verbose = value

    _verbose = value
def get_verbose() -> bool:
    """Get the value of the `verbose` global setting."""
    import langchain

    # Reading the deprecated `langchain.verbose` attribute goes through the
    # module-level `__getattr__` workaround and would emit a deprecation
    # warning. The caller used the supported API, so suppress it here.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=(
                "Importing verbose from langchain root module is no longer supported"
            ),
        )
        # Still consult the legacy attribute so users who only set
        # `langchain.verbose` (and are seeing deprecation warnings directing
        # them to `set_verbose()`) keep working during the migration window.
        # Remove once `langchain.verbose` support is dropped.
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        legacy_verbose = langchain.verbose

    # The setting counts as enabled if either the old or the new value is set.
    return _verbose or legacy_verbose
def set_debug(value: bool) -> None:
    """Set a new value for the `debug` global setting."""
    global _debug

    import langchain

    # Assigning to the deprecated `langchain.debug` attribute goes through
    # the module-level `__getattr__` workaround and would emit a deprecation
    # warning. The caller used the supported API, so silence it here.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="Importing debug from langchain root module is no longer supported",
        )
        # Mirror the value onto the legacy attribute so code that still reads
        # `langchain.debug` observes the update. Remove once that attribute
        # is no longer supported and all users call `set_debug()`.
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        langchain.debug = value

    _debug = value
def get_debug() -> bool:
    """Get the value of the `debug` global setting."""
    import langchain

    # Reading the deprecated `langchain.debug` attribute goes through the
    # module-level `__getattr__` workaround and would emit a deprecation
    # warning. The caller used the supported API, so suppress it here.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="Importing debug from langchain root module is no longer supported",
        )
        # Still consult the legacy attribute so users who only set
        # `langchain.debug` (and are seeing deprecation warnings directing
        # them to `set_debug()`) keep working during the migration window.
        # Remove once `langchain.debug` support is dropped.
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        legacy_debug = langchain.debug

    # The setting counts as enabled if either the old or the new value is set.
    return _debug or legacy_debug
def set_llm_cache(value: Optional["BaseCache"]) -> None:
    """Set a new LLM cache, overwriting the previous value, if any."""
    global _llm_cache

    import langchain

    # Assigning to the deprecated `langchain.llm_cache` attribute goes through
    # the module-level `__getattr__` workaround and would emit a deprecation
    # warning. The caller used the supported API, so silence it here.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=(
                "Importing llm_cache from langchain root module is no longer supported"
            ),
        )
        # Mirror the value onto the legacy attribute so code that still reads
        # `langchain.llm_cache` observes the update. Remove once that
        # attribute is no longer supported and all users call `set_llm_cache()`.
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        langchain.llm_cache = value

    _llm_cache = value
def get_llm_cache() -> Optional["BaseCache"]:
    """Get the value of the `llm_cache` global setting.

    Returns:
        The globally configured LLM cache, or ``None`` when no cache has been
        set through either `set_llm_cache()` or the deprecated
        `langchain.llm_cache` attribute.

    Note:
        The return annotation was previously ``"BaseCache"``, but the backing
        global defaults to ``None`` and ``_llm_cache or old_llm_cache`` can
        therefore evaluate to ``None``; ``Optional["BaseCache"]`` reflects the
        actual contract.
    """
    import langchain

    # We're about to run some deprecated code, don't report warnings from it.
    # The user called the correct (non-deprecated) code path and shouldn't
    # get warnings.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=(
                "Importing llm_cache from langchain root module is no longer supported"
            ),
        )
        # N.B.: This is a workaround for an unfortunate quirk of Python's
        # module-level `__getattr__()` implementation:
        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
        #
        # Remove it once `langchain.llm_cache` is no longer supported, and
        # once all users have migrated to using `set_llm_cache()` here.
        #
        # In the meantime, prefer whichever of the two backing sources is
        # truthy (not `None`), falling back to the old value if both are
        # falsy. This accommodates users who haven't migrated to using
        # `set_llm_cache()` yet. Those users are getting deprecation warnings
        # directing them to use `set_llm_cache()` when they import
        # `langchain.llm_cache`.
        old_llm_cache = langchain.llm_cache

    global _llm_cache
    return _llm_cache or old_llm_cache