Source code for langchain.chains.conversation.base
"""Chain that carries on a conversation and calls an LLM."""fromtypingimportListfromlangchain_core._apiimportdeprecatedfromlangchain_core.memoryimportBaseMemoryfromlangchain_core.promptsimportBasePromptTemplatefrompydanticimportConfigDict,Field,model_validatorfromtyping_extensionsimportSelffromlangchain.chains.conversation.promptimportPROMPTfromlangchain.chains.llmimportLLMChainfromlangchain.memory.bufferimportConversationBufferMemory
@deprecated(
    since="0.2.7",
    alternative=(
        "RunnableWithMessageHistory: "
        "https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html"  # noqa: E501
    ),
    removal="1.0",
)
class ConversationChain(LLMChain):  # type: ignore[override, override]
    """Chain to have a conversation and load context from memory.

    This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
    to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/

    ``RunnableWithMessageHistory`` offers several benefits, including:

    - Stream, batch, and async support;
    - More flexible memory handling, including the ability to manage memory
      outside the chain;
    - Support for multiple threads.

    Below is a minimal implementation, analogous to using ``ConversationChain`` with
    the default ``ConversationBufferMemory``:

        .. code-block:: python

            from langchain_core.chat_history import InMemoryChatMessageHistory
            from langchain_core.runnables.history import RunnableWithMessageHistory
            from langchain_openai import ChatOpenAI

            store = {}  # memory is maintained outside the chain

            def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
                if session_id not in store:
                    store[session_id] = InMemoryChatMessageHistory()
                return store[session_id]

            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

            chain = RunnableWithMessageHistory(llm, get_session_history)
            chain.invoke(
                "Hi I'm Bob.",
                config={"configurable": {"session_id": "1"}},
            )  # session_id determines thread

    Memory objects can also be incorporated into the ``get_session_history`` callable:

        .. code-block:: python

            from langchain.memory import ConversationBufferWindowMemory
            from langchain_core.chat_history import InMemoryChatMessageHistory
            from langchain_core.runnables.history import RunnableWithMessageHistory
            from langchain_openai import ChatOpenAI

            store = {}  # memory is maintained outside the chain

            def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
                if session_id not in store:
                    store[session_id] = InMemoryChatMessageHistory()
                    return store[session_id]

                memory = ConversationBufferWindowMemory(
                    chat_memory=store[session_id],
                    k=3,
                    return_messages=True,
                )
                assert len(memory.memory_variables) == 1
                key = memory.memory_variables[0]
                messages = memory.load_memory_variables({})[key]
                store[session_id] = InMemoryChatMessageHistory(messages=messages)
                return store[session_id]

            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

            chain = RunnableWithMessageHistory(llm, get_session_history)
            chain.invoke(
                "Hi I'm Bob.",
                config={"configurable": {"session_id": "1"}},
            )  # session_id determines thread

    Example:
        .. code-block:: python

            from langchain.chains import ConversationChain
            from langchain_community.llms import OpenAI

            conversation = ConversationChain(llm=OpenAI())
    """

    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    """Default memory store."""
    prompt: BasePromptTemplate = PROMPT
    """Default conversation prompt to use."""

    input_key: str = "input"  #: :meta private:
    output_key: str = "response"  #: :meta private:

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False

    @property
    def input_keys(self) -> List[str]:
        """Use this since some prompt vars come from history."""
        return [self.input_key]

    @model_validator(mode="after")
    def validate_prompt_input_variables(self) -> Self:
        """Validate that prompt input variables are consistent."""
        memory_keys = self.memory.memory_variables
        input_key = self.input_key
        if input_key in memory_keys:
            raise ValueError(
                f"The input key {input_key} was also found in the memory keys "
                f"({memory_keys}) - please provide keys that don't overlap."
            )
        prompt_variables = self.prompt.input_variables
        expected_keys = memory_keys + [input_key]
        if set(expected_keys) != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but got {memory_keys} as inputs from "
                f"memory, and {input_key} as the normal input key."
            )
        return self
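A minimal usage sketch (not part of the module above), assuming langchain and langchain_core are installed; FakeListLLM stands in for a real model so the example runs offline. It shows the default prompt/memory wiring and the prompt-variable check performed by validate_prompt_input_variables:

    from langchain_core.language_models import FakeListLLM
    from langchain_core.prompts import PromptTemplate

    from langchain.chains import ConversationChain

    # FakeListLLM returns canned responses in order; it stands in for a real LLM.
    llm = FakeListLLM(responses=["Hello Bob!"])

    # Default setup: ConversationBufferMemory supplies {history}, the caller
    # supplies {input}, and the answer comes back under the "response" key.
    chain = ConversationChain(llm=llm)  # emits a deprecation warning (see above)
    result = chain.invoke({"input": "Hi I'm Bob."})
    print(result["response"])  # -> Hello Bob!

    # validate_prompt_input_variables rejects prompts whose variables don't
    # equal the memory keys plus the input key ({history, input} by default).
    bad_prompt = PromptTemplate.from_template("{question}")
    try:
        ConversationChain(llm=llm, prompt=bad_prompt)
    except ValueError as err:  # pydantic's ValidationError subclasses ValueError
        print(err)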