Memory#

class langchain.memory.CassandraChatMessageHistory(contact_points: List[str], session_id: str, port: int = 9042, username: str = 'cassandra', password: str = 'cassandra', keyspace_name: str = 'chat_history', table_name: str = 'message_store')[source]#

Chat message history that stores history in Cassandra.

Parameters
  • contact_points – list of IPs to connect to the Cassandra cluster

  • session_id – arbitrary key that is used to store the messages of a single chat session.

  • port – port to connect to Cassandra cluster

  • username – username to connect to Cassandra cluster

  • password – password to connect to Cassandra cluster

  • keyspace_name – name of the keyspace to use

  • table_name – name of the table to use

add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in Cassandra.

clear() None[source]#

Clear session memory from Cassandra.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from Cassandra.
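
A minimal usage sketch, assuming a Cassandra node is reachable at 127.0.0.1 with the default credentials:

    from langchain.memory import CassandraChatMessageHistory
    from langchain.schema import AIMessage, HumanMessage

    # Assumes a local Cassandra node with the default cassandra/cassandra login.
    history = CassandraChatMessageHistory(
        contact_points=["127.0.0.1"],
        session_id="user-42",
    )
    history.add_message(HumanMessage(content="Hi!"))
    history.add_message(AIMessage(content="Hello! How can I help?"))
    print(history.messages)  # both messages, in insertion order
    history.clear()          # drops this session's stored messages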

pydantic model langchain.memory.ChatMessageHistory[source]#
field messages: List[langchain.schema.BaseMessage] = []#
add_message(message: langchain.schema.BaseMessage) None[source]#

Add a self-created message to the store.

clear() None[source]#

Remove all messages from the store.
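
A minimal usage sketch; this store keeps messages in a plain Python list, so no backend is needed:

    from langchain.memory import ChatMessageHistory
    from langchain.schema import AIMessage, HumanMessage

    history = ChatMessageHistory()
    history.add_message(HumanMessage(content="What is LangChain?"))
    history.add_message(AIMessage(content="A framework for building LLM apps."))
    print(history.messages)  # [HumanMessage(...), AIMessage(...)]
    history.clear()
    print(history.messages)  # []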

pydantic model langchain.memory.CombinedMemory[source]#

Class for combining the data of multiple memories.

Validators
  • check_input_key » memories

  • check_repeated_memory_variable » memories

field memories: List[langchain.schema.BaseMemory] [Required]#

For tracking all the memories that should be accessed.

clear() None[source]#

Clear context from this session for every memory.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, str][source]#

Load all vars from sub-memories.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this session for every memory.

property memory_variables: List[str]#

All the memory variables that this instance provides.
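
A sketch combining two buffer memories; the memory_key and input_key values are illustrative, and the validators require each sub-memory to expose a distinct memory variable:

    from langchain.memory import (
        CombinedMemory,
        ConversationBufferMemory,
        ConversationBufferWindowMemory,
    )

    # Each sub-memory must expose a distinct memory variable.
    full = ConversationBufferMemory(memory_key="chat_history", input_key="input")
    recent = ConversationBufferWindowMemory(
        k=2, memory_key="recent_lines", input_key="input"
    )
    memory = CombinedMemory(memories=[full, recent])

    memory.save_context({"input": "hi"}, {"output": "hello"})
    print(memory.memory_variables)           # ['chat_history', 'recent_lines']
    print(memory.load_memory_variables({}))  # one entry per sub-memory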

pydantic model langchain.memory.ConversationBufferMemory[source]#

Buffer for storing conversation memory.

field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

property buffer: Any#

String buffer of memory.
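
A minimal sketch of the save/load round trip:

    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory()
    memory.save_context({"input": "hi"}, {"output": "hello"})
    print(memory.load_memory_variables({}))
    # {'history': 'Human: hi\nAI: hello'}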

pydantic model langchain.memory.ConversationBufferWindowMemory[source]#

Buffer for storing conversation memory, limited to the last k interactions.

field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
field k: int = 5#
load_memory_variables(inputs: Dict[str, Any]) Dict[str, str][source]#

Return history buffer.

property buffer: List[langchain.schema.BaseMessage]#

Message buffer of memory.
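
A sketch of the windowing behavior, with k=1 so only the most recent exchange is kept:

    from langchain.memory import ConversationBufferWindowMemory

    memory = ConversationBufferWindowMemory(k=1)
    memory.save_context({"input": "first"}, {"output": "one"})
    memory.save_context({"input": "second"}, {"output": "two"})
    print(memory.load_memory_variables({}))
    # Only the last k=1 exchange survives:
    # {'history': 'Human: second\nAI: two'}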

pydantic model langchain.memory.ConversationEntityMemory[source]#

Memory that extracts entities from the conversation and summarizes them.

field ai_prefix: str = 'AI'#
field chat_history_key: str = 'history'#
field entity_cache: List[str] = []#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field entity_store: langchain.memory.entity.BaseEntityStore [Optional]#
field entity_summarization_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['entity', 'summary', 'history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence.\nThe update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity.\n\nIf there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged.\n\nFull conversation history (for context):\n{history}\n\nEntity to summarize:\n{entity}\n\nExisting summary of {entity}:\n{summary}\n\nLast line of conversation:\nHuman: {input}\nUpdated summary:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'#
field k: int = 3#
field llm: langchain.base_language.BaseLanguageModel [Required]#
clear() None[source]#

Clear memory contents.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.

property buffer: List[langchain.schema.BaseMessage]#
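
A hedged sketch; entities are detected during load_memory_variables and summarized by the LLM in save_context, so it assumes a configured model (here OpenAI, with OPENAI_API_KEY set) and model-dependent output:

    from langchain.llms import OpenAI
    from langchain.memory import ConversationEntityMemory

    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
    memory = ConversationEntityMemory(llm=llm)
    memory.load_memory_variables({"input": "Deven and Sam are hacking on a project"})
    memory.save_context(
        {"input": "Deven and Sam are hacking on a project"},
        {"output": "Sounds fun! What are they building?"},
    )
    print(memory.entity_store.get("Deven"))  # LLM-written summary, if extracted
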
pydantic model langchain.memory.ConversationKGMemory[source]#

Knowledge graph memory for storing conversation memory.

Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation.

field ai_prefix: str = 'AI'#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'#
field k: int = 2#

Number of previous utterances to include in the context.

field kg: langchain.graphs.networkx_graph.NetworkxEntityGraph [Optional]#
field knowledge_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template="You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the last line of conversation. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nConversation history:\nPerson #1: Did you hear aliens landed in Area 51?\nAI: No, I didn't hear that. What do you know about Area 51?\nPerson #1: It's a secret military base in Nevada.\nAI: What do you know about Nevada?\nLast line of conversation:\nPerson #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)<|>(Nevada, is in, US)<|>(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: Hello.\nAI: Hi! How are you?\nPerson #1: I'm good. How are you?\nAI: I'm good too.\nLast line of conversation:\nPerson #1: I'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: What do you know about Descartes?\nAI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\nPerson #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\nAI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\nLast line of conversation:\nPerson #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)<|>(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:", template_format='f-string', validate_template=True)#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field summary_message_cls: Type[langchain.schema.BaseMessage] = <class 'langchain.schema.SystemMessage'>#

Message class used to wrap the knowledge returned from the graph.

clear() None[source]#

Clear memory contents.

get_current_entities(input_string: str) List[str][source]#
get_knowledge_triplets(input_string: str) List[langchain.graphs.networkx_graph.KnowledgeTriple][source]#
load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.
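
A hedged sketch along the same lines; triple extraction runs through the LLM, so the exact triples depend on the model:

    from langchain.llms import OpenAI
    from langchain.memory import ConversationKGMemory

    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
    memory = ConversationKGMemory(llm=llm)
    memory.save_context(
        {"input": "Sam is my friend. Sam lives in Berlin."},
        {"output": "Good to know!"},
    )
    print(memory.get_current_entities("Who is Sam?"))
    print(memory.load_memory_variables({"input": "Tell me about Sam"}))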

pydantic model langchain.memory.ConversationStringBufferMemory[source]#

Buffer for storing conversation memory as a single string.

field ai_prefix: str = 'AI'#

Prefix to use for AI-generated responses.

field buffer: str = ''#
field human_prefix: str = 'Human'#
field input_key: Optional[str] = None#
field output_key: Optional[str] = None#
clear() None[source]#

Clear memory contents.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, str][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.

property memory_variables: List[str]#

Will always return the list of memory variables.

pydantic model langchain.memory.ConversationSummaryBufferMemory[source]#

Buffer with summarizer for storing conversation memory.

field max_token_limit: int = 2000#
field memory_key: str = 'history'#
field moving_summary_buffer: str = ''#
clear() None[source]#

Clear memory contents.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

prune() None[source]#

Prune the buffer if it exceeds the max token limit.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.

property buffer: List[langchain.schema.BaseMessage]#
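
A hedged sketch; once the buffer exceeds max_token_limit, prune() (invoked from save_context) folds the oldest turns into moving_summary_buffer via the LLM:

    from langchain.llms import OpenAI
    from langchain.memory import ConversationSummaryBufferMemory

    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
    memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=40)
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context(
        {"input": "Tell me about LangChain"},
        {"output": "It is a framework for building LLM-powered applications."},
    )
    # Turns beyond the token limit have been summarized:
    print(memory.moving_summary_buffer)
    print(memory.load_memory_variables({}))
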
pydantic model langchain.memory.ConversationSummaryMemory[source]#

Memory that summarizes the conversation as it progresses.

field buffer: str = ''#
clear() None[source]#

Clear memory contents.

classmethod from_messages(llm: langchain.base_language.BaseLanguageModel, chat_memory: langchain.schema.BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any) langchain.memory.summary.ConversationSummaryMemory[source]#
load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.
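
A hedged sketch; every save_context call asks the LLM to update the running summary, and from_messages can backfill a summary from an existing chat history:

    from langchain.llms import OpenAI
    from langchain.memory import ChatMessageHistory, ConversationSummaryMemory

    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
    memory = ConversationSummaryMemory(llm=llm)
    memory.save_context({"input": "hi"}, {"output": "hello, how are you?"})
    print(memory.load_memory_variables({}))  # {'history': '<LLM-written summary>'}

    # Alternatively, seed the summary from pre-existing messages:
    history = ChatMessageHistory()
    history.add_user_message("hi")
    history.add_ai_message("hello")
    memory2 = ConversationSummaryMemory.from_messages(llm=llm, chat_memory=history)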

pydantic model langchain.memory.ConversationTokenBufferMemory[source]#

Buffer for storing conversation memory, pruned to a maximum token length.

field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field max_token_limit: int = 2000#
field memory_key: str = 'history'#
load_memory_variables(inputs: Dict[str, Any]) Dict[str, Any][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer, pruning it to the token limit.

property buffer: List[langchain.schema.BaseMessage]#

Message buffer of memory.
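
A hedged sketch; the llm field supplies the token counter used for pruning, so a configured model is still required:

    from langchain.llms import OpenAI
    from langchain.memory import ConversationTokenBufferMemory

    llm = OpenAI()  # assumes OPENAI_API_KEY is set; used here to count tokens
    memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=50)
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context({"input": "how are you?"}, {"output": "great, thanks"})
    # Oldest messages are dropped once the buffer exceeds 50 tokens.
    print(memory.load_memory_variables({}))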

class langchain.memory.CosmosDBChatMessageHistory(cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None)[source]#

Chat history backed by Azure CosmosDB.

add_message(message: langchain.schema.BaseMessage) None[source]#

Add a self-created message to the store.

clear() None[source]#

Clear session memory from this memory and Cosmos.

load_messages() None[source]#

Retrieve the messages from Cosmos.

prepare_cosmos() None[source]#

Prepare the CosmosDB client.

Use this function or the context manager to make sure your database is ready.

upsert_messages() None[source]#

Update the CosmosDB item.
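
A heavily hedged sketch; COSMOS_ENDPOINT and COSMOS_CONNECTION are hypothetical environment variables, and the database, container, and IDs below are illustrative:

    import os
    from langchain.memory import CosmosDBChatMessageHistory
    from langchain.schema import HumanMessage

    history = CosmosDBChatMessageHistory(
        cosmos_endpoint=os.environ["COSMOS_ENDPOINT"],      # hypothetical env var
        cosmos_database="chat_db",
        cosmos_container="messages",
        session_id="session-1",
        user_id="user-1",
        connection_string=os.environ["COSMOS_CONNECTION"],  # hypothetical env var
    )
    history.prepare_cosmos()  # ensure the database and container are ready
    history.add_message(HumanMessage(content="Hi!"))  # also upserts the item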

class langchain.memory.DynamoDBChatMessageHistory(table_name: str, session_id: str)[source]#

Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table with name table_name and a partition key of SessionId is present.

Parameters
  • table_name – name of the DynamoDB table

  • session_id – arbitrary key that is used to store the messages of a single chat session.

add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in DynamoDB.

clear() None[source]#

Clear session memory from DynamoDB.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from DynamoDB.
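
A minimal sketch; the table name is illustrative, and AWS credentials plus an existing table with partition key SessionId are assumed:

    from langchain.memory import DynamoDBChatMessageHistory
    from langchain.schema import HumanMessage

    # Assumes configured AWS credentials and an existing table
    # "SessionTable" with partition key "SessionId".
    history = DynamoDBChatMessageHistory(
        table_name="SessionTable", session_id="user-42"
    )
    history.add_message(HumanMessage(content="Hello"))
    print(history.messages)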

class langchain.memory.FileChatMessageHistory(file_path: str)[source]#

Chat message history that stores history in a local file.

Parameters

file_path – path of the local file to store the messages.

add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in the local file.

clear() None[source]#

Clear session memory from the local file.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from the local file.
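
A minimal sketch; the file path is illustrative:

    from langchain.memory import FileChatMessageHistory
    from langchain.schema import HumanMessage

    history = FileChatMessageHistory("chat_log.json")
    history.add_message(HumanMessage(content="Hello"))
    print(history.messages)  # persisted across processes via the file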

pydantic model langchain.memory.InMemoryEntityStore[source]#

Basic in-memory entity store.

field store: Dict[str, Optional[str]] = {}#
clear() None[source]#

Delete all entities from store.

delete(key: str) None[source]#

Delete entity value from store.

exists(key: str) bool[source]#

Check if entity exists in store.

get(key: str, default: Optional[str] = None) Optional[str][source]#

Get entity value from store.

set(key: str, value: Optional[str]) None[source]#

Set entity value in store.
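
A minimal sketch of the entity-store interface, backed here by a plain dict:

    from langchain.memory import InMemoryEntityStore

    store = InMemoryEntityStore()
    store.set("Alice", "Alice is a software engineer in Berlin.")
    print(store.exists("Alice"))             # True
    print(store.get("Bob", default="n/a"))   # 'n/a'
    store.delete("Alice")
    store.clear()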

class langchain.memory.MomentoChatMessageHistory(session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = 'message_store:', ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]#

Chat message history cache that uses Momento as a backend. See https://gomomento.com/

add_message(message: langchain.schema.BaseMessage) None[source]#

Store a message in the cache.

Parameters

message (BaseMessage) – The message object to store.

Raises
  • SdkException – Momento service or network error.

  • Exception – Unexpected response.

clear() None[source]#

Remove the session’s messages from the cache.

Raises
  • SdkException – Momento service or network error.

  • Exception – Unexpected response.

classmethod from_client_params(session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) MomentoChatMessageHistory[source]#

Construct cache from CacheClient parameters.

property messages: list[langchain.schema.BaseMessage]#

Retrieve the messages from Momento.

Raises
  • SdkException – Momento service or network error.

  • Exception – Unexpected response.

Returns

List of cached messages

Return type

list[BaseMessage]
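
A hedged sketch using the from_client_params constructor; it assumes a MOMENTO_AUTH_TOKEN environment variable, and the session and cache names are illustrative:

    from datetime import timedelta
    from langchain.memory import MomentoChatMessageHistory
    from langchain.schema import HumanMessage

    # Assumes MOMENTO_AUTH_TOKEN is set; the cache is created if missing.
    history = MomentoChatMessageHistory.from_client_params(
        "session-1", "chat-cache", ttl=timedelta(days=1)
    )
    history.add_message(HumanMessage(content="Hi!"))
    print(history.messages)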

class langchain.memory.MongoDBChatMessageHistory(connection_string: str, session_id: str, database_name: str = 'chat_history', collection_name: str = 'message_store')[source]#

Chat message history that stores history in MongoDB.

Parameters
  • connection_string – connection string to connect to MongoDB

  • session_id – arbitrary key that is used to store the messages of a single chat session.

  • database_name – name of the database to use

  • collection_name – name of the collection to use

add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in MongoDB.

clear() None[source]#

Clear session memory from MongoDB.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from MongoDB.

class langchain.memory.PostgresChatMessageHistory(session_id: str, connection_string: str = 'postgresql://postgres:mypassword@localhost/chat_history', table_name: str = 'message_store')[source]#
add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in PostgreSQL.

clear() None[source]#

Clear session memory from PostgreSQL.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from PostgreSQL.

pydantic model langchain.memory.ReadOnlySharedMemory[source]#

A memory wrapper that is read-only and cannot be changed.

field memory: langchain.schema.BaseMemory [Required]#
clear() None[source]#

Nothing to clear, got a memory like a vault.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, str][source]#

Load memory variables from memory.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Nothing should be saved or changed.

property memory_variables: List[str]#

Return memory variables.
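
A minimal sketch; reads pass through to the wrapped memory, while writes and clears are no-ops:

    from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory

    base = ConversationBufferMemory(memory_key="chat_history")
    readonly = ReadOnlySharedMemory(memory=base)

    base.save_context({"input": "hi"}, {"output": "hello"})
    print(readonly.load_memory_variables({}))  # reads through to base
    readonly.save_context({"input": "x"}, {"output": "y"})  # no-op
    readonly.clear()  # also a no-op; base is untouched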

class langchain.memory.RedisChatMessageHistory(session_id: str, url: str = 'redis://localhost:6379/0', key_prefix: str = 'message_store:', ttl: Optional[int] = None)[source]#
add_message(message: langchain.schema.BaseMessage) None[source]#

Append the message to the record in Redis.

clear() None[source]#

Clear session memory from Redis.

property key: str#

Construct the record key to use.

property messages: List[langchain.schema.BaseMessage]#

Retrieve the messages from Redis.

pydantic model langchain.memory.RedisEntityStore[source]#

Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back.

field key_prefix: str = 'memory_store'#
field recall_ttl: Optional[int] = 259200#
field redis_client: Any = None#
field session_id: str = 'default'#
field ttl: Optional[int] = 86400#
clear() None[source]#

Delete all entities from store.

delete(key: str) None[source]#

Delete entity value from store.

exists(key: str) bool[source]#

Check if entity exists in store.

get(key: str, default: Optional[str] = None) Optional[str][source]#

Get entity value from store.

set(key: str, value: Optional[str]) None[source]#

Set entity value in store.

property full_key_prefix: str#
pydantic model langchain.memory.SQLiteEntityStore[source]#

SQLite-backed entity store.

field session_id: str = 'default'#
field table_name: str = 'memory_store'#
clear() None[source]#

Delete all entities from store.

delete(key: str) None[source]#

Delete entity value from store.

exists(key: str) bool[source]#

Check if entity exists in store.

get(key: str, default: Optional[str] = None) Optional[str][source]#

Get entity value from store.

set(key: str, value: Optional[str]) None[source]#

Set entity value in store.

property full_table_name: str#
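
A minimal sketch; it assumes the default constructor creates or opens a local SQLite database file:

    from langchain.memory import SQLiteEntityStore

    store = SQLiteEntityStore()  # assumption: backed by a local SQLite file
    store.set("Sam", "Sam is a friend who lives in Berlin.")
    print(store.full_table_name)  # e.g. 'memory_store_default'
    print(store.get("Sam"))
    store.clear()
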
pydantic model langchain.memory.SimpleMemory[source]#

Simple memory for storing context or other bits of information that shouldn’t ever change between prompts.

field memories: Dict[str, Any] = {}#
clear() None[source]#

Nothing to clear, got a memory like a vault.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, str][source]#

Return key-value pairs given the text input to the chain.

If None, return all memories.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Nothing should be saved or changed, my memory is set in stone.

property memory_variables: List[str]#

Input keys this memory class will load dynamically.
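
A minimal sketch; the stored values never change, and writes are deliberately ignored:

    from langchain.memory import SimpleMemory

    memory = SimpleMemory(memories={"project": "Apollo", "deadline": "Friday"})
    print(memory.memory_variables)            # ['project', 'deadline']
    print(memory.load_memory_variables({}))   # the same dict, every time
    memory.save_context({"input": "x"}, {"output": "y"})  # no-op by design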

pydantic model langchain.memory.VectorStoreRetrieverMemory[source]#

Class for a VectorStore-backed memory object.

field input_key: Optional[str] = None#

Key name to index the inputs to load_memory_variables.

field memory_key: str = 'history'#

Key name to locate the memories in the result of load_memory_variables.

field retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]#

VectorStoreRetriever object to connect to.

field return_docs: bool = False#

Whether or not to return the result of querying the database directly.

clear() None[source]#

Nothing to clear.

load_memory_variables(inputs: Dict[str, Any]) Dict[str, Union[List[langchain.schema.Document], str]][source]#

Return history buffer.

save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) None[source]#

Save context from this conversation to buffer.

property memory_variables: List[str]#

The list of keys emitted from the load_memory_variables method.
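
A hedged sketch using FAISS and OpenAI embeddings as the backing retriever; any VectorStoreRetriever works, and the seed text exists only to initialize the index:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.memory import VectorStoreRetrieverMemory
    from langchain.vectorstores import FAISS

    # Assumes OPENAI_API_KEY is set and the faiss package is installed.
    vectorstore = FAISS.from_texts(["seed"], OpenAIEmbeddings())
    retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
    memory = VectorStoreRetrieverMemory(retriever=retriever)

    memory.save_context({"input": "My favorite sport is curling"}, {"output": "Noted"})
    print(memory.load_memory_variables({"input": "What sport do I like?"}))
    # {'history': '<the most relevant saved snippet>'}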