@deprecated(
    since="0.3.1",
    removal="1.0.0",
    message=(
        "Please see the migration guide at: "
        "https://python.langchain.com/docs/versions/migrating_memory/"
    ),
)
class ConversationTokenBufferMemory(BaseChatMemory):
    """Conversation chat memory with token limit.

    Keeps only the most recent messages in the conversation under the
    constraint that the total number of tokens in the conversation does
    not exceed a certain limit, as measured by ``llm``'s tokenizer.
    """

    # Prefixes used when rendering the buffer as a single string.
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    # Language model whose tokenizer defines the token count for pruning.
    llm: BaseLanguageModel
    # Key under which the buffer is returned from load_memory_variables.
    memory_key: str = "history"
    # Pruning threshold: oldest messages are dropped once the total
    # token count of the buffer exceeds this value.
    max_token_limit: int = 2000

    @property
    def buffer(self) -> Any:
        """Buffer of memory: messages if ``return_messages`` is set, else a string."""
        return self.buffer_as_messages if self.return_messages else self.buffer_as_str

    @property
    def buffer_as_str(self) -> str:
        """Exposes the buffer as a string in case return_messages is False."""
        return get_buffer_string(
            self.chat_memory.messages,
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )

    @property
    def buffer_as_messages(self) -> List[BaseMessage]:
        """Exposes the buffer as a list of messages in case return_messages is True."""
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer.

        Args:
            inputs: Chain inputs (unused; present for the memory interface).

        Returns:
            Mapping of ``memory_key`` to the current buffer.
        """
        return {self.memory_key: self.buffer}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer, then prune.

        After delegating storage to the parent class, drops the oldest
        messages (front of the list) until the buffer's total token count
        is back under ``max_token_limit``.

        Args:
            inputs: Chain input values for this turn.
            outputs: Chain output values for this turn.
        """
        super().save_context(inputs, outputs)
        # Prune buffer if it exceeds max token limit.
        buffer = self.chat_memory.messages
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        while curr_buffer_length > self.max_token_limit:
            # Discard the oldest message; popped messages are not retained.
            buffer.pop(0)
            # NOTE(review): recounts the whole buffer each iteration — token
            # counts are not assumed additive per message, so this is the
            # safe (if O(n^2)) way to stay under the limit.
            curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)