[docs]@deprecated(since="0.3.1",removal="1.0.0",message=("Please see the migration guide at: ""https://python.langchain.com/docs/versions/migrating_memory/"),)classConversationSummaryBufferMemory(BaseChatMemory,SummarizerMixin):"""Buffer with summarizer for storing conversation memory. Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. """max_token_limit:int=2000moving_summary_buffer:str=""memory_key:str="history"@propertydefbuffer(self)->Union[str,List[BaseMessage]]:"""String buffer of memory."""returnself.load_memory_variables({})[self.memory_key]
    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        buffer = self.chat_memory.messages
        if self.moving_summary_buffer != "":
            first_messages: List[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer)
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: final_buffer}
    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Asynchronously return key-value pairs given the text input to the chain."""
        buffer = await self.chat_memory.aget_messages()
        if self.moving_summary_buffer != "":
            first_messages: List[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer)
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: final_buffer}
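    # Shape of the returned mapping, using a hypothetical transcript and
    # assuming return_messages=False with the default "Human"/"AI" prefixes.
    # The running summary is rendered first because summary_message_cls
    # defaults to SystemMessage:
    #
    #   {"history": "System: <running summary>\nHuman: hi\nAI: hello"}
    #
    # With return_messages=True, the value is the List[BaseMessage] itself.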
    @pre_init
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values
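    # A custom summarization prompt must template exactly {summary} and
    # {new_lines} to pass the check above. A minimal sketch of a compatible
    # prompt (the library's default summarization prompt uses the same two
    # variables; the wording below is illustrative only):
    #
    #   from langchain_core.prompts import PromptTemplate
    #
    #   prompt = PromptTemplate(
    #       input_variables=["summary", "new_lines"],
    #       template=(
    #           "Progressively summarize the conversation.\n\n"
    #           "Current summary:\n{summary}\n\n"
    #           "New lines of conversation:\n{new_lines}\n\n"
    #           "New summary:"
    #       ),
    #   )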
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self.prune()
    async def asave_context(
        self, inputs: Dict[str, Any], outputs: Dict[str, str]
    ) -> None:
        """Asynchronously save context from this conversation to buffer."""
        await super().asave_context(inputs, outputs)
        await self.aprune()
    def prune(self) -> None:
        """Prune buffer if it exceeds max token limit."""
        buffer = self.chat_memory.messages
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            # Evict the oldest messages until the buffer fits the limit.
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(buffer.pop(0))
                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
            # Fold the evicted messages into the running summary.
            self.moving_summary_buffer = self.predict_new_summary(
                pruned_memory, self.moving_summary_buffer
            )
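    # Worked example (hypothetical numbers): with max_token_limit=100 and a
    # buffer measuring 140 tokens, messages are popped oldest-first until the
    # remainder fits (say 90 tokens), and the popped messages are condensed
    # into moving_summary_buffer via a single summarization call to the LLM.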
    async def aprune(self) -> None:
        """Asynchronously prune buffer if it exceeds max token limit."""
        buffer = self.chat_memory.messages
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            # Evict the oldest messages until the buffer fits the limit.
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(buffer.pop(0))
                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
            # Fold the evicted messages into the running summary.
            self.moving_summary_buffer = await self.apredict_new_summary(
                pruned_memory, self.moving_summary_buffer
            )
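
# Usage sketch: a minimal example of wiring this memory up. It assumes an
# OpenAI chat model via ChatOpenAI from the langchain_openai package; any chat
# model that implements get_num_tokens_from_messages behaves the same way, and
# the model name and token limit below are illustrative choices only.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    memory = ConversationSummaryBufferMemory(
        llm=ChatOpenAI(model="gpt-4o-mini"),
        max_token_limit=120,
        return_messages=True,
    )
    memory.save_context({"input": "hi"}, {"output": "hello there"})
    memory.save_context(
        {"input": "what does this memory class do?"},
        {"output": "it keeps recent turns and summarizes older ones"},
    )
    # Once the token count exceeds max_token_limit, older turns are pruned
    # and condensed into memory.moving_summary_buffer.
    print(memory.load_memory_variables({}))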