class GenerativeAgentMemory(BaseMemory):
    """Memory for the generative agent."""

    llm: BaseLanguageModel
    """The core language model."""

    memory_retriever: TimeWeightedVectorStoreRetriever
    """The retriever to fetch related memories."""

    # When True, methods log progress (e.g. importance scores, reflection).
    verbose: bool = False

    reflection_threshold: Optional[float] = None
    """When aggregate_importance exceeds reflection_threshold, stop to reflect."""

    # NOTE(review): mutable default is acceptable here only because pydantic
    # copies field defaults per instance — confirm BaseMemory is a pydantic model.
    current_plan: List[str] = []
    """The current plan of the agent."""

    # A weight of 0.15 makes this less important than it
    # would be otherwise, relative to salience and time
    importance_weight: float = 0.15
    """How much weight to assign the memory importance."""

    aggregate_importance: float = 0.0  # : :meta private:
    """Track the sum of the 'importance' of recent memories. Triggers reflection when it reaches reflection_threshold."""

    # Token budget used by _get_memories_until_limit when assembling the
    # "most recent memories" context string.
    max_tokens_limit: int = 1200  # : :meta private:

    # input keys
    queries_key: str = "queries"
    most_recent_memories_token_key: str = "recent_memories_token"
    add_memory_key: str = "add_memory"
    # output keys
    relevant_memories_key: str = "relevant_memories"
    relevant_memories_simple_key: str = "relevant_memories_simple"
    most_recent_memories_key: str = "most_recent_memories"
    now_key: str = "now"

    # Re-entrancy guard: set while pause_to_reflect is running, because
    # reflection itself adds memories (which would otherwise re-trigger it).
    reflecting: bool = False
@staticmethoddef_parse_list(text:str)->List[str]:"""Parse a newline-separated string into a list of strings."""lines=re.split(r"\n",text.strip())lines=[lineforlineinlinesifline.strip()]# remove empty linesreturn[re.sub(r"^\s*\d+\.\s*","",line).strip()forlineinlines]def_get_topics_of_reflection(self,last_k:int=50)->List[str]:"""Return the 3 most salient high-level questions about recent observations."""prompt=PromptTemplate.from_template("{observations}\n\n""Given only the information above, what are the 3 most salient ""high-level questions we can answer about the subjects in the statements?\n""Provide each question on a new line.")observations=self.memory_retriever.memory_stream[-last_k:]observation_str="\n".join([self._format_memory_detail(o)foroinobservations])result=self.chain(prompt).run(observations=observation_str)returnself._parse_list(result)def_get_insights_on_topic(self,topic:str,now:Optional[datetime]=None)->List[str]:"""Generate 'insights' on a topic of reflection, based on pertinent memories."""prompt=PromptTemplate.from_template("Statements relevant to: '{topic}'\n""---\n""{related_statements}\n""---\n""What 5 high-level novel insights can you infer from the above statements ""that are relevant for answering the following question?\n""Do not include any insights that are not relevant to the question.\n""Do not repeat any insights that have already been made.\n\n""Question: {topic}\n\n""(example format: insight (because of 1, 5, 3))\n")related_memories=self.fetch_memories(topic,now=now)related_statements="\n".join([self._format_memory_detail(memory,prefix=f"{i+1}. ")fori,memoryinenumerate(related_memories)])result=self.chain(prompt).run(topic=topic,related_statements=related_statements)# TODO: Parse the connections between memories and insightsreturnself._parse_list(result)
[docs]defpause_to_reflect(self,now:Optional[datetime]=None)->List[str]:"""Reflect on recent observations and generate 'insights'."""ifself.verbose:logger.info("Character is reflecting")new_insights=[]topics=self._get_topics_of_reflection()fortopicintopics:insights=self._get_insights_on_topic(topic,now=now)forinsightininsights:self.add_memory(insight,now=now)new_insights.extend(insights)returnnew_insights
def_score_memory_importance(self,memory_content:str)->float:"""Score the absolute importance of the given memory."""prompt=PromptTemplate.from_template("On the scale of 1 to 10, where 1 is purely mundane"+" (e.g., brushing teeth, making bed) and 10 is"+" extremely poignant (e.g., a break up, college"+" acceptance), rate the likely poignancy of the"+" following piece of memory. Respond with a single integer."+"\nMemory: {memory_content}"+"\nRating: ")score=self.chain(prompt).run(memory_content=memory_content).strip()ifself.verbose:logger.info(f"Importance score: {score}")match=re.search(r"^\D*(\d+)",score)ifmatch:return(float(match.group(1))/10)*self.importance_weightelse:return0.0def_score_memories_importance(self,memory_content:str)->List[float]:"""Score the absolute importance of the given memory."""prompt=PromptTemplate.from_template("On the scale of 1 to 10, where 1 is purely mundane"+" (e.g., brushing teeth, making bed) and 10 is"+" extremely poignant (e.g., a break up, college"+" acceptance), rate the likely poignancy of the"+" following piece of memory. Always answer with only a list of numbers."+" If just given one memory still respond in a list."+" Memories are separated by semi colans (;)"+"\nMemories: {memory_content}"+"\nRating: ")scores=self.chain(prompt).run(memory_content=memory_content).strip()ifself.verbose:logger.info(f"Importance scores: {scores}")# Split into list of strings and convert to floatsscores_list=[float(x)forxinscores.split(";")]returnscores_list
[docs]defadd_memories(self,memory_content:str,now:Optional[datetime]=None)->List[str]:"""Add an observations or memories to the agent's memory."""importance_scores=self._score_memories_importance(memory_content)self.aggregate_importance+=max(importance_scores)memory_list=memory_content.split(";")documents=[]foriinrange(len(memory_list)):documents.append(Document(page_content=memory_list[i],metadata={"importance":importance_scores[i]},))result=self.memory_retriever.add_documents(documents,current_time=now)# After an agent has processed a certain amount of memories (as measured by# aggregate importance), it is time to reflect on recent events to add# more synthesized memories to the agent's memory stream.if(self.reflection_thresholdisnotNoneandself.aggregate_importance>self.reflection_thresholdandnotself.reflecting):self.reflecting=Trueself.pause_to_reflect(now=now)# Hack to clear the importance from reflectionself.aggregate_importance=0.0self.reflecting=Falsereturnresult
[docs]defadd_memory(self,memory_content:str,now:Optional[datetime]=None)->List[str]:"""Add an observation or memory to the agent's memory."""importance_score=self._score_memory_importance(memory_content)self.aggregate_importance+=importance_scoredocument=Document(page_content=memory_content,metadata={"importance":importance_score})result=self.memory_retriever.add_documents([document],current_time=now)# After an agent has processed a certain amount of memories (as measured by# aggregate importance), it is time to reflect on recent events to add# more synthesized memories to the agent's memory stream.if(self.reflection_thresholdisnotNoneandself.aggregate_importance>self.reflection_thresholdandnotself.reflecting):self.reflecting=Trueself.pause_to_reflect(now=now)# Hack to clear the importance from reflectionself.aggregate_importance=0.0self.reflecting=Falsereturnresult
[docs]deffetch_memories(self,observation:str,now:Optional[datetime]=None)->List[Document]:"""Fetch related memories."""ifnowisnotNone:withmock_now(now):returnself.memory_retriever.invoke(observation)else:returnself.memory_retriever.invoke(observation)
def_get_memories_until_limit(self,consumed_tokens:int)->str:"""Reduce the number of tokens in the documents."""result=[]fordocinself.memory_retriever.memory_stream[::-1]:ifconsumed_tokens>=self.max_tokens_limit:breakconsumed_tokens+=self.llm.get_num_tokens(doc.page_content)ifconsumed_tokens<self.max_tokens_limit:result.append(doc)returnself.format_memories_simple(result)@propertydefmemory_variables(self)->List[str]:"""Input keys this memory class will load dynamically."""return[]
[docs]defload_memory_variables(self,inputs:Dict[str,Any])->Dict[str,str]:"""Return key-value pairs given the text input to the chain."""queries=inputs.get(self.queries_key)now=inputs.get(self.now_key)ifqueriesisnotNone:relevant_memories=[memforqueryinqueriesformeminself.fetch_memories(query,now=now)]return{self.relevant_memories_key:self.format_memories_detail(relevant_memories),self.relevant_memories_simple_key:self.format_memories_simple(relevant_memories),}most_recent_memories_token=inputs.get(self.most_recent_memories_token_key)ifmost_recent_memories_tokenisnotNone:return{self.most_recent_memories_key:self._get_memories_until_limit(most_recent_memories_token)}return{}
[docs]defsave_context(self,inputs:Dict[str,Any],outputs:Dict[str,Any])->None:"""Save the context of this model run to memory."""# TODO: fix the save memory keymem=outputs.get(self.add_memory_key)now=outputs.get(self.now_key)ifmem:self.add_memory(mem,now=now)