class GenerativeAgent(BaseModel):
    """Agent as a character with memory and innate characteristics."""

    name: str
    """The character's name."""
    age: Optional[int] = None
    """The optional age of the character."""
    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""
    status: str
    """The traits of the character you wish not to change."""
    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""
    llm: BaseLanguageModel
    """The underlying language model."""
    verbose: bool = False
    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""
    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""
    last_refreshed: datetime = Field(default_factory=datetime.now)  # : :meta private:
    """The last time the character's summary was regenerated."""
    daily_summaries: List[str] = Field(default_factory=list)  # : :meta private:
    """Summary of the events in the plan that the agent took."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        # Strip any leading "1. " / "23. " style numbering from each line.
        numbering = re.compile(r"^\s*\d+\.\s*")
        return [
            numbering.sub("", item).strip()
            for item in re.split(r"\n", text.strip())
        ]
[docs]defchain(self,prompt:PromptTemplate)->LLMChain:"""Create a chain with the same settings as the agent."""returnLLMChain(llm=self.llm,prompt=prompt,verbose=self.verbose,memory=self.memory)
def_get_entity_from_observation(self,observation:str)->str:prompt=PromptTemplate.from_template("What is the observed entity in the following observation? {observation}"+"\nEntity=")returnself.chain(prompt).run(observation=observation).strip()def_get_entity_action(self,observation:str,entity_name:str)->str:prompt=PromptTemplate.from_template("What is the {entity} doing in the following observation? {observation}"+"\nThe {entity} is")return(self.chain(prompt).run(entity=entity_name,observation=observation).strip())
[docs]defsummarize_related_memories(self,observation:str)->str:"""Summarize memories that are most relevant to an observation."""prompt=PromptTemplate.from_template("""{q1}?Context from memory:{relevant_memories}Relevant context: """)entity_name=self._get_entity_from_observation(observation)entity_action=self._get_entity_action(observation,entity_name)q1=f"What is the relationship between {self.name} and {entity_name}"q2=f"{entity_name} is {entity_action}"returnself.chain(prompt=prompt).run(q1=q1,queries=[q1,q2]).strip()
def_generate_reaction(self,observation:str,suffix:str,now:Optional[datetime]=None)->str:"""React to a given observation or dialogue act."""prompt=PromptTemplate.from_template("{agent_summary_description}"+"\nIt is {current_time}."+"\n{agent_name}'s status: {agent_status}"+"\nSummary of relevant context from {agent_name}'s memory:"+"\n{relevant_memories}"+"\nMost recent observations: {most_recent_memories}"+"\nObservation: {observation}"+"\n\n"+suffix)agent_summary_description=self.get_summary(now=now)relevant_memories_str=self.summarize_related_memories(observation)current_time_str=(datetime.now().strftime("%B %d, %Y, %I:%M %p")ifnowisNoneelsenow.strftime("%B %d, %Y, %I:%M %p"))kwargs:Dict[str,Any]=dict(agent_summary_description=agent_summary_description,current_time=current_time_str,relevant_memories=relevant_memories_str,agent_name=self.name,observation=observation,agent_status=self.status,)consumed_tokens=self.llm.get_num_tokens(prompt.format(most_recent_memories="",**kwargs))kwargs[self.memory.most_recent_memories_token_key]=consumed_tokensreturnself.chain(prompt=prompt).run(**kwargs).strip()def_clean_response(self,text:str)->str:returnre.sub(f"^{self.name} ","",text.strip()).strip()
[docs]defgenerate_reaction(self,observation:str,now:Optional[datetime]=None)->Tuple[bool,str]:"""React to a given observation."""call_to_action_template=("Should {agent_name} react to the observation, and if so,"+" what would be an appropriate reaction? Respond in one line."+' If the action is to engage in dialogue, write:\nSAY: "what to say"'+"\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."+"\nEither do nothing, react, or say something but not both.\n\n")full_result=self._generate_reaction(observation,call_to_action_template,now=now)result=full_result.strip().split("\n")[0]# AAAself.memory.save_context({},{self.memory.add_memory_key:f"{self.name} observed "f"{observation} and reacted by {result}",self.memory.now_key:now,},)if"REACT:"inresult:reaction=self._clean_response(result.split("REACT:")[-1])returnFalse,f"{self.name}{reaction}"if"SAY:"inresult:said_value=self._clean_response(result.split("SAY:")[-1])returnTrue,f"{self.name} said {said_value}"else:returnFalse,result
[docs]defgenerate_dialogue_response(self,observation:str,now:Optional[datetime]=None)->Tuple[bool,str]:"""React to a given observation."""call_to_action_template=("What would {agent_name} say? To end the conversation, write:"' GOODBYE: "what to say". Otherwise to continue the conversation,'' write: SAY: "what to say next"\n\n')full_result=self._generate_reaction(observation,call_to_action_template,now=now)result=full_result.strip().split("\n")[0]if"GOODBYE:"inresult:farewell=self._clean_response(result.split("GOODBYE:")[-1])self.memory.save_context({},{self.memory.add_memory_key:f"{self.name} observed "f"{observation} and said {farewell}",self.memory.now_key:now,},)returnFalse,f"{self.name} said {farewell}"if"SAY:"inresult:response_text=self._clean_response(result.split("SAY:")[-1])self.memory.save_context({},{self.memory.add_memory_key:f"{self.name} observed "f"{observation} and said {response_text}",self.memory.now_key:now,},)returnTrue,f"{self.name} said {response_text}"else:returnFalse,result
####################################################### Agent stateful' summary methods. ## Each dialog or response prompt includes a header ## summarizing the agent's self-description. This is ## updated periodically through probing its memories #######################################################def_compute_agent_summary(self)->str:""""""prompt=PromptTemplate.from_template("How would you summarize {name}'s core characteristics given the"+" following statements:\n"+"{relevant_memories}"+"Do not embellish."+"\n\nSummary: ")# The agent seeks to think about their core characteristics.return(self.chain(prompt).run(name=self.name,queries=[f"{self.name}'s core characteristics"]).strip())
[docs]defget_summary(self,force_refresh:bool=False,now:Optional[datetime]=None)->str:"""Return a descriptive summary of the agent."""current_time=datetime.now()ifnowisNoneelsenowsince_refresh=(current_time-self.last_refreshed).secondsif(notself.summaryorsince_refresh>=self.summary_refresh_secondsorforce_refresh):self.summary=self._compute_agent_summary()self.last_refreshed=current_timeage=self.ageifself.ageisnotNoneelse"N/A"return(f"Name: {self.name} (age: {age})"+f"\nInnate traits: {self.traits}"+f"\n{self.summary}")
[docs]defget_full_header(self,force_refresh:bool=False,now:Optional[datetime]=None)->str:"""Return a full header of the agent's status, summary, and current time."""now=datetime.now()ifnowisNoneelsenowsummary=self.get_summary(force_refresh=force_refresh,now=now)current_time_str=now.strftime("%B %d, %Y, %I:%M %p")return(f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}")