# Source code for langchain_experimental.autonomous_agents.autogpt.prompt
import time
from typing import Any, Callable, List, cast

from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain_core.tools import BaseTool
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import BaseModel

from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
    get_prompt,
)

# This class has a metaclass conflict: both `BaseChatPromptTemplate` and `BaseModel`
# define a metaclass to use, and the two metaclasses attempt to define
# the same functions but in mutually-incompatible ways.
# It isn't clear how to resolve this, and this code predates mypy
# beginning to perform that check.
#
# Mypy errors:
# ```
# Definition of "__private_attributes__" in base class "BaseModel" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__repr_name__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__pretty__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__repr_str__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__rich_repr__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Metaclass conflict: the metaclass of a derived class must be
# a (non-strict) subclass of the metaclasses of all its bases  [misc]
# ```
#
# TODO: look into refactoring this class in a way that avoids the mypy type errors
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):  # type: ignore[misc]
    """Prompt for AutoGPT.

    Builds the chat-message sequence sent to the model on each AutoGPT step:
    a system prompt describing the agent and its goals, the current time,
    relevant memories retrieved from a vector store, recent conversation
    history (token-budgeted), and finally the user's input.
    """

    # Name the agent refers to itself by in the system prompt.
    ai_name: str
    # Role description interpolated into the system prompt.
    ai_role: str
    # Tools advertised to the model via ``get_prompt``.
    tools: List[BaseTool]
    # Counts tokens in a string; used to enforce the token budget.
    token_counter: Callable[[str], int]
    # Overall token budget for the assembled messages.
    # NOTE(review): 4196 looks like a typo for 4096, but it is kept as-is
    # for backward compatibility with existing callers.
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        """Return the full system prompt: persona, numbered goals, and tools.

        Args:
            goals: Goal strings, rendered as a 1-based numbered list.

        Returns:
            The complete system-prompt text.
        """
        prompt_start = (
            "Your decisions must always be made independently "
            "without seeking user assistance.\n"
            "Play to your strengths as an LLM and pursue simple "
            "strategies with no legal complications.\n"
            "If you have completed all your tasks, make sure to "
            'use the "finish" command.'
        )

        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Assemble the message list for one agent step.

        Expected kwargs:
            goals: List[str] — passed to ``construct_full_prompt``.
            memory: VectorStoreRetriever — queried with the last 10 messages.
            messages: List[BaseMessage] — full conversation history.
            user_input: str — the new human message.

        Returns:
            [system prompt, time, relevant-memory summary,
             as much recent history as the budget allows, user input].
        """
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(
            cast(str, base_prompt.content)
        ) + self.token_counter(cast(str, time_prompt.content))
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.invoke(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        # Drop memories (most recently retrieved last) until the budget fits.
        # Guard on a non-empty list: without it, if used_tokens alone exceeds
        # 2500 the loop would spin forever once relevant_memory is empty
        # (slicing [:-1] on [] is a no-op and the token sum stays 0).
        while relevant_memory and used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += self.token_counter(cast(str, memory_message.content))
        historical_messages: List[BaseMessage] = []
        # Walk history newest-first, prepending, so the kept slice stays in
        # chronological order; reserve ~1000 tokens of headroom for the reply.
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
            used_tokens += message_tokens
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages