Experimental Modules#
This module contains experimental code and reproductions of existing work, built with LangChain primitives.
Autonomous Agents#
Here, we document the BabyAGI and AutoGPT classes from the langchain.experimental module.
- class langchain.experimental.BabyAGI(*, memory: Optional[langchain.schema.BaseMemory] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, verbose: bool = None, task_list: collections.deque = None, task_creation_chain: langchain.chains.base.Chain, task_prioritization_chain: langchain.chains.base.Chain, execution_chain: langchain.chains.base.Chain, task_id_counter: int = 1, vectorstore: langchain.vectorstores.base.VectorStore, max_iterations: Optional[int] = None)[source]#
Controller model for the BabyAGI agent.
- classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, verbose: bool = False, task_execution_chain: Optional[langchain.chains.base.Chain] = None, **kwargs: Dict[str, Any]) langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI [source]#
Initialize the BabyAGI Controller.
- get_next_task(result: str, task_description: str, objective: str) List[Dict] [source]#
Get the next task.
- property input_keys: List[str]#
Input keys this chain expects.
- property output_keys: List[str]#
Output keys this chain produces.
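A BabyAGI controller is typically built with from_llm, passing an LLM plus a vector store that the controller uses to index completed task results. The following is a minimal sketch, not an excerpt from the official docs: it assumes an OpenAI API key is configured and the faiss package is installed, and 1536 is the OpenAI embedding dimension.

```python
import faiss

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import BabyAGI
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Vector store the controller uses to store and retrieve task results.
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # assumption: OpenAI embedding dimension
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

# Build the controller; max_iterations is forwarded through **kwargs.
baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    verbose=False,
    max_iterations=3,
)

# The chain's single input key is "objective".
baby_agi({"objective": "Write a weather report for SF today"})
```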
- class langchain.experimental.AutoGPT(ai_name: str, memory: langchain.vectorstores.base.VectorStoreRetriever, chain: langchain.chains.llm.LLMChain, output_parser: langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser, tools: List[langchain.tools.base.BaseTool], feedback_tool: Optional[langchain.tools.human.tool.HumanInputRun] = None)[source]#
Agent class for interacting with Auto-GPT.
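Rather than wiring up the LLMChain and output parser by hand, AutoGPT is usually constructed with the from_llm_and_tools convenience constructor. The sketch below assumes that constructor is available in your version of langchain.experimental, along with an OpenAI API key and faiss; the tool list and ai_name are illustrative.

```python
import faiss

from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import AutoGPT
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from langchain.vectorstores import FAISS

# Illustrative tool set; any List[BaseTool] works.
tools = [WriteFileTool(), ReadFileTool()]

# AutoGPT's memory is a VectorStoreRetriever over its past actions.
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",          # illustrative name
    ai_role="Assistant",
    tools=tools,
    llm=ChatOpenAI(temperature=0),
    memory=vectorstore.as_retriever(),
)

# run() takes a list of goals and loops until the agent decides to finish.
agent.run(["Write a weather report for SF today"])
```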
Generative Agents#
Here, we document the GenerativeAgent and GenerativeAgentMemory classes from the langchain.experimental module.
- class langchain.experimental.GenerativeAgent(*, name: str, age: Optional[int] = None, traits: str = 'N/A', status: str, memory: langchain.experimental.generative_agents.memory.GenerativeAgentMemory, llm: langchain.base_language.BaseLanguageModel, verbose: bool = False, summary: str = '', summary_refresh_seconds: int = 3600, last_refreshed: datetime.datetime = None, daily_summaries: List[str] = None)[source]#
A character with memory and innate characteristics.
- field age: Optional[int] = None#
The optional age of the character.
- field daily_summaries: List[str] [Optional]#
Summaries of the events in the plans the agent has carried out.
- generate_dialogue_response(observation: str, now: Optional[datetime.datetime] = None) Tuple[bool, str] [source]#
Generate a dialogue response to a given observation.
- generate_reaction(observation: str, now: Optional[datetime.datetime] = None) Tuple[bool, str] [source]#
React to a given observation.
- get_full_header(force_refresh: bool = False, now: Optional[datetime.datetime] = None) str [source]#
Return a full header of the agent’s status, summary, and current time.
- get_summary(force_refresh: bool = False, now: Optional[datetime.datetime] = None) str [source]#
Return a descriptive summary of the agent.
- field last_refreshed: datetime.datetime [Optional]#
The last time the character’s summary was regenerated.
- field llm: langchain.base_language.BaseLanguageModel [Required]#
The underlying language model.
- field memory: langchain.experimental.generative_agents.memory.GenerativeAgentMemory [Required]#
The memory object that combines relevance, recency, and ‘importance’.
- field name: str [Required]#
The character’s name.
- field status: str [Required]#
The character’s current status.
- summarize_related_memories(observation: str) str [source]#
Summarize memories that are most relevant to an observation.
- field summary: str = ''#
Stateful self-summary generated via reflection on the character’s memory.
- field summary_refresh_seconds: int = 3600#
How frequently to re-generate the summary.
- field traits: str = 'N/A'#
Permanent traits to ascribe to the character.
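A GenerativeAgent is built on top of a GenerativeAgentMemory (documented below), which in turn wraps a TimeWeightedVectorStoreRetriever. The following is a minimal sketch, assuming an OpenAI API key and faiss are available; the character details and the relevance-score rescaling are illustrative.

```python
import math

import faiss

from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import GenerativeAgent, GenerativeAgentMemory
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

llm = ChatOpenAI(max_tokens=1500)

# Time-weighted retriever backing the agent's memory; the relevance function
# rescales FAISS L2 distances on normalized embeddings into [0, 1].
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(
    embeddings.embed_query,
    index,
    InMemoryDocstore({}),
    {},
    relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
)
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=15
)

memory = GenerativeAgentMemory(llm=llm, memory_retriever=retriever, reflection_threshold=8)

tommie = GenerativeAgent(
    name="Tommie",                              # illustrative character
    age=25,
    traits="anxious, likes design, talkative",  # permanent traits
    status="looking for a job",                 # current status
    llm=llm,
    memory=memory,
)

print(tommie.get_summary(force_refresh=True))
```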
- class langchain.experimental.GenerativeAgentMemory(*, llm: langchain.base_language.BaseLanguageModel, memory_retriever: langchain.retrievers.time_weighted_retriever.TimeWeightedVectorStoreRetriever, verbose: bool = False, reflection_threshold: Optional[float] = None, current_plan: List[str] = [], importance_weight: float = 0.15, aggregate_importance: float = 0.0, max_tokens_limit: int = 1200, queries_key: str = 'queries', most_recent_memories_token_key: str = 'recent_memories_token', add_memory_key: str = 'add_memory', relevant_memories_key: str = 'relevant_memories', relevant_memories_simple_key: str = 'relevant_memories_simple', most_recent_memories_key: str = 'most_recent_memories', now_key: str = 'now', reflecting: bool = False)[source]#
Memory for the generative agent.
- add_memories(memory_content: str, now: Optional[datetime.datetime] = None) List[str] [source]#
Add observations or memories to the agent’s memory.
- add_memory(memory_content: str, now: Optional[datetime.datetime] = None) List[str] [source]#
Add an observation or memory to the agent’s memory.
- field aggregate_importance: float = 0.0#
Track the sum of the ‘importance’ of recent memories.
Triggers reflection when it reaches reflection_threshold.
- field current_plan: List[str] = []#
The current plan of the agent.
- fetch_memories(observation: str, now: Optional[datetime.datetime] = None) List[langchain.schema.Document] [source]#
Fetch related memories.
- field importance_weight: float = 0.15#
How much weight to assign the memory importance.
- field llm: langchain.base_language.BaseLanguageModel [Required]#
The core language model.
- load_memory_variables(inputs: Dict[str, Any]) Dict[str, str] [source]#
Return key-value pairs given the text input to the chain.
- field memory_retriever: langchain.retrievers.time_weighted_retriever.TimeWeightedVectorStoreRetriever [Required]#
The retriever to fetch related memories.
- property memory_variables: List[str]#
Input keys this memory class will load dynamically.
- pause_to_reflect(now: Optional[datetime.datetime] = None) List[str] [source]#
Reflect on recent observations and generate ‘insights’.
- field reflection_threshold: Optional[float] = None#
When aggregate_importance exceeds reflection_threshold, stop to reflect.
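Continuing the illustrative sketch above (the memory object constructed under GenerativeAgent), observations are written with add_memory or add_memories and read back with fetch_memories; a round of reflection can also be triggered directly. The observation strings are illustrative.

```python
from datetime import datetime

# Store a single observation; returns the ids of the stored documents.
memory.add_memory(
    "Tommie remembers his dog, Bruno, from when he was a kid", now=datetime.now()
)

# Store several observations at once, separated by semicolons.
memory.add_memories(
    "Tommie wakes up to the sound of a noisy construction site outside his window; "
    "Tommie gets out of bed and heads to the kitchen to brew some coffee"
)

# Retrieve memories related to an observation (List[Document]).
related = memory.fetch_memories("What does Tommie like?")

# Distill recent observations into higher-level 'insights'.
insights = memory.pause_to_reflect()
```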