refactor(agent): Remove unused autogpt code (#7112)

Remove unused `autogpt` code and reorganize its file structure.

* Moved:
  * `autogpt/agent_manager/agent_manager.py` to `autogpt/agents/agent_manager.py`, so the dir `agent_manager` was removed
  * `dump_prompt` from `autogpt.core.runner.client_lib.logging.helpers` to `forge/llm/prompting/utils.py`
  * `coroutine` decorator from `autogpt.core.runner.client_lib.utils` to `autogpt/app/utils.py`
* Removed within `autogpt`:
  * Memory-related code from multiple files (not used), including `memory/*`
  * Memory-related config entries/env vars: `memory_backend`, `memory_index`, `redis_host`, `redis_port`, `redis_password`, `wipe_redis_on_start`
  * `core` files, from failed re-arch:
    * `*.md` docs
    * `core/ability/*`
    * `core/agent/*`
    * `core/memory/*`
    * `core/planning/*`
    * `core/plugin/*`
    * `core/workspace/*`
    * `core/runner/*` (`dump_prompt` and `coroutine` were moved)
  * Related tests
* Updated relevant docs
This commit is contained in:
Krzysztof Czerwinski
2024-05-22 09:43:15 +01:00
committed by GitHub
parent bcc5282aba
commit 3475aaf384
79 changed files with 46 additions and 5804 deletions

View File

@@ -70,7 +70,6 @@ Options:
--debug Enable Debug Mode
--gpt3only Enable GPT3.5 Only Mode
--gpt4only Enable GPT4 Only Mode
-m, --use-memory TEXT Defines which Memory backend to use
-b, --browser-name TEXT Specifies which web-browser to use when
using selenium to scrape the web.
--allow-downloads Dangerous: Allows AutoGPT to download files
@@ -116,7 +115,6 @@ Options:
--debug Enable Debug Mode
--gpt3only Enable GPT3.5 Only Mode
--gpt4only Enable GPT4 Only Mode
-m, --use-memory TEXT Defines which Memory backend to use
-b, --browser-name TEXT Specifies which web-browser to use when using
selenium to scrape the web.
--allow-downloads Dangerous: Allows AutoGPT to download files

View File

@@ -8,8 +8,8 @@ from forge.config.config import ConfigBuilder
from forge.file_storage import FileStorageBackendName, get_storage
from forge.logging.config import configure_logging
from autogpt.agent_manager.agent_manager import AgentManager
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.main import _configure_llm_provider, run_interaction_loop
LOG_DIR = Path(__file__).parent / "logs"
@@ -31,7 +31,6 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
config.continuous_mode = continuous_mode
config.continuous_limit = 20
config.noninteractive_mode = True
config.memory_backend = "no_memory"
ai_profile = AIProfile(
ai_name="AutoGPT",

View File

@@ -73,8 +73,6 @@ def _configure_agent(
app_config=app_config,
)
# TODO: configure memory
return Agent(
settings=agent_state,
llm_provider=llm_provider,

View File

@@ -1,3 +0,0 @@
from .agent_manager import AgentManager
__all__ = ["AgentManager"]

View File

@@ -1,7 +1,9 @@
from .agent import Agent
from .agent_manager import AgentManager
from .prompt_strategies.one_shot import OneShotAgentActionProposal
__all__ = [
"AgentManager",
"Agent",
"OneShotAgentActionProposal",
]

View File

@@ -30,6 +30,7 @@ from forge.components.watchdog import WatchdogComponent
from forge.components.web import WebSearchComponent, WebSeleniumComponent
from forge.file_storage.base import FileStorage
from forge.llm.prompting.schema import ChatPrompt
from forge.llm.prompting.utils import dump_prompt
from forge.llm.providers import (
AssistantFunctionCall,
ChatMessage,
@@ -58,7 +59,6 @@ from autogpt.app.log_cycle import (
USER_INPUT_FILE_NAME,
LogCycleHandler,
)
from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
from .prompt_strategies.one_shot import (
OneShotAgentActionProposal,

View File

@@ -35,7 +35,7 @@ from sentry_sdk import set_user
from autogpt.agent_factory.configurators import configure_agent_with_state
from autogpt.agent_factory.generators import generate_agent_for_task
from autogpt.agent_manager import AgentManager
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.utils import is_port_free
logger = logging.getLogger(__name__)

View File

@@ -5,12 +5,10 @@ import logging
from typing import Literal, Optional
import click
from colorama import Back, Fore, Style
from colorama import Back, Style
from forge.config.config import GPT_3_MODEL, GPT_4_MODEL, Config
from forge.llm.providers import ModelName, MultiProvider
from autogpt.memory.vector import get_supported_memory_backends
logger = logging.getLogger(__name__)
@@ -21,7 +19,6 @@ async def apply_overrides_to_config(
skip_reprompt: bool = False,
gpt3only: bool = False,
gpt4only: bool = False,
memory_type: Optional[str] = None,
browser_name: Optional[str] = None,
allow_downloads: bool = False,
skip_news: bool = False,
@@ -40,7 +37,6 @@ async def apply_overrides_to_config(
log_file_format (str): Override the format for the log file.
gpt3only (bool): Whether to enable GPT3.5 only mode.
gpt4only (bool): Whether to enable GPT4 only mode.
memory_type (str): The type of memory backend to use.
browser_name (str): The name of the browser to use for scraping the web.
allow_downloads (bool): Whether to allow AutoGPT to download files natively.
skips_news (bool): Whether to suppress the output of latest news on startup.
@@ -78,20 +74,6 @@ async def apply_overrides_to_config(
config.fast_llm = await check_model(config.fast_llm, "fast_llm")
config.smart_llm = await check_model(config.smart_llm, "smart_llm")
if memory_type:
supported_memory = get_supported_memory_backends()
chosen = memory_type
if chosen not in supported_memory:
logger.warning(
extra={
"title": "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED:",
"title_color": Fore.RED,
},
msg=f"{supported_memory}",
)
else:
config.memory_backend = chosen
if skip_reprompt:
config.skip_reprompt = True

View File

@@ -32,9 +32,8 @@ from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError
from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agent_factory.profile_generator import generate_agent_profile_for_task
from autogpt.agent_manager import AgentManager
from autogpt.agents.agent_manager import AgentManager
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
from autogpt.core.runner.client_lib.utils import coroutine
if TYPE_CHECKING:
from autogpt.agents.agent import Agent
@@ -44,6 +43,7 @@ from .input import clean_input
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
from .spinner import Spinner
from .utils import (
coroutine,
get_legal_warning,
markdown_to_ansi_style,
print_git_branch_info,

View File

@@ -1,15 +1,21 @@
import asyncio
import contextlib
import functools
import logging
import os
import re
import socket
import sys
from pathlib import Path
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar
import requests
from colorama import Fore, Style
from git import InvalidGitRepositoryError, Repo
P = ParamSpec("P")
T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -231,3 +237,11 @@ def is_port_free(port: int, host: str = "127.0.0.1"):
return True # If successful, the port is free
except OSError:
return False # If failed, the port is likely in use
def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    """Adapt an async function into a synchronously callable one.

    The returned wrapper builds the coroutine from *f* and drives it to
    completion on a fresh event loop via ``asyncio.run``, returning the
    coroutine's result. Useful for exposing async entry points to
    synchronous callers such as CLI frameworks.
    """

    @functools.wraps(f)
    def run_sync(*args: P.args, **kwargs: P.kwargs) -> T:
        coro = f(*args, **kwargs)
        return asyncio.run(coro)

    return run_sync

View File

@@ -1,271 +0,0 @@
# Re-architecture Notes
## Key Documents
- [Planned Agent Workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
- [Original Architecture Diagram](https://www.figma.com/file/fwdj44tPR7ArYtnGGUKknw/Modular-Architecture?type=whiteboard&node-id=0-1) - This is sadly well out of date at this point.
- [Kanban](https://github.com/orgs/Significant-Gravitas/projects/1/views/1?filterQuery=label%3Are-arch)
## The Motivation
The `master` branch of AutoGPT is an organically grown amalgamation of many thoughts
and ideas about agent-driven autonomous systems. It lacks clear abstraction boundaries,
has issues of global state and poorly encapsulated state, and is generally just hard to
make effective changes to. Mainly it's just a system that's hard to make changes to.
And research in the field is moving fast, so we want to be able to try new ideas
quickly.
## Initial Planning
A large group of maintainers and contributors met to discuss the architectural
challenges associated with the existing codebase. Many much-desired features (building
new user interfaces, enabling project-specific agents, enabling multi-agent systems)
are bottlenecked by the global state in the system. We discussed the tradeoffs between
an incremental system transition and a big breaking version change and decided to go
for the breaking version change. We justified this by saying:
- We can maintain, in essence, the same user experience as now even with a radical
restructuring of the codebase
- Our developer audience is struggling to use the existing codebase to build
applications and libraries of their own, so this breaking change will largely be
welcome.
## Primary Goals
- Separate the AutoGPT application code from the library code.
- Remove global state from the system
- Allow for multiple agents per user (with facilities for running simultaneously)
- Create a serializable representation of an Agent
- Encapsulate the core systems in abstractions with clear boundaries.
## Secondary goals
- Use existing tools to ditch any unnecessary cruft in the codebase (document loading,
json parsing, anything easier to replace than to port).
- Bring in the [core agent loop updates](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
being developed simultaneously by @Pwuts
# The Agent Subsystems
## Configuration
We want a lot of things from a configuration system. We lean heavily on it in the
`master` branch to allow several parts of the system to communicate with each other.
[Recent work](https://github.com/Significant-Gravitas/AutoGPT/pull/4737) has made it
so that the config is no longer a singleton object that is materialized from the import
state, but it's still treated as a
[god object](https://en.wikipedia.org/wiki/God_object) containing all information about
the system and _critically_ allowing any system to reference configuration information
about other parts of the system.
### What we want
- It should still be reasonable to collate the entire system configuration in a
sensible way.
- The configuration should be validatable and validated.
- The system configuration should be a _serializable_ representation of an `Agent`.
- The configuration system should provide a clear (albeit very low-level) contract
about user-configurable aspects of the system.
- The configuration should reasonably manage default values and user-provided overrides.
- The configuration system needs to handle credentials in a reasonable way.
- The configuration should be the representation of some amount of system state, like
api budgets and resource usage. These aspects are recorded in the configuration and
updated by the system itself.
- Agent systems should have encapsulated views of the configuration. E.g. the memory
system should know about memory configuration but nothing about command configuration.
## Workspace
There are two ways to think about the workspace:
- The workspace is a scratch space for an agent where it can store files, write code,
and do pretty much whatever else it likes.
- The workspace is, at any given point in time, the single source of truth for what an
agent is. It contains the serializable state (the configuration) as well as all
other working state (stored files, databases, memories, custom code).
In the existing system there is **one** workspace. And because the workspace holds so
much agent state, that means a user can only work with one agent at a time.
## Memory
The memory system has been under extremely active development.
See [#3536](https://github.com/Significant-Gravitas/AutoGPT/issues/3536) and
[#4208](https://github.com/Significant-Gravitas/AutoGPT/pull/4208) for discussion and
work in the `master` branch. The TL;DR is
that we noticed a couple of months ago that the `Agent` performed **worse** with
permanent memory than without it. Since then the knowledge storage and retrieval
system has been [redesigned](https://whimsical.com/memory-system-8Ae6x6QkjDwQAUe9eVJ6w1)
and partially implemented in the `master` branch.
## Planning/Prompt-Engineering
The planning system is the system that translates user desires/agent intentions into
language model prompts. In the course of development, it has become pretty clear
that `Planning` is the wrong name for this system.
### What we want
- It should be incredibly obvious what's being passed to a language model, when it's
being passed, and what the language model response is. The landscape of language
model research is developing very rapidly, so building complex abstractions between
users/contributors and the language model interactions is going to make it very
difficult for us to nimbly respond to new research developments.
- Prompt-engineering should ideally be exposed in a parameterizable way to users.
- We should, where possible, leverage OpenAI's new
[function calling api](https://openai.com/blog/function-calling-and-other-api-updates)
to get outputs in a standard machine-readable format and avoid the deep pit of
parsing json (and fixing unparsable json).
### Planning Strategies
The [new agent workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
has many, many interaction points for language models. We really would like to not
distribute prompt templates and raw strings all through the system. The re-arch solution
is to encapsulate language model interactions into planning strategies.
These strategies are defined by
- The `LanguageModelClassification` they use (`FAST` or `SMART`)
- A function `build_prompt` that takes strategy specific arguments and constructs a
`LanguageModelPrompt` (a simple container for lists of messages and functions to
pass to the language model)
- A function `parse_content` that parses the response content (a dict) into a better
formatted dict. Contracts here are intentionally loose and will tighten once we have
at least one other language model provider.
## Resources
Resources are kinds of services we consume from external APIs. They may have associated
credentials and costs we need to manage. Management of those credentials is implemented
as manipulation of the resource configuration. We have two categories of resources
currently
- AI/ML model providers (including language model providers and embedding model providers, ie OpenAI)
- Memory providers (e.g. Pinecone, Weaviate, ChromaDB, etc.)
### What we want
- Resource abstractions should provide a common interface to different service providers
for a particular kind of service.
- Resource abstractions should manipulate the configuration to manage their credentials
and budget/accounting.
- Resource abstractions should be composable over an API (e.g. I should be able to make
an OpenAI provider that is both a LanguageModelProvider and an EmbeddingModelProvider
and use it wherever I need those services).
## Abilities
Along with planning and memory usage, abilities are one of the major augmentations of
augmented language models. They allow us to expand the scope of what language models
can do by hooking them up to code they can execute to obtain new knowledge or influence
the world.
### What we want
- Abilities should have an extremely clear interface that users can write to.
- Abilities should have an extremely clear interface that a language model can
understand
- Abilities should be declarative about their dependencies so the system can inject them
- Abilities should be executable (where sensible) in an async run loop.
- Abilities should not have side effects unless those side effects are clear in
their representation to an agent (e.g. the BrowseWeb ability shouldn't write a file,
but the WriteFile ability can).
## Plugins
Users want to add lots of features that we don't want to support as first-party.
Our solution to this is a plugin system to allow users to plug in their functionality or
to construct their agent from a public plugin marketplace. Our primary concern in the
re-arch is to build a stateless plugin service interface and a simple implementation
that can load plugins from installed packages or from zip files. Future efforts will
expand this system to allow plugins to load from a marketplace or some other kind
of service.
### What is a Plugin
Plugins are a kind of garbage term. They refer to a number of things.
- New commands for the agent to execute. This is the most common usage.
- Replacements for entire subsystems like memory or language model providers
- Application plugins that do things like send emails or communicate via whatsapp
- The repositories contributors create that may themselves have multiple plugins in them.
### Usage in the existing system
The current plugin system is _hook-based_. This means plugins don't correspond to
kinds of objects in the system, but rather to times in the system at which we defer
execution to them. The main advantage of this setup is that user code can hijack
pretty much any behavior of the agent by injecting code that supersedes the normal
agent execution. The disadvantages to this approach are numerous:
- We have absolutely no mechanisms to enforce any security measures because the threat
surface is everything.
- We cannot reason about agent behavior in a cohesive way because control flow can be
ceded to user code at pretty much any point and arbitrarily change or break the
agent behavior
- The interface for designing a plugin is kind of terrible and difficult to standardize
- The hook based implementation means we couple ourselves to a particular flow of
control (or otherwise risk breaking plugin behavior). E.g. many of the hook targets
in the [old workflow](https://whimsical.com/agent-workflow-VAzeKcup3SR7awpNZJKTyK)
are not present or mean something entirely different in the
[new workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ).
- Etc.
### What we want
- A concrete definition of a plugin that is narrow enough in scope that we can define
it well and reason about how it will work in the system.
- A set of abstractions that let us define a plugin by its storage format and location
- A service interface that knows how to parse the plugin abstractions and turn them
into concrete classes and objects.
## Some Notes on how and why we'll use OO in this project
First and foremost, Python itself is an object-oriented language. Its
underlying [data model](https://docs.python.org/3/reference/datamodel.html) is built
with object-oriented programming in mind. It offers useful tools like abstract base
classes to communicate interfaces to developers who want to, e.g., write plugins, or
help work on implementations. If we were working in a different language that offered
different tools, we'd use a different paradigm.
While many things are classes in the re-arch, they are not classes in the same way.
There are three kinds of things (roughly) that are written as classes in the re-arch:
1. **Configuration**: AutoGPT has *a lot* of configuration. This configuration
is *data* and we use **[Pydantic](https://docs.pydantic.dev/latest/)** to manage it as
pydantic is basically industry standard for this stuff. It provides runtime validation
for all the configuration and allows us to easily serialize configuration to both basic
python types (dicts, lists, and primitives) as well as serialize to json, which is
important for us being able to put representations of agents
[on the wire](https://en.wikipedia.org/wiki/Wire_protocol) for web applications and
agent-to-agent communication. *These are essentially
[structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) rather than
traditional classes.*
2. **Internal Data**: Very similar to configuration, AutoGPT passes around boatloads
of internal data. We are interacting with language models and language model APIs
which means we are handling lots of *structured* but *raw* text. Here we also
leverage **pydantic** to both *parse* and *validate* the internal data and also to
give us concrete types which we can use static type checkers to validate against
and discover problems before they show up as bugs at runtime. *These are
essentially [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language))
rather than traditional classes.*
3. **System Interfaces**: This is our primary traditional use of classes in the
re-arch. We have a bunch of systems. We want many of those systems to have
alternative implementations (e.g. via plugins). We use abstract base classes to
define interfaces to communicate with people who might want to provide those
plugins. We provide a single concrete implementation of most of those systems as a
subclass of the interface. This should not be controversial.
The approach is consistent with
[prior](https://github.com/Significant-Gravitas/AutoGPT/issues/2458)
[work](https://github.com/Significant-Gravitas/AutoGPT/pull/2442) done by other
maintainers in this direction.
From an organization standpoint, OO programming is by far the most popular programming
paradigm (especially for Python). It's the one most often taught in programming classes
and the one with the most available online training for people interested in
contributing.
Finally, and importantly, we scoped the plan and initial design of the re-arch as a
large group of maintainers and collaborators early on. This is consistent with the
design we chose and no-one offered alternatives.

View File

@@ -1,92 +0,0 @@
# AutoGPT Core
This subpackage contains the ongoing work for the
[AutoGPT Re-arch](https://github.com/Significant-Gravitas/AutoGPT/issues/4770). It is
a work in progress and is not yet feature complete. In particular, it does not yet
have many of the AutoGPT commands implemented and is pending ongoing work to
[re-incorporate vector-based memory and knowledge retrieval](https://github.com/Significant-Gravitas/AutoGPT/issues/3536).
## [Overview](ARCHITECTURE_NOTES.md)
The AutoGPT Re-arch is a re-implementation of the AutoGPT agent that is designed to be more modular,
more extensible, and more maintainable than the original AutoGPT agent. It is also designed to be
more accessible to new developers and to be easier to contribute to. The re-arch is a work in progress
and is not yet feature complete. It is also not yet ready for production use.
## Running the Re-arch Code
1. Open the `autogpt/core` folder in a terminal
2. Set up a dedicated virtual environment:
`python -m venv .venv`
3. Install everything needed to run the project:
`poetry install`
## CLI Application
There are two client applications for AutoGPT included.
:star2: **This is the reference application I'm working with for now** :star2:
The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the ~~`logger.typewriter_log`~~`user_friendly_output` logic.
- [Entry Point](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_app/main.py)
You'll then need a settings file. Run
```
poetry run cli make-settings
```
This will write a file called `default_agent_settings.yaml` with all the user-modifiable
configuration keys to `~/auto-gpt/default_agent_settings.yml` (and will make the `auto-gpt` directory
in your user directory if it doesn't exist). Your user directory is located in different places
depending on your operating system:
- On Linux, it's `/home/USERNAME`
- On Windows, it's `C:\Users\USERNAME`
- On Mac, it's `/Users/USERNAME`
At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run
the model.
You can then run AutoGPT with
```
poetry run cli run
```
to launch the interaction loop.
### CLI Web App
:warning: I am not actively developing this application. I am primarily working with the traditional CLI app
described above. It is a very good place to get involved if you have web application design experience and are
looking to get involved in the re-arch.
The second app is still a CLI, but it sets up a local webserver that the client application talks to
rather than invoking calls to the Agent library code directly. This application is essentially a sketch
at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed.
- [Entry Point](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/client/client.py)
- [Server API](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py)
To run, you still need to generate a default configuration. You can do
```
poetry run cli-web make-settings
```
It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key.
To run, do
```
poetry run cli-web client
```
This will launch a webserver and then start the client cli application to communicate with it.

View File

@@ -1,18 +0,0 @@
"""The command system provides a way to extend the functionality of the AI agent."""
from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.ability.simple import (
AbilityRegistryConfiguration,
AbilityRegistrySettings,
SimpleAbilityRegistry,
)
__all__ = [
"Ability",
"AbilityConfiguration",
"AbilityRegistry",
"AbilityResult",
"AbilityRegistryConfiguration",
"AbilityRegistrySettings",
"SimpleAbilityRegistry",
]

View File

@@ -1,88 +0,0 @@
import abc
from pprint import pformat
from typing import Any, ClassVar
import inflection
from forge.llm.providers import CompletionModelFunction
from forge.models.config import SystemConfiguration
from forge.models.json_schema import JSONSchema
from pydantic import Field
from autogpt.core.planning.simple import LanguageModelConfiguration
from autogpt.core.plugin.base import PluginLocation
from .schema import AbilityResult
class AbilityConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    # Where the ability implementation can be loaded from.
    location: PluginLocation
    # Extra pip packages the ability needs at runtime.
    packages_required: list[str] = Field(default_factory=list)
    # Fix: the default is None, so the annotation must permit None;
    # the bare `LanguageModelConfiguration = None` contradicted itself.
    language_model_required: LanguageModelConfiguration | None = None
    # Whether the ability needs access to a memory provider.
    memory_provider_required: bool = False
    # Whether the ability needs access to the agent workspace.
    workspace_required: bool = False
class Ability(abc.ABC):
    """A class representing an agent ability."""

    # Plugin location/config that subclasses must provide.
    default_configuration: ClassVar[AbilityConfiguration]

    @classmethod
    def name(cls) -> str:
        """The name of the ability."""
        # Derived from the class name, e.g. ReadFile -> "read_file".
        return inflection.underscore(cls.__name__)

    # NOTE(review): stacking @property on top of @classmethod behaves as a
    # class-level property only on Python 3.9-3.10; it was deprecated in
    # 3.11 and removed later — confirm the supported interpreter range.
    @property
    @classmethod
    @abc.abstractmethod
    def description(cls) -> str:
        """A detailed description of what the ability does."""
        ...

    @property
    @classmethod
    @abc.abstractmethod
    def parameters(cls) -> dict[str, JSONSchema]:
        # Mapping of argument name -> JSON schema describing that argument.
        ...

    @abc.abstractmethod
    async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult:
        # Execute the ability and report its outcome.
        ...

    def __str__(self) -> str:
        # Pretty-print the function spec for logging/debugging.
        return pformat(self.spec)

    @property
    @classmethod
    def spec(cls) -> CompletionModelFunction:
        # Bundle name/description/parameters into the LLM function-call format.
        return CompletionModelFunction(
            name=cls.name(),
            description=cls.description,
            parameters=cls.parameters,
        )
class AbilityRegistry(abc.ABC):
    """Abstract interface for a collection of executable abilities."""

    @abc.abstractmethod
    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        """Add an ability to the registry under the given name."""
        ...

    @abc.abstractmethod
    def list_abilities(self) -> list[str]:
        """Return one string per registered ability."""
        ...

    @abc.abstractmethod
    def dump_abilities(self) -> list[CompletionModelFunction]:
        """Return the registered abilities as LLM function specs."""
        ...

    @abc.abstractmethod
    def get_ability(self, ability_name: str) -> Ability:
        """Look up a registered ability by name."""
        ...

    @abc.abstractmethod
    async def perform(self, ability_name: str, **kwargs: Any) -> AbilityResult:
        """Execute the named ability with the given arguments."""
        ...

View File

@@ -1,12 +0,0 @@
from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel
BUILTIN_ABILITIES = {
QueryLanguageModel.name(): QueryLanguageModel,
}
__all__ = [
"BUILTIN_ABILITIES",
"CreateNewAbility",
"QueryLanguageModel",
]

View File

@@ -1,108 +0,0 @@
import logging
from typing import ClassVar
from forge.models.json_schema import JSONSchema
from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
class CreateNewAbility(Ability):
    """Ability that lets the agent author a new ability as Python code.

    Only the parameter schema the language model must satisfy is declared
    here; execution is not implemented yet (``__call__`` raises
    ``NotImplementedError``).
    """

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.CreateNewAbility",
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
    ):
        self._logger = logger
        self._configuration = configuration

    description: ClassVar[str] = "Create a new ability by writing python code."

    # Schema presented to the language model describing the arguments it
    # must supply when invoking this ability.
    parameters: ClassVar[dict[str, JSONSchema]] = {
        "ability_name": JSONSchema(
            description="A meaningful and concise name for the new ability.",
            type=JSONSchema.Type.STRING,
            required=True,
        ),
        "description": JSONSchema(
            description=(
                "A detailed description of the ability and its uses, "
                "including any limitations."
            ),
            type=JSONSchema.Type.STRING,
            required=True,
        ),
        "arguments": JSONSchema(
            description="A list of arguments that the ability will accept.",
            type=JSONSchema.Type.ARRAY,
            items=JSONSchema(
                type=JSONSchema.Type.OBJECT,
                properties={
                    "name": JSONSchema(
                        description="The name of the argument.",
                        type=JSONSchema.Type.STRING,
                    ),
                    "type": JSONSchema(
                        description=(
                            "The type of the argument. "
                            "Must be a standard json schema type."
                        ),
                        type=JSONSchema.Type.STRING,
                    ),
                    "description": JSONSchema(
                        description=(
                            "A detailed description of the argument and its uses."
                        ),
                        type=JSONSchema.Type.STRING,
                    ),
                },
            ),
        ),
        "required_arguments": JSONSchema(
            description="A list of the names of the arguments that are required.",
            type=JSONSchema.Type.ARRAY,
            items=JSONSchema(
                description="The names of the arguments that are required.",
                type=JSONSchema.Type.STRING,
            ),
        ),
        "package_requirements": JSONSchema(
            description=(
                "A list of the names of the Python packages that are required to "
                "execute the ability."
            ),
            type=JSONSchema.Type.ARRAY,
            items=JSONSchema(
                # Fix: description was missing the word "name".
                description=(
                    "The name of the Python package that is required "
                    "to execute the ability."
                ),
                type=JSONSchema.Type.STRING,
            ),
        ),
        "code": JSONSchema(
            description=(
                "The Python code that will be executed when the ability is called."
            ),
            type=JSONSchema.Type.STRING,
            required=True,
        ),
    }

    async def __call__(
        self,
        ability_name: str,
        description: str,
        arguments: list[dict],
        required_arguments: list[str],
        package_requirements: list[str],
        code: str,
    ) -> AbilityResult:
        # TODO: generate, validate, and register the new ability from `code`.
        raise NotImplementedError

View File

@@ -1,171 +0,0 @@
import logging
import os
from typing import ClassVar
from forge.models.json_schema import JSONSchema
from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.workspace import Workspace
class ReadFile(Ability):
    """Read a file from the agent workspace and extract all of its text."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.ReadFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    description: ClassVar[str] = "Read and parse all text from a file."

    parameters: ClassVar[dict[str, JSONSchema]] = {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to read.",
        ),
    }

    def _check_preconditions(self, filename: str) -> AbilityResult | None:
        """Return a failure AbilityResult if the file can't be read, else None."""
        message = ""
        # Fix: the previous `try: pass / except ImportError:` block could
        # never raise, so the missing-dependency message was unreachable.
        # Probe for the optional dependency explicitly instead.
        import importlib.util

        if importlib.util.find_spec("charset_normalizer") is None:
            message = "Package charset_normalizer is not installed."

        try:
            file_path = self._workspace.get_path(filename)
            if not file_path.exists():
                # Fix: messages previously did not interpolate the filename.
                message = f"File {filename} does not exist."
            if not file_path.is_file():
                message = f"{filename} is not a file."
        except ValueError as e:
            # get_path rejects paths outside the workspace with ValueError.
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename},
                success=False,
                message=message,
                data=None,
            )

    # NOTE(review): the Ability base class declares `async def __call__`;
    # this implementation is synchronous — confirm callers do not await it.
    def __call__(self, filename: str) -> AbilityResult:
        if result := self._check_preconditions(filename):
            return result

        # Imported lazily: `unstructured` is a heavy optional dependency.
        from unstructured.partition.auto import partition

        file_path = self._workspace.get_path(filename)
        try:
            elements = partition(str(file_path))
            # TODO: Lots of other potentially useful information is available
            #       in the partitioned file. Consider returning more of it.
            new_knowledge = Knowledge(
                content="\n\n".join([element.text for element in elements]),
                content_type=ContentType.TEXT,
                content_metadata={"filename": filename},
            )
            success = True
            message = f"File {file_path} read successfully."
        except IOError as e:
            new_knowledge = None
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
            new_knowledge=new_knowledge,
        )
class WriteFile(Ability):
    """Ability that writes text content to a new file in the agent's workspace."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.WriteFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    description: ClassVar[str] = "Write text to a file."

    parameters: ClassVar[dict[str, JSONSchema]] = {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to write.",
        ),
        "contents": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The contents of the file to write.",
        ),
    }

    def _check_preconditions(
        self, filename: str, contents: str
    ) -> AbilityResult | None:
        """Return a failure AbilityResult if the file cannot be written, else None."""
        message = ""
        try:
            file_path = self._workspace.get_path(filename)
            if file_path.exists():
                # FIX: interpolate the actual filename instead of the
                # literal placeholder "(unknown)".
                message = f"File {filename} already exists."
            # FIX: the check was inverted (`if len(contents):`), which
            # rejected every call that DID supply content.
            if not contents:
                message = f"File {filename} was not given any content."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename, "contents": contents},
                success=False,
                message=message,
                data=None,
            )

    def __call__(self, filename: str, contents: str) -> AbilityResult:
        """Write *contents* to a new workspace file and report the outcome."""
        if result := self._check_preconditions(filename, contents):
            return result

        file_path = self._workspace.get_path(filename)
        try:
            directory = os.path.dirname(file_path)
            # FIX: don't crash when the parent directory already exists.
            os.makedirs(directory, exist_ok=True)
            # FIX: write to the resolved workspace path, not the raw
            # workspace-relative filename (which would land in the CWD).
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(contents)
            success = True
            message = f"File {file_path} written successfully."
        except IOError as e:
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
        )

View File

@@ -1,67 +0,0 @@
import logging
from typing import ClassVar
from forge.llm.providers import (
ChatMessage,
ChatModelProvider,
ModelProviderName,
OpenAIModelName,
)
from forge.models.json_schema import JSONSchema
from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.planning.simple import LanguageModelConfiguration
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
class QueryLanguageModel(Ability):
    """Ability that forwards a free-form query to a chat language model."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
        ),
        # Requires an LLM provider; defaults to GPT-3 via OpenAI.
        language_model_required=LanguageModelConfiguration(
            model_name=OpenAIModelName.GPT3,
            provider_name=ModelProviderName.OPENAI,
            temperature=0.9,
        ),
    )
    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
        language_model_provider: ChatModelProvider,
    ):
        self._logger = logger
        self._configuration = configuration
        self._language_model_provider = language_model_provider
    description: ClassVar[str] = (
        "Query a language model."
        " A query should be a question and any relevant context."
    )
    parameters: ClassVar[dict[str, JSONSchema]] = {
        "query": JSONSchema(
            type=JSONSchema.Type.STRING,
            description=(
                "A query for a language model. "
                "A query should contain a question and any relevant context."
            ),
        )
    }
    async def __call__(self, query: str) -> AbilityResult:
        """Send *query* as a single user message and return the model's reply.

        Always reports success; an empty model reply becomes an empty message.
        """
        model_response = await self._language_model_provider.create_chat_completion(
            model_prompt=[ChatMessage.user(query)],
            functions=[],
            model_name=self._configuration.language_model_required.model_name,
        )
        return AbilityResult(
            ability_name=self.name(),
            ability_args={"query": query},
            success=True,
            message=model_response.response.content or "",
        )

View File

@@ -1,30 +0,0 @@
import enum
from typing import Any
from pydantic import BaseModel
class ContentType(str, enum.Enum):
    """Kind of content carried by a piece of Knowledge."""

    # TBD what these actually are.
    TEXT = "text"
    CODE = "code"
class Knowledge(BaseModel):
    """A unit of information produced by an ability (e.g. text read from a file)."""

    # The raw content itself.
    content: str
    # What kind of content this is (text, code, ...).
    content_type: ContentType
    # Free-form metadata about the content (e.g. source filename).
    content_metadata: dict[str, Any]
class AbilityResult(BaseModel):
    """The AbilityResult is a standard response struct for an ability.

    Attributes:
        ability_name: Name of the ability that produced this result.
        ability_args: String-valued arguments the ability was called with.
        success: Whether the ability completed without error.
        message: Human-readable outcome or error description.
        new_knowledge: Knowledge gained by executing the ability, if any.
    """

    ability_name: str
    ability_args: dict[str, str]
    success: bool
    message: str
    # FIX: the field defaults to None, so annotate it as optional instead of
    # declaring a non-optional `Knowledge` with a `None` default.
    new_knowledge: Knowledge | None = None

    def summary(self) -> str:
        """Return a one-line `name(arg=value, ...): message` summary."""
        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
        return f"{self.ability_name}({kwargs}): {self.message}"

View File

@@ -1,98 +0,0 @@
import logging
from forge.llm.providers import (
ChatModelProvider,
CompletionModelFunction,
ModelProviderName,
)
from forge.models.config import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
from autogpt.core.ability.builtins import BUILTIN_ABILITIES
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.memory.base import Memory
from autogpt.core.plugin.simple import SimplePluginService
from autogpt.core.workspace.base import Workspace
class AbilityRegistryConfiguration(SystemConfiguration):
    """Configuration for the AbilityRegistry subsystem."""

    # Maps each ability name to the configuration used to instantiate it.
    abilities: dict[str, AbilityConfiguration]
class AbilityRegistrySettings(SystemSettings):
    """Settings bundle (name/description + configuration) for the registry."""

    configuration: AbilityRegistryConfiguration
class SimpleAbilityRegistry(AbilityRegistry, Configurable):
    """Registry that instantiates configured abilities and dispatches calls to them."""

    # By default, register every built-in ability with its own defaults.
    default_settings = AbilityRegistrySettings(
        name="simple_ability_registry",
        description="A simple ability registry.",
        configuration=AbilityRegistryConfiguration(
            abilities={
                ability_name: ability.default_configuration
                for ability_name, ability in BUILTIN_ABILITIES.items()
            },
        ),
    )
    def __init__(
        self,
        settings: AbilityRegistrySettings,
        logger: logging.Logger,
        memory: Memory,
        workspace: Workspace,
        model_providers: dict[ModelProviderName, ChatModelProvider],
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._memory = memory
        self._workspace = workspace
        self._model_providers = model_providers
        self._abilities: list[Ability] = []
        # Eagerly instantiate every configured ability at construction time.
        for (
            ability_name,
            ability_configuration,
        ) in self._configuration.abilities.items():
            self.register_ability(ability_name, ability_configuration)
    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        """Load the ability's class via the plugin service, inject the
        dependencies its configuration declares, and add it to the registry."""
        ability_class = SimplePluginService.get_plugin(ability_configuration.location)
        ability_args = {
            "logger": self._logger.getChild(ability_name),
            "configuration": ability_configuration,
        }
        if ability_configuration.packages_required:
            # TODO: Check packages are installed and maybe install them.
            pass
        if ability_configuration.memory_provider_required:
            ability_args["memory"] = self._memory
        if ability_configuration.workspace_required:
            ability_args["workspace"] = self._workspace
        if ability_configuration.language_model_required:
            ability_args["language_model_provider"] = self._model_providers[
                ability_configuration.language_model_required.provider_name
            ]
        ability = ability_class(**ability_args)
        self._abilities.append(ability)
    def list_abilities(self) -> list[str]:
        """Return human-readable `name: description` lines for every ability."""
        return [
            f"{ability.name()}: {ability.description}" for ability in self._abilities
        ]
    def dump_abilities(self) -> list[CompletionModelFunction]:
        """Return the LLM function specs for all registered abilities."""
        return [ability.spec for ability in self._abilities]
    def get_ability(self, ability_name: str) -> Ability:
        """Look up a registered ability by name; raise ValueError if absent."""
        for ability in self._abilities:
            if ability.name() == ability_name:
                return ability
        raise ValueError(f"Ability '{ability_name}' not found.")
    async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        """Execute the named ability with the given keyword arguments."""
        ability = self.get_ability(ability_name)
        return await ability(**kwargs)

View File

@@ -1,9 +0,0 @@
"""The Agent is an autonomous entity guided by a LLM provider."""
from autogpt.core.agent.base import Agent
from autogpt.core.agent.simple import AgentSettings, SimpleAgent

# Public API of the agent package.
__all__ = [
    "Agent",
    "AgentSettings",
    "SimpleAgent",
]

View File

@@ -1,26 +0,0 @@
import abc
import logging
from pathlib import Path
class Agent(abc.ABC):
    """Abstract interface every agent implementation must satisfy."""

    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        ...
    @classmethod
    @abc.abstractmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "Agent":
        # Alternate constructor: rebuild an agent from its saved workspace.
        ...
    @abc.abstractmethod
    async def determine_next_ability(self, *args, **kwargs):
        # Decide which ability the agent should execute next.
        ...
    @abc.abstractmethod
    def __repr__(self):
        ...

View File

@@ -1,398 +0,0 @@
import logging
from datetime import datetime
from pathlib import Path
from typing import Any
from forge.llm.providers import CompletionModelFunction, OpenAIProvider, OpenAISettings
from forge.models.config import Configurable, SystemConfiguration, SystemSettings
from pydantic import BaseModel
from autogpt.core.ability import (
AbilityRegistrySettings,
AbilityResult,
SimpleAbilityRegistry,
)
from autogpt.core.agent.base import Agent
from autogpt.core.memory import MemorySettings, SimpleMemory
from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
from autogpt.core.plugin.simple import (
PluginLocation,
PluginStorageFormat,
SimplePluginService,
)
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
class AgentSystems(SystemConfiguration):
    """Plugin locations for each subsystem the agent is composed of."""

    ability_registry: PluginLocation
    memory: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation
class AgentConfiguration(SystemConfiguration):
    """Core agent parameters: identity, goals, cycle limits, and subsystems."""

    # Number of think/act cycles executed so far.
    cycle_count: int
    # Maximum cycles allowed per task before intervention.
    max_task_cycle_count: int
    creation_time: str
    name: str
    role: str
    goals: list[str]
    systems: AgentSystems
class AgentSystemSettings(SystemSettings):
    """Settings bundle (name/description + configuration) for the agent system."""

    configuration: AgentConfiguration
class AgentSettings(BaseModel):
    """Aggregate settings for the agent and every one of its subsystems."""

    agent: AgentSystemSettings
    ability_registry: AbilityRegistrySettings
    memory: MemorySettings
    openai_provider: OpenAISettings
    planning: PlannerSettings
    workspace: WorkspaceSettings
    def update_agent_name_and_goals(self, agent_goals: dict) -> None:
        """Copy name/role/goals from an LLM-produced `agent_goals` dict.

        Expects the keys "agent_name", "agent_role" and "agent_goals".
        """
        self.agent.configuration.name = agent_goals["agent_name"]
        self.agent.configuration.role = agent_goals["agent_role"]
        self.agent.configuration.goals = agent_goals["agent_goals"]
class SimpleAgent(Agent, Configurable):
    """Reference agent implementation wiring together planning, memory,
    an ability registry, a workspace, and an OpenAI provider.

    Runs a task queue: plan -> pick task -> choose ability -> execute,
    driven externally via `determine_next_ability`/`execute_next_ability`.
    """

    default_settings = AgentSystemSettings(
        name="simple_agent",
        description="A simple agent.",
        configuration=AgentConfiguration(
            name="Entrepreneur-GPT",
            role=(
                "An AI designed to autonomously develop and run businesses with "
                "the sole goal of increasing your net worth."
            ),
            goals=[
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ],
            cycle_count=0,
            max_task_cycle_count=3,
            creation_time="",
            systems=AgentSystems(
                ability_registry=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.ability.SimpleAbilityRegistry",
                ),
                memory=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.memory.SimpleMemory",
                ),
                openai_provider=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route=("forge.llm.model_providers.OpenAIProvider"),
                ),
                planning=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.planning.SimplePlanner",
                ),
                workspace=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.workspace.SimpleWorkspace",
                ),
            ),
        ),
    )
    def __init__(
        self,
        settings: AgentSystemSettings,
        logger: logging.Logger,
        ability_registry: SimpleAbilityRegistry,
        memory: SimpleMemory,
        openai_provider: OpenAIProvider,
        planning: SimplePlanner,
        workspace: SimpleWorkspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._ability_registry = ability_registry
        self._memory = memory
        # FIXME: Need some work to make this work as a dict of providers
        # Getting the construction of the config to work is a bit tricky
        self._openai_provider = openai_provider
        self._planning = planning
        self._workspace = workspace
        # Mutable run state: pending tasks, finished tasks, and the
        # task/ability currently awaiting user confirmation.
        self._task_queue = []
        self._completed_tasks = []
        self._current_task = None
        self._next_ability = None
    @classmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "SimpleAgent":
        """Rebuild an agent from settings persisted in *workspace_path*,
        instantiating each subsystem via the plugin service."""
        agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
        agent_args = {}
        agent_args["settings"] = agent_settings.agent
        agent_args["logger"] = logger
        agent_args["workspace"] = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger,
        )
        agent_args["openai_provider"] = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger,
        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,
            logger,
            model_providers={"openai": agent_args["openai_provider"]},
        )
        agent_args["memory"] = cls._get_system_instance(
            "memory",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
        )
        agent_args["ability_registry"] = cls._get_system_instance(
            "ability_registry",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
            memory=agent_args["memory"],
            model_providers={"openai": agent_args["openai_provider"]},
        )
        return cls(**agent_args)
    async def build_initial_plan(self) -> dict:
        """Ask the planner for an initial task list, queue it by priority,
        mark the top task READY, and return the raw parsed plan."""
        plan = await self._planning.make_initial_plan(
            agent_name=self._configuration.name,
            agent_role=self._configuration.role,
            agent_goals=self._configuration.goals,
            abilities=self._ability_registry.list_abilities(),
        )
        tasks = [Task.parse_obj(task) for task in plan.parsed_result["task_list"]]
        # TODO: Should probably do a step to evaluate the quality of the generated tasks
        # and ensure that they have actionable ready and acceptance criteria
        self._task_queue.extend(tasks)
        # Sort descending by priority; the queue is popped from the end,
        # so the last element is the next task to run.
        self._task_queue.sort(key=lambda t: t.priority, reverse=True)
        self._task_queue[-1].context.status = TaskStatus.READY
        return plan.parsed_result
    async def determine_next_ability(self, *args, **kwargs):
        """Pop the next task and let the planner pick an ability for it.

        Returns the (task, parsed ability proposal) pair, or a plain
        response dict when the queue is empty.
        """
        if not self._task_queue:
            return {"response": "I don't have any tasks to work on right now."}
        self._configuration.cycle_count += 1
        task = self._task_queue.pop()
        self._logger.info(f"Working on task: {task}")
        task = await self._evaluate_task_and_add_context(task)
        next_ability = await self._choose_next_ability(
            task,
            self._ability_registry.dump_abilities(),
        )
        self._current_task = task
        self._next_ability = next_ability.parsed_result
        return self._current_task, self._next_ability
    async def execute_next_ability(self, user_input: str, *args, **kwargs):
        """Run the previously proposed ability once the user confirms ("y").

        Any other user input is unsupported and raises NotImplementedError.
        """
        if user_input == "y":
            ability = self._ability_registry.get_ability(
                self._next_ability["next_ability"]
            )
            ability_response = await ability(**self._next_ability["ability_arguments"])
            await self._update_tasks_and_memory(ability_response)
            # Unfinished tasks go back on the queue for another cycle.
            if self._current_task.context.status == TaskStatus.DONE:
                self._completed_tasks.append(self._current_task)
            else:
                self._task_queue.append(self._current_task)
            self._current_task = None
            self._next_ability = None
            return ability_response.dict()
        else:
            raise NotImplementedError
    async def _evaluate_task_and_add_context(self, task: Task) -> Task:
        """Evaluate the task and add context to it."""
        if task.context.status == TaskStatus.IN_PROGRESS:
            # Nothing to do here
            return task
        else:
            self._logger.debug(f"Evaluating task {task} and adding relevant context.")
            # TODO: Look up relevant memories (need working memory system)
            # TODO: Eval whether there is enough information to start the task (w/ LLM).
            task.context.enough_info = True
            task.context.status = TaskStatus.IN_PROGRESS
            return task
    async def _choose_next_ability(
        self,
        task: Task,
        ability_specs: list[CompletionModelFunction],
    ):
        """Choose the next ability to use for the task."""
        self._logger.debug(f"Choosing next ability for task {task}.")
        if task.context.cycle_count > self._configuration.max_task_cycle_count:
            # Don't hit the LLM, just set the next action as "breakdown_task"
            # with an appropriate reason
            raise NotImplementedError
        elif not task.context.enough_info:
            # Don't ask the LLM, just set the next action as "breakdown_task"
            # with an appropriate reason
            raise NotImplementedError
        else:
            next_ability = await self._planning.determine_next_ability(
                task, ability_specs
            )
            return next_ability
    async def _update_tasks_and_memory(self, ability_result: AbilityResult):
        """Record the executed ability against the current task's context."""
        self._current_task.context.cycle_count += 1
        self._current_task.context.prior_actions.append(ability_result)
        # TODO: Summarize new knowledge
        # TODO: store knowledge and summaries in memory and in relevant tasks
        # TODO: evaluate whether the task is complete
    def __repr__(self):
        return "SimpleAgent()"
    ################################################################
    # Factory interface for agent bootstrapping and initialization #
    ################################################################
    @classmethod
    def build_user_configuration(cls) -> dict[str, Any]:
        """Build the user's configuration."""
        configuration_dict = {
            "agent": cls.get_user_config(),
        }
        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        for system_name, system_location in system_locations.items():
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.get_user_config()
        # Drop branches that contain nothing but empty dicts.
        configuration_dict = _prune_empty_dicts(configuration_dict)
        return configuration_dict
    @classmethod
    def compile_settings(
        cls, logger: logging.Logger, user_configuration: dict
    ) -> AgentSettings:
        """Compile the user's configuration with the defaults."""
        logger.debug("Processing agent system configuration.")
        configuration_dict = {
            "agent": cls.build_agent_configuration(
                user_configuration.get("agent", {})
            ).dict(),
        }
        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        # Build up default configuration
        for system_name, system_location in system_locations.items():
            logger.debug(f"Compiling configuration for system {system_name}")
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.build_agent_configuration(
                user_configuration.get(system_name, {})
            ).dict()
        return AgentSettings.parse_obj(configuration_dict)
    @classmethod
    async def determine_agent_name_and_goals(
        cls,
        user_objective: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ) -> dict:
        """Use the planner to derive an agent name/role/goals from the
        user's objective; returns the planner's parsed result dict."""
        logger.debug("Loading OpenAI provider.")
        provider: OpenAIProvider = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger=logger,
        )
        logger.debug("Loading agent planner.")
        agent_planner: SimplePlanner = cls._get_system_instance(
            "planning",
            agent_settings,
            logger=logger,
            model_providers={"openai": provider},
        )
        logger.debug("determining agent name and goals.")
        model_response = await agent_planner.decide_name_and_goals(
            user_objective,
        )
        return model_response.parsed_result
    @classmethod
    def provision_agent(
        cls,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ):
        """Stamp the creation time and set up the agent's workspace on disk."""
        agent_settings.agent.configuration.creation_time = datetime.now().strftime(
            "%Y%m%d_%H%M%S"
        )
        workspace: SimpleWorkspace = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger=logger,
        )
        return workspace.setup_workspace(agent_settings, logger)
    @classmethod
    def _get_system_instance(
        cls,
        system_name: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
        *args,
        **kwargs,
    ):
        """Instantiate the named subsystem from its plugin location, passing
        its settings plus any extra constructor arguments."""
        system_locations = agent_settings.agent.configuration.systems.dict()
        system_settings = getattr(agent_settings, system_name)
        system_class = SimplePluginService.get_plugin(system_locations[system_name])
        system_instance = system_class(
            system_settings,
            *args,
            logger=logger.getChild(system_name),
            **kwargs,
        )
        return system_instance
def _prune_empty_dicts(d: dict) -> dict:
    """Drop branches of a nested dict whose leaves are all empty dicts.

    Args:
        d: The dictionary to prune.

    Returns:
        A new dictionary with the empty branches removed.
    """
    result: dict = {}
    for key, val in d.items():
        if not isinstance(val, dict):
            # Non-dict leaves are always kept verbatim.
            result[key] = val
            continue
        if subtree := _prune_empty_dicts(val):
            # Keep the branch only when something survived the pruning.
            result[key] = subtree
    return result

View File

@@ -1,9 +0,0 @@
"""The memory subsystem manages the Agent's long-term memory."""
from autogpt.core.memory.base import Memory
from autogpt.core.memory.simple import MemorySettings, SimpleMemory

# Public API of the memory package.
__all__ = [
    "Memory",
    "MemorySettings",
    "SimpleMemory",
]

View File

@@ -1,13 +0,0 @@
import abc
class Memory(abc.ABC):
    """Abstract base for memory backends; currently declares no interface."""

    pass
class MemoryItem(abc.ABC):
    """Abstract base for a single stored memory; currently a placeholder."""

    pass
class MessageHistory(abc.ABC):
    """Abstract base for a conversation history; currently a placeholder."""

    pass

View File

@@ -1,48 +0,0 @@
import json
import logging
from forge.models.config import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.workspace import Workspace
class MemoryConfiguration(SystemConfiguration):
    """Configuration for the memory subsystem; no options defined yet."""

    pass
class MemorySettings(SystemSettings):
    """Settings bundle (name/description + configuration) for the memory system."""

    configuration: MemoryConfiguration
class MessageHistory:
    """Thin wrapper holding a previously recorded list of messages."""

    def __init__(self, previous_message_history: list[str]):
        self._message_history = previous_message_history
class SimpleMemory(Memory, Configurable):
    """Memory backend that loads message history from a JSON file in the workspace."""

    default_settings = MemorySettings(
        name="simple_memory",
        description="A simple memory.",
        configuration=MemoryConfiguration(),
    )
    def __init__(
        self,
        settings: MemorySettings,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._message_history = self._load_message_history(workspace)
    @staticmethod
    def _load_message_history(workspace: Workspace):
        """Read message_history.json from the workspace (empty history if absent)."""
        message_history_path = workspace.get_path("message_history.json")
        if message_history_path.exists():
            with message_history_path.open("r") as f:
                message_history = json.load(f)
        else:
            message_history = []
        return MessageHistory(message_history)

View File

@@ -1,11 +0,0 @@
"""The planning system organizes the Agent's activities."""
from autogpt.core.planning.schema import Task, TaskStatus, TaskType
from autogpt.core.planning.simple import PlannerSettings, SimplePlanner

# Public API of the planning package.
__all__ = [
    "PlannerSettings",
    "SimplePlanner",
    "Task",
    "TaskStatus",
    "TaskType",
]

View File

@@ -1,54 +0,0 @@
# class Planner(abc.ABC):
# """
# Manages the agent's planning and goal-setting
# by constructing language model prompts.
# """
#
# @staticmethod
# @abc.abstractmethod
# async def decide_name_and_goals(
# user_objective: str,
# ) -> LanguageModelResponse:
# """Decide the name and goals of an Agent from a user-defined objective.
#
# Args:
# user_objective: The user-defined objective for the agent.
#
# Returns:
# The agent name and goals as a response from the language model.
#
# """
# ...
#
# @abc.abstractmethod
# async def plan(self, context: PlanningContext) -> LanguageModelResponse:
# """Plan the next ability for the Agent.
#
# Args:
# context: A context object containing information about the agent's
# progress, result, memories, and feedback.
#
#
# Returns:
# The next ability the agent should take along with thoughts and reasoning.
#
# """
# ...
#
# @abc.abstractmethod
# def reflect(
# self,
# context: ReflectionContext,
# ) -> LanguageModelResponse:
# """Reflect on a planned ability and provide self-criticism.
#
#
# Args:
# context: A context object containing information about the agent's
# reasoning, plan, thoughts, and criticism.
#
# Returns:
# Self-criticism about the agent's plan.
#
# """
# ...

View File

@@ -1,12 +0,0 @@
from .initial_plan import InitialPlan, InitialPlanConfiguration
from .name_and_goals import NameAndGoals, NameAndGoalsConfiguration
from .next_ability import NextAbility, NextAbilityConfiguration

# Public prompt-strategy API of this package.
__all__ = [
    "InitialPlan",
    "InitialPlanConfiguration",
    "NameAndGoals",
    "NameAndGoalsConfiguration",
    "NextAbility",
    "NextAbilityConfiguration",
]

View File

@@ -1,204 +0,0 @@
import logging
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.prompting.utils import to_numbered_list
from forge.llm.providers import (
AssistantChatMessage,
ChatMessage,
CompletionModelFunction,
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
from autogpt.core.planning.schema import Task, TaskType
# Module-level logger for this prompt strategy.
logger = logging.getLogger(__name__)
class InitialPlanConfiguration(SystemConfiguration):
    """User-configurable parameters of the InitialPlan prompt strategy."""

    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    # Serialized CompletionModelFunction schema for the plan-creation call.
    create_plan_function: dict = UserConfigurable()
class InitialPlan(PromptStrategy):
    """Prompt strategy that asks the LLM to produce an agent's initial task list
    via the `create_initial_agent_plan` function call."""

    DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
        "You are an expert project planner. "
        "Your responsibility is to create work plans for autonomous agents. "
        "You will be given a name, a role, set of goals for the agent to accomplish. "
        "Your job is to break down those goals into a set of tasks that the agent can"
        " accomplish to achieve those goals. "
        "Agents are resourceful, but require clear instructions."
        " Each task you create should have clearly defined `ready_criteria` that the"
        " agent can check to see if the task is ready to be started."
        " Each task should also have clearly defined `acceptance_criteria` that the"
        " agent can check to evaluate if the task is complete. "
        "You should create as many tasks as you think is necessary to accomplish"
        " the goals.\n\n"
        "System Info:\n{system_info}"
    )
    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]
    DEFAULT_USER_PROMPT_TEMPLATE = (
        "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}"
    )
    # Function spec the LLM must call; its schema defines the task fields.
    DEFAULT_CREATE_PLAN_FUNCTION = CompletionModelFunction(
        name="create_initial_agent_plan",
        description=(
            "Creates a set of tasks that forms the initial plan of an autonomous agent."
        ),
        parameters={
            "task_list": JSONSchema(
                type=JSONSchema.Type.ARRAY,
                items=JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    properties={
                        "objective": JSONSchema(
                            type=JSONSchema.Type.STRING,
                            description=(
                                "An imperative verb phrase that succinctly describes "
                                "the task."
                            ),
                        ),
                        "type": JSONSchema(
                            type=JSONSchema.Type.STRING,
                            description="A categorization for the task.",
                            enum=[t.value for t in TaskType],
                        ),
                        "acceptance_criteria": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                                description=(
                                    "A list of measurable and testable criteria that "
                                    "must be met for the task to be considered "
                                    "complete."
                                ),
                            ),
                        ),
                        "priority": JSONSchema(
                            type=JSONSchema.Type.INTEGER,
                            description=(
                                "A number between 1 and 10 indicating the priority of "
                                "the task relative to other generated tasks."
                            ),
                            minimum=1,
                            maximum=10,
                        ),
                        "ready_criteria": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                                description=(
                                    "A list of measurable and testable criteria that "
                                    "must be met before the task can be started."
                                ),
                            ),
                        ),
                    },
                ),
            ),
        },
    )
    default_configuration: InitialPlanConfiguration = InitialPlanConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION.schema,
    )
    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        create_plan_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        # Re-hydrate the function spec from its serialized (dict) form.
        self._create_plan_function = CompletionModelFunction.parse(create_plan_function)
    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification
    def build_prompt(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> ChatPrompt:
        """Fill the system/user templates with agent info and return the prompt."""
        template_kwargs = {
            "agent_name": agent_name,
            "agent_role": agent_role,
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }
        # Lists are rendered as numbered lines before template substitution.
        template_kwargs["agent_goals"] = to_numbered_list(
            agent_goals, **template_kwargs
        )
        template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info, **template_kwargs
        )
        system_prompt = ChatMessage.system(
            self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = ChatMessage.user(
            self._user_prompt_template.format(**template_kwargs),
        )
        return ChatPrompt(
            messages=[system_prompt, user_prompt],
            functions=[self._create_plan_function],
            # TODO:
            tokens_used=0,
        )
    def parse_response_content(
        self,
        response_content: AssistantChatMessage,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        Raises:
            ValueError: If the LLM did not call the plan-creation function.
        """
        try:
            if not response_content.tool_calls:
                raise ValueError(
                    f"LLM did not call {self._create_plan_function.name} function; "
                    "plan creation failed"
                )
            parsed_response: object = response_content.tool_calls[0].function.arguments
            parsed_response["task_list"] = [
                Task.parse_obj(task) for task in parsed_response["task_list"]
            ]
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response_content}")
            raise
        return parsed_response

View File

@@ -1,146 +0,0 @@
import logging
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.providers import (
AssistantChatMessage,
ChatMessage,
CompletionModelFunction,
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
# Module-level logger for this prompt strategy.
logger = logging.getLogger(__name__)
class NameAndGoalsConfiguration(SystemConfiguration):
    """User-configurable parameters of the NameAndGoals prompt strategy."""

    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt: str = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    # Serialized CompletionModelFunction schema for the create_agent call.
    create_agent_function: dict = UserConfigurable()
class NameAndGoals(PromptStrategy):
    """Prompt strategy that derives an agent profile (name, role, goals)
    from a free-form user objective via a `create_agent` function call.
    """

    DEFAULT_SYSTEM_PROMPT = (
        "Your job is to respond to a user-defined task, given in triple quotes, by "
        "invoking the `create_agent` function to generate an autonomous agent to "
        "complete the task. "
        "You should supply a role-based name for the agent, "
        "an informative description for what the agent does, and "
        "1 to 5 goals that are optimally aligned with the successful completion of "
        "its assigned task.\n"
        "\n"
        "Example Input:\n"
        '"""Help me with marketing my business"""\n\n'
        "Example Function Call:\n"
        "create_agent(name='CMOGPT', "
        "description='A professional digital marketer AI that assists Solopreneurs in "
        "growing their businesses by providing world-class expertise in solving "
        "marketing problems for SaaS, content products, agencies, and more.', "
        "goals=['Engage in effective problem-solving, prioritization, planning, and "
        "supporting execution to address your marketing needs as your virtual Chief "
        "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
        "help you make informed decisions without the use of platitudes or overly "
        "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
        "campaigns that maximize results with minimal time and budget investment.', "
        "'Proactively take the lead in guiding you and offering suggestions when faced "
        "with unclear information or uncertainty to ensure your marketing strategy "
        "remains on track.'])"
    )

    # The objective is wrapped in triple quotes, matching the system prompt.
    DEFAULT_USER_PROMPT_TEMPLATE = '"""{user_objective}"""'

    DEFAULT_CREATE_AGENT_FUNCTION = CompletionModelFunction(
        name="create_agent",
        description="Create a new autonomous AI agent to complete a given task.",
        parameters={
            "agent_name": JSONSchema(
                type=JSONSchema.Type.STRING,
                description="A short role-based name for an autonomous agent.",
            ),
            "agent_role": JSONSchema(
                type=JSONSchema.Type.STRING,
                description=(
                    "An informative one sentence description of what the AI agent does"
                ),
            ),
            "agent_goals": JSONSchema(
                type=JSONSchema.Type.ARRAY,
                minItems=1,
                maxItems=5,
                items=JSONSchema(
                    type=JSONSchema.Type.STRING,
                ),
                description=(
                    "One to five highly effective goals that are optimally aligned "
                    "with the completion of a specific task. "
                    "The number and complexity of the goals should correspond to the "
                    "complexity of the agent's primary objective."
                ),
            ),
        },
    )

    default_configuration: NameAndGoalsConfiguration = NameAndGoalsConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION.schema,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        """Initialize the strategy, typically from `default_configuration`."""
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        # Re-hydrate the function spec from its dict (schema) form.
        self._create_agent_function = CompletionModelFunction.parse(
            create_agent_function
        )

    @property
    def model_classification(self) -> LanguageModelClassification:
        # Tells the planner which model tier to route this prompt to.
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        """Assemble the system+user message prompt for agent-profile creation."""
        system_message = ChatMessage.system(self._system_prompt_message)
        user_message = ChatMessage.user(
            self._user_prompt_template.format(
                user_objective=user_objective,
            )
        )
        prompt = ChatPrompt(
            messages=[system_message, user_message],
            functions=[self._create_agent_function],
            # TODO
            tokens_used=0,
        )
        return prompt

    def parse_response_content(
        self,
        response_content: AssistantChatMessage,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response: the `create_agent` call arguments.

        Raises:
            ValueError: If the LLM did not call the `create_agent` function.
        """
        try:
            if not response_content.tool_calls:
                raise ValueError(
                    f"LLM did not call {self._create_agent_function} function; "
                    "agent profile creation failed"
                )
            parsed_response = response_content.tool_calls[0].function.arguments
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response_content}")
            raise
        return parsed_response

View File

@@ -1,201 +0,0 @@
import logging
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.prompting.utils import to_numbered_list
from forge.llm.providers import (
AssistantChatMessage,
ChatMessage,
CompletionModelFunction,
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
from autogpt.core.planning.schema import Task
logger = logging.getLogger(__name__)
class NextAbilityConfiguration(SystemConfiguration):
    """User-configurable settings for the `NextAbility` prompt strategy."""

    # Which model tier (fast/smart) the strategy should run on.
    model_classification: LanguageModelClassification = UserConfigurable()
    # Template for the system message; formatted with {system_info}.
    system_prompt_template: str = UserConfigurable()
    # Lines describing the runtime environment (OS, budget, current time).
    system_info: list[str] = UserConfigurable()
    # Template for the user message describing task state and expectations.
    user_prompt_template: str = UserConfigurable()
    # Extra JSON-schema properties appended to every ability's signature.
    additional_ability_arguments: dict = UserConfigurable()
class NextAbility(PromptStrategy):
    """Prompt strategy that asks the model to choose the next ability
    (function) to execute for the current task, together with its arguments
    and a motivation / self-criticism / reasoning rationale.
    """

    DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    # Fixed typos in the original template: "is is" -> "is",
    # "Here is the actions" -> "Here are the actions".
    DEFAULT_USER_PROMPT_TEMPLATE = (
        "Your current task is {task_objective}.\n"
        "You have taken {cycle_count} actions on this task already. "
        "Here are the actions you have taken and their results:\n"
        "{action_history}\n\n"
        "Here is additional information that may be useful to you:\n"
        "{additional_info}\n\n"
        "Additionally, you should consider the following:\n"
        "{user_input}\n\n"
        "Your task of {task_objective} is complete when the following acceptance"
        " criteria have been met:\n"
        "{acceptance_criteria}\n\n"
        "Please choose one of the provided functions to accomplish this task. "
        "Some tasks may require multiple functions to accomplish. If that is the case,"
        " choose the function that you think is most appropriate for the current"
        " situation given your progress so far."
    )

    # Rationale arguments injected into every ability's call signature.
    # Fixed typo in the original: "choosing choosing" -> "choosing".
    DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
        "motivation": JSONSchema(
            type=JSONSchema.Type.STRING,
            description=(
                "Your justification for choosing this function instead of a "
                "different one."
            ),
        ),
        "self_criticism": JSONSchema(
            type=JSONSchema.Type.STRING,
            description=(
                "Thoughtful self-criticism that explains why this function may not be "
                "the best choice."
            ),
        ),
        "reasoning": JSONSchema(
            type=JSONSchema.Type.STRING,
            description=(
                "Your reasoning for choosing this function taking into account the "
                "`motivation` and weighing the `self_criticism`."
            ),
        ),
    }

    default_configuration: NextAbilityConfiguration = NextAbilityConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        additional_ability_arguments={
            k: v.to_dict() for k, v in DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS.items()
        },
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        additional_ability_arguments: dict,
    ):
        """Initialize the strategy, typically from `default_configuration`."""
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._additional_ability_arguments = JSONSchema.parse_properties(
            additional_ability_arguments
        )
        # Every injected rationale argument is mandatory in the ability call.
        for p in self._additional_ability_arguments.values():
            p.required = True

    @property
    def model_classification(self) -> LanguageModelClassification:
        # Tells the planner which model tier to route this prompt to.
        return self._model_classification

    def build_prompt(
        self,
        task: Task,
        ability_specs: list[CompletionModelFunction],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> ChatPrompt:
        """Assemble the prompt asking the model for the next ability to run.

        Args:
            task: The task currently being worked on.
            ability_specs: Function specs of the abilities available to choose.
            os_info: Human-readable host OS description.
            api_budget: Remaining provider budget in dollars.
            current_time: Formatted current date/time string.

        Returns:
            A `ChatPrompt` exposing `ability_specs` as callable functions.
        """
        template_kwargs = {
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }

        # Augment each ability signature with the rationale arguments
        # (motivation / self_criticism / reasoning). NOTE: mutates the
        # passed-in specs in place.
        for ability in ability_specs:
            ability.parameters.update(self._additional_ability_arguments)

        template_kwargs["task_objective"] = task.objective
        template_kwargs["cycle_count"] = task.context.cycle_count
        template_kwargs["action_history"] = to_numbered_list(
            [action.summary() for action in task.context.prior_actions],
            no_items_response="You have not taken any actions yet.",
            **template_kwargs,
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
            + [info for info in task.context.supplementary_info],
            no_items_response=(
                "There is no additional information available at this time."
            ),
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )
        template_kwargs["acceptance_criteria"] = to_numbered_list(
            [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
            **template_kwargs,
        )
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info,
            **template_kwargs,
        )

        system_prompt = ChatMessage.system(
            self._system_prompt_template.format(**template_kwargs)
        )
        user_prompt = ChatMessage.user(
            self._user_prompt_template.format(**template_kwargs)
        )

        return ChatPrompt(
            messages=[system_prompt, user_prompt],
            functions=ability_specs,
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: AssistantChatMessage,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response: the chosen ability name, its arguments, and
            the rationale fields split out of the argument dict.

        Raises:
            ValueError: If the LLM did not call any function.
            KeyError: If a rationale argument is missing from the call.
        """
        try:
            if not response_content.tool_calls:
                raise ValueError("LLM did not call any function")
            function_name = response_content.tool_calls[0].function.name
            function_arguments = response_content.tool_calls[0].function.arguments
            # pop() separates the rationale from the actual ability arguments.
            parsed_response = {
                "motivation": function_arguments.pop("motivation"),
                "self_criticism": function_arguments.pop("self_criticism"),
                "reasoning": function_arguments.pop("reasoning"),
                "next_ability": function_name,
                "ability_arguments": function_arguments,
            }
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response_content}")
            raise
        return parsed_response

View File

@@ -1,48 +0,0 @@
import enum
from typing import Optional
from pydantic import BaseModel, Field
from autogpt.core.ability.schema import AbilityResult
# The closed set of work-item categories the planner can assign to a task.
# Built with the functional Enum API; ``type=str`` mixes in ``str`` exactly
# as the class-based ``class TaskType(str, enum.Enum)`` form does, so members
# compare equal to their lowercase string values.
TaskType = enum.Enum(
    "TaskType",
    {
        label.upper(): label
        for label in ("research", "write", "edit", "code", "design", "test", "plan")
    },
    type=str,
)
# Lifecycle states of a task, from backlog through completion.
# Functional Enum API with ``type=str`` — behaviorally identical to the
# class-based ``class TaskStatus(str, enum.Enum)`` form.
TaskStatus = enum.Enum(
    "TaskStatus",
    {state.upper(): state for state in ("backlog", "ready", "in_progress", "done")},
    type=str,
)
class TaskContext(BaseModel):
    """Mutable execution state accumulated while the agent works on a task."""

    # Number of think/act cycles already spent on this task.
    cycle_count: int = 0
    status: TaskStatus = TaskStatus.BACKLOG
    # Parent task, if this task was split off from a larger one.
    parent: Optional["Task"] = None
    # Results of abilities already executed for this task.
    prior_actions: list[AbilityResult] = Field(default_factory=list)
    # NOTE(review): element type left unspecified upstream; presumably
    # memory items with a .summary() method — confirm against consumers.
    memories: list = Field(default_factory=list)
    user_input: list[str] = Field(default_factory=list)
    supplementary_info: list[str] = Field(default_factory=list)
    # Set once enough information has been gathered to act on the task.
    enough_info: bool = False
class Task(BaseModel):
    """A single unit of work in the agent's plan."""

    objective: str
    type: str  # TaskType FIXME: gpt does not obey the enum parameter in its schema
    priority: int
    # Conditions that must hold before work on the task can start.
    ready_criteria: list[str]
    # Conditions that define successful completion of the task.
    acceptance_criteria: list[str]
    context: TaskContext = Field(default_factory=TaskContext)


# Need to resolve the circular dependency between Task and TaskContext
# (TaskContext.parent is a forward reference to "Task") now that both
# models are defined.
TaskContext.update_forward_refs()

View File

@@ -1,188 +0,0 @@
import logging
import platform
import time
import distro
from forge.llm.prompting import PromptStrategy
from forge.llm.prompting.schema import LanguageModelClassification
from forge.llm.providers import (
ChatModelProvider,
ChatModelResponse,
CompletionModelFunction,
ModelProviderName,
OpenAIModelName,
)
from forge.models.config import (
Configurable,
SystemConfiguration,
SystemSettings,
UserConfigurable,
)
from autogpt.core.planning import prompt_strategies
from autogpt.core.planning.schema import Task
from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
from autogpt.core.workspace import Workspace
class LanguageModelConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    # Provider-specific model identifier (e.g. an OpenAI model name).
    model_name: str = UserConfigurable()
    # Which provider serves this model.
    provider_name: ModelProviderName = UserConfigurable()
    # Sampling temperature passed through to the provider.
    temperature: float = UserConfigurable()
class PromptStrategiesConfiguration(SystemConfiguration):
    """Bundles the configuration of every prompt strategy the planner uses."""

    name_and_goals: prompt_strategies.NameAndGoalsConfiguration
    initial_plan: prompt_strategies.InitialPlanConfiguration
    next_ability: prompt_strategies.NextAbilityConfiguration
class PlannerConfiguration(SystemConfiguration):
    """Configuration for the Planner subsystem."""

    # Which concrete model serves each classification (fast/smart).
    models: dict[LanguageModelClassification, LanguageModelConfiguration]
    prompt_strategies: PromptStrategiesConfiguration
class PlannerSettings(SystemSettings):
    """Settings for the Planner subsystem."""

    configuration: PlannerConfiguration
class SimplePlanner(Configurable):
    """
    Manages the agent's planning and goal-setting
    by constructing language model prompts.
    """

    default_settings = PlannerSettings(
        name="planner",
        description=(
            "Manages the agent's planning and goal-setting "
            "by constructing language model prompts."
        ),
        configuration=PlannerConfiguration(
            models={
                LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT3,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
                LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT4,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
            },
            prompt_strategies=PromptStrategiesConfiguration(
                name_and_goals=prompt_strategies.NameAndGoals.default_configuration,
                initial_plan=prompt_strategies.InitialPlan.default_configuration,
                next_ability=prompt_strategies.NextAbility.default_configuration,
            ),
        ),
    )

    def __init__(
        self,
        settings: PlannerSettings,
        logger: logging.Logger,
        model_providers: dict[ModelProviderName, ChatModelProvider],
        workspace: Workspace = None,  # Workspace is not available during bootstrapping.
    ) -> None:
        """Wire up providers and prompt strategies from the given settings."""
        self._configuration = settings.configuration
        self._logger = logger
        self._workspace = workspace

        # Map each model classification to the provider that serves it.
        self._providers: dict[LanguageModelClassification, ChatModelProvider] = {}
        for model, model_config in self._configuration.models.items():
            self._providers[model] = model_providers[model_config.provider_name]

        # Instantiate each strategy from its serialized configuration.
        self._prompt_strategies = {
            "name_and_goals": prompt_strategies.NameAndGoals(
                **self._configuration.prompt_strategies.name_and_goals.dict()
            ),
            "initial_plan": prompt_strategies.InitialPlan(
                **self._configuration.prompt_strategies.initial_plan.dict()
            ),
            "next_ability": prompt_strategies.NextAbility(
                **self._configuration.prompt_strategies.next_ability.dict()
            ),
        }

    async def decide_name_and_goals(self, user_objective: str) -> ChatModelResponse:
        """Ask the model for an agent name/role/goals matching the objective."""
        return await self.chat_with_model(
            self._prompt_strategies["name_and_goals"],
            user_objective=user_objective,
        )

    async def make_initial_plan(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
    ) -> ChatModelResponse:
        """Ask the model for an initial task list for the given agent profile."""
        return await self.chat_with_model(
            self._prompt_strategies["initial_plan"],
            agent_name=agent_name,
            agent_role=agent_role,
            agent_goals=agent_goals,
            abilities=abilities,
        )

    async def determine_next_ability(
        self,
        task: Task,
        ability_specs: list[CompletionModelFunction],
    ):
        """Ask the model which ability to execute next for `task`."""
        return await self.chat_with_model(
            self._prompt_strategies["next_ability"],
            task=task,
            ability_specs=ability_specs,
        )

    async def chat_with_model(
        self,
        prompt_strategy: PromptStrategy,
        **kwargs,
    ) -> ChatModelResponse:
        """Build the strategy's prompt and run it against the matching model.

        Args:
            prompt_strategy: The strategy that builds and parses the prompt.
            **kwargs: Strategy-specific arguments forwarded to build_prompt.

        Returns:
            The provider's response, parsed by the strategy.
        """
        model_classification = prompt_strategy.model_classification
        model_configuration = self._configuration.models[model_classification].dict()
        self._logger.debug(f"Using model configuration: {model_configuration}")
        # provider_name only selects the provider; the remaining keys are
        # passed straight through to create_chat_completion.
        del model_configuration["provider_name"]
        provider = self._providers[model_classification]

        template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
        template_kwargs.update(kwargs)
        prompt = prompt_strategy.build_prompt(**template_kwargs)

        self._logger.debug(f"Using prompt:\n{dump_prompt(prompt)}\n")
        response = await provider.create_chat_completion(
            model_prompt=prompt.messages,
            functions=prompt.functions,
            **model_configuration,
            completion_parser=prompt_strategy.parse_response_content,
        )
        return response

    def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
        """Collect the standard system-info kwargs every strategy receives."""
        provider = self._providers[strategy.model_classification]
        template_kwargs = {
            "os_info": get_os_info(),
            "api_budget": provider.get_remaining_budget(),
            "current_time": time.strftime("%c"),
        }
        return template_kwargs
def get_os_info() -> str:
    """Return a human-readable description of the host operating system."""
    # distro gives prettier names on Linux; platform.platform elsewhere.
    if platform.system() == "Linux":
        return distro.name(pretty=True)
    return platform.platform(terse=True)

View File

@@ -1,84 +0,0 @@
# Rules of thumb:
# - Templates don't add new lines at the end of the string. This is the
#   responsibility of the consuming template.

####################
# Planner defaults #
####################
# Fallback objective used when the user does not supply one.
USER_OBJECTIVE = (
    "Write a wikipedia style article about the project: "
    "https://github.com/significant-gravitas/AutoGPT"
)

# Plan Prompt
# -----------

PLAN_PROMPT_CONSTRAINTS = (
    "~4000 word limit for short term memory. Your short term memory is short, so "
    "immediately save important information to files.",
    "If you are unsure how you previously did something or want to recall past "
    "events, thinking about similar events will help you remember.",
    "No user assistance",
    "Exclusively use the commands listed below e.g. command_name",
)

PLAN_PROMPT_RESOURCES = (
    "Internet access for searches and information gathering.",
    "Long-term memory management.",
    "File output.",
)

PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
    "Continuously review and analyze your actions to ensure you are performing to"
    " the best of your abilities.",
    "Constructively self-criticize your big-picture behavior constantly.",
    "Reflect on past decisions and strategies to refine your approach.",
    "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
    " the least number of steps.",
    "Write all code to a file",
)

# Example structure shown to the model as the required JSON response shape.
PLAN_PROMPT_RESPONSE_DICT = {
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user",
    },
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

PLAN_PROMPT_RESPONSE_FORMAT = (
    "You should only respond in JSON format as described below\n"
    "Response Format:\n"
    "{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)

PLAN_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)

# Top-level plan prompt; all sections are filled in by the consumer.
PLAN_PROMPT_MAIN = (
    "{header}\n\n"
    "GOALS:\n\n{goals}\n\n"
    "Info:\n{info}\n\n"
    "Constraints:\n{constraints}\n\n"
    "Commands:\n{commands}\n\n"
    "Resources:\n{resources}\n\n"
    "Performance Evaluations:\n{performance_evaluations}\n\n"
    "You should only respond in JSON format as described below\n"
    "Response Format:\n{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)


###########################
# Parameterized templates #
###########################

View File

@@ -1,6 +0,0 @@
"""The plugin system allows the Agent to be extended with new functionality."""
from autogpt.core.plugin.base import PluginService
__all__ = [
"PluginService",
]

View File

@@ -1,159 +0,0 @@
import abc
import enum
from typing import TYPE_CHECKING, Type
from forge.models.config import SystemConfiguration, UserConfigurable
from pydantic import BaseModel
if TYPE_CHECKING:
from forge.llm.providers import ChatModelProvider, EmbeddingModelProvider
from autogpt.core.ability import Ability, AbilityRegistry
from autogpt.core.memory import Memory
# Union of every class a plugin may provide to the system.
# Expand to other types as needed
PluginType = (
    Type[Ability]  # Swappable now
    | Type[AbilityRegistry]  # Swappable maybe never
    | Type[ChatModelProvider]  # Swappable soon
    | Type[EmbeddingModelProvider]  # Swappable soon
    | Type[Memory]  # Swappable now
    # | Type[Planner]  # Swappable soon
)
# Built with the functional Enum API; ``type=str`` mixes in ``str`` exactly
# as the class-based ``class PluginStorageFormat(str, enum.Enum)`` form does.
PluginStorageFormat = enum.Enum(
    "PluginStorageFormat",
    {
        "INSTALLED_PACKAGE": "installed_package",  # required now, loads system defaults
        "WORKSPACE": "workspace",  # required now
    },
    type=str,
)
PluginStorageFormat.__doc__ = """Supported plugin storage formats.

Plugins can be stored at one of these supported locations.
"""
# Candidate future formats (not yet supported):
#   open_api_url     — soon (requires some tooling we don't have yet)
#   other_file_path  — maybe later (maybe now)
#   git              — maybe later (or soon)
#   pypi             — maybe later
#   autogpt_plugin_service — long term solution, requires design
#   auto             — automatically find plugin, feature for later maybe
# Installed package example
# PluginLocation(
# storage_format='installed_package',
# storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# Workspace example
# PluginLocation(
# storage_format='workspace',
# storage_route='relative/path/to/plugin.pkl'
# OR
# storage_route='relative/path/to/plugin.py'
# )
# Git
# PluginLocation(
# storage_format='git',
# Exact format TBD.
# storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
# )
# PyPI
# PluginLocation(
# storage_format='pypi',
# storage_route='package_name'
# )
# PluginLocation(
# storage_format='installed_package',
# storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# A plugin storage route.
#
# This is a string that specifies where to load a plugin from
# (e.g. an import path or file path).
PluginStorageRoute = str
class PluginLocation(SystemConfiguration):
    """A plugin location.

    This is a combination of a plugin storage format and a plugin storage route.
    It is used by the PluginService to load plugins.
    """

    # Which kind of storage the route points into.
    storage_format: PluginStorageFormat = UserConfigurable()
    # Format-specific locator (import path or file path).
    storage_route: PluginStorageRoute = UserConfigurable()
class PluginMetadata(BaseModel):
    """Metadata about a plugin."""

    # Human-readable plugin name.
    name: str
    description: str
    # Where the plugin can be loaded from.
    location: PluginLocation
class PluginService(abc.ABC):
    """Base class for plugin service.

    The plugin service should be stateless. This defines the interface for
    loading plugins from various storage formats.
    """

    @staticmethod
    @abc.abstractmethod
    def get_plugin(plugin_location: PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location.

        Dispatches to the appropriate loader below based on the location's
        storage format.
        """
        ...

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path.

        Args:
            plugin_route: The plugin name to resolve.
            path_type: Which kind of path to resolve to (file vs import).
        """
        ...

    #####################################
    # High-level storage format loaders #
    #####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        ...

View File

@@ -1,75 +0,0 @@
from importlib import import_module
from typing import TYPE_CHECKING
from autogpt.core.plugin.base import (
PluginLocation,
PluginService,
PluginStorageFormat,
PluginStorageRoute,
)
if TYPE_CHECKING:
from autogpt.core.plugin.base import PluginType
class SimplePluginService(PluginService):
    """Stateless loader that resolves `PluginLocation`s to plugin classes."""

    @staticmethod
    def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        # Accept a raw dict for convenience and coerce it to a PluginLocation.
        if isinstance(plugin_location, dict):
            plugin_location = PluginLocation.parse_obj(plugin_location)
        if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
            return SimplePluginService.load_from_workspace(
                plugin_location.storage_route
            )
        elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
            return SimplePluginService.load_from_installed_package(
                plugin_location.storage_route
            )
        else:
            raise NotImplementedError(
                "Plugin storage format %s is not implemented."
                % plugin_location.storage_format
            )

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        # TODO: Define an on disk storage format and implement this.
        #   Can pull from existing zip file loading implementation
        raise NotImplementedError("Loading from file path is not implemented.")

    @staticmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        # Split "pkg.module.ClassName" into module path and class name.
        module_path, _, class_name = plugin_route.rpartition(".")
        return getattr(import_module(module_path), class_name)

    @staticmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        # TODO: Implement a discovery system for finding plugins by name from known
        #   storage locations. E.g. if we know that path_type is a file path, we can
        #   search the workspace for it. If it's an import path, we can check the core
        #   system and the auto_gpt_plugins package.
        raise NotImplementedError("Resolving plugin name to path is not implemented.")

    #####################################
    # High-level storage format loaders #
    #####################################
    @staticmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        # NOTE(review): delegates to load_from_file_path, which currently
        # raises NotImplementedError — workspace loading is effectively
        # unimplemented.
        plugin = SimplePluginService.load_from_file_path(plugin_route)
        return plugin

    @staticmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package (via its import path)."""
        plugin = SimplePluginService.load_from_import_path(plugin_route)
        return plugin

File diff suppressed because it is too large Load Diff

View File

@@ -1,77 +0,0 @@
[tool.poetry]
name = "agpt"
version = "1.0.0"
authors = ["Significant Gravitas <support@agpt.co>"]
maintainers = ["Reinier van der Leer <reinier.vanderleer@agpt.co>"]
description = "An open-source attempt at an autonomous generalist agent"
readme = "README.md"
repository = "https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/agpt"
# documentation = "https://docs.agpt.co/autogpts/agpt" # TODO
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
packages = [{ include = "autogpt/core", from = "../.." }]
[tool.poetry.scripts]
cli = "autogpt.core.runner.cli_app.cli:autogpt"
cli-web = "autogpt.core.runner.cli_web_app.cli:autogpt"
[tool.poetry.dependencies]
python = "^3.10"
agent-protocol = "^0.3.0"
click = "^8.1.7"
colorama = "^0.4.6"
distro = "^1.8.0"
inflection = "^0.5.1"
jsonschema = "^4.19.1"
openai = "^0.28.0"
pydantic = "^1.10.12"
pyyaml = "^6.0.0"
tiktoken = "^0.5.1"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
packages = ["autogpt"]
extend-exclude = '.+/(dist|.venv|venv|build)/.+'
[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"FIRSTPARTY",
"LOCALFOLDER"
]
skip = '''
.tox
__pycache__
*.pyc
.env
venv*/*
.venv/*
reports/*
dist/*
'''
[tool.pytest.ini_options]
markers = [
"requires_openai_api_key",
"requires_huggingface_api_key"
]

View File

@@ -1,3 +0,0 @@
"""
This module contains the runner for the v2 agent server and client.
"""

View File

@@ -1,47 +0,0 @@
from pathlib import Path
import click
import yaml
from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
DEFAULT_SETTINGS_FILE,
make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions
@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    # Group container only; subcommands do the work.
    pass


autogpt.add_command(make_settings)


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@click.option(
    "--pdb",
    is_flag=True,
    help="Drop into a debugger if an error is raised.",
)
@coroutine
async def run(settings_file: str, pdb: bool) -> None:
    """Run the AutoGPT agent."""
    click.echo("Running AutoGPT agent...")
    # Re-bind the CLI string to a Path (intentionally shadows the parameter).
    settings_file: Path = Path(settings_file)
    # Missing or empty settings file means "use defaults".
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    # Optionally wrap the entrypoint in a post-mortem debugger (--pdb).
    main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
    await main(settings)

View File

@@ -1,74 +0,0 @@
import click
from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import (
configure_root_logger,
get_client_logger,
)
from autogpt.core.runner.client_lib.parser import (
parse_ability_result,
parse_agent_name_and_goals,
parse_agent_plan,
parse_next_ability,
)
async def run_auto_gpt(user_configuration: dict):
    """Run the AutoGPT CLI client.

    Args:
        user_configuration: User settings loaded from the settings file;
            may be empty on first run.
    """
    configure_root_logger()

    client_logger = get_client_logger()
    client_logger.debug("Getting agent settings")

    # An existing workspace root means an agent was already provisioned.
    agent_workspace = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    if not agent_workspace:  # We don't have an agent yet.
        #################
        # Bootstrapping #
        #################
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            client_logger,
            user_configuration,
        )

        # Step 2. Get a name and goals for the agent.
        # First we need to figure out what the user wants to do with the agent.
        # We'll do this by asking the user for a prompt.
        user_objective = click.prompt("What do you want AutoGPT to do?")
        # Ask a language model to determine a name and goals for a suitable agent.
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            client_logger,
        )
        print("\n" + parse_agent_name_and_goals(name_and_goals))
        # Finally, update the agent settings with the name and goals.
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
        client_logger.info("Agent is provisioned")

    # launch agent interaction loop
    agent = SimpleAgent.from_workspace(
        agent_workspace,
        client_logger,
    )
    client_logger.info("Agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    # NOTE(review): loop has no exit condition; it runs until the process is
    # interrupted or an awaited call raises.
    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))

View File

@@ -1,58 +0,0 @@
import pathlib
import click
import yaml
from agent_protocol import Agent as AgentProtocol
from autogpt.core.runner.cli_web_app.server.api import task_handler
from autogpt.core.runner.client_lib.shared_click_commands import (
DEFAULT_SETTINGS_FILE,
make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine
@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    # Group container only; subcommands do the work.
    pass


autogpt.add_command(make_settings)
@autogpt.command()
@click.option(
    "port",
    "--port",
    default=8080,
    help="The port of the webserver.",
    type=click.INT,
)
def server(port: int) -> None:
    """Run the AutoGPT runner httpserver."""
    click.echo("Running AutoGPT runner httpserver...")
    # Serve the agent-protocol task handler on the given port (blocking).
    AgentProtocol.handle_task(task_handler).start(port)
@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@coroutine
async def client(settings_file) -> None:
    """Run the AutoGPT runner client.

    Loads settings from `settings_file` if it exists; the settings are kept
    for the (not yet implemented) agent-protocol API call below.
    """
    settings_file = pathlib.Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    # Removed a leftover no-op bare `settings` expression statement here;
    # `settings` is currently unused until the API call is implemented.
    # TODO: Call the API server with the settings and task,
    # using the Python API client for agent protocol.
# TODO: Call the API server with the settings and task,
# using the Python API client for agent protocol.
if __name__ == "__main__":
autogpt()

View File

@@ -1,96 +0,0 @@
import logging
from agent_protocol import StepHandler, StepResult
from forge.config.ai_profile import AIProfile
from forge.config.config import ConfigBuilder
from forge.llm.prompting.prompt import DEFAULT_TRIGGERING_PROMPT
from forge.logging.helpers import user_friendly_output
from autogpt.agents import Agent
from autogpt.app.main import UserFeedback
async def task_handler(task_input) -> StepHandler:
    """Bootstrap an agent for the given task and return a per-step handler.

    The returned closure carries the command proposed in one step over to the
    next step via the `nonlocal` variables below.
    """
    task = task_input.__root__ if task_input else {}
    agent = bootstrap_agent(task.get("user_input"), False)
    # Proposed command carried between successive step_handler invocations.
    next_command_name: str | None = None
    next_command_args: dict[str, str] | None = None
    async def step_handler(step_input) -> StepResult:
        step = step_input.__root__ if step_input else {}
        nonlocal next_command_name, next_command_args
        result = await interaction_step(
            agent,
            step.get("user_input"),
            step.get("user_feedback"),
            next_command_name,
            next_command_args,
        )
        next_command_name = result["next_step_command_name"] if result else None
        next_command_args = result["next_step_command_args"] if result else None
        if not result:
            # No result means the loop ended (e.g. user exit or failed command).
            return StepResult(output=None, is_last=True)
        return StepResult(output=result)
    return step_handler
async def interaction_step(
    agent: Agent,
    user_input,
    user_feedback: UserFeedback | None,
    command_name: str | None,
    command_args: dict[str, str] | None,
):
    """Run one step of the interaction loop.

    Returns a dict describing the step outcome and the next proposed command,
    or None when the loop should end (user exit, or a command that failed).
    """
    if user_feedback == UserFeedback.EXIT:
        return
    if user_feedback == UserFeedback.TEXT:
        # Free-text feedback is routed to the agent as a "human_feedback" command.
        command_name = "human_feedback"
    result: str | None = None
    if command_name is not None:
        result = agent.execute(command_name, command_args, user_input)
        if result is None:
            user_friendly_output(
                title="SYSTEM:", message="Unable to execute command", level=logging.WARN
            )
            return
    # Ask the agent for its next move regardless of whether a command just ran.
    next_command_name, next_command_args, assistant_reply_dict = agent.propose_action()
    return {
        "config": agent.config,
        "ai_profile": agent.ai_profile,
        "result": result,
        "assistant_reply_dict": assistant_reply_dict,
        "next_step_command_name": next_command_name,
        "next_step_command_args": next_command_args,
    }
def bootstrap_agent(task, continuous_mode) -> Agent:
    """Create a minimally-configured Agent for a single task.

    NOTE(review): per the FIXME below, the Agent construction at the end does
    not match Agent's actual signature and will fail at runtime.
    """
    config = ConfigBuilder.build_config_from_env()
    config.logging.level = logging.DEBUG
    config.logging.plain_console_output = True
    config.continuous_mode = continuous_mode
    # Deterministic sampling for reproducible runs.
    config.temperature = 0
    config.memory_backend = "no_memory"
    ai_profile = AIProfile(
        ai_name="AutoGPT",
        ai_role="a multi-purpose AI assistant.",
        ai_goals=[task],
    )
    # FIXME this won't work - ai_profile and triggering_prompt is not a valid argument,
    # lacks file_storage, settings and llm_provider
    return Agent(
        ai_profile=ai_profile,
        legacy_config=config,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    )

View File

@@ -1,22 +0,0 @@
import logging
from .config import BelowLevelFilter, FancyConsoleFormatter, configure_root_logger
from .helpers import dump_prompt
def get_client_logger():
    """Return the client application's logger, configured for DEBUG verbosity.

    Logging is configured up-front so application logs have a place to live.
    """
    logger = logging.getLogger("autogpt_client_application")
    logger.setLevel(logging.DEBUG)
    return logger
__all__ = [
"configure_root_logger",
"get_client_logger",
"FancyConsoleFormatter",
"BelowLevelFilter",
"dump_prompt",
]

View File

@@ -1,27 +0,0 @@
import logging
import sys
from forge.logging import BelowLevelFilter, FancyConsoleFormatter
from openai._base_client import log as openai_logger
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s"
DEBUG_LOG_FORMAT = (
"%(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d %(message)s"
)
def configure_root_logger():
    """Configure root logging: sub-WARNING records go to stdout, WARNING+ to stderr."""
    console_formatter = FancyConsoleFormatter(SIMPLE_LOG_FORMAT)
    stdout = logging.StreamHandler(stream=sys.stdout)
    stdout.setLevel(logging.DEBUG)
    # Keep WARNING and above out of stdout; the stderr handler below takes them.
    stdout.addFilter(BelowLevelFilter(logging.WARNING))
    stdout.setFormatter(console_formatter)
    stderr = logging.StreamHandler()  # default stream is sys.stderr
    stderr.setLevel(logging.WARNING)
    stderr.setFormatter(console_formatter)
    logging.basicConfig(level=logging.DEBUG, handlers=[stdout, stderr])
    # Disable debug logging from OpenAI library
    openai_logger.setLevel(logging.WARNING)

View File

@@ -1,28 +0,0 @@
from math import ceil, floor
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from forge.llm.prompting import ChatPrompt
from forge.llm.providers.schema import ChatMessage
# Total character width of the per-message separator rules.
SEPARATOR_LENGTH = 42


def dump_prompt(prompt: "ChatPrompt | list[ChatMessage]") -> str:
    """Render a prompt's messages as a separator-delimited, human-readable dump."""

    def banner(label: str) -> str:
        # Center the upper-cased role label within a SEPARATOR_LENGTH-wide rule.
        pad = (SEPARATOR_LENGTH - 2 - len(label)) / 2
        return f"{floor(pad) * '-'} {label.upper()} {ceil(pad) * '-'}"

    messages = prompt if isinstance(prompt, list) else prompt.messages
    body = "\n".join(f"{banner(m.role)}\n{m.content}" for m in messages)
    return f"""
============== {messages.__class__.__name__} ==============
Length: {len(messages)} messages
{body}
==========================================
"""

View File

@@ -1,45 +0,0 @@
def parse_agent_name_and_goals(name_and_goals: dict) -> str:
    """Format an agent's name, role, and numbered goal list for display."""
    lines = [
        f"Agent Name: {name_and_goals['agent_name']}\n",
        f"Agent Role: {name_and_goals['agent_role']}\n",
        "Agent Goals:\n",
    ]
    for number, goal in enumerate(name_and_goals["agent_goals"], start=1):
        lines.append(f"{number}. {goal}\n")
    return "".join(lines)
def parse_agent_plan(plan: dict) -> str:
    """Format a plan's numbered task list, with per-task criteria, for display."""
    out = "Agent Plan:\n"
    for number, task in enumerate(plan["task_list"], start=1):
        out += f"{number}. {task['objective']}\n"
        out += f"Task type: {task['type']} Priority: {task['priority']}\n"
        out += "Ready Criteria:\n"
        out += "".join(
            f"    {n}. {criteria}\n"
            for n, criteria in enumerate(task["ready_criteria"], start=1)
        )
        out += "Acceptance Criteria:\n"
        out += "".join(
            f"    {n}. {criteria}\n"
            for n, criteria in enumerate(task["acceptance_criteria"], start=1)
        )
        out += "\n"
    return out
def parse_next_ability(current_task, next_ability: dict) -> str:
    """Format the proposed next ability and its rationale for display."""
    args_repr = ", ".join(
        f"{name}={value}" for name, value in next_ability["ability_arguments"].items()
    )
    lines = [
        f"Current Task: {current_task.objective}\n",
        f"Next Ability: {next_ability['next_ability']}({args_repr})\n",
        f"Motivation: {next_ability['motivation']}\n",
        f"Self-criticism: {next_ability['self_criticism']}\n",
        f"Reasoning: {next_ability['reasoning']}\n",
    ]
    return "".join(lines)
def parse_ability_result(ability_result) -> str:
    """Format an ability execution result for display."""
    fields = [
        ("Ability", ability_result["ability_name"]),
        ("Ability Arguments", ability_result["ability_args"]),
        ("Ability Result", ability_result["success"]),
        ("Message", ability_result["message"]),
        ("Data", ability_result["new_knowledge"]),
    ]
    return "".join(f"{label}: {value}\n" for label, value in fields)

View File

@@ -1,14 +0,0 @@
from pathlib import Path
import yaml
from autogpt.core.agent import SimpleAgent
def make_user_configuration(settings_file_path: Path):
    """Build the default user configuration and write it as YAML to the given path."""
    user_configuration = SimpleAgent.build_user_configuration()
    # Ensure the parent directory exists before writing the settings file.
    settings_file_path.parent.mkdir(parents=True, exist_ok=True)
    print("Writing settings to", settings_file_path)
    with settings_file_path.open("w") as f:
        yaml.safe_dump(user_configuration, f)

View File

@@ -1,19 +0,0 @@
import pathlib
import click
# Default location for the generated agent settings file.
DEFAULT_SETTINGS_FILE = str(
    pathlib.Path("~/auto-gpt/default_agent_settings.yml").expanduser()
)
@click.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
def make_settings(settings_file: str) -> None:
    """Write a default agent settings file to the given path."""
    # Imported lazily so registering the command stays cheap.
    from autogpt.core.runner.client_lib.settings import make_user_configuration
    make_user_configuration(pathlib.Path(settings_file))

View File

@@ -1,62 +0,0 @@
import asyncio
import functools
from bdb import BdbQuit
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar
import click
P = ParamSpec("P")
T = TypeVar("T")
def handle_exceptions(
application_main: Callable[P, T],
with_debugger: bool,
) -> Callable[P, T]:
"""Wraps a function so that it drops a user into a debugger if it raises an error.
This is intended to be used as a wrapper for the main function of a CLI application.
It will catch all errors and drop a user into a debugger if the error is not a
`KeyboardInterrupt`. If the error is a `KeyboardInterrupt`, it will raise the error.
If the error is not a `KeyboardInterrupt`, it will log the error and drop a user
into a debugger if `with_debugger` is `True`.
If `with_debugger` is `False`, it will raise the error.
Parameters
----------
application_main
The function to wrap.
with_debugger
Whether to drop a user into a debugger if an error is raised.
Returns
-------
Callable
The wrapped function.
"""
@functools.wraps(application_main)
async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
try:
return await application_main(*args, **kwargs)
except (BdbQuit, KeyboardInterrupt, click.Abort):
raise
except Exception as e:
if with_debugger:
print(f"Uncaught exception {e}")
import pdb
pdb.post_mortem()
else:
raise
return wrapped
def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
@functools.wraps(f)
def wrapper(*args: P.args, **kwargs: P.kwargs):
return asyncio.run(f(*args, **kwargs))
return wrapper

View File

@@ -1,9 +0,0 @@
"""The workspace is the central hub for the Agent's on disk resources."""
from autogpt.core.workspace.base import Workspace
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
__all__ = [
"SimpleWorkspace",
"Workspace",
"WorkspaceSettings",
]

View File

@@ -1,70 +0,0 @@
from __future__ import annotations
import abc
import logging
import typing
from pathlib import Path
if typing.TYPE_CHECKING:
from autogpt.core.agent.simple import AgentConfiguration
# Abstract interface; SimpleWorkspace is the concrete implementation.
class Workspace(abc.ABC):
    """The workspace is the root directory for all generated files.
    The workspace is responsible for creating the root directory and
    providing a method for getting the full path to an item in the
    workspace.
    """
    @property
    @abc.abstractmethod
    def root(self) -> Path:
        """The root directory of the workspace."""
        ...
    @property
    @abc.abstractmethod
    def restrict_to_workspace(self) -> bool:
        """Whether to restrict generated paths to the workspace."""
        ...
    @staticmethod
    @abc.abstractmethod
    def setup_workspace(
        configuration: AgentConfiguration, logger: logging.Logger
    ) -> Path:
        """Create the workspace root directory and set up all initial content.
        Parameters
        ----------
        configuration
            The Agent's configuration.
        logger
            The Agent's logger.
        Returns
        -------
        Path
            The path to the workspace root directory.
        """
        ...
    @abc.abstractmethod
    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.
        Parameters
        ----------
        relative_path
            The path to the item relative to the workspace root.
        Returns
        -------
        Path
            The full path to the item.
        """
        ...

View File

@@ -1,194 +0,0 @@
import json
import logging
import typing
from pathlib import Path
from forge.models.config import (
Configurable,
SystemConfiguration,
SystemSettings,
UserConfigurable,
)
from pydantic import SecretField
from autogpt.core.workspace.base import Workspace
if typing.TYPE_CHECKING:
# Cyclic import
from autogpt.core.agent.simple import AgentSettings
class WorkspaceConfiguration(SystemConfiguration):
    # Workspace root path; filled in by SimpleWorkspace.setup_workspace().
    root: str
    # Directory under which per-agent workspace roots are created.
    parent: str = UserConfigurable()
    # If True, path resolution refuses paths that escape the workspace root.
    restrict_to_workspace: bool = UserConfigurable()
class WorkspaceSettings(SystemSettings):
    configuration: WorkspaceConfiguration
class SimpleWorkspace(Configurable, Workspace):
    """On-disk workspace: sandboxed root directory for one agent's files."""
    default_settings = WorkspaceSettings(
        name="workspace",
        description="The workspace is the root directory for all agent activity.",
        configuration=WorkspaceConfiguration(
            root="",
            parent="~/auto-gpt/agents",
            restrict_to_workspace=True,
        ),
    )
    # Null-byte spellings rejected by _sanitize_path.
    # NOTE(review): "\0", "\000", "\x00" and "\u0000" are the same string in
    # Python; only "%00" adds a distinct value here.
    NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"]
    def __init__(
        self,
        settings: WorkspaceSettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._logger = logger.getChild("workspace")
    @property
    def root(self) -> Path:
        """The workspace root directory."""
        return Path(self._configuration.root)
    @property
    def debug_log_path(self) -> Path:
        return self.root / "logs" / "debug.log"
    @property
    def cycle_log_path(self) -> Path:
        return self.root / "logs" / "cycle.log"
    @property
    def configuration_path(self) -> Path:
        return self.root / "configuration.yml"
    @property
    def restrict_to_workspace(self) -> bool:
        """Whether paths resolved via get_path() may escape the root."""
        return self._configuration.restrict_to_workspace
    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.
        Parameters
        ----------
        relative_path
            The relative path to resolve in the workspace.
        Returns
        -------
        Path
            The resolved path relative to the workspace.
        """
        return self._sanitize_path(
            relative_path,
            root=self.root,
            restrict_to_root=self.restrict_to_workspace,
        )
    # NOTE(review): `root` defaults to None but is annotated `str | Path`;
    # should be `str | Path | None`.
    def _sanitize_path(
        self,
        relative_path: str | Path,
        root: str | Path = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.
        Parameters
        ----------
        relative_path
            The relative path to resolve.
        root
            The root path to resolve the relative path within.
        restrict_to_root
            Whether to restrict the path to the root.
        Returns
        -------
        Path
            The resolved path.
        Raises
        ------
        ValueError
            If the path is absolute and a root is provided.
        ValueError
            If the path is outside the root and the root is restricted.
        """
        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.
        for null_byte in self.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")
        if root is None:
            return Path(relative_path).resolve()
        self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
        root, relative_path = Path(root).resolve(), Path(relative_path)
        self._logger.debug(f"Resolved root as '{root}'")
        if relative_path.is_absolute():
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' "
                f"in workspace '{root}'."
            )
        full_path = root.joinpath(relative_path).resolve()
        self._logger.debug(f"Joined paths as '{full_path}'")
        # Resolving first, then checking containment, defeats `..` traversal.
        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
            )
        return full_path
    ###################################
    # Factory methods for agent setup #
    ###################################
    @staticmethod
    def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path:
        """Create the on-disk workspace for an agent and persist its settings."""
        workspace_parent = settings.workspace.configuration.parent
        workspace_parent = Path(workspace_parent).expanduser().resolve()
        workspace_parent.mkdir(parents=True, exist_ok=True)
        agent_name = settings.agent.name
        workspace_root = workspace_parent / agent_name
        workspace_root.mkdir(parents=True, exist_ok=True)
        settings.workspace.configuration.root = str(workspace_root)
        with (workspace_root / "agent_settings.json").open("w") as f:
            # Unwrap secret values so they serialize as plain strings.
            settings_json = settings.json(
                encoder=lambda x: x.get_secret_value()
                if isinstance(x, SecretField)
                else x,
            )
            f.write(settings_json)
        # TODO: What are all the kinds of logs we want here?
        log_path = workspace_root / "logs"
        log_path.mkdir(parents=True, exist_ok=True)
        (log_path / "debug.log").touch()
        (log_path / "cycle.log").touch()
        return workspace_root
    @staticmethod
    def load_agent_settings(workspace_root: Path) -> "AgentSettings":
        """Load the persisted agent settings from a workspace directory."""
        # Cyclic import
        from autogpt.core.agent.simple import AgentSettings
        with (workspace_root / "agent_settings.json").open("r") as f:
            agent_settings = json.load(f)
        return AgentSettings.parse_obj(agent_settings)

View File

@@ -1,156 +0,0 @@
from forge.config.config import Config
from .memory_item import MemoryItem, MemoryItemFactory, MemoryItemRelevance
from .providers.base import VectorMemoryProvider as VectorMemory
from .providers.json_file import JSONFileMemory
from .providers.no_memory import NoMemory
# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ["json_file", "no_memory"]
# try:
# from .providers.redis import RedisMemory
# supported_memory.append("redis")
# except ImportError:
# RedisMemory = None
# try:
# from .providers.pinecone import PineconeMemory
# supported_memory.append("pinecone")
# except ImportError:
# PineconeMemory = None
# try:
# from .providers.weaviate import WeaviateMemory
# supported_memory.append("weaviate")
# except ImportError:
# WeaviateMemory = None
# try:
# from .providers.milvus import MilvusMemory
# supported_memory.append("milvus")
# except ImportError:
# MilvusMemory = None
def get_memory(config: Config) -> VectorMemory:
    """
    Returns a memory object corresponding to the memory backend specified in the config.
    The type of memory object returned depends on the value of the `memory_backend`
    attribute in the configuration. E.g. if `memory_backend` is set to "pinecone", a
    `PineconeMemory` object is returned. If it is set to "redis", a `RedisMemory`
    object is returned.
    By default, a `JSONFileMemory` object is returned.
    Params:
        config: A configuration object that contains information about the memory
            backend to be used and other relevant parameters.
    Returns:
        VectorMemory: an instance of a memory object based on the configuration provided
    """
    memory = None
    match config.memory_backend:
        case "json_file":
            memory = JSONFileMemory(config)
        # The following backends were disabled during the memory-system rework;
        # their old wiring is kept below as commented-out reference code.
        case "pinecone":
            raise NotImplementedError(
                "The Pinecone memory backend has been rendered incompatible by work on "
                "the memory system, and was removed. Whether support will be added "
                "back in the future is subject to discussion, feel free to pitch in: "
                "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280"
            )
            # if not PineconeMemory:
            #     logger.warning(
            #         "Error: Pinecone is not installed. Please install pinecone"
            #         " to use Pinecone as a memory backend."
            #     )
            # else:
            #     memory = PineconeMemory(config)
            #     if clear:
            #         memory.clear()
        case "redis":
            raise NotImplementedError(
                "The Redis memory backend has been rendered incompatible by work on "
                "the memory system, and has been removed temporarily."
            )
            # if not RedisMemory:
            #     logger.warning(
            #         "Error: Redis is not installed. Please install redis-py to"
            #         " use Redis as a memory backend."
            #     )
            # else:
            #     memory = RedisMemory(config)
        case "weaviate":
            raise NotImplementedError(
                "The Weaviate memory backend has been rendered incompatible by work on "
                "the memory system, and was removed. Whether support will be added "
                "back in the future is subject to discussion, feel free to pitch in: "
                "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280"
            )
            # if not WeaviateMemory:
            #     logger.warning(
            #         "Error: Weaviate is not installed. Please install weaviate-client"
            #         " to use Weaviate as a memory backend."
            #     )
            # else:
            #     memory = WeaviateMemory(config)
        case "milvus":
            raise NotImplementedError(
                "The Milvus memory backend has been rendered incompatible by work on "
                "the memory system, and was removed. Whether support will be added "
                "back in the future is subject to discussion, feel free to pitch in: "
                "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280"
            )
            # if not MilvusMemory:
            #     logger.warning(
            #         "Error: pymilvus sdk is not installed, but required "
            #         "to use Milvus or Zilliz as memory backend. "
            #         "Please install pymilvus."
            #     )
            # else:
            #     memory = MilvusMemory(config)
        case "no_memory":
            memory = NoMemory()
        case _:
            raise ValueError(
                f"Unknown memory backend '{config.memory_backend}'."
                " Please check your config."
            )
    # Defensive fallback: every non-raising case above assigns `memory`,
    # so this only fires if a future branch forgets to.
    if memory is None:
        memory = JSONFileMemory(config)
    return memory
def get_supported_memory_backends():
    """Return the names of memory backends whose imports succeeded."""
    return supported_memory
__all__ = [
"get_memory",
"MemoryItem",
"MemoryItemFactory",
"MemoryItemRelevance",
"JSONFileMemory",
"NoMemory",
"VectorMemory",
# "RedisMemory",
# "PineconeMemory",
# "MilvusMemory",
# "WeaviateMemory",
]

View File

@@ -1,280 +0,0 @@
from __future__ import annotations
import json
import logging
from typing import Literal
import ftfy
import numpy as np
from forge.config.config import Config
from forge.content_processing.text import chunk_content, split_text, summarize_text
from forge.llm.providers import ChatMessage, ChatModelProvider, EmbeddingModelProvider
from pydantic import BaseModel
from .utils import Embedding, get_embedding
logger = logging.getLogger(__name__)
MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"]
class MemoryItem(BaseModel, arbitrary_types_allowed=True):
    """Memory object containing raw content as well as embeddings"""
    # Original text as memorized (after encoding fixes).
    raw_content: str
    # Single overall summary of the content.
    summary: str
    # Content split into chunks, with a summary and an embedding per chunk.
    chunks: list[str]
    chunk_summaries: list[str]
    e_summary: Embedding
    e_chunks: list[Embedding]
    # Arbitrary extra info; includes "source_type" and often "location".
    metadata: dict
    def relevance_for(self, query: str, e_query: Embedding | None = None):
        """Score this item's relevance to `query` (see MemoryItemRelevance.of)."""
        return MemoryItemRelevance.of(self, query, e_query)
    # NOTE(review): `calculate_length` is accepted but never used.
    def dump(self, calculate_length=False) -> str:
        """Render a human-readable debug dump of this memory item."""
        n_chunks = len(self.e_chunks)
        return f"""
=============== MemoryItem ===============
Size: {n_chunks} chunks
Metadata: {json.dumps(self.metadata, indent=2)}
---------------- SUMMARY -----------------
{self.summary}
------------------ RAW -------------------
{self.raw_content}
==========================================
"""
    # NOTE(review): assumes `other` is a MemoryItem; does not return
    # NotImplemented for other types. Also ignores `summary` and `metadata`.
    def __eq__(self, other: MemoryItem):
        return (
            self.raw_content == other.raw_content
            and self.chunks == other.chunks
            and self.chunk_summaries == other.chunk_summaries
            # Embeddings can either be list[float] or np.ndarray[float32],
            # and for comparison they must be of the same type
            and np.array_equal(
                self.e_summary
                if isinstance(self.e_summary, np.ndarray)
                else np.array(self.e_summary, dtype=np.float32),
                other.e_summary
                if isinstance(other.e_summary, np.ndarray)
                else np.array(other.e_summary, dtype=np.float32),
            )
            and np.array_equal(
                self.e_chunks
                if isinstance(self.e_chunks[0], np.ndarray)
                else [np.array(c, dtype=np.float32) for c in self.e_chunks],
                other.e_chunks
                if isinstance(other.e_chunks[0], np.ndarray)
                else [np.array(c, dtype=np.float32) for c in other.e_chunks],
            )
        )
class MemoryItemFactory:
    """Builds MemoryItems from text sources using LLM and embedding providers."""
    def __init__(
        self,
        llm_provider: ChatModelProvider,
        embedding_provider: EmbeddingModelProvider,
    ):
        self.llm_provider = llm_provider
        self.embedding_provider = embedding_provider
    # NOTE(review): `metadata: dict = {}` is a mutable default argument and is
    # mutated below (`metadata["source_type"] = ...`) — shared across calls.
    async def from_text(
        self,
        text: str,
        source_type: MemoryDocType,
        config: Config,
        metadata: dict = {},
        how_to_summarize: str | None = None,
        question_for_summary: str | None = None,
    ):
        """Chunk, summarize, and embed `text` into a MemoryItem."""
        logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
        # Fix encoding, e.g. removing unicode surrogates (see issue #778)
        text = ftfy.fix_text(text)
        # FIXME: needs ModelProvider
        chunks = [
            chunk
            for chunk, _ in (
                split_text(
                    text=text,
                    config=config,
                    max_chunk_length=1000,  # arbitrary, but shorter ~= better
                    tokenizer=self.llm_provider.get_tokenizer(config.fast_llm),
                )
                if source_type != "code_file"
                # TODO: chunk code based on structure/outline
                else chunk_content(
                    content=text,
                    max_chunk_length=1000,
                    tokenizer=self.llm_provider.get_tokenizer(config.fast_llm),
                )
            )
        ]
        logger.debug("Chunks: " + str(chunks))
        chunk_summaries = [
            summary
            for summary, _ in [
                await summarize_text(
                    text=text_chunk,
                    instruction=how_to_summarize,
                    question=question_for_summary,
                    llm_provider=self.llm_provider,
                    config=config,
                )
                for text_chunk in chunks
            ]
        ]
        logger.debug("Chunk summaries: " + str(chunk_summaries))
        # NOTE(review): get_embedding is declared async in the vector-memory
        # utils, but these calls are not awaited — verify.
        e_chunks = get_embedding(chunks, config, self.embedding_provider)
        summary = (
            chunk_summaries[0]
            if len(chunks) == 1
            else (
                await summarize_text(
                    text="\n\n".join(chunk_summaries),
                    instruction=how_to_summarize,
                    question=question_for_summary,
                    llm_provider=self.llm_provider,
                    config=config,
                )
            )[0]
        )
        logger.debug("Total summary: " + summary)
        # TODO: investigate search performance of weighted average vs summary
        # e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
        e_summary = get_embedding(summary, config, self.embedding_provider)
        metadata["source_type"] = source_type
        return MemoryItem(
            raw_content=text,
            summary=summary,
            chunks=chunks,
            chunk_summaries=chunk_summaries,
            e_summary=e_summary,
            e_chunks=e_chunks,
            metadata=metadata,
        )
    def from_text_file(self, content: str, path: str, config: Config):
        return self.from_text(content, "text_file", config, {"location": path})
    # NOTE(review): broken — the metadata dict is passed positionally into
    # from_text's `config` slot; the required `config` argument is missing.
    def from_code_file(self, content: str, path: str):
        # TODO: implement tailored code memories
        return self.from_text(content, "code_file", {"location": path})
    # NOTE(review): broken — from_text's required `config` argument is not
    # passed, and the trailing comma makes `how_to_summarize` a 1-tuple
    # instead of a str.
    def from_ai_action(self, ai_message: ChatMessage, result_message: ChatMessage):
        # The result_message contains either user feedback
        # or the result of the command specified in ai_message
        if ai_message.role != "assistant":
            raise ValueError(f"Invalid role on 'ai_message': {ai_message.role}")
        result = (
            result_message.content
            if result_message.content.startswith("Command")
            else "None"
        )
        user_input = (
            result_message.content
            if result_message.content.startswith("Human feedback")
            else "None"
        )
        memory_content = (
            f"Assistant Reply: {ai_message.content}"
            "\n\n"
            f"Result: {result}"
            "\n\n"
            f"Human Feedback: {user_input}"
        )
        return self.from_text(
            text=memory_content,
            source_type="agent_history",
            how_to_summarize=(
                "if possible, also make clear the link between the command in the"
                " assistant's response and the command result. "
                "Do not mention the human feedback if there is none.",
            ),
        )
    def from_webpage(
        self, content: str, url: str, config: Config, question: str | None = None
    ):
        return self.from_text(
            text=content,
            source_type="webpage",
            config=config,
            metadata={"location": url},
            question_for_summary=question,
        )
class MemoryItemRelevance(BaseModel):
    """
    Class that encapsulates memory relevance search functionality and data.
    Instances contain a MemoryItem and its relevance scores for a given query.
    """
    memory_item: MemoryItem
    for_query: str
    summary_relevance_score: float
    chunk_relevance_scores: list[float]
    @staticmethod
    def of(
        memory_item: MemoryItem, for_query: str, e_query: Embedding | None = None
    ) -> MemoryItemRelevance:
        """Build relevance scores for `memory_item` against `for_query`."""
        # NOTE(review): this get_embedding call omits the config and embedding
        # provider arguments that the vector-memory utils version requires —
        # verify this fallback path actually works.
        e_query = e_query if e_query is not None else get_embedding(for_query)
        _, srs, crs = MemoryItemRelevance.calculate_scores(memory_item, e_query)
        return MemoryItemRelevance(
            for_query=for_query,
            memory_item=memory_item,
            summary_relevance_score=srs,
            chunk_relevance_scores=crs,
        )
    @staticmethod
    def calculate_scores(
        memory: MemoryItem, compare_to: Embedding
    ) -> tuple[float, float, list[float]]:
        """
        Calculates similarity between given embedding and all embeddings of the memory
        Returns:
            float: the aggregate (max) relevance score of the memory
            float: the relevance score of the memory summary
            list: the relevance scores of the memory chunks
        """
        # Dot products serve as similarity scores here.
        summary_relevance_score = np.dot(memory.e_summary, compare_to)
        chunk_relevance_scores = np.dot(memory.e_chunks, compare_to).tolist()
        logger.debug(f"Relevance of summary: {summary_relevance_score}")
        logger.debug(f"Relevance of chunks: {chunk_relevance_scores}")
        relevance_scores = [summary_relevance_score, *chunk_relevance_scores]
        logger.debug(f"Relevance scores: {relevance_scores}")
        return max(relevance_scores), summary_relevance_score, chunk_relevance_scores
    @property
    def score(self) -> float:
        """The aggregate relevance score of the memory item for the given query"""
        return max([self.summary_relevance_score, *self.chunk_relevance_scores])
    @property
    def most_relevant_chunk(self) -> tuple[str, float]:
        """The most relevant chunk of the memory item + its score for the given query"""
        i_relmax = np.argmax(self.chunk_relevance_scores)
        return self.memory_item.chunks[i_relmax], self.chunk_relevance_scores[i_relmax]
    def __repr__(self):
        return (
            f"{self.memory_item.summary} ({self.summary_relevance_score}) "
            f"{self.chunk_relevance_scores}"
        )

View File

@@ -1,7 +0,0 @@
from .json_file import JSONFileMemory
from .no_memory import NoMemory
__all__ = [
"JSONFileMemory",
"NoMemory",
]

View File

@@ -1,78 +0,0 @@
import abc
import functools
import logging
from typing import MutableSet, Sequence
import numpy as np
from forge.config.config import Config
from .. import MemoryItem, MemoryItemRelevance
from ..utils import Embedding, get_embedding
logger = logging.getLogger(__name__)
class VectorMemoryProvider(MutableSet[MemoryItem]):
    """Base class for vector memory backends: a mutable set of MemoryItems
    with embedding-based relevance search."""
    @abc.abstractmethod
    def __init__(self, config: Config):
        pass
    def get(self, query: str, config: Config) -> MemoryItemRelevance | None:
        """
        Gets the data from the memory that is most relevant to the given query.
        Args:
            query: The query used to retrieve information.
            config: The config Object.
        Returns: The most relevant Memory
        """
        result = self.get_relevant(query, 1, config)
        return result[0] if result else None
    def get_relevant(
        self, query: str, k: int, config: Config
    ) -> Sequence[MemoryItemRelevance]:
        """
        Returns the top-k most relevant memories for the given query
        Args:
            query: the query to compare stored memories to
            k: the number of relevant memories to fetch
            config: The config Object.
        Returns:
            list[MemoryItemRelevance] containing the top [k] relevant memories
        """
        if len(self) < 1:
            return []
        logger.debug(
            f"Searching for {k} relevant memories for query '{query}'; "
            f"{len(self)} memories in index"
        )
        relevances = self.score_memories_for_relevance(query, config)
        logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")
        # take last k items and reverse
        top_k_indices = np.argsort([r.score for r in relevances])[-k:][::-1]
        return [relevances[i] for i in top_k_indices]
    def score_memories_for_relevance(
        self, for_query: str, config: Config
    ) -> Sequence[MemoryItemRelevance]:
        """
        Returns MemoryItemRelevance for every memory in the index.
        Implementations may override this function for performance purposes.
        """
        # NOTE(review): get_embedding is async elsewhere in this package but is
        # not awaited here — verify.
        e_query: Embedding = get_embedding(for_query, config)
        return [m.relevance_for(for_query, e_query) for m in self]
    def get_stats(self) -> tuple[int, int]:
        """
        Returns:
            tuple (n_memories: int, n_chunks: int): the stats of the memory index
        """
        return len(self), functools.reduce(lambda t, m: t + len(m.e_chunks), self, 0)

View File

@@ -1,91 +0,0 @@
from __future__ import annotations
import logging
from pathlib import Path
from typing import Iterator
import orjson
from forge.config.config import Config
from ..memory_item import MemoryItem
from .base import VectorMemoryProvider
logger = logging.getLogger(__name__)
class JSONFileMemory(VectorMemoryProvider):
    """Memory backend that stores memories in a JSON file"""
    SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
    # Path of the on-disk index file (<workspace>/<memory_index>.json).
    file_path: Path
    # In-memory list mirrored to the index file on every mutation.
    memories: list[MemoryItem]
    def __init__(self, config: Config) -> None:
        """Initialize a class instance
        Args:
            config: Config object
        Returns:
            None
        """
        self.file_path = config.workspace_path / f"{config.memory_index}.json"
        self.file_path.touch()
        logger.debug(
            f"Initialized {__class__.__name__} with index path {self.file_path}"
        )
        self.memories = []
        try:
            self.load_index()
            logger.debug(f"Loaded {len(self.memories)} MemoryItems from file")
        # Broad catch is deliberate: a corrupt/empty index is rebuilt, not fatal.
        except Exception as e:
            logger.warning(f"Could not load MemoryItems from file: {e}")
            self.save_index()
    def __iter__(self) -> Iterator[MemoryItem]:
        return iter(self.memories)
    def __contains__(self, x: MemoryItem) -> bool:
        return x in self.memories
    def __len__(self) -> int:
        return len(self.memories)
    # NOTE(review): returns the new length; MutableSet.add conventionally
    # returns None — callers should not rely on this.
    def add(self, item: MemoryItem):
        self.memories.append(item)
        logger.debug(f"Adding item to memory: {item.dump()}")
        self.save_index()
        return len(self.memories)
    def discard(self, item: MemoryItem):
        try:
            self.remove(item)
        except ValueError:  # item not in memory
            pass
    def clear(self):
        """Clears the data in memory."""
        self.memories.clear()
        self.save_index()
    def load_index(self):
        """Loads all memories from the index file"""
        if not self.file_path.is_file():
            logger.debug(f"Index file '{self.file_path}' does not exist")
            return
        with self.file_path.open("r") as f:
            logger.debug(f"Loading memories from index file '{self.file_path}'")
            json_index = orjson.loads(f.read())
            for memory_item_dict in json_index:
                self.memories.append(MemoryItem.parse_obj(memory_item_dict))
    def save_index(self):
        """Serialize all memories to the index file (overwrites it)."""
        logger.debug(f"Saving memory index to file {self.file_path}")
        with self.file_path.open("wb") as f:
            return f.write(
                orjson.dumps(
                    [m.dict() for m in self.memories], option=self.SAVE_OPTIONS
                )
            )

View File

@@ -1,36 +0,0 @@
"""A class that does not store any data. This is the default memory provider."""
from __future__ import annotations
from typing import Iterator, Optional
from forge.config.config import Config
from .. import MemoryItem
from .base import VectorMemoryProvider
class NoMemory(VectorMemoryProvider):
    """
    A class that does not store any data. This is the default memory provider.
    """
    # `config` is accepted for interface parity but unused.
    def __init__(self, config: Optional[Config] = None):
        pass
    def __iter__(self) -> Iterator[MemoryItem]:
        return iter([])
    def __contains__(self, x: MemoryItem) -> bool:
        return False
    def __len__(self) -> int:
        return 0
    # All mutations are deliberate no-ops.
    def add(self, item: MemoryItem):
        pass
    def discard(self, item: MemoryItem):
        pass
    def clear(self):
        pass

View File

@@ -1,79 +0,0 @@
import logging
from typing import Any, Sequence, overload
import numpy as np
from forge.config.config import Config
from forge.llm.providers import EmbeddingModelProvider
logger = logging.getLogger(__name__)
Embedding = list[float] | list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
"""Embedding vector"""
TText = Sequence[int]
"""Tokenized text"""
@overload
async def get_embedding(
    input: str | TText, config: Config, embedding_provider: EmbeddingModelProvider
) -> Embedding:
    ...


@overload
async def get_embedding(
    input: list[str] | list[TText],
    config: Config,
    embedding_provider: EmbeddingModelProvider,
) -> list[Embedding]:
    ...


async def get_embedding(
    input: str | TText | list[str] | list[TText],
    config: Config,
    embedding_provider: EmbeddingModelProvider,
) -> Embedding | list[Embedding]:
    """Embed one input, or a batch of inputs, with the configured embedding model.

    Args:
        input: Text (or pre-tokenized text) to embed. A list of strings or
            token arrays is treated as a batch.
        config: Config supplying the embedding model name.
        embedding_provider: The provider used to create embeddings.

    Returns:
        The embedding for a single input, or a list of embeddings for a batch.
    """
    # A list is a batch unless it is itself a single token array (list of ints).
    is_batch = isinstance(input, list) and not any(isinstance(i, int) for i in input)

    # Flatten newlines to spaces before embedding.
    if isinstance(input, str):
        input = input.replace("\n", " ")
    elif is_batch and isinstance(input[0], str):
        input = [text.replace("\n", " ") for text in input]

    model = config.embedding_model
    logger.debug(
        f"Getting embedding{f's for {len(input)} inputs' if is_batch else ''}"
        f" with model '{model}'"
    )

    if is_batch:
        # The provider is called once per input, sequentially.
        embeddings = []
        for text in input:
            response = await embedding_provider.create_embedding(
                text=text,
                model_name=model,
                embedding_parser=lambda e: e,
            )
            embeddings.append(response.embedding)
        return embeddings

    return (
        await embedding_provider.create_embedding(
            text=input,
            model_name=model,
            embedding_parser=lambda e: e,
        )
    ).embedding

View File

@@ -9,7 +9,7 @@ from forge.llm.providers import ChatMessage, MultiProvider
from forge.llm.providers.anthropic import AnthropicModelName
from git import Repo, TagReference
from autogpt.core.runner.client_lib.utils import coroutine
from autogpt.app.utils import coroutine
@click.command()

View File

@@ -1,126 +0,0 @@
# sourcery skip: snake-case-functions
"""Tests for JSONFileMemory class"""
import orjson
import pytest
from forge.config.config import Config
from forge.file_storage import FileStorage
from autogpt.memory.vector import JSONFileMemory, MemoryItem
def test_json_memory_init_without_backing_file(config: Config, storage: FileStorage):
    """Constructing JSONFileMemory creates an empty '[]' index file."""
    backing_file = storage.root / f"{config.memory_index}.json"
    assert not backing_file.exists()

    JSONFileMemory(config)

    assert backing_file.exists()
    assert backing_file.read_text() == "[]"
def test_json_memory_init_with_backing_empty_file(config: Config, storage: FileStorage):
    """An existing but empty index file is replaced with an empty '[]' index."""
    backing_file = storage.root / f"{config.memory_index}.json"
    backing_file.touch()
    assert backing_file.exists()

    JSONFileMemory(config)

    assert backing_file.exists()
    assert backing_file.read_text() == "[]"
def test_json_memory_init_with_backing_invalid_file(
    config: Config, storage: FileStorage
):
    """An index file with an unparsable schema is reset to an empty '[]' index."""
    backing_file = storage.root / f"{config.memory_index}.json"
    backing_file.touch()

    # Valid JSON, but not the list-of-MemoryItems schema the index expects.
    invalid_payload = orjson.dumps(
        {"texts": ["test"]}, option=JSONFileMemory.SAVE_OPTIONS
    )
    with backing_file.open("wb") as f:
        f.write(invalid_payload)
    assert backing_file.exists()

    JSONFileMemory(config)

    assert backing_file.exists()
    assert backing_file.read_text() == "[]"
def test_json_memory_add(config: Config, memory_item: MemoryItem):
    """add() stores the item in the in-memory list."""
    memory = JSONFileMemory(config)
    memory.add(memory_item)
    assert memory.memories[0] == memory_item
def test_json_memory_clear(config: Config, memory_item: MemoryItem):
    """clear() empties a non-empty index."""
    memory = JSONFileMemory(config)
    assert memory.memories == []

    memory.add(memory_item)
    assert memory.memories[0] == memory_item, "Cannot test clear() because add() fails"

    memory.clear()
    assert memory.memories == []
def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding):
    """get() returns None on an empty index and the stored item after add()."""
    memory = JSONFileMemory(config)
    assert (
        memory.get("test", config) is None
    ), "Cannot test get() because initial index is not empty"

    memory.add(memory_item)

    result = memory.get("test", config)
    assert result is not None
    assert result.memory_item == memory_item
def test_json_memory_load_index(config: Config, memory_item: MemoryItem):
    """load_index() restores previously saved memories from the index file."""
    memory = JSONFileMemory(config)
    memory.add(memory_item)

    # Sanity-check the setup before exercising load_index itself.
    try:
        assert memory.file_path.exists(), "index was not saved to file"
        assert len(memory) == 1, f"index contains {len(memory)} items instead of 1"
        assert memory.memories[0] == memory_item, "item in index != added mock item"
    except AssertionError as e:
        raise ValueError(f"Setting up for load_index test failed: {e}")

    memory.memories = []
    memory.load_index()

    assert len(memory) == 1
    assert memory.memories[0] == memory_item
@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
def test_json_memory_get_relevant(config: Config, cached_openai_client: None) -> None:
    """get_relevant() ranks stored memories by relevance to a query.

    NOTE: runs against VCR-recorded OpenAI responses; the exact texts and the
    order of the calls below must not change, or the cassette will not match.
    """
    index = JSONFileMemory(config)
    mem1 = MemoryItem.from_text_file("Sample text", "sample.txt", config)
    mem2 = MemoryItem.from_text_file(
        "Grocery list:\n- Pancake mix", "groceries.txt", config
    )
    mem3 = MemoryItem.from_text_file(
        "What is your favorite color?", "color.txt", config
    )
    # A long repetitive document, to test relevance ranking against noise.
    lipsum = "Lorem ipsum dolor sit amet"
    mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt", config)
    index.add(mem1)
    index.add(mem2)
    index.add(mem3)
    index.add(mem4)
    # Each memory should be the top match for its own content.
    assert index.get_relevant(mem1.raw_content, 1, config)[0].memory_item == mem1
    assert index.get_relevant(mem2.raw_content, 1, config)[0].memory_item == mem2
    assert index.get_relevant(mem3.raw_content, 1, config)[0].memory_item == mem3
    assert [mr.memory_item for mr in index.get_relevant(lipsum, 2, config)] == [
        mem4,
        mem1,
    ]
def test_json_memory_get_stats(config: Config, memory_item: MemoryItem) -> None:
    """get_stats() reports one memory and one chunk after a single add()."""
    memory = JSONFileMemory(config)
    memory.add(memory_item)
    memory_count, chunk_count = memory.get_stats()
    assert memory_count == 1
    assert chunk_count == 1

View File

@@ -1,17 +0,0 @@
import pytest
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
@pytest.fixture
def memory_item(mock_embedding: Embedding):
    """A minimal MemoryItem with a single chunk and mocked embeddings."""
    content = "test content"
    summary = "test content summary"
    return MemoryItem(
        raw_content=content,
        summary=summary,
        chunks=[content],
        chunk_summaries=[summary],
        e_summary=mock_embedding,
        e_chunks=[mock_embedding],
        metadata={},
    )

View File

@@ -1,44 +0,0 @@
import numpy
import pytest
from forge.config.config import Config
from forge.llm.providers import OPEN_AI_EMBEDDING_MODELS
from pytest_mock import MockerFixture
import autogpt.memory.vector.memory_item as vector_memory_item
import autogpt.memory.vector.providers.base as memory_provider_base
from autogpt.memory.vector import get_memory
from autogpt.memory.vector.utils import Embedding
@pytest.fixture
def embedding_dimension(config: Config):
    """Dimensionality of the configured OpenAI embedding model."""
    model_info = OPEN_AI_EMBEDDING_MODELS[config.embedding_model]
    return model_info.embedding_dimensions
@pytest.fixture
def mock_embedding(embedding_dimension: int) -> Embedding:
    """A constant-valued float32 embedding vector of the right dimensionality."""
    return numpy.full(embedding_dimension, 0.0255, numpy.float32)
@pytest.fixture
def mock_get_embedding(mocker: MockerFixture, mock_embedding: Embedding):
    """Patch get_embedding in both modules that resolve it at call time."""
    for target_module in (vector_memory_item, memory_provider_base):
        mocker.patch.object(
            target_module,
            "get_embedding",
            return_value=mock_embedding,
        )
@pytest.fixture
def memory_none(agent_test_config: Config, mock_get_embedding):
    """Temporarily switch the memory backend to 'no_memory' and yield it."""
    previous_backend = agent_test_config.memory_backend
    agent_test_config.memory_backend = "no_memory"
    yield get_memory(agent_test_config)
    # Restore the original backend on fixture teardown.
    agent_test_config.memory_backend = previous_backend

View File

@@ -8,7 +8,6 @@ from forge.logging.config import configure_logging
logger = logging.getLogger(__name__)
logo = """\n\n
d8888 888 .d8888b. 8888888b. 88888888888
d88888 888 d88P Y88b 888 Y88b 888

View File

@@ -79,19 +79,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
continuous_mode: bool = False
continuous_limit: int = 0
##########
# Memory #
##########
memory_backend: str = UserConfigurable("json_file", from_env="MEMORY_BACKEND")
memory_index: str = UserConfigurable("auto-gpt-memory", from_env="MEMORY_INDEX")
redis_host: str = UserConfigurable("localhost", from_env="REDIS_HOST")
redis_port: int = UserConfigurable(default=6379, from_env="REDIS_PORT")
redis_password: str = UserConfigurable("", from_env="REDIS_PASSWORD")
wipe_redis_on_start: bool = UserConfigurable(
default=True,
from_env=lambda: os.getenv("WIPE_REDIS_ON_START", "True") == "True",
)
############
# Commands #
############

View File

@@ -1,5 +1,26 @@
from math import ceil, floor
from typing import Any
from forge.llm.prompting.schema import ChatPrompt
# Total width (in characters) of each role-separator line.
SEPARATOR_LENGTH = 42

def dump_prompt(prompt: ChatPrompt) -> str:
    """Render a ChatPrompt as a human-readable, multi-line debug dump."""
    def separator(text: str):
        # Center the upper-cased role inside a SEPARATOR_LENGTH-wide dashed line;
        # the extra dash goes on the right when the role length is odd.
        half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2
        return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}"
    formatted_messages = "\n".join(
        [f"{separator(m.role)}\n{m.content}" for m in prompt.messages]
    )
    return f"""
============== {prompt.__class__.__name__} ==============
Length: {len(prompt.messages)} messages
{formatted_messages}
==========================================
"""
def format_numbered_list(items: list[Any], start_at: int = 1) -> str:
    """Render *items* one per line as 'N. item', numbering from *start_at*."""
    numbered_lines = (
        f"{number}. {str(entry)}" for number, entry in enumerate(items, start_at)
    )
    return "\n".join(numbered_lines)

View File

@@ -8,6 +8,7 @@ import sys
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from colorama import Fore, Style
from openai._base_client import log as openai_logger
from forge.models.config import SystemConfiguration, UserConfigurable

View File

@@ -28,14 +28,9 @@ Configuration is controlled through the `Config` object. You can set configurati
- `HUGGINGFACE_IMAGE_MODEL`: HuggingFace model to use for image generation. Default: CompVis/stable-diffusion-v1-4
- `IMAGE_PROVIDER`: Image provider. Options are `dalle`, `huggingface`, and `sdwebui`. Default: dalle
- `IMAGE_SIZE`: Default size of image to generate. Default: 256
- `MEMORY_BACKEND`: Memory back-end to use. Currently `json_file` is the only supported and enabled backend. Default: json_file
- `MEMORY_INDEX`: Value used in the Memory backend for scoping, naming, or indexing. Default: auto-gpt-memory
- `OPENAI_API_KEY`: *REQUIRED*- Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
- `REDIS_HOST`: Redis Host. Default: localhost
- `REDIS_PASSWORD`: Redis Password. Optional. Default:
- `REDIS_PORT`: Redis Port. Default: 6379
- `RESTRICT_TO_WORKSPACE`: Restricts file reading and writing to the workspace directory. Default: True
- `SD_WEBUI_AUTH`: Stable Diffusion Web UI username:password pair. Optional.
- `SD_WEBUI_URL`: Stable Diffusion Web UI URL. Default: http://localhost:7860

View File

@@ -33,7 +33,7 @@ Create your agent fixture.
```python
def kubernetes_agent(
agent_test_config, memory_json_file, workspace: Workspace
agent_test_config, workspace: Workspace
):
# Please choose the commands your agent will need to beat the challenges, the full list is available in the main.py
# (we're working on a better way to design this, for now you have to look at main.py)
@@ -54,7 +54,6 @@ def kubernetes_agent(
system_prompt = ai_profile.construct_full_prompt()
agent_test_config.set_continuous_mode(False)
agent = Agent(
memory=memory_json_file,
command_registry=command_registry,
config=ai_profile,
next_action_count=0,