Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-12 00:28:31 -05:00

Compare commits: 1 commit (ec47a318bc)
.devcontainer/Dockerfile
@@ -10,4 +10,4 @@ RUN apt-get update && apt-get install -y \
 RUN apt-get install -y curl jq wget git

 # Declare working directory
-WORKDIR /workspace/AutoGPT
+WORKDIR /workspace/Auto-GPT
.devcontainer/devcontainer.json
@@ -1,7 +1,7 @@
 {
 "dockerComposeFile": "./docker-compose.yml",
 "service": "auto-gpt",
-"workspaceFolder": "/workspace/AutoGPT",
+"workspaceFolder": "/workspace/Auto-GPT",
 "shutdownAction": "stopCompose",
 "features": {
 "ghcr.io/devcontainers/features/common-utils:2": {
@@ -46,11 +46,11 @@
 // "forwardPorts": [],

 // Use 'postCreateCommand' to run commands after the container is created.
-// "postCreateCommand": "poetry install",
+// "postCreateCommand": "pip3 install --user -r requirements.txt",

 // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
 "remoteUser": "vscode",

 // Add the freshly containerized repo to the list of safe repositories
-"postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
-}
+"postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
+}
.devcontainer/docker-compose.yml
@@ -9,4 +9,4 @@ services:
 context: ../
 tty: true
 volumes:
-- ../:/workspace/AutoGPT
+- ../:/workspace/Auto-GPT
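Taken together, the three .devcontainer files above define the dev environment: docker-compose.yml builds the image from the repo root and mounts the checkout at the workspace path, while devcontainer.json picks the service and runs the post-create setup. A minimal sketch for exercising the same configuration from a plain terminal, assuming the optional `@devcontainers/cli` npm package (not part of this diff) is installed:

```shell
# Build and start the dev container defined under .devcontainer/ (sketch)
devcontainer up --workspace-folder .
# Verify the post-create step ran: the repo should be a registered safe.directory
devcontainer exec --workspace-folder . git config --global --get-all safe.directory
```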
.dockerignore
@@ -1,40 +1,9 @@
-# Ignore everything by default, selectively add things to context
-*
-
-# AutoGPT
-!autogpt/autogpt/
-!autogpt/pyproject.toml
-!autogpt/poetry.lock
-!autogpt/README.md
-!autogpt/tests/
-
-# Benchmark
-!benchmark/agbenchmark/
-!benchmark/pyproject.toml
-!benchmark/poetry.lock
-!benchmark/README.md
-
-# Forge
-!forge/forge/
-!forge/pyproject.toml
-!forge/poetry.lock
-!forge/README.md
-
-# Frontend
-!frontend/build/web/
-
-# rnd
-!rnd/
-
-# Explicitly re-ignore some folders
-.*
-**/__pycache__
-# rnd
-rnd/autogpt_builder/.next/
-rnd/autogpt_builder/node_modules
-rnd/autogpt_builder/.env.example
-rnd/autogpt_builder/.env.local
-rnd/autogpt_server/.env
-rnd/autogpt_server/.venv/
-rnd/market/.env
+*.template
+*.yaml
+*.yml
+!prompt_settings.yaml
+
+*.md
+*.png
+!BULLETIN.md
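The removed rules use the ignore-everything-then-allowlist pattern, which makes it easy to lose a file from the build context without noticing. A quick way to see what actually survives .dockerignore is to build a throwaway image that copies the whole context and lists it; this probe is a sketch, not part of the repo:

```shell
# List everything Docker's build context contains after .dockerignore filtering
cat > /tmp/ctx-probe.Dockerfile <<'EOF'
FROM busybox
COPY . /ctx
CMD ["find", "/ctx"]
EOF
docker build -f /tmp/ctx-probe.Dockerfile -t ctx-probe .
docker run --rm ctx-probe
```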
212  .env.template  (new file)
@@ -0,0 +1,212 @@
# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co

################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
# OPENAI_FUNCTIONS=False

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
# PLAIN_OUTPUT=False

## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
# DISABLED_COMMAND_CATEGORIES=

################################################################################
### LLM PROVIDER
################################################################################

## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0

## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=

## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml


################################################################################
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4)
# SMART_LLM=gpt-4

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-ada-002

################################################################################
### SHELL EXECUTION
################################################################################

## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
# SHELL_COMMAND_CONTROL=denylist

## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su

## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
# SHELL_ALLOWLIST=
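Per the comments above, the allowlist only applies when SHELL_COMMAND_CONTROL selects it, so switching to the stricter mode means setting both values. A hypothetical locked-down configuration (the command names are placeholders, not recommendations from the template):

```shell
SHELL_COMMAND_CONTROL=allowlist
SHELL_ALLOWLIST=ls,cat,grep   # hypothetical: only these commands may run
```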

################################################################################
### MEMORY
################################################################################

### General

## MEMORY_BACKEND - Memory backend type
# MEMORY_BACKEND=json_file

## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
# MEMORY_INDEX=auto-gpt

### Redis

## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_HOST=localhost

## REDIS_PORT - Redis port (Default: 6379)
# REDIS_PORT=6379

## REDIS_PASSWORD - Redis password (Default: "")
# REDIS_PASSWORD=

## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# WIPE_REDIS_ON_START=True

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Common

## IMAGE_PROVIDER - Image provider (Default: dalle)
# IMAGE_PROVIDER=dalle

## IMAGE_SIZE - Image size (Default: 256)
# IMAGE_SIZE=256

### Huggingface (IMAGE_PROVIDER=huggingface)

## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
# SD_WEBUI_AUTH=

## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
# SD_WEBUI_URL=http://localhost:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
# AUDIO_TO_TEXT_PROVIDER=huggingface

## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4

################################################################################
### GITHUB
################################################################################

## GITHUB_API_KEY - Github API key / PAT (Default: None)
# GITHUB_API_KEY=

## GITHUB_USERNAME - Github username (Default: None)
# GITHUB_USERNAME=

################################################################################
### WEB BROWSING
################################################################################

## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
# HEADLESS_BROWSER=True

## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
# BROWSE_CHUNK_MAX_LENGTH=3000

## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=

################################################################################
### TEXT TO SPEECH PROVIDER
################################################################################

## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
# TEXT_TO_SPEECH_PROVIDER=gtts

### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian

### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=

## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=

################################################################################
### CHAT MESSAGES
################################################################################

## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False
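Only OPENAI_API_KEY is uncommented in this template, so it is the one value a fresh checkout must supply; everything else falls back to the documented default. A typical first-run sequence, sketched here rather than taken from the project docs:

```shell
cp .env.template .env
# Replace the placeholder with a real key (the value shown is hypothetical)
sed -i 's/^OPENAI_API_KEY=.*/OPENAI_API_KEY=my-openai-api-key/' .env
# Show which settings are actually active, i.e. the non-comment lines
grep -vE '^[[:space:]]*(#|$)' .env
```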
.envrc
@@ -1,4 +1,4 @@
 # Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
-# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use AutoGPT.
+# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.

 [[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
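direnv refuses to load an .envrc until the user approves it, which is the one-time permission the comment refers to. Assuming direnv is hooked into the shell, the approval step looks like:

```shell
cd AutoGPT/            # path is illustrative
direnv allow .         # one-time approval; the flake env then loads on every cd
```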
2  .flake8
@@ -1,5 +1,6 @@
[flake8]
max-line-length = 88
select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
@@ -9,4 +10,3 @@ exclude =
.venv/*,
reports/*,
dist/*,
data/*,
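flake8 picks up a .flake8 file in the working directory automatically, so the narrowed `select` list and the exclusions above apply without extra flags; a sketch of running it against this config:

```shell
pip install flake8   # assumption: flake8 itself is not pinned by this diff
flake8 .             # reads the [flake8] section above automatically
```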
11  .gitattributes  (vendored)
@@ -1,10 +1,5 @@
frontend/build/** linguist-generated

**/poetry.lock linguist-generated

docs/_javascript/** linguist-vendored

# Exclude VCR cassettes from stats
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated

* text=auto
# Mark documentation as such
docs/**.md linguist-documentation
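These attributes mainly affect how GitHub's linguist classifies files in language stats and diffs; `git check-attr` shows what a given path resolves to. The path below is illustrative:

```shell
git check-attr linguist-generated -- frontend/build/web/main.dart.js
# expected output: frontend/build/web/main.dart.js: linguist-generated: set
```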
9  .github/CODEOWNERS  (vendored)
@@ -1,7 +1,2 @@
-* @Significant-Gravitas/maintainers
-.github/workflows/ @Significant-Gravitas/devops
-forge/ @Significant-Gravitas/forge-maintainers
-benchmark/ @Significant-Gravitas/benchmark-maintainers
-frontend/ @Significant-Gravitas/frontend-maintainers
-rnd/infra @Significant-Gravitas/devops
-.github/CODEOWNERS @Significant-Gravitas/admins
+.github/workflows/ @Significant-Gravitas/maintainers
+autogpt/core @collijk
3  .github/FUNDING.yml  (vendored, new file)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

github: Torantulino
63  .github/ISSUE_TEMPLATE/1.bug.yml  (vendored)
@@ -1,5 +1,5 @@
 name: Bug report 🐛
-description: Create a bug report for AutoGPT.
+description: Create a bug report for Auto-GPT.
 labels: ['status: needs triage']
 body:
 - type: markdown
@@ -13,16 +13,16 @@ body:
 [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
 [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
 [discord]: https://discord.gg/autogpt
-[discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
+[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
 [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
-[existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
-[wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
+[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
+[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing

 - type: checkboxes
 attributes:
 label: ⚠️ Search for existing issues first ⚠️
 description: >
-Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
+Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
 to see if an issue already exists for the same problem.
 options:
 - label: I have searched the existing issues, and there is no existing issue for my problem
@@ -35,8 +35,8 @@ body:
 A good rule of thumb: What would you type if you were searching for the issue?

 For example:
-BAD - my AutoGPT keeps looping
-GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.
+BAD - my auto-gpt keeps looping
+GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.

 ⚠️ SUPER-busy repo, please help the volunteer maintainers.
 The less time we spend here, the more time we can spend building AutoGPT.
@@ -54,7 +54,7 @@ body:
 attributes:
 label: Which Operating System are you using?
 description: >
-Please select the operating system you were using to run AutoGPT when this problem occurred.
+Please select the operating system you were using to run Auto-GPT when this problem occurred.
 options:
 - Windows
 - Linux
@@ -73,12 +73,12 @@ body:

 - type: dropdown
 attributes:
-label: Which version of AutoGPT are you using?
+label: Which version of Auto-GPT are you using?
 description: |
-Please select which version of AutoGPT you were using when this issue occurred.
-If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
-**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
-If installed with git you can run `git branch` to see which version of AutoGPT you are running.
+Please select which version of Auto-GPT you were using when this issue occurred.
+If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
+**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
+If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
 options:
 - Latest Release
 - Stable (branch)
@@ -88,16 +88,14 @@ body:

 - type: dropdown
 attributes:
-label: What LLM Provider do you use?
+label: Do you use OpenAI GPT-3 or GPT-4?
 description: >
-If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
-the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
+If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
+the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
 options:
-- Azure
-- Groq
-- Anthropic
-- Llamafile
-- Other (detail in issue)
+- GPT-3.5
+- GPT-4
+- GPT-4(32k)
 validations:
 required: true
@@ -128,17 +126,10 @@ body:
 label: Specify the area
 description: Please specify the area you think is best related to the issue.

-- type: input
-attributes:
-label: What commit or version are you using?
-description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
-validations:
-required: true
-
 - type: textarea
 attributes:
 label: Describe your issue.
 description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
 validations:
 required: true
@@ -148,16 +139,16 @@ body:
 value: |
 ⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️

-"The log files are located in the folder 'logs' inside the main AutoGPT folder."
+"The log files are located in the folder 'logs' inside the main auto-gpt folder."

 - type: textarea
 attributes:
 label: Upload Activity Log Content
 description: |
 Upload the activity log content, this can help us understand the issue better.
-To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
-⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
-any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
+⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
+any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
 validations:
 required: false
@@ -166,8 +157,8 @@ body:
 label: Upload Error Log Content
 description: |
 Upload the error log content, this will help us understand the issue better.
-To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
-⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
-any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
+⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
+any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
 validations:
 required: false
6  .github/ISSUE_TEMPLATE/2.feature.yml  (vendored)
@@ -1,16 +1,16 @@
 name: Feature request 🚀
-description: Suggest a new idea for AutoGPT!
+description: Suggest a new idea for Auto-GPT!
 labels: ['status: needs triage']
 body:
 - type: markdown
 attributes:
 value: |
-First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
+First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
 Please provide a searchable summary of the issue in the title above ⬆️.
 - type: checkboxes
 attributes:
 label: Duplicates
-description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
+description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
 options:
 - label: I have searched the existing issues
 required: true
76  .github/PULL_REQUEST_TEMPLATE.md  (vendored)
@@ -1,31 +1,49 @@
-### Background
-
-<!-- Clearly explain the need for these changes: -->
-
-### Changes 🏗️
-
-<!-- Concisely describe all of the changes made in this pull request: -->
-
-### PR Quality Scorecard ✨
-
-<!--
-Check out our contribution guide:
-https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
-
-1. Avoid duplicate work, issues, PRs etc.
-2. Also consider contributing something other than code; see the [contribution guide]
-   for options.
-3. Clearly explain your changes.
-4. Avoid making unnecessary changes, especially if they're purely based on personal
-   preferences. Doing so is the maintainers' job. ;-)
--->
-
-- [x] Have you used the PR description template?   `+2 pts`
-- [ ] Is your pull request atomic, focusing on a single change?   `+5 pts`
-- [ ] Have you linked the GitHub issue(s) that this PR addresses?   `+5 pts`
-- [ ] Have you documented your changes clearly and comprehensively?   `+5 pts`
-- [ ] Have you changed or added a feature?   `-4 pts`
-- [ ] Have you added/updated corresponding documentation?   `+4 pts`
-- [ ] Have you added/updated corresponding integration tests?   `+5 pts`
-- [ ] Have you changed the behavior of AutoGPT?   `-5 pts`
-- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance?   `+10 pts`
+<!-- ⚠️ At the moment any non-essential commands are not being merged.
+If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
+We are expecting to ship plugin support within the week (PR #757).
+Resources:
+* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
+-->
+
+<!-- 📢 Announcement
+We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
+
+Focus on a single, specific change.
+Do not include any unrelated or "extra" modifications.
+Provide clear documentation and explanations of the changes made.
+Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
+For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
+
+Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
+
+By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
+
+### Background
+<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
+
+### Changes
+<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
+
+### Documentation
+<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
+
+### Test Plan
+<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
+
+### PR Quality Checklist
+- [ ] My pull request is atomic and focuses on a single change.
+- [ ] I have thoroughly tested my changes with multiple different prompts.
+- [ ] I have considered potential risks and mitigations for my changes.
+- [ ] I have documented my changes clearly and comprehensively.
+- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
+- [ ] I have run the following commands against my code to ensure it passes our linters:
+```shell
+black .
+isort .
+mypy
+autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
+```
+
+<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
+
+<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
27  .github/labeler.yml  (vendored, file deleted)
@@ -1,27 +0,0 @@
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: autogpt/**

Forge:
- changed-files:
- any-glob-to-any-file: forge/**

Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**

Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**

documentation:
- changed-files:
- any-glob-to-any-file: docs/**

Builder:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_builder/**

Server:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_server/**
41  .github/workflows/autogpt-builder-ci.yml  (vendored, file deleted)
@@ -1,41 +0,0 @@
name: AutoGPT Builder CI

on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'
pull_request:
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'

defaults:
run:
shell: bash
working-directory: rnd/autogpt_builder

jobs:

lint:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '21'

- name: Install dependencies
run: |
npm install

- name: Check formatting with Prettier
run: |
npx prettier --check .

- name: Run lint
run: |
npm run lint
138  .github/workflows/autogpt-ci.yml  (vendored, file deleted)
@@ -1,138 +0,0 @@
name: AutoGPT CI

on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'

concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
run:
shell: bash
working-directory: autogpt

jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3

- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd

- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &

# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
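Because `minio server ./data &` returns immediately, tests can race the server on slow runners. A readiness probe matching the health-cmd the Docker CI job below uses would look like this (a sketch, not part of the workflow):

```shell
# Wait until MinIO answers its liveness endpoint before running tests
until curl -sf http://127.0.0.1:9000/minio/health/live; do sleep 1; done
```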

- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}

- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -

if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

- name: Install Python dependencies
run: poetry install

- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}

- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: autogpt/logs/
162  .github/workflows/autogpt-docker-ci.yml  (vendored, file deleted)
@@ -1,162 +0,0 @@
name: AutoGPT Docker CI

on:
push:
branches: [ master, development ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'

concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
run:
working-directory: autogpt

env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
DEV_IMAGE_TAG: latest-dev

jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- if: runner.debug
run: |
ls -al
du -hs *

- id: build
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}

build_type: ${{ matrix.build-type }}

prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

test:
runs-on: ubuntu-latest
timeout-minutes: 10

services:
minio:
image: minio/minio:edge-cicd
options: >
--name=minio
--health-interval=10s --health-timeout=5s --health-retries=3
--health-cmd="curl -f http://localhost:9000/minio/health/live"

steps:
- name: Check out repository
uses: actions/checkout@v4
with:
submodules: true

- if: github.event_name == 'push'
name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- id: build
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max

- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: http://minio:9000
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
run: |
set +e
docker run --env CI --env OPENAI_API_KEY \
--network container:minio \
--env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
--entrypoint poetry ${{ env.IMAGE_NAME }} run \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1 | tee test_output.txt

test_failure=${PIPESTATUS[0]}

cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$(cat test_output.txt)
\`\`\`
$EOF

exit $test_failure
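The `tee` in that test step would normally swallow pytest's exit status, which is why the script disables abort-on-error with `set +e` and reads `PIPESTATUS[0]` instead of `$?`. Distilled to its core, the idiom is (a sketch, not the workflow itself):

```shell
set +e
run_tests 2>&1 | tee output.txt   # run_tests is a hypothetical command
rc=${PIPESTATUS[0]}               # status of run_tests, not of tee
exit $rc
```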

- if: github.event_name == 'push' && github.ref_name == 'master'
name: Push image to Docker Hub
run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
56  .github/workflows/autogpt-infra-ci.yml  (vendored, file deleted)
@@ -1,56 +0,0 @@
name: AutoGPT Builder Infra

on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'

defaults:
run:
shell: bash
working-directory: rnd/infra

jobs:
lint:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false

- name: Set up Helm
uses: azure/setup-helm@v4.2.0
with:
version: v3.14.4

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0

- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi

- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}
155  .github/workflows/autogpt-server-ci.yml  (vendored, file deleted)
@@ -1,155 +0,0 @@
name: AutoGPT Server CI

on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"

concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
run:
shell: bash
working-directory: rnd/autogpt_server

jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

steps:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER || 'postgres' }}
password: ${{ secrets.DB_PASS || 'postgres' }}
database: postgres
port: 5432
id: postgres

# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3

- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: "."
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd

- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &

# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}

- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -

if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

- name: Install Python dependencies
run: poetry install

- name: Generate Prisma Client
run: poetry run prisma generate

- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
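The Prisma pair above (generate the client, then apply migrations) is what a developer would also run locally before starting the server. A sketch of the local equivalent, using a made-up connection string modeled on the DATABASE_URL pattern this job sets further down:

```shell
# Hypothetical local run of the same two steps, assuming a local postgres
export DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres
poetry run prisma generate
poetry run prisma migrate dev --name updates
```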

- id: lint
name: Run Linter
run: poetry run lint

- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}

# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}
97  .github/workflows/autogpts-benchmark.yml  (vendored, file deleted)
@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark

on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'

jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}

- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -

- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}

- run: poetry -C benchmark install

- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}

set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163

# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
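One caveat worth flagging in that conversion: `$?` is rewritten after every command, so by the time the `else` branch runs, `exit $?` reports the status of the failed `[` test rather than agbenchmark's own code. The conventional fix is to capture the status once, which is what the Benchmark CI job later in this diff does with its `EXIT_CODE=$?` pattern. A corrected sketch:

```shell
poetry run agbenchmark run -N 3 --test=ReadFile   # abridged test list
rc=$?               # capture immediately; every later command clobbers $?
if [ "$rc" -le 1 ]; then
  exit 0            # 0 = all passed, 1 = some challenges failed
fi
exit "$rc"
```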
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir

TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json

# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"

# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY

git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}
71  .github/workflows/autogpts-ci.yml  (vendored, file deleted)
@@ -1,71 +0,0 @@
name: Agent smoke tests

on:
workflow_dispatch:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '!**/*.md'

jobs:
serve-agent-protocol:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 20
env:
min-python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}

- name: Install Poetry
working-directory: ./${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -

- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
poetry run agbenchmark --test=WriteFile
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENT_NAME: ${{ matrix.agent-name }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
HELICONE_CACHE_ENABLED: false
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
169  .github/workflows/benchmark-ci.yml  (vendored, file deleted)
@@ -1,169 +0,0 @@
name: AGBenchmark CI

on:
push:
branches: [ master, development, ci-test* ]
paths:
- 'benchmark/**'
- .github/workflows/benchmark-ci.yml
- '!benchmark/reports/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- 'benchmark/**'
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml

concurrency:
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
run:
shell: bash

env:
min-python-version: '3.10'

jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
defaults:
run:
shell: bash
working-directory: benchmark
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}

- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -

if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

- name: Install Python dependencies
run: poetry install

- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}

self-test-with-agent:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}

- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -

- name: Run regression tests
working-directory: .
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}

set +e # Ignore non-zero exit codes and continue execution
echo "Running the following command: poetry run agbenchmark --maintain --mock"
poetry run agbenchmark --maintain --mock
EXIT_CODE=$?
set -e # Stop ignoring non-zero exit codes
# Check if the exit code was 5, and if so, exit with 0 instead
if [ $EXIT_CODE -eq 5 ]; then
echo "regression_tests.json is empty."
fi

echo "Running the following command: poetry run agbenchmark --mock"
poetry run agbenchmark --mock

echo "Running the following command: poetry run agbenchmark --mock --category=data"
poetry run agbenchmark --mock --category=data

echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding

echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true

poetry run agbenchmark --mock

CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
55  .github/workflows/benchmark_publish_package.yml  vendored
@@ -1,55 +0,0 @@
name: Publish to PyPI

on:
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.8

      - name: Install Poetry
        working-directory: ./benchmark/
        run: |
          curl -sSL https://install.python-poetry.org | python3 -
          echo "$HOME/.poetry/bin" >> $GITHUB_PATH

      - name: Build project for distribution
        working-directory: ./benchmark/
        run: poetry build

      - name: Install dependencies
        working-directory: ./benchmark/
        run: poetry install

      - name: Check Version
        working-directory: ./benchmark/
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT

      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "benchmark/dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: false
          tag: agbenchmark-v${{ steps.check-version.outputs.version }}
          commit: master

      - name: Build and publish
        working-directory: ./benchmark/
        run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
73  .github/workflows/benchmarks.yml  vendored  Normal file
@@ -0,0 +1,73 @@
name: Benchmarks

on:
  schedule:
    - cron: '0 8 * * *'
  workflow_dispatch:

jobs:
  Benchmark:
    name: ${{ matrix.config.task-name }}
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        config:
          - python-version: "3.10"
            task: "tests/challenges"
            task-name: "Mandatory Tasks"
          - python-version: "3.10"
            task: "--beat-challenges -ra tests/challenges"
            task-name: "Challenging Tasks"

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          ref: master

      - name: Set up Python ${{ matrix.config.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.config.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run pytest with coverage
        run: |
          rm -rf tests/Auto-GPT-test-cassettes
          pytest -n auto --record-mode=all ${{ matrix.config.task }}
        env:
          CI: true
          PROXY: ${{ secrets.PROXY }}
          AGENT_MODE: ${{ secrets.AGENT_MODE }}
          AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
          PLAIN_OUTPUT: True

      - name: Upload logs as artifact
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-logs-${{ matrix.config.task-name }}
          path: logs/

      - name: Upload cassettes as artifact
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: cassettes-${{ matrix.config.task-name }}
          path: tests/Auto-GPT-test-cassettes/
261  .github/workflows/ci.yml  vendored  Normal file
@@ -0,0 +1,261 @@
name: Python CI

on:
  push:
    branches: [ master, ci-test* ]
    paths-ignore:
      - 'tests/Auto-GPT-test-cassettes'
      - 'tests/challenges/current_score.json'
  pull_request:
    branches: [ stable, master, release-* ]
  pull_request_target:
    branches: [ master, release-*, ci-test* ]

concurrency:
  group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

jobs:
  lint:
    # eliminate duplicate runs
    if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.min-python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Lint with flake8
        run: flake8

      - name: Check black formatting
        run: black . --check
        if: success() || failure()

      - name: Check isort formatting
        run: isort . --check
        if: success() || failure()

      - name: Check mypy formatting
        run: mypy
        if: success() || failure()

      - name: Check for unused imports and pass statements
        run: |
          cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
          $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)

  test:
    # eliminate duplicate runs
    if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

    permissions:
      # Gives the action the necessary permissions for publishing new
      # comments in pull requests.
      pull-requests: write
      # Gives the action the necessary permissions for pushing data to the
      # python-coverage-comment-action branch, and for editing existing
      # comments (to avoid publishing multiple comments in the same PR)
      contents: write
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      matrix:
        python-version: ["3.10"]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}
          submodules: true

      - name: Configure git user Auto-GPT-Bot
        run: |
          git config --global user.name "Auto-GPT-Bot"
          git config --global user.email "github-bot@agpt.co"

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
          cassette_base_branch="${{ github.event.pull_request.base.ref }}"
          cd tests/Auto-GPT-test-cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run pytest with coverage
        run: |
          pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            tests/unit tests/integration tests/challenges
          python tests/challenges/utils/build_current_score.py
        env:
          CI: true
          PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
          AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }}
          AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }}
          OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }}
          PLAIN_OUTPUT: True

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/Auto-GPT-test-cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - name: Push updated challenge scores
        if: github.event_name == 'push'
        run: |
          score_file="tests/challenges/current_score.json"

          if ! git diff --quiet $score_file; then
            git add $score_file
            git commit -m "Update challenge scores"
            git push origin HEAD:${{ github.ref_name }}
          else
            echo "The challenge scores didn't change."
          fi

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || success() || failure()
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/Auto-GPT-test-cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/Auto-GPT-test-cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER=${{ github.event.pull_request.number }}
          TOKEN=${{ secrets.PAT_REVIEW }}
          REPO=${{ github.repository }}

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            curl -X POST \
              -H "Authorization: Bearer $TOKEN" \
              -H "Accept: application/vnd.github.v3+json" \
              https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
              -d '{"labels":["behaviour change"]}'

            echo $TOKEN | gh auth login --with-token
            gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-logs
          path: logs/
34  .github/workflows/close-stale-issues.yml  vendored
@@ -1,34 +0,0 @@
name: 'Close stale issues'
on:
  schedule:
    - cron: '30 1 * * *'
  workflow_dispatch:

permissions:
  issues: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          # operations-per-run: 5000
          stale-issue-message: >
            This issue has automatically been marked as _stale_ because it has not had
            any activity in the last 50 days. You can _unstale_ it by commenting or
            removing the label. Otherwise, this issue will be closed in 10 days.
          stale-pr-message: >
            This pull request has automatically been marked as _stale_ because it has
            not had any activity in the last 50 days. You can _unstale_ it by commenting
            or removing the label.
          close-issue-message: >
            This issue was closed automatically because it has been stale for 10 days
            with no activity.
          days-before-stale: 50
          days-before-close: 10
          # Do not touch meta issues:
          exempt-issue-labels: meta,fridge,project management
          # Do not affect pull requests:
          days-before-pr-stale: -1
          days-before-pr-close: -1
@@ -1,11 +1,11 @@
name: Purge Auto-GPT Docker CI cache
name: Purge Docker CI cache

on:
  schedule:
    - cron: 20 4 * * 1,4

env:
  BASE_BRANCH: development
  BASE_BRANCH: master
  IMAGE_NAME: auto-gpt

jobs:
@@ -16,20 +16,19 @@ jobs:
      build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@v3
        with:
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          load: true # save to docker images
          # use GHA cache as read-only
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
          cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
@@ -38,10 +37,10 @@ jobs:

          build_type: ${{ matrix.build-type }}

          prod_branch: master
          dev_branch: development
          prod_branch: stable
          dev_branch: master
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
124  .github/workflows/docker-ci.yml  vendored  Normal file
@@ -0,0 +1,124 @@
name: Docker CI

on:
  push:
    branches: [ master ]
    paths-ignore:
      - 'tests/Auto-GPT-test-cassettes'
      - 'tests/challenges/current_score.json'
  pull_request:
    branches: [ master, release-*, stable ]

concurrency:
  group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

env:
  IMAGE_NAME: auto-gpt

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - if: runner.debug
        run: |
          ls -al
          du -hs *

      - id: build
        name: Build image
        uses: docker/build-push-action@v3
        with:
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          tags: ${{ env.IMAGE_NAME }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=docker-${{ matrix.build-type }}
          cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref}}

          build_type: ${{ matrix.build-type }}

          prod_branch: stable
          dev_branch: master
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.event.after }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

          new_commits_json: ${{ toJSON(github.event.commits) }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      - name: Check out repository
        uses: actions/checkout@v3
        with:
          submodules: true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - id: build
        name: Build image
        uses: docker/build-push-action@v3
        with:
          build-args: BUILD_TYPE=dev # include pytest
          tags: ${{ env.IMAGE_NAME }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=docker-dev
          cache-to: type=gha,scope=docker-dev,mode=max

      - id: test
        name: Run tests
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          set +e
          test_output=$(
            docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
              pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
              --numprocesses=4 --durations=10 \
              tests/unit tests/integration 2>&1
          )
          test_failure=$?

          echo "$test_output"

          cat << $EOF >> $GITHUB_STEP_SUMMARY
          # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
          \`\`\`
          $test_output
          \`\`\`
          $EOF

          exit $test_failure
@@ -1,4 +1,4 @@
name: AutoGPT Docker Release
name: Docker Release

on:
  release:
@@ -16,35 +16,31 @@ env:

jobs:
  build:
    if: startsWith(github.ref, 'refs/tags/autogpt-')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v3

      - name: Log in to Docker hub
        uses: docker/login-action@v3
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2

      # slashes are not allowed in image tags, but can appear in git branch or tag names
      - id: sanitize_tag
        name: Sanitize image tag
        run: |
          tag=${raw_tag//\//-}
          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
        run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
        env:
          raw_tag: ${{ github.ref_name }}

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@v3
        with:
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=release
          load: true # save to docker images
          # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
@@ -52,11 +48,10 @@ jobs:
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:latest,
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
          labels: GIT_REVISION=${{ github.sha }}

          # cache layers in GitHub Actions cache to speed up builds
          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
          cache-to: type=gha,scope=autogpt-docker-release,mode=max
          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
          cache-to: type=gha,scope=docker-release,mode=max

      - name: Push image to Docker Hub
        run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
@@ -68,10 +63,10 @@ jobs:
          event_ref_type: ${{ github.event.ref}}
          inputs_no_cache: ${{ inputs.no_cache }}

          prod_branch: master
          dev_branch: development
          prod_branch: stable
          dev_branch: master
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

          ref_type: ${{ github.ref_type }}
          current_ref: ${{ github.ref_name }}
37  .github/workflows/documentation-release.yml  vendored  Normal file
@@ -0,0 +1,37 @@
name: Docs

on:
  push:
    branches: [ stable ]
    paths:
      - 'docs/**'
      - 'mkdocs.yml'
      - '.github/workflows/documentation.yml'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

permissions:
  contents: write

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python 3
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Set up workflow cache
        uses: actions/cache@v3
        with:
          key: ${{ github.ref }}
          path: .cache

      - run: pip install mkdocs-material

      - run: mkdocs gh-deploy --force
236  .github/workflows/forge-ci.yml  vendored
@@ -1,236 +0,0 @@
name: Forge CI

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: forge

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: '.'
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        env:
          PR_BASE: ${{ github.event.pull_request.base.ref }}
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          cassette_base_branch="${PR_BASE}"
          cd tests/vcr_cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            forge
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: forge,${{ runner.os }}

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          if [ "${{ runner.os }}" = 'macOS' ]; then
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
          else
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
          fi

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/vcr_cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
        env:
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/vcr_cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/vcr_cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          TOKEN="${{ secrets.PAT_REVIEW }}"
          REPO="${{ github.repository }}"

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            echo $TOKEN | gh auth login --with-token
            gh issue edit $PR_NUMBER --add-label "behaviour change"
            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: forge/logs/
60  .github/workflows/frontend-ci.yml  vendored
@@ -1,60 +0,0 @@
name: Frontend CI/CD

on:
  push:
    branches:
      - master
      - development
      - 'ci-test*' # This will match any branch that starts with "ci-test"
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'
  pull_request:
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'

jobs:
  build:
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-latest
    env:
      BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2
        with:
          flutter-version: '3.13.2'

      - name: Build Flutter to Web
        run: |
          cd frontend
          flutter build web --base-href /app/

      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
      #   if: github.event_name == 'push'
      #   run: |
      #     git config --local user.email "action@github.com"
      #     git config --local user.name "GitHub Action"
      #     git add frontend/build/web
      #     git checkout -B ${{ env.BUILD_BRANCH }}
      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
      #     git push -f origin ${{ env.BUILD_BRANCH }}

      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
        if: github.event_name == 'push'
        uses: peter-evans/create-pull-request@v6
        with:
          add-paths: frontend/build/web
          base: ${{ github.ref_name }}
          branch: ${{ env.BUILD_BRANCH }}
          delete-branch: true
          title: "Update frontend build in `${{ github.ref_name }}`"
          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
          commit-message: "Update frontend build based on commit ${{ github.sha }}"
133  .github/workflows/hackathon.yml  vendored
@@ -1,133 +0,0 @@
name: Hackathon

on:
  workflow_dispatch:
    inputs:
      agents:
        description: "Agents to run (comma-separated)"
        required: false
        default: "autogpt" # Default agents if none are specified

jobs:
  matrix-setup:
    runs-on: ubuntu-latest
    # Service containers to run with `matrix-setup`
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      env-name: ${{ steps.set-matrix.outputs.env-name }}
    steps:
      - id: set-matrix
        run: |
          if [ "${{ github.event_name }}" == "schedule" ]; then
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::[ 'irrelevant']"
          elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
            matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::$matrix_string"
          else
            echo "::set-output name=env-name::testing"
            echo "::set-output name=matrix::[ 'irrelevant' ]"
          fi

  tests:
    environment:
      name: "${{ needs.matrix-setup.outputs.env-name }}"
    needs: matrix-setup
    env:
      min-python-version: "3.10"
    name: "${{ matrix.agent-name }}"
    runs-on: ubuntu-latest
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    timeout-minutes: 50
    strategy:
      fail-fast: false
      matrix:
        agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
    steps:
      - name: Print Environment Name
        run: |
          echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"

      - name: Check Docker Container
        id: check
        run: docker ps

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version: v18.15

      - name: Run benchmark
        run: |
          link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
          branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
          git clone "$link" -b "$branch" "$AGENT_NAME"
          cd $AGENT_NAME
          cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
          ./run agent start $AGENT_NAME
          cd ../benchmark
          poetry install
          poetry run agbenchmark --no-dep
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
          SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
          WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
          WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
          AGENT_NAME: ${{ matrix.agent-name }}
21  .github/workflows/pr-label.yml  vendored
@@ -3,10 +3,10 @@ name: "Pull Request auto-label"
on:
  # So that PRs touching the same files as the push are updated
  push:
    branches: [ master, development, release-* ]
    branches: [ master, release-* ]
    paths-ignore:
      - 'forge/tests/vcr_cassettes'
      - 'benchmark/reports/**'
      - 'tests/Auto-GPT-test-cassettes'
      - 'tests/challenges/current_score.json'
  # So that the `dirtyLabel` is removed if conflicts are resolve
  # We recommend `pull_request_target` so that github secrets are available.
  # In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -52,15 +52,6 @@ jobs:
          l_label: 'size/l'
          l_max_size: 500
          xl_label: 'size/xl'
          message_if_xl:

  scope:
    if: ${{ github.event_name == 'pull_request_target' }}
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v5
        with:
          sync-labels: true
          message_if_xl: >
            This PR exceeds the recommended size of 500 lines.
            Please make sure you are NOT addressing multiple issues with one PR.
151  .github/workflows/python-checks.yml  vendored
@@ -1,151 +0,0 @@
name: Python checks

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash

jobs:
  get-changed-parts:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - id: changes-in
        name: Determine affected subprojects
        uses: dorny/paths-filter@v3
        with:
          filters: |
            autogpt:
              - autogpt/autogpt/**
              - autogpt/tests/**
              - autogpt/poetry.lock
            forge:
              - forge/forge/**
              - forge/tests/**
              - forge/poetry.lock
            benchmark:
              - benchmark/agbenchmark/**
              - benchmark/tests/**
              - benchmark/poetry.lock
    outputs:
      changed-parts: ${{ steps.changes-in.outputs.changes }}

  lint:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C ${{ matrix.sub-package }} install

      # Lint

      - name: Lint (isort)
        run: poetry run isort --check .
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Black)
        if: success() || failure()
        run: poetry run black --check .
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Flake8)
        if: success() || failure()
        run: poetry run flake8 .
        working-directory: ${{ matrix.sub-package }}

  types:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C ${{ matrix.sub-package }} install

      # Typecheck

      - name: Typecheck
        if: success() || failure()
        run: poetry run pyright
        working-directory: ${{ matrix.sub-package }}
20  .github/workflows/repo-stats.yml  vendored
@@ -1,20 +0,0 @@
name: github-repo-stats

on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 23 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: github-repo-stats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        # Use latest release.
        uses: jgehrcke/github-repo-stats@HEAD
        with:
          ghtoken: ${{ secrets.ghrs_github_api_token }}
31  .github/workflows/workflow-checker.yml  vendored
@@ -1,31 +0,0 @@
name: PR Status Checker
on:
  pull_request:
    types: [opened, synchronize, reopened]

jobs:
  status-check:
    name: Check PR Status
    runs-on: ubuntu-latest
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
        # with:
        #   fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests
      - name: Check PR Status
        run: |
          echo "Current directory before running Python script:"
          pwd
          echo "Attempting to run Python script:"
          python check_actions_status.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
23  .gitignore  vendored
@@ -1,11 +1,12 @@
## Original ignores
.github_access_token
autogpt/keys.py
autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
@@ -27,11 +28,15 @@ __pycache__/

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
/plugins/
plugins_config.yaml
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
@@ -155,19 +160,3 @@ openai/

# news
CURRENT_BULLETIN.md

# AgBenchmark
agbenchmark/reports/

# Nodejs
package-lock.json


# Allow for locally private items
# private
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
rnd/autogpt_server/settings.py
7  .gitmodules  vendored
@@ -1,3 +1,4 @@
[submodule "forge/tests/vcr_cassettes"]
    path = forge/tests/vcr_cassettes
    url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "tests/Auto-GPT-test-cassettes"]
    path = tests/Auto-GPT-test-cassettes
    url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
    branch = master
10  .isort.cfg  Normal file
@@ -0,0 +1,10 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
@@ -1,6 +0,0 @@
[pr_reviewer]
num_code_suggestions=0

[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0
@@ -3,125 +3,40 @@ repos:
|
||||
rev: v4.4.0
|
||||
hooks:
|
||||
- id: check-added-large-files
|
||||
args: ["--maxkb=500"]
|
||||
- id: fix-byte-order-marker
|
||||
args: ['--maxkb=500']
|
||||
- id: check-byte-order-marker
|
||||
- id: check-case-conflict
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: debug-statements
|
||||
|
||||
- repo: local
|
||||
# isort needs the context of which packages are installed to function, so we
|
||||
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
|
||||
- repo: https://github.com/pycqa/isort
|
||||
rev: 5.12.0
|
||||
hooks:
|
||||
- id: isort-autogpt
|
||||
name: Lint (isort) - AutoGPT
|
||||
entry: poetry -C autogpt run isort
|
||||
files: ^autogpt/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort-forge
|
||||
name: Lint (isort) - Forge
|
||||
entry: poetry -C forge run isort
|
||||
files: ^forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort-benchmark
|
||||
name: Lint (isort) - Benchmark
|
||||
entry: poetry -C benchmark run isort
|
||||
files: ^benchmark/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 23.12.1
|
||||
# Black has sensible defaults, doesn't need package context, and ignores
|
||||
# everything in .gitignore, so it works fine without any config or arguments.
|
||||
hooks:
|
||||
- id: black
|
||||
name: Lint (Black)
|
||||
- id: isort
|
||||
language_version: python3.10
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
# To have flake8 load the config of the individual subprojects, we have to call
|
||||
# them separately.
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 23.3.0
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - AutoGPT
|
||||
alias: flake8-autogpt
|
||||
files: ^autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=autogpt/.flake8]
|
||||
- id: black
|
||||
language_version: python3.10
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Forge
|
||||
alias: flake8-forge
|
||||
files: ^forge/(forge|tests)/
|
||||
args: [--config=forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Benchmark
|
||||
alias: flake8-benchmark
|
||||
files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=benchmark/.flake8]
|
||||
|
||||
  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: 'v1.3.0'
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT
        alias: pyright-autogpt
        entry: poetry -C autogpt run pyright
        args: [-p, autogpt, autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Forge
        alias: pyright-forge
        entry: poetry -C forge run pyright
        args: [-p, forge, forge]
        files: ^forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Benchmark
        alias: pyright-benchmark
        entry: poetry -C benchmark run pyright
        args: [-p, benchmark, benchmark]
        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false
      - id: mypy

  - repo: local
    hooks:
      - id: pytest-autogpt
        name: Run tests - AutoGPT (excl. slow tests)
        entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest-forge
        name: Run tests - Forge (excl. slow tests)
        entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^forge/(forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

      - id: pytest-benchmark
        name: Run tests - Benchmark
        entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
      - id: autoflake
        name: autoflake
        entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
        language: python
        types: [ python ]
      - id: pytest-check
        name: pytest-check
        entry: pytest --cov=autogpt tests/unit
        language: system
        pass_filenames: false
        always_run: true
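These hook definitions are driven by pre-commit's standard CLI. A minimal sketch of exercising them locally, assuming `pre-commit` itself is installed (the hook aliases below come from the config above):

```sh
# install the git hook once, then run everything against the whole repo
pre-commit install
pre-commit run --all-files

# or run a single hook by its id/alias, e.g. only the AutoGPT flake8 pass
pre-commit run flake8-autogpt --all-files
```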
.vscode/all-projects.code-workspace (vendored, 61 lines removed)
@@ -1,61 +0,0 @@
{
  "folders": [
    {
      "name": "autogpt",
      "path": "../autogpt"
    },
    {
      "name": "benchmark",
      "path": "../benchmark"
    },
    {
      "name": "docs",
      "path": "../docs"
    },
    {
      "name": "forge",
      "path": "../forge"
    },
    {
      "name": "frontend",
      "path": "../frontend"
    },
    {
      "name": "autogpt_server",
      "path": "../rnd/autogpt_server"
    },
    {
      "name": "autogpt_builder",
      "path": "../rnd/autogpt_builder"
    },
    {
      "name": "market",
      "path": "../rnd/market"
    },
    {
      "name": "lib",
      "path": "../rnd/autogpt_libs"
    },
    {
      "name": "infra",
      "path": "../rnd/infra"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}
BULLETIN.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.

# v0.4.6 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release includes under-the-hood improvements and bug fixes, including better UTF-8
special character support, workspace write access for sandboxed Python execution,
more robust path resolution for config files and the workspace, and a full restructure
of the Agent class, the "brain" of Auto-GPT, to make it more extensible.

We have also released some documentation updates, including:

- *How to share your system logs*
  Visit [docs/share-your-logs.md] to learn how to share logs with us
  via a log analyzer graciously contributed by https://www.e2b.dev/

- *Auto-GPT re-architecture documentation*
  You can learn more about the inner-workings of the Auto-GPT re-architecture
  released last cycle, via these links:
  * [autogpt/core/README.md]
  * [autogpt/core/ARCHITECTURE_NOTES.md]

Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.
CITATION.cff (21 lines removed)
@@ -1,21 +0,0 @@
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: AutoGPT
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - name: Significant Gravitas
    website: 'https://agpt.co'
repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
url: 'https://agpt.co'
abstract: >-
  A collection of tools and experimental open-source attempts to make GPT-4 fully
  autonomous.
keywords:
  - AI
  - Agent
license: MIT
CLI-USAGE.md (182 lines removed)
@@ -1,182 +0,0 @@
## CLI Documentation

This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000.

### 1. Entry Point for the CLI

Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.

```sh
./run
```

**Output**:

```
Usage: cli.py [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  agent      Commands to create, start and stop agents
  benchmark  Commands to start the benchmark and list tests and categories
  setup      Installs dependencies needed for your system.
```

If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:

```sh
./run COMMAND --help
```

This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.

### 2. Setup Command

```sh
./run setup
```

**Output**:

```
Setup initiated
Installation has been completed.
```

This command initializes the setup of the project.
### 3. Agents Commands

**a. List All Agents**

```sh
./run agent list
```

**Output**:

```
Available agents: 🤖
        🐙 forge
        🐙 autogpt
```

Lists all the available agents.

**b. Create a New Agent**

```sh
./run agent create my_agent
```

**Output**:

```
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
```

Creates a new agent named 'my_agent'.

**c. Start an Agent**

```sh
./run agent start my_agent
```

**Output**:

```
... (ASCII Art representing the agent startup)
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
```

Starts the 'my_agent' and displays startup ASCII art and logs.

**d. Stop an Agent**

```sh
./run agent stop
```

**Output**:

```
Agent stopped
```

Stops the running agent.
### 4. Benchmark Commands

**a. List Benchmark Categories**

```sh
./run benchmark categories list
```

**Output**:

```
Available categories: 📚
  📖 code
  📖 safety
  📖 memory
  ... (and so on)
```

Lists all available benchmark categories.

**b. List Benchmark Tests**

```sh
./run benchmark tests list
```

**Output**:

```
Available tests: 📚
  📖 interface
    🔬 Search - TestSearch
    🔬 Write File - TestWriteFile
  ... (and so on)
```

Lists all available benchmark tests.

**c. Show Details of a Benchmark Test**

```sh
./run benchmark tests details TestWriteFile
```

**Output**:

```
TestWriteFile
-------------

Category: interface
Task: Write the word 'Washington' to a .txt file
... (and other details)
```

Displays the details of the 'TestWriteFile' benchmark test.

**d. Start Benchmark for the Agent**

```sh
./run benchmark start my_agent
```

**Output**:

```
(more details about the testing process shown whilst the tests are running)
============= 13 failed, 1 passed in 0.97s ============...
```

Displays the results of the benchmark tests on 'my_agent'.
@@ -1,12 +1,12 @@
# Code of Conduct for AutoGPT
# Code of Conduct for Auto-GPT

## 1. Purpose

The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.

## 2. Scope

This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.

## 3. Our Standards

@@ -36,5 +36,4 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont

## 6. Contact

If you have any questions or concerns, please contact the project maintainers on Discord:
https://discord.gg/autogpt
If you have any questions or concerns, please contact the project maintainers.
@@ -1,38 +1,14 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)

Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?

[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
However the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
   [todo's][roadmap]!
   * We highly recommend posting your idea and discussing it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines]
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
   preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
   [contribution guide] for options.
If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.

[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines
If you wish to get involved with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.

If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).

In fact, why not just look through the whole wiki (it's only a few pages) and
hop on our Discord. See you there! :-)
In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).

❤️ & 🔆
The team @ AutoGPT
https://discord.gg/autogpt
The team @ Auto-GPT
Dockerfile (new file, 46 lines)
@@ -0,0 +1,46 @@
# 'dev' or 'release' container build
ARG BUILD_TYPE=dev

# Use an official Python base image from the Docker Hub
FROM python:3.10-slim AS autogpt-base

# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver firefox-esr ca-certificates \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install utilities
RUN apt-get update && apt-get install -y \
    curl jq wget git \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# Install the required python packages globally
ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY . ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
    pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS auto-gpt
@@ -1,61 +0,0 @@
# 'dev' or 'release' container build
ARG BUILD_TYPE=dev

# Use an official Python base image from the Docker Hub
FROM python:3.10-slim AS autogpt-base

# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver ca-certificates gcc \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install utilities
RUN apt-get update && apt-get install -y \
    curl jq wget git \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    POETRY_HOME="/opt/poetry" \
    POETRY_VIRTUALENVS_PATH="/venv" \
    POETRY_VIRTUALENVS_IN_PROJECT=0 \
    POETRY_NO_INTERACTION=1

# Install and configure Poetry
RUN curl -sSL https://install.python-poetry.org | python3 -
ENV PATH="$POETRY_HOME/bin:$PATH"
RUN poetry config installer.max-workers 10

WORKDIR /app/autogpt
COPY autogpt/pyproject.toml autogpt/poetry.lock ./

# Include forge so it can be used as a path dependency
COPY forge/ ../forge

# Include frontend
COPY frontend/ ../frontend

# Set the entrypoint
ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN poetry install --no-cache --no-root \
    && rm -rf $(poetry env info --path)/src
ONBUILD COPY autogpt/ ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN poetry install --no-cache --no-root --without dev \
    && rm -rf $(poetry env info --path)/src
ONBUILD COPY autogpt/autogpt/ ./autogpt
ONBUILD COPY autogpt/scripts/ ./scripts
ONBUILD COPY autogpt/plugins/ ./plugins
ONBUILD COPY autogpt/README.md ./README.md
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS autogpt
RUN poetry install --only-root
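For context on the two final stages in these Dockerfiles: the `BUILD_TYPE` build argument selects the dev or release stage at build time. A hypothetical invocation (the image tags are illustrative, not from the repo):

```sh
# default dev image
docker build -t autogpt-dev .

# slimmer release image, skipping dev dependencies
docker build --build-arg BUILD_TYPE=release -t autogpt .
```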
@@ -1,173 +0,0 @@
# Quickstart Guide

> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here

Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.

## System Requirements

This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).

## Getting Setup
1. **Fork the Repository**
   To fork the repository, follow these steps:
   - Navigate to the main page of the repository.

   ![]()
   - In the top-right corner of the page, click Fork.

   ![]()
   - On the next page, select your GitHub account to create the fork.
   - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.

2. **Clone the Repository**
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   - Open your terminal.
   - Navigate to the directory where you want to clone the repository.
   - Run the git clone command for the fork you just created.

   ![]()

   - Then open your project in your IDE.

   ![]()

3. **Setup the Project**
   Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
   It can be accessed by running the `run` command by typing `./run` in the terminal.

   The first command you need to use is `./run setup`. This will guide you through setting up your system.
   Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:

   ![]()

### For Windows Users

If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
#### Update WSL
Run the following command in Powershell or Command Prompt to:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
4. Download and install the Ubuntu Linux distribution (a reboot may be required).

```shell
wsl --install
```

For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).

#### Resolve FileNotFoundError or "No such file or directory" Errors
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).

To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here's how to install and run `dos2unix` on the script:

```shell
sudo apt update
sudo apt install dos2unix
dos2unix ./run
```

After executing the above commands, running `./run setup` should work successfully.

#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.

You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:

![]()
## Creating Your Agent

After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.

Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose

Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`

![]()

## Running your Agent

Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`

This starts the agent on the URL: `http://localhost:8000/`

![]()

The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.

![]()

Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.

![]()

When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.

If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
## Benchmarking your Agent

The benchmarking system can also be accessed using the CLI:

```bash
agpt % ./run benchmark
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...

  Commands to start the benchmark and list tests and categories

Options:
  --help  Show this message and exit.

Commands:
  categories  Benchmark categories group command
  start       Starts the benchmark command
  tests       Benchmark tests group command

agpt % ./run benchmark categories
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...

  Benchmark categories group command

Options:
  --help  Show this message and exit.

Commands:
  list  List benchmark categories command

agpt % ./run benchmark tests
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...

  Benchmark tests group command

Options:
  --help  Show this message and exit.

Commands:
  details  Benchmark test details command
  list     List benchmark tests command
```

The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
```bash
./run benchmark categories list
# And what tests are available with
./run benchmark tests list
```

![]()

Finally, you can run the benchmark with

```bash
./run benchmark start YOUR_AGENT_NAME
```
SECURITY.md (66 lines removed)
@@ -1,66 +0,0 @@
# Security Policy

- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
  - [Restrict Workspace](#restrict-workspace)
  - [Untrusted inputs](#untrusted-inputs)
  - [Data privacy](#data-privacy)
  - [Untrusted environments or networks](#untrusted-environments-or-networks)
  - [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)

## Using AutoGPT Securely

### Restrict Workspace

Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.

Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
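For reference, this setting lives in your `.env` file; shown here with its default value, matching the template that appears later in this diff:

```sh
# .env -- keep agent file operations confined to the workspace
RESTRICT_TO_WORKSPACE=True
```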
### Untrusted inputs

When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.

For maximum security when handling untrusted inputs, you may need to employ the following (a minimal sanitization sketch follows this list):

* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
  * Validation: Enforce strict rules on allowed characters and data types.
  * Filtering: Remove potentially malicious scripts or code fragments.
  * Encoding: Convert special characters into safe representations.
  * Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
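To make the validate/filter/encode steps concrete, here is a minimal, illustrative Python sketch. The regexes, length limit, and function name are assumptions for illustration, not AutoGPT's actual implementation:

```python
import html
import re

# validation: a strict whitelist of characters we accept at all (illustrative)
ALLOWED = re.compile(r"^[\w\s.,:;!?@()'\"/<>&\-]*$")
# filtering: crude removal of embedded script fragments (illustrative)
SCRIPT_FRAGMENTS = re.compile(r"<\s*script\b.*?>.*?<\s*/\s*script\s*>", re.I | re.S)

def sanitize(raw: str, max_len: int = 2000) -> str:
    """Validate, filter, and encode one untrusted input string."""
    text = raw[:max_len]                   # bound the input size
    if not ALLOWED.match(text):
        raise ValueError("input contains disallowed characters")
    text = SCRIPT_FRAGMENTS.sub("", text)  # drop potentially malicious scripts
    return html.escape(text)               # convert specials to safe representations

if __name__ == "__main__":
    print(sanitize('Summarize this: <script>alert(1)</script> "hello"'))
    # -> Summarize this:  &quot;hello&quot;
```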
### Data privacy

To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.

### Untrusted environments or networks

Since AutoGPT performs network calls to the OpenAI API, it is important to always run it within trusted environments and networks. Running it in untrusted environments can expose your API key to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.

However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.

### Multi-Tenant environments

If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.

The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.

- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant's identity.

- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.

- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.

- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.

## Reporting a Vulnerability

Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.

However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).

A team of volunteers maintains this project on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.
@@ -1,23 +0,0 @@
This page is a list of issues you could encounter along with their fixes.

# Forge
**Poetry configuration invalid**

The poetry configuration is invalid:
- Additional properties are not allowed ('group' was unexpected)
<img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">

*Solution*

Update Poetry.

**Pydantic Validation Error**

Remove your SQLite agent.db file. It's probably because some of your data is not complying with the new spec (we will create migrations soon to avoid this problem).

# Benchmark
TODO

# Frontend
TODO
(Two binary image files were removed in this diff; previews not shown. Before: 1.1 MiB and 49 KiB.)
@@ -1,179 +0,0 @@
################################################################################
### AutoGPT - GENERAL SETTINGS
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# OPENAI_API_KEY=

## ANTHROPIC_API_KEY - Anthropic API Key (Example: sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# ANTHROPIC_API_KEY=

## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# GROQ_API_KEY=

## LLAMAFILE_API_BASE - Llamafile API base URL
# LLAMAFILE_API_BASE=http://localhost:8080/v1

## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true

## COMPONENT_CONFIG_FILE - Path to the json config file (Default: None)
# COMPONENT_CONFIG_FILE=

### Workspace ###

## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
# RESTRICT_TO_WORKSPACE=True

## DISABLED_COMMANDS - The comma separated list of commands that are disabled (Default: None)
# DISABLED_COMMANDS=

## FILE_STORAGE_BACKEND - Choose a storage backend for contents
## Options: local, gcs, s3
# FILE_STORAGE_BACKEND=local

## STORAGE_BUCKET - GCS/S3 Bucket to store contents in
# STORAGE_BUCKET=autogpt

## GCS Credentials
# see https://cloud.google.com/storage/docs/authentication#libauth

## AWS/S3 Credentials
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html

## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here.
# S3_ENDPOINT_URL=

### Miscellaneous ###

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

## EXIT_KEY - Key to exit AutoGPT
# EXIT_KEY=n
################################################################################
### LLM PROVIDER
################################################################################

## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

# OPENAI_API_TYPE=
# OPENAI_API_VERSION=

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## Note: this feature is only supported by OpenAI's newer models.
# OPENAI_FUNCTIONS=False

## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=

## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml

# AZURE_OPENAI_AD_TOKEN=
# AZURE_OPENAI_ENDPOINT=

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4-turbo)
# SMART_LLM=gpt-4-turbo

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Huggingface (IMAGE_PROVIDER=huggingface)

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
# SD_WEBUI_AUTH=
################################################################################
### GITHUB
################################################################################

## GITHUB_API_KEY - Github API key / PAT (Default: None)
# GITHUB_API_KEY=

## GITHUB_USERNAME - Github username (Default: None)
# GITHUB_USERNAME=

################################################################################
### WEB BROWSING
################################################################################

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=

################################################################################
### TEXT TO SPEECH PROVIDER
################################################################################

## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
## Options: gtts, streamelements, elevenlabs, macos
# TEXT_TO_SPEECH_PROVIDER=gtts

## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian

## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=

## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=

################################################################################
### LOGGING
################################################################################

## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly.
## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
# LOG_LEVEL=INFO

## LOG_FORMAT - The format in which to log messages to the console (and log files).
## Options: simple, debug, structured_google_cloud
# LOG_FORMAT=simple

## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately.
## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud.
# LOG_FILE_FORMAT=simple

## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False)
# PLAIN_OUTPUT=False

################################################################################
### Agent Protocol Server Settings
################################################################################
## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000)
## AP_SERVER_DB_URL - Specifies what connection url the agent protocol database will connect to (Default: Internal SQLite)
## AP_SERVER_CORS_ALLOWED_ORIGINS - Comma separated list of allowed origins for CORS. (Default: http://localhost:{AP_SERVER_PORT})
# AP_SERVER_PORT=8000
# AP_SERVER_DB_URL=sqlite:///data/ap_server.db
# AP_SERVER_CORS_ALLOWED_ORIGINS=
@@ -1,14 +0,0 @@
[flake8]
max-line-length = 88
# Ignore rules that conflict with Black code style
extend-ignore = E203, W503
exclude =
    .git,
    __pycache__/,
    *.pyc,
    .pytest_cache/,
    venv*/,
    .venv/,
    data/,
    logs/,
    tests/unit/data/,
autogpt/.gitignore (vendored, 167 lines removed)
@@ -1,167 +0,0 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
*.mpeg
.env
azure.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
/logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT
data/*

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
/plugins/*
plugins_config.yaml
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/
site/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.direnv/
.env
.venv
env/
venv*/
ENV/
env.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
llama-*
vicuna-*

# mac
.DS_Store

openai/

# news
CURRENT_BULLETIN.md

# Nodejs
package-lock.json
package.json

# Keep
!.keep
autogpt/.vscode/settings.json (vendored, 3 lines removed)
@@ -1,3 +0,0 @@
{
  "python.analysis.typeCheckingMode": "basic",
}
@@ -1,13 +0,0 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co/autogpt.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing.

# v0.5.0 RELEASE HIGHLIGHTS! 🚀🚀
# -------------------------------
Cloud-readiness, a new UI, support for the newest Agent Protocol version, and much more:
*v0.5.0 is our biggest release yet!*

Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/AutoGPT/releases.
@@ -1,160 +0,0 @@
# AutoGPT: An Autonomous GPT-4 Experiment

[📖 **Documentation**][docs] | [🚀 **Contributing**](../../CONTRIBUTING.md)

AutoGPT is an experimental open-source application showcasing the capabilities of modern Large Language Models. This program, driven by GPT-4, chains together LLM "thoughts" to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, AutoGPT pushes the boundaries of what is possible with AI.

<h2 align="center"> Demo April 16th 2023 </h2>

https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4

Demo made by <a href=https://twitter.com/BlakeWerlinger>Blake Werlinger</a>

## 🚀 Features

- 🔌 Agent Protocol ([docs](https://agentprotocol.ai))
- 💻 Easy to use UI
- 🌐 Internet access for searches and information gathering
- 🧠 Powered by a mix of GPT-4 and GPT-3.5 Turbo
- 🔗 Access to popular websites and platforms
- 🗃️ File generation and editing capabilities
- 🔌 Extensibility with Plugins
<!-- - 💾 Long-term and short-term memory management -->

## Setting up AutoGPT
1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
2. Copy `.env.template` to `.env` and set `OPENAI_API_KEY`
3. Make sure you have Poetry [installed](https://python-poetry.org/docs/#installation)

For more ways to run AutoGPT, more detailed instructions, and more configuration options,
see the [setup guide][docs/setup].
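Sketched as shell commands, assuming the Poetry-based layout this README describes (running `poetry install` from the project directory is an assumption, not quoted from the guide):

```shell
cp .env.template .env
# edit .env and set: OPENAI_API_KEY=<your key>
poetry install
```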
## Running AutoGPT
The CLI should be self-documenting:
```shell
$ ./autogpt.sh --help
Usage: python -m autogpt [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  run    Sets up and runs an agent, based on the task specified by the...
  serve  Starts an Agent Protocol compliant AutoGPT server, which creates...
```
When run without a sub-command, it will default to `run` for legacy reasons.

<details>
<summary>
<code>$ ./autogpt.sh run --help</code>
</summary>

The `run` sub-command starts AutoGPT with the legacy CLI interface:

```shell
$ ./autogpt.sh run --help
Usage: python -m autogpt run [OPTIONS]

  Sets up and runs an agent, based on the task specified by the user, or
  resumes an existing agent.

Options:
  -c, --continuous                Enable Continuous Mode
  -y, --skip-reprompt             Skips the re-prompting messages at the
                                  beginning of the script
  -l, --continuous-limit INTEGER  Defines the number of times to run in
                                  continuous mode
  --speak                         Enable Speak Mode
  --debug                         Enable Debug Mode
  --skip-news                     Specifies whether to suppress the output of
                                  latest news on startup.
  --install-plugin-deps           Installs external dependencies for 3rd party
                                  plugins.
  --ai-name TEXT                  AI name override
  --ai-role TEXT                  AI role override
  --constraint TEXT               Add or override AI constraints to include in
                                  the prompt; may be used multiple times to
                                  pass multiple constraints
  --resource TEXT                 Add or override AI resources to include in
                                  the prompt; may be used multiple times to
                                  pass multiple resources
  --best-practice TEXT            Add or override AI best practices to include
                                  in the prompt; may be used multiple times to
                                  pass multiple best practices
  --override-directives           If specified, --constraint, --resource and
                                  --best-practice will override the AI's
                                  directives instead of being appended to them
  --component-config-file TEXT    Path to the json configuration file.
  --help                          Show this message and exit.
```
</details>
<details>
<summary>
<code>$ ./autogpt.sh serve --help</code>
</summary>

The `serve` sub-command starts AutoGPT wrapped in an Agent Protocol server:

```shell
$ ./autogpt.sh serve --help
Usage: python -m autogpt serve [OPTIONS]

  Starts an Agent Protocol compliant AutoGPT server, which creates a custom
  agent for every task.

Options:
  --debug                Enable Debug Mode
  --install-plugin-deps  Installs external dependencies for 3rd party
                         plugins.
  --help                 Show this message and exit.
```
</details>

With `serve`, the application exposes an Agent Protocol compliant API and serves a frontend,
by default on `http://localhost:8000`.

For more comprehensive instructions, see the [user guide][docs/usage].

[docs]: https://docs.agpt.co/autogpt
[docs/setup]: https://docs.agpt.co/autogpt/setup
[docs/usage]: https://docs.agpt.co/autogpt/usage
[docs/plugins]: https://docs.agpt.co/autogpt/plugins

## 📚 Resources
* 📔 AutoGPT [project wiki](https://github.com/Significant-Gravitas/AutoGPT/wiki)
* 🧮 AutoGPT [project kanban](https://github.com/orgs/Significant-Gravitas/projects/1)
* 🌃 AutoGPT [roadmap](https://github.com/orgs/Significant-Gravitas/projects/2)

## ⚠️ Limitations

This experiment aims to showcase the potential of GPT-4 but comes with some limitations:

1. Not a polished application or product, just an experiment
2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
3. Quite expensive to run, so set and monitor your API key limits with OpenAI!

## 🛡 Disclaimer

This project, AutoGPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.

The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by AutoGPT.

**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.

As an autonomous experiment, AutoGPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.

By using AutoGPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.

---

In Q2 of 2023, AutoGPT became the fastest growing open-source project in history. Now that the dust has settled, we're committed to continued sustainable development and growth of the project.

<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
<img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
</a>
</p>
@@ -2,6 +2,13 @@ import os
import random
import sys

from dotenv import load_dotenv

if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
    print("Setting random seed to 42")
    random.seed(42)

# Load the users .env file into environment variables
load_dotenv(verbose=True, override=True)

del load_dotenv
autogpt/__main__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.app.cli

if __name__ == "__main__":
    autogpt.app.cli.main()
autogpt/agbenchmark_config/.gitignore (vendored, 3 lines removed)
@@ -1,3 +0,0 @@
logs/
reports/
temp_folder/
@@ -1,143 +0,0 @@
#!/usr/bin/env python3

import json
import logging
import re
import sys
from collections import defaultdict
from pathlib import Path

from tabulate import tabulate

info = "-v" in sys.argv
debug = "-vv" in sys.argv
granular = "--granular" in sys.argv

logging.basicConfig(
    level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING
)
logger = logging.getLogger(__name__)

# Get a list of all JSON files in the directory
report_files = [
    report_file
    for dir in (Path(__file__).parent / "reports").iterdir()
    if re.match(r"^\d{8}T\d{6}_", dir.name)
    and (report_file := dir / "report.json").is_file()
]

labels = list[str]()
runs_per_label = defaultdict[str, int](lambda: 0)
suite_names = list[str]()
test_names = list[str]()

# Create a dictionary to store grouped success values by suffix and test
grouped_success_values = defaultdict[str, list[str]](list[str])

# Loop through each JSON file to collect suffixes and success values
for report_file in sorted(report_files):
    with open(report_file) as f:
        logger.info(f"Loading {report_file}...")

        data = json.load(f)
        if "tests" in data:
            test_tree = data["tests"]
            label = data["agent_git_commit_sha"].rsplit("/", 1)[1][:7]  # commit hash
        else:
            # Benchmark run still in progress
            test_tree = data
            label = report_file.parent.name.split("_", 1)[1]
            logger.info(f"Run '{label}' seems to be in progress")

    runs_per_label[label] += 1
    def process_test(test_name: str, test_data: dict):
        result_group = grouped_success_values[f"{label}|{test_name}"]

        if "tests" in test_data:
            logger.debug(f"{test_name} is a test suite")

            # Test suite
            suite_attempted = any(
                test["metrics"]["attempted"] for test in test_data["tests"].values()
            )
            logger.debug(f"suite_attempted: {suite_attempted}")
            if not suite_attempted:
                return

            if test_name not in test_names:
                test_names.append(test_name)

            if test_data["metrics"]["percentage"] == 0:
                result_indicator = "❌"
            else:
                highest_difficulty = test_data["metrics"]["highest_difficulty"]
                result_indicator = {
                    "interface": "🔌",
                    "novice": "🌑",
                    "basic": "🌒",
                    "intermediate": "🌓",
                    "advanced": "🌔",
                    "hard": "🌕",
                }[highest_difficulty]

            logger.debug(f"result group: {result_group}")
            logger.debug(f"runs_per_label: {runs_per_label[label]}")
            if len(result_group) + 1 < runs_per_label[label]:
                result_group.extend(
                    ["❔"] * (runs_per_label[label] - len(result_group) - 1)
                )
            result_group.append(result_indicator)
            logger.debug(f"result group (after): {result_group}")

            if granular:
                for test_name, test in test_data["tests"].items():
                    process_test(test_name, test)
            return

        test_metrics = test_data["metrics"]
        result_indicator = "❔"

        if "attempted" not in test_metrics:
            return
        elif test_metrics["attempted"]:
            if test_name not in test_names:
                test_names.append(test_name)

            success_value = test_metrics["success"]
            result_indicator = {True: "✅", False: "❌"}[success_value]

        if len(result_group) + 1 < runs_per_label[label]:
            result_group.extend(
                [" "] * (runs_per_label[label] - len(result_group) - 1)
            )
        result_group.append(result_indicator)

    for test_name, suite in test_tree.items():
        try:
            process_test(test_name, suite)
        except KeyError:
            print(f"{test_name}.metrics: {suite['metrics']}")
            raise

    if label not in labels:
        labels.append(label)

# Create headers
headers = ["Test Name"] + list(labels)

# Prepare data for tabulation
table_data = list[list[str]]()
for test_name in test_names:
    row = [test_name]
    for label in labels:
        results = grouped_success_values.get(f"{label}|{test_name}", ["❔"])
        if len(results) < runs_per_label[label]:
            results.extend(["❔"] * (runs_per_label[label] - len(results)))
        if len(results) > 1 and all(r == "❔" for r in results):
            results.clear()
        row.append(" ".join(results))
    table_data.append(row)

# Print tabulated data
print(tabulate(table_data, headers=headers, tablefmt="grid"))
||||
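For a sense of the output, here is a minimal, self-contained sketch of the grid this script prints; the run labels and result strings below are made up, not taken from a real benchmark run:

from tabulate import tabulate

headers = ["Test Name", "ec47a31", "run_2"]
table_data = [
    ["TestWriteFile", "✅ ✅", "✅ ❌"],
    ["TestSearch", "❌ ❔", "🌒"],
]
print(tabulate(table_data, headers=headers, tablefmt="grid"))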
@@ -1,8 +0,0 @@
{
  "workspace": {
    "input": "agbenchmark_config/workspace",
    "output": "agbenchmark_config/workspace"
  },
  "entry_path": "agbenchmark.benchmarks",
  "host": "http://localhost:8000"
}
4 autogpt/agents/__init__.py Normal file
@@ -0,0 +1,4 @@
from .agent import Agent
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName

__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]
298 autogpt/agents/agent.py Normal file
@@ -0,0 +1,298 @@
from __future__ import annotations

import json
import time
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config
    from autogpt.llm.base import ChatModelResponse, ChatSequence
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.workspace import Workspace

from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName


class Agent(BaseAgent):
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        memory: VectorMemory,
        triggering_prompt: str,
        config: Config,
        cycle_budget: Optional[int] = None,
    ):
        super().__init__(
            ai_config=ai_config,
            command_registry=command_registry,
            config=config,
            default_cycle_instruction=triggering_prompt,
            cycle_budget=cycle_budget,
        )

        self.memory = memory
        """VectorMemoryProvider used to manage the agent's context (TODO)"""

        self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
        """Workspace that the agent has access to, e.g. for reading/writing files."""

        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        """Timestamp the agent was created; only used for structured debug logging."""

        self.log_cycle_handler = LogCycleHandler()
        """LogCycleHandler for structured debug logging."""

    def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
        if kwargs.get("prepend_messages") is None:
            kwargs["prepend_messages"] = []

        # Clock
        kwargs["prepend_messages"].append(
            Message("system", f"The current time and date is {time.strftime('%c')}"),
        )

        # Add budget information (if any) to prompt
        api_manager = ApiManager()
        if api_manager.get_total_budget() > 0.0:
            remaining_budget = (
                api_manager.get_total_budget() - api_manager.get_total_cost()
            )
            if remaining_budget < 0:
                remaining_budget = 0

            budget_msg = Message(
                "system",
                f"Your remaining API budget is ${remaining_budget:.3f}"
                + (
                    " BUDGET EXCEEDED! SHUT DOWN!\n\n"
                    if remaining_budget == 0
                    else " Budget very nearly exceeded! Shut down gracefully!\n\n"
                    if remaining_budget < 0.005
                    else " Budget nearly exceeded. Finish up.\n\n"
                    if remaining_budget < 0.01
                    else ""
                ),
            )
            logger.debug(budget_msg)

            if kwargs.get("append_messages") is None:
                kwargs["append_messages"] = []
            kwargs["append_messages"].append(budget_msg)

        return super().construct_base_prompt(*args, **kwargs)

    def on_before_think(self, *args, **kwargs) -> ChatSequence:
        prompt = super().on_before_think(*args, **kwargs)

        self.log_cycle_handler.log_count_within_cycle = 0
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            self.history.raw(),
            FULL_MESSAGE_HISTORY_FILE_NAME,
        )
        return prompt

    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        # Execute command
        if command_name is not None and command_name.lower().startswith("error"):
            result = f"Could not execute command: {command_name}{command_args}"
        elif command_name == "human_feedback":
            result = f"Human feedback: {user_input}"
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.created_at,
                self.cycle_count,
                user_input,
                USER_INPUT_FILE_NAME,
            )

        else:
            for plugin in self.config.plugins:
                if not plugin.can_handle_pre_command():
                    continue
                # Plugins may rewrite both the command and its arguments;
                # keep the updated arguments in command_args so the call below uses them.
                command_name, command_args = plugin.pre_command(
                    command_name, command_args
                )
            command_result = execute_command(
                command_name=command_name,
                arguments=command_args,
                agent=self,
            )
            result = f"Command {command_name} returned: {command_result}"

            result_tlength = count_string_tokens(str(command_result), self.llm.name)
            memory_tlength = count_string_tokens(
                str(self.history.summary_message()), self.llm.name
            )
            if result_tlength + memory_tlength > self.send_token_limit:
                result = (
                    f"Failure: command {command_name} returned too much output. "
                    "Do not execute this command again with the same arguments."
                )

            for plugin in self.config.plugins:
                if not plugin.can_handle_post_command():
                    continue
                result = plugin.post_command(command_name, result)
        # If the command produced a result, append it to the message history
        if result is None:
            self.history.add("system", "Unable to execute command", "action_result")
        else:
            self.history.add("system", result, "action_result")

        return result

    def parse_and_process_response(
        self, llm_response: ChatModelResponse, *args, **kwargs
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        if not llm_response.content:
            raise SyntaxError("Assistant response has no text content")

        assistant_reply_dict = extract_dict_from_response(llm_response.content)

        valid, errors = validate_dict(assistant_reply_dict, self.config)
        if not valid:
            raise SyntaxError(
                "Validation of response failed:\n  "
                + ";\n  ".join([str(e) for e in errors])
            )

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_planning():
                continue
            assistant_reply_dict = plugin.post_planning(assistant_reply_dict)

        response = None, None, assistant_reply_dict

        # Print Assistant thoughts
        if assistant_reply_dict != {}:
            # Get command name and arguments
            try:
                command_name, arguments = extract_command(
                    assistant_reply_dict, llm_response, self.config
                )
                response = command_name, arguments, assistant_reply_dict
            except Exception as e:
                logger.error("Error: \n", str(e))

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            assistant_reply_dict,
            NEXT_ACTION_FILE_NAME,
        )
        return response


def extract_command(
    assistant_reply_json: dict, assistant_reply: ChatModelResponse, config: Config
) -> tuple[str, dict[str, str]]:
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    if config.openai_functions:
        if assistant_reply.function_call is None:
            return "Error:", {"message": "No 'function_call' in assistant reply"}
        assistant_reply_json["command"] = {
            "name": assistant_reply.function_call.name,
            "args": json.loads(assistant_reply.function_call.arguments),
        }
    try:
        # Check the type first: the membership test below could raise a TypeError
        # on a non-container reply if performed the other way around.
        if not isinstance(assistant_reply_json, dict):
            return (
                "Error:",
                {
                    "message": f"The previous message sent was not a dictionary {assistant_reply_json}"
                },
            )

        if "command" not in assistant_reply_json:
            return "Error:", {"message": "Missing 'command' object in JSON"}

        command = assistant_reply_json["command"]
        if not isinstance(command, dict):
            return "Error:", {"message": "'command' object is not a dictionary"}

        if "name" not in command:
            return "Error:", {"message": "Missing 'name' field in 'command' object"}

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", {"message": "Invalid JSON"}
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", {"message": str(e)}


def execute_command(
    command_name: str,
    arguments: dict[str, str],
    agent: Agent,
) -> Any:
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        agent (Agent): The agent that is executing the command

    Returns:
        str: The result of the command
    """
    try:
        # Execute a native command with the same name or alias, if it exists
        if command := agent.command_registry.get_command(command_name):
            return command(**arguments, agent=agent)

        # Handle non-native commands (e.g. from plugins)
        for command in agent.ai_config.prompt_generator.commands:
            if (
                command_name == command["label"].lower()
                or command_name == command["name"].lower()
            ):
                return command["function"](**arguments)

        raise RuntimeError(
            f"Cannot execute '{command_name}': unknown command."
            " Do not try to use this command again."
        )
    except Exception as e:
        return f"Error: {str(e)}"
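As a quick illustration of the reply shape extract_command() consumes, here is a hedged sketch; the command name and arguments are hypothetical, and the authoritative schema lives elsewhere in the repository:

# Illustrative assistant reply, shaped like the JSON this module expects.
assistant_reply = {
    "thoughts": {
        "text": "I should write the answer to a file.",
        "reasoning": "...",
        "plan": "- write the file\n- verify its contents",
        "criticism": "...",
        "speak": "Writing the file now.",
    },
    "command": {
        "name": "write_to_file",  # hypothetical command name
        "args": {"filename": "output.txt", "text": "hello"},
    },
}
# extract_command(assistant_reply, llm_response, config) would return:
#   ("write_to_file", {"filename": "output.txt", "text": "hello"})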
318 autogpt/agents/base.py Normal file
@@ -0,0 +1,318 @@
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config

    from autogpt.models.command_registry import CommandRegistry

from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.memory.message_history import MessageHistory
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

CommandName = str
CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]


class BaseAgent(metaclass=ABCMeta):
    """Base class for all Auto-GPT agents."""

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        config: Config,
        big_brain: bool = True,
        default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT,
        cycle_budget: Optional[int] = 1,
        send_token_limit: Optional[int] = None,
        summary_max_tlength: Optional[int] = None,
    ):
        self.ai_config = ai_config
        """The AIConfig or "personality" object associated with this agent."""

        self.command_registry = command_registry
        """The registry containing all commands available to the agent."""

        self.config = config
        """The applicable application configuration."""

        self.big_brain = big_brain
        """
        Whether this agent uses the configured smart LLM (default) to think,
        as opposed to the configured fast LLM.
        """

        self.default_cycle_instruction = default_cycle_instruction
        """The default instruction passed to the AI for a thinking cycle."""

        self.cycle_budget = cycle_budget
        """
        The number of cycles that the agent is allowed to run unsupervised.

        `None` for unlimited continuous execution,
        `1` to require user approval for every step,
        `0` to stop the agent.
        """

        self.cycles_remaining = cycle_budget
        """The number of cycles remaining within the `cycle_budget`."""

        self.cycle_count = 0
        """The number of cycles that the agent has run since its initialization."""

        self.system_prompt = ai_config.construct_full_prompt(config)
        """
        The system prompt sets up the AI's personality and explains its goals,
        available resources, and restrictions.
        """

        llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
        self.llm = OPEN_AI_CHAT_MODELS[llm_name]
        """The LLM that the agent uses to think."""

        self.send_token_limit = send_token_limit or self.llm.max_tokens * 3 // 4
        """
        The token limit for prompt construction. Should leave room for the completion;
        defaults to 75% of `llm.max_tokens`.
        """

        self.history = MessageHistory(
            self.llm,
            max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
        )

    def think(
        self,
        instruction: Optional[str] = None,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Runs the agent for one cycle.

        Params:
            instruction: The instruction to put at the end of the prompt.

        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """

        instruction = instruction or self.default_cycle_instruction

        prompt: ChatSequence = self.construct_prompt(instruction)
        prompt = self.on_before_think(prompt, instruction)

        raw_response = create_chat_completion(
            prompt,
            self.config,
            functions=get_openai_command_specs(self.command_registry)
            if self.config.openai_functions
            else None,
        )
        self.cycle_count += 1

        return self.on_response(raw_response, prompt, instruction)

    @abstractmethod
    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        """Executes the given command, if any, and returns the agent's response.

        Params:
            command_name: The name of the command to execute, if any.
            command_args: The arguments to pass to the command, if any.
            user_input: The user's input, if any.

        Returns:
            The results of the command.
        """
        ...

    def construct_base_prompt(
        self,
        prepend_messages: list[Message] = [],
        append_messages: list[Message] = [],
        reserve_tokens: int = 0,
    ) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. `prepend_messages`
        3. Message history of the agent, truncated & prepended with running summary as needed
        4. `append_messages`

        Params:
            prepend_messages: Messages to insert between the system prompt and message history
            append_messages: Messages to insert after the message history
            reserve_tokens: Number of tokens to reserve for content that is added later
        """

        prompt = ChatSequence.for_model(
            self.llm.name,
            [Message("system", self.system_prompt)] + prepend_messages,
        )

        # Reserve tokens for messages to be appended later, if any
        reserve_tokens += self.history.max_summary_tlength
        if append_messages:
            reserve_tokens += count_message_tokens(append_messages, self.llm.name)

        # Fill message history, up to a margin of reserved_tokens.
        # Trim remaining historical messages and add them to the running summary.
        history_start_index = len(prompt)
        trimmed_history = add_history_upto_token_limit(
            prompt, self.history, self.send_token_limit - reserve_tokens
        )
        if trimmed_history:
            new_summary_msg, _ = self.history.trim_messages(list(prompt), self.config)
            prompt.insert(history_start_index, new_summary_msg)

        if append_messages:
            prompt.extend(append_messages)

        return prompt

    def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history of the agent, truncated & prepended with running summary as needed
        3. `cycle_instruction`

        Params:
            cycle_instruction: The final instruction for a thinking cycle
        """

        if not cycle_instruction:
            raise ValueError("No instruction given")

        cycle_instruction_msg = Message("user", cycle_instruction)
        cycle_instruction_tlength = count_message_tokens(
            cycle_instruction_msg, self.llm.name
        )
        prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)

        # ADD user input message ("triggering prompt")
        prompt.append(cycle_instruction_msg)

        return prompt

    def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
        """Called after constructing the prompt but before executing it.

        Calls the `on_planning` hook of any enabled and capable plugins, adding their
        output to the prompt.

        Params:
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The prompt to execute
        """
        current_tokens_used = prompt.token_length
        plugin_count = len(self.config.plugins)
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_planning():
                continue
            plugin_response = plugin.on_planning(
                self.ai_config.prompt_generator, prompt.raw()
            )
            if not plugin_response or plugin_response == "":
                continue
            message_to_add = Message("system", plugin_response)
            tokens_to_add = count_message_tokens(message_to_add, self.llm.name)
            if current_tokens_used + tokens_to_add > self.send_token_limit:
                logger.debug(f"Plugin response too long, skipping: {plugin_response}")
                logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
                break
            prompt.insert(
                -1, message_to_add
            )  # HACK: assumes cycle instruction to be at the end
            current_tokens_used += tokens_to_add
        return prompt

    def on_response(
        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Called upon receiving a response from the chat model.

        Adds the last/newest message in the prompt and the response to `history`,
        and calls `self.parse_and_process_response()` to do the rest.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """

        # Save assistant reply to message history
        self.history.append(prompt[-1])
        self.history.add(
            "assistant", llm_response.content, "ai_response"
        )  # FIXME: support function calls

        try:
            return self.parse_and_process_response(llm_response, prompt, instruction)
        except SyntaxError as e:
            logger.error(f"Response could not be parsed: {e}")
            # TODO: tune this message
            self.history.add(
                "system",
                f"Your response could not be parsed: {e}"
                "\n\nRemember to only respond using the specified format above!",
            )
            return None, None, {}

        # TODO: update memory/context

    @abstractmethod
    def parse_and_process_response(
        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Validate, parse & process the LLM's response.

        Must be implemented by derivative classes: no base implementation is provided,
        since the implementation depends on the role of the derivative Agent.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """
        pass


def add_history_upto_token_limit(
    prompt: ChatSequence, history: MessageHistory, t_limit: int
) -> list[Message]:
    current_prompt_length = prompt.token_length
    insertion_index = len(prompt)
    limit_reached = False
    trimmed_messages: list[Message] = []
    for cycle in reversed(list(history.per_cycle())):
        messages_to_add = [msg for msg in cycle if msg is not None]
        tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
        if current_prompt_length + tokens_to_add > t_limit:
            limit_reached = True

        if not limit_reached:
            # Add the most recent message to the start of the chain,
            # after the system prompts.
            prompt.insert(insertion_index, *messages_to_add)
            current_prompt_length += tokens_to_add
        else:
            trimmed_messages = messages_to_add + trimmed_messages

    return trimmed_messages
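To make the abstract surface concrete: a derivative agent only has to supply the two abstract methods; prompt construction, history management, and token budgeting are inherited. The following toy subclass is a hedged sketch, not part of the diff, and EchoAgent is an invented name:

class EchoAgent(BaseAgent):
    """Toy example: selects no command and echoes whatever it is asked to execute."""

    def execute(self, command_name, command_args, user_input):
        # A real agent would dispatch to a command registry here.
        return f"echo: {command_name} {command_args or {}}"

    def parse_and_process_response(self, llm_response, prompt, instruction):
        # Treat the raw response text as "thoughts" and select no command.
        return None, None, {"text": llm_response.content}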
147 autogpt/app/cli.py Normal file
@@ -0,0 +1,147 @@
"""Main script for the autogpt package."""
from pathlib import Path
from typing import Optional

import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-settings",
    "-C",
    help=(
        "Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
        " root directory. Will also automatically skip the re-prompt."
    ),
)
@click.option(
    "--prompt-settings",
    "-P",
    help="Specifies which prompt_settings.yaml file to use.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--ai-goal",
    type=str,
    multiple=True,
    help="AI goal override; may be used multiple times to pass multiple goals",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    ai_goal: tuple[str],
) -> None:
    """
    Welcome to Auto-GPT, an experimental open-source application showcasing
    the capabilities of GPT-4 and pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt

    if ctx.invoked_subcommand is None:
        run_auto_gpt(
            continuous=continuous,
            continuous_limit=continuous_limit,
            ai_settings=ai_settings,
            prompt_settings=prompt_settings,
            skip_reprompt=skip_reprompt,
            speak=speak,
            debug=debug,
            gpt3only=gpt3only,
            gpt4only=gpt4only,
            memory_type=memory_type,
            browser_name=browser_name,
            allow_downloads=allow_downloads,
            skip_news=skip_news,
            working_directory=Path(
                __file__
            ).parent.parent.parent,  # TODO: make this an option
            workspace_directory=workspace_directory,
            install_plugin_deps=install_plugin_deps,
            ai_name=ai_name,
            ai_role=ai_role,
            ai_goals=ai_goal,
        )


if __name__ == "__main__":
    main()
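A quick way to exercise this parser without starting a real agent run is click's built-in test runner; this sketch assumes the autogpt package is importable, and --help returns before run_auto_gpt is ever imported:

from click.testing import CliRunner

from autogpt.app.cli import main

# Invoke the CLI in-process; --help is side-effect free.
result = CliRunner().invoke(main, ["--help"])
assert result.exit_code == 0
print(result.output)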
187 autogpt/app/configurator.py Normal file
@@ -0,0 +1,187 @@
"""Configurator module."""
from __future__ import annotations

from typing import Literal

import click
from colorama import Back, Fore, Style

from autogpt import utils
from autogpt.config import Config
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends


def create_config(
    config: Config,
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    prompt_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        prompt_settings_file (str): The path to the prompt_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
        gpt3only (bool): Whether to enable GPT3.5 only mode
        gpt4only (bool): Whether to enable GPT4 only mode
        memory_type (str): The type of memory backend to use
        browser_name (str): The name of the browser to use when using selenium to scrape the web
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skip_news (bool): Whether to suppress the output of latest news on startup
    """
    config.debug_mode = False
    config.continuous_mode = False
    config.speak_mode = False

    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        config.debug_mode = True

    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True

        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
            config.continuous_limit = continuous_limit

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        config.speak_mode = True

    # Set the default LLM models
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
        config.fast_llm = GPT_3_MODEL
        config.smart_llm = GPT_3_MODEL
    elif (
        gpt4only
        and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
        == GPT_4_MODEL
    ):
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt4only should always use gpt-4, despite user's SMART_LLM config
        config.fast_llm = GPT_4_MODEL
        config.smart_llm = GPT_4_MODEL
    else:
        config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
        config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)

    if memory_type:
        supported_memory = get_supported_memory_backends()
        chosen = memory_type
        if chosen not in supported_memory:
            logger.typewriter_log(
                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
        else:
            config.memory_backend = chosen

    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        config.skip_reprompt = True

    if ai_settings_file:
        file = ai_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        config.ai_settings_file = file
        config.skip_reprompt = True

    if prompt_settings_file:
        file = prompt_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
        config.prompt_settings_file = file

    if browser_name:
        config.selenium_web_browser = browser_name

    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
            Fore.YELLOW,
            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
            + "It is recommended that you monitor any files it downloads carefully.",
        )
        logger.typewriter_log(
            "WARNING: ",
            Fore.YELLOW,
            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
        )
        config.allow_downloads = True

    if skip_news:
        config.skip_news = True


def check_model(
    model_name: str,
    model_type: Literal["smart_llm", "fast_llm"],
    config: Config,
) -> str:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    openai_credentials = config.get_openai_credentials(model_name)
    api_manager = ApiManager()
    models = api_manager.get_models(**openai_credentials)

    if any(model_name in m["id"] for m in models):
        return model_name

    logger.typewriter_log(
        "WARNING: ",
        Fore.YELLOW,
        f"You do not have access to {model_name}. Setting {model_type} to "
        f"gpt-3.5-turbo.",
    )
    return "gpt-3.5-turbo"
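The downgrade rule in check_model() reduces to a one-line policy; this standalone sketch (not the real API, with made-up model sets) captures it:

def pick_model(requested: str, available: set[str]) -> str:
    # Fall back to gpt-3.5-turbo whenever the account lacks the requested model.
    return requested if requested in available else "gpt-3.5-turbo"

assert pick_model("gpt-4", {"gpt-4", "gpt-3.5-turbo"}) == "gpt-4"
assert pick_model("gpt-4", {"gpt-3.5-turbo"}) == "gpt-3.5-turbo"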
597 autogpt/app/main.py Normal file
@@ -0,0 +1,597 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import enum
import logging
import math
import signal
import sys
from pathlib import Path
from types import FrameType
from typing import Optional

from colorama import Fore, Style

from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import (
    clean_input,
    get_current_git_branch,
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies


def run_auto_gpt(
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    working_directory: Path,
    workspace_directory: str | Path,
    install_plugin_deps: bool,
    ai_name: Optional[str] = None,
    ai_role: Optional[str] = None,
    ai_goals: tuple[str] = tuple(),
):
    # Configure logging before we do anything else.
    logger.set_level(logging.DEBUG if debug else logging.INFO)

    config = ConfigBuilder.build_config_from_env(workdir=working_directory)

    # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
    # or import it directly.
    logger.config = config

    # TODO: fill in llm values here
    check_openai_api_key(config)

    create_config(
        config,
        continuous,
        continuous_limit,
        ai_settings,
        prompt_settings,
        skip_reprompt,
        speak,
        debug,
        gpt3only,
        gpt4only,
        memory_type,
        browser_name,
        allow_downloads,
        skip_news,
    )

    if config.continuous_mode:
        for line in get_legal_warning().split("\n"):
            logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)

    if not config.skip_news:
        motd, is_new_motd = get_latest_bulletin()
        if motd:
            motd = markdown_to_ansi_style(motd)
            for motd_line in motd.split("\n"):
                logger.info(motd_line, "NEWS:", Fore.GREEN)
            if is_new_motd and not config.chat_messages_enabled:
                input(
                    Fore.MAGENTA
                    + Style.BRIGHT
                    + "NEWS: Bulletin was updated! Press Enter to continue..."
                    + Style.RESET_ALL
                )

        git_branch = get_current_git_branch()
        if git_branch and git_branch != "stable":
            logger.typewriter_log(
                "WARNING: ",
                Fore.RED,
                f"You are running on `{git_branch}` branch "
                "- this is not a supported branch.",
            )
        if sys.version_info < (3, 10):
            logger.typewriter_log(
                "WARNING: ",
                Fore.RED,
                "You are running on an older version of Python. "
                "Some people have observed problems with certain "
                "parts of Auto-GPT with this version. "
                "Please consider upgrading to Python 3.10 or higher.",
            )

    if install_plugin_deps:
        install_plugin_dependencies()

    # TODO: have this directory live outside the repository (e.g. in a user's
    # home directory) and have it come in as a command line argument or part of
    # the env file.
    Workspace.set_workspace_directory(config, workspace_directory)

    # HACK: doing this here to collect some globals that depend on the workspace.
    Workspace.set_file_logger_path(config, config.workspace_path)

    config.plugins = scan_plugins(config, config.debug_mode)
    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry()

    logger.debug(
        f"The following command categories are disabled: {config.disabled_command_categories}"
    )
    enabled_command_categories = [
        x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
    ]

    logger.debug(
        f"The following command categories are enabled: {enabled_command_categories}"
    )

    for command_category in enabled_command_categories:
        command_registry.import_commands(command_category)

    # Unregister commands that are incompatible with the current config
    incompatible_commands = []
    for command in command_registry.commands.values():
        if callable(command.enabled) and not command.enabled(config):
            command.enabled = False
            incompatible_commands.append(command)

    for command in incompatible_commands:
        command_registry.unregister(command)
        logger.debug(
            f"Unregistering incompatible command: {command.name}, "
            f"reason - {command.disabled_reason or 'Disabled by current config.'}"
        )

    ai_config = construct_main_ai_config(
        config,
        name=ai_name,
        role=ai_role,
        goals=ai_goals,
    )
    ai_config.command_registry = command_registry

    # add chat plugins capable of report to logger
    if config.chat_messages_enabled:
        for plugin in config.plugins:
            if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
                logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
                logger.chat_plugins.append(plugin)

    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(config)
    memory.clear()
    logger.typewriter_log(
        "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)

    agent = Agent(
        memory=memory,
        command_registry=command_registry,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
        ai_config=ai_config,
        config=config,
    )

    run_interaction_loop(agent)


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | None:
    # Translate from the continuous_mode/continuous_limit config
    # to a cycle_budget (maximum number of cycles to run without checking in with
    # the user) and a count of cycles_remaining before we check in.
    if continuous_mode:
        cycle_budget = continuous_limit if continuous_limit else math.inf
    else:
        cycle_budget = 1

    return cycle_budget


class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    EXIT = "EXIT"
    TEXT = "TEXT"


def run_interaction_loop(
    agent: Agent,
) -> None:
    """Run the main interaction loop for the agent.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    config = agent.config
    ai_config = agent.ai_config
    logger.debug(f"{ai_config.ai_name} System Prompt: {agent.system_prompt}")

    cycle_budget = cycles_remaining = _get_cycle_budget(
        config.continuous_mode, config.continuous_limit
    )
    spinner = Spinner("Thinking...", plain_output=config.plain_output)

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        nonlocal cycle_budget, cycles_remaining, spinner
        if cycles_remaining in [0, 1, math.inf]:
            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution "
                "immediately.",
                Fore.RED,
            )
            sys.exit()
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution.",
                Fore.RED,
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)

    #########################
    # Application Main Loop #
    #########################

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        # Have the agent determine the next action to take.
        with spinner:
            command_name, command_args, assistant_reply_dict = agent.think()

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(config, ai_config, command_name, command_args, assistant_reply_dict)

        ##################
        # Get user input #
        ##################
        if cycles_remaining == 1:  # Last cycle
            user_feedback, user_input, new_cycles_remaining = get_user_feedback(
                config,
                ai_config,
            )

            if user_feedback == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.typewriter_log(
                            "RESUMING CONTINUOUS EXECUTION: ",
                            Fore.MAGENTA,
                            f"The cycle budget is {cycle_budget}.",
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "",
                )
            elif user_feedback == UserFeedback.EXIT:
                logger.typewriter_log("Exiting...", Fore.YELLOW)
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                command_name = "human_feedback"
        else:
            user_input = None
            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                logger.typewriter_log(
                    "AUTHORISED COMMANDS LEFT: ", Fore.CYAN, f"{cycles_remaining}"
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if command_name != "human_feedback":
            cycles_remaining -= 1
        result = agent.execute(command_name, command_args, user_input)

        if result is not None:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
        else:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")


def update_user(
    config: Config,
    ai_config: AIConfig,
    command_name: CommandName | None,
    command_args: CommandArgs | None,
    assistant_reply_dict: AgentThoughts,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.
        command_name: The name of the command to execute.
        command_args: The arguments for the command.
        assistant_reply_dict: The assistant's reply.
    """

    print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)

    if command_name is not None:
        if command_name.lower().startswith("error"):
            logger.typewriter_log(
                "ERROR: ",
                Fore.RED,
                f"The Agent failed to select an action. Error message: {command_name}",
            )
        else:
            if config.speak_mode:
                say_text(f"I want to execute {command_name}", config)

            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
                f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
            )
    else:
        logger.typewriter_log(
            "NO ACTION SELECTED: ",
            Fore.RED,
            "The Agent failed to select an action.",
        )


def get_user_feedback(
    config: Config,
    ai_config: AIConfig,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_config.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        if config.chat_messages_enabled:
            console_input = clean_input(config, "Waiting for your response...")
        else:
            console_input = clean_input(
                config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
            )

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warn("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                user_feedback = UserFeedback.AUTHORIZE
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
            except ValueError:
                logger.warn(
                    "Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining


def construct_main_ai_config(
    config: Config,
    name: Optional[str] = None,
    role: Optional[str] = None,
    goals: tuple[str] = tuple(),
) -> AIConfig:
    """Construct the prompt for the AI to respond to

    Returns:
        str: The prompt string
    """
    ai_config = AIConfig.load(config.workdir / config.ai_settings_file)

    # Apply overrides
    if name:
        ai_config.ai_name = name
    if role:
        ai_config.ai_role = role
    if goals:
        ai_config.ai_goals = list(goals)

    if all([name, role, goals]) or (
        config.skip_reprompt
        and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
    ):
        logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
        logger.typewriter_log(
            "API Budget:",
            Fore.GREEN,
            "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
        )
    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {ai_config.ai_name}?",
            speak_text=True,
        )
        should_continue = clean_input(
            config,
            f"""Continue with the last settings?
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
        )
        if should_continue.lower() == config.exit_key:
            ai_config = AIConfig()

    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
        ai_config = prompt_user(config)
        ai_config.save(config.workdir / config.ai_settings_file)

    if config.restrict_to_workspace:
        logger.typewriter_log(
            "NOTE: All files/directories created by this agent can be found inside its workspace at:",
            Fore.YELLOW,
            f"{config.workspace_path}",
        )
    # set the total api budget
    api_manager = ApiManager()
    api_manager.set_total_budget(ai_config.api_budget)

    # Agent Created, print message
    logger.typewriter_log(
        ai_config.ai_name,
        Fore.LIGHTBLUE_EX,
        "has been created with the following details:",
        speak_text=True,
    )

    # Print the ai_config details
    # Name
    logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
    # Role
    logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
    # Goals
    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
    for goal in ai_config.ai_goals:
        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return ai_config


def print_assistant_thoughts(
    ai_name: str,
    assistant_reply_json_valid: dict,
    config: Config,
) -> None:
    from autogpt.speech import say_text

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
    assistant_thoughts_speak = None
    assistant_thoughts_criticism = None

    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
    if assistant_thoughts:
        assistant_thoughts_reasoning = remove_ansi_escape(
            assistant_thoughts.get("reasoning", "")
        )
        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
        assistant_thoughts_criticism = remove_ansi_escape(
            assistant_thoughts.get("criticism", "")
        )
        assistant_thoughts_speak = remove_ansi_escape(
            assistant_thoughts.get("speak", "")
        )
    logger.typewriter_log(
        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
    )
    logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
    if assistant_thoughts_plan:
        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
        # If it's a list, join it into a string
        if isinstance(assistant_thoughts_plan, list):
            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
        elif isinstance(assistant_thoughts_plan, dict):
            assistant_thoughts_plan = str(assistant_thoughts_plan)

        # Split the input_string using the newline character and dashes
        lines = assistant_thoughts_plan.split("\n")
        for line in lines:
            line = line.lstrip("- ")
            logger.typewriter_log("- ", Fore.GREEN, line.strip())
    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
    # Speak the assistant's thoughts
    if assistant_thoughts_speak:
        if config.speak_mode:
            say_text(assistant_thoughts_speak, config)
        else:
            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")


def remove_ansi_escape(s: str) -> str:
    return s.replace("\x1B", "")
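The interaction loop's supervision policy hinges on _get_cycle_budget(); this small standalone restatement of its three regimes is a sketch for illustration, not part of the diff:

import math

def cycle_budget(continuous_mode: bool, continuous_limit: int | None):
    # Mirrors _get_cycle_budget(): how many cycles may run before checking in.
    if continuous_mode:
        return continuous_limit if continuous_limit else math.inf
    return 1

assert cycle_budget(False, None) == 1        # confirm with the user every cycle
assert cycle_budget(True, 5) == 5            # run 5 cycles unsupervised, then ask
assert cycle_budget(True, None) == math.inf  # run until interrupted (Ctrl+C)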
238 autogpt/app/setup.py Normal file
@@ -0,0 +1,238 @@
"""Set up the AI and its goals"""
import re
from typing import Optional

from colorama import Fore, Style
from jinja2 import Template

from autogpt import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.utils import create_chat_completion
from autogpt.logs import logger
from autogpt.prompts.default_prompts import (
    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_USER_DESIRE_PROMPT,
)


def prompt_user(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """Prompt the user for input

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    ai_config_template_provided = ai_config_template is not None and any(
        [
            ai_config_template.ai_goals,
            ai_config_template.ai_name,
            ai_config_template.ai_role,
        ]
    )

    user_desire = ""
    if not ai_config_template_provided:
        # Get user desire if command line overrides have not been passed in
        logger.typewriter_log(
            "Create an AI-Assistant:",
            Fore.GREEN,
            "input '--manual' to enter manual mode.",
            speak_text=True,
        )

        user_desire = utils.clean_input(
            config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
        )

    if user_desire.strip() == "":
        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

    # If user desire contains "--manual" or we have overridden any of the AI configuration
    if "--manual" in user_desire or ai_config_template_provided:
        logger.typewriter_log(
            "Manual Mode Selected",
            Fore.GREEN,
            speak_text=True,
        )
        return generate_aiconfig_manual(config, ai_config_template)

    else:
        try:
            return generate_aiconfig_automatic(user_desire, config)
        except Exception as e:
            logger.typewriter_log(
                "Unable to automatically generate AI Config based on user desire.",
                Fore.RED,
                "Falling back to manual mode.",
                speak_text=True,
            )

            return generate_aiconfig_manual(config)


def generate_aiconfig_manual(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

    This function guides the user through a series of prompts to collect the necessary information to create
    an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
    goals. If the user does not provide a value for any of the fields, default values will be used.

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """

    # Manual Setup Intro
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

    if ai_config_template and ai_config_template.ai_name:
        ai_name = ai_config_template.ai_name
    else:
        ai_name = ""
        # Get AI Name from User
        logger.typewriter_log(
            "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
        )
        ai_name = utils.clean_input(config, "AI Name: ")
        if ai_name == "":
            ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    if ai_config_template and ai_config_template.ai_role:
        ai_role = ai_config_template.ai_role
    else:
        # Get AI Role from User
        logger.typewriter_log(
            "Describe your AI's role: ",
            Fore.GREEN,
            "For example, 'an AI designed to autonomously develop and run businesses with"
            " the sole goal of increasing your net worth.'",
        )
        ai_role = utils.clean_input(config, f"{ai_name} is: ")
        if ai_role == "":
            ai_role = (
                "an AI designed to autonomously develop and run businesses with the"
                " sole goal of increasing your net worth."
            )

    if ai_config_template and ai_config_template.ai_goals:
        ai_goals = ai_config_template.ai_goals
    else:
        # Enter up to 5 goals for the AI
        logger.typewriter_log(
            "Enter up to 5 goals for your AI: ",
            Fore.GREEN,
            "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
            " multiple businesses autonomously'",
        )
        logger.info("Enter nothing to load defaults, enter nothing when finished.")
        ai_goals = []
        for i in range(5):
            ai_goal = utils.clean_input(
                config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
            )
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
        if not ai_goals:
            ai_goals = [
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ]

    # Get API Budget from User
    logger.typewriter_log(
        "Enter your budget for API calls: ",
        Fore.GREEN,
        "For example: $1.50",
    )
    logger.info("Enter nothing to let the AI run without monetary limit")
    api_budget_input = utils.clean_input(
        config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
    )
    if api_budget_input == "":
        api_budget = 0.0
    else:
        try:
            api_budget = float(api_budget_input.replace("$", ""))
        except ValueError:
            logger.typewriter_log(
                "Invalid budget input. Setting budget to unlimited.", Fore.RED
            )
            api_budget = 0.0

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)


def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
    prompt_ai_config_automatic = Template(
        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
    ).render(user_prompt=user_prompt)
    # Call LLM with the string as user input
    output = create_chat_completion(
        ChatSequence.for_model(
            config.fast_llm,
            [
                Message("system", system_prompt),
                Message("user", prompt_ai_config_automatic),
            ],
        ),
        config,
    ).content

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    api_budget = 0.0  # TODO: parse api budget using a regular expression

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)
@@ -1,5 +0,0 @@
"""AutoGPT: A GPT powered AI Assistant"""
import autogpt.app.cli

if __name__ == "__main__":
    autogpt.app.cli.cli()
@@ -1,108 +0,0 @@
from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig


def create_agent(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
) -> Agent:
    if not task:
        raise ValueError("No task specified for new agent")
    ai_profile = ai_profile or AIProfile()
    directives = directives or AIDirectives()

    agent = _configure_agent(
        agent_id=agent_id,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )

    return agent


def configure_agent_with_state(
    state: AgentSettings,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
    return _configure_agent(
        state=state,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )


def _configure_agent(
    app_config: AppConfig,
    llm_provider: MultiProvider,
    file_storage: FileStorage,
    agent_id: str = "",
    task: str = "",
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
    state: Optional[AgentSettings] = None,
) -> Agent:
    if state:
        agent_state = state
    elif agent_id and task and ai_profile and directives:
        agent_state = state or create_agent_state(
            agent_id=agent_id,
            task=task,
            ai_profile=ai_profile,
            directives=directives,
            app_config=app_config,
        )
    else:
        raise TypeError(
            "Either (state) or (agent_id, task, ai_profile, directives)"
            " must be specified"
        )

    return Agent(
        settings=agent_state,
        llm_provider=llm_provider,
        file_storage=file_storage,
        app_config=app_config,
    )


def create_agent_state(
    agent_id: str,
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
) -> AgentSettings:
    return AgentSettings(
        agent_id=agent_id,
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        config=AgentConfiguration(
            fast_llm=app_config.fast_llm,
            smart_llm=app_config.smart_llm,
            allow_fs_access=not app_config.restrict_to_workspace,
            use_functions_api=app_config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
    )
@@ -1,36 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from forge.file_storage.base import FileStorage

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.app.config import AppConfig
    from forge.llm.providers import MultiProvider

from .configurators import _configure_agent
from .profile_generator import generate_agent_profile_for_task


async def generate_agent_for_task(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
    ai_profile, task_directives = await generate_agent_profile_for_task(
        task=task,
        app_config=app_config,
        llm_provider=llm_provider,
    )
    return _configure_agent(
        agent_id=agent_id,
        task=task,
        ai_profile=ai_profile,
        directives=task_directives,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )
@@ -1,241 +0,0 @@
import json
import logging

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.providers import MultiProvider
from forge.llm.providers.schema import (
    AssistantChatMessage,
    ChatMessage,
    CompletionModelFunction,
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema

from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
    llm_classification: LanguageModelClassification = UserConfigurable(
        default=LanguageModelClassification.SMART_MODEL
    )
    _example_call: object = {
        "name": "create_agent",
        "arguments": {
            "name": "CMOGPT",
            "description": (
                "a professional digital marketer AI that assists Solopreneurs "
                "in growing their businesses by providing "
                "world-class expertise in solving marketing problems "
                "for SaaS, content products, agencies, and more."
            ),
            "directives": {
                "best_practices": [
                    (
                        "Engage in effective problem-solving, prioritization, "
                        "planning, and supporting execution to address your "
                        "marketing needs as your virtual "
                        "Chief Marketing Officer."
                    ),
                    (
                        "Provide specific, actionable, and concise advice to "
                        "help you make informed decisions without the use of "
                        "platitudes or overly wordy explanations."
                    ),
                    (
                        "Identify and prioritize quick wins and cost-effective "
                        "campaigns that maximize results with minimal time and "
                        "budget investment."
                    ),
                    (
                        "Proactively take the lead in guiding you and offering "
                        "suggestions when faced with unclear information or "
                        "uncertainty to ensure your marketing strategy remains "
                        "on track."
                    ),
                ],
                "constraints": [
                    "Do not suggest illegal or unethical plans or strategies.",
                    "Take reasonable budgetary limits into account.",
                ],
            },
        },
    }
    system_prompt: str = UserConfigurable(
        default=(
            "Your job is to respond to a user-defined task, given in triple quotes, by "
            "invoking the `create_agent` function to generate an autonomous agent to "
            "complete the task. "
            "You should supply a role-based name for the agent (_GPT), "
            "an informative description for what the agent does, and 1 to 5 directives "
            "in each of the categories Best Practices and Constraints, "
            "that are optimally aligned with the successful completion "
            "of its assigned task.\n"
            "\n"
            "Example Input:\n"
            '"""Help me with marketing my business"""\n\n'
            "Example Call:\n"
            "```\n"
            f"{json.dumps(_example_call, indent=4)}"
            "\n```"
        )
    )
    user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
    create_agent_function: dict = UserConfigurable(
        default=CompletionModelFunction(
            name="create_agent",
            description="Create a new autonomous AI agent to complete a given task.",
            parameters={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description="A short role-based name for an autonomous agent.",
                    required=True,
                ),
                "description": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description=(
                        "An informative one sentence description "
                        "of what the AI agent does"
                    ),
                    required=True,
                ),
                "directives": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    properties={
                        "best_practices": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five highly effective best practices "
                                "that are optimally aligned with the completion "
                                "of the given task"
                            ),
                            required=True,
                        ),
                        "constraints": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five reasonable and efficacious constraints "
                                "that are optimally aligned with the completion "
                                "of the given task"
                            ),
                            required=True,
                        ),
                    },
                    required=True,
                ),
            },
        ).model_dump()
    )


class AgentProfileGenerator(PromptStrategy):
    default_configuration: AgentProfileGeneratorConfiguration = (
        AgentProfileGeneratorConfiguration()
    )

    def __init__(
        self,
        llm_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._llm_classification = llm_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = CompletionModelFunction.model_validate(
            create_agent_function
        )

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return self._llm_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        system_message = ChatMessage.system(self._system_prompt_message)
        user_message = ChatMessage.user(
            self._user_prompt_template.format(
                user_objective=user_objective,
            )
        )
        prompt = ChatPrompt(
            messages=[system_message, user_message],
            functions=[self._create_agent_function],
        )
        return prompt

    def parse_response_content(
        self,
        response: AssistantChatMessage,
    ) -> tuple[AIProfile, AIDirectives]:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.
        """
        try:
            if not response.tool_calls:
                raise ValueError(
                    f"LLM did not call {self._create_agent_function.name} function; "
                    "agent profile creation failed"
                )
            arguments: object = response.tool_calls[0].function.arguments
            ai_profile = AIProfile(
                ai_name=arguments.get("name"),  # type: ignore
                ai_role=arguments.get("description"),  # type: ignore
            )
            ai_directives = AIDirectives(
                best_practices=arguments.get("directives", {}).get("best_practices"),
                constraints=arguments.get("directives", {}).get("constraints"),
                resources=[],
            )
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response}")
            raise
        return ai_profile, ai_directives


async def generate_agent_profile_for_task(
    task: str,
    app_config: AppConfig,
    llm_provider: MultiProvider,
) -> tuple[AIProfile, AIDirectives]:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """
    agent_profile_generator = AgentProfileGenerator(
        **AgentProfileGenerator.default_configuration.model_dump()  # HACK
    )

    prompt = agent_profile_generator.build_prompt(task)

    # Call LLM with the string as user input
    output = await llm_provider.create_chat_completion(
        prompt.messages,
        model_name=app_config.smart_llm,
        functions=prompt.functions,
        completion_parser=agent_profile_generator.parse_response_content,
    )

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output.response}")

    return output.parsed_result
@@ -1,37 +0,0 @@
# 🤖 Agents

An agent is composed of [🧩 Components](./components.md) and is responsible for executing pipelines and some additional logic. The base class for all agents is `BaseAgent`; it has the necessary logic to collect components and execute protocols.

## Important methods

`BaseAgent` provides two abstract methods needed for any agent to work properly:
1. `propose_action`: This method is responsible for proposing an action based on the current state of the agent; it returns `ThoughtProcessOutput`.
2. `execute`: This method is responsible for executing the proposed action and returns `ActionResult`.

## AutoGPT Agent

`Agent` is the main agent provided by AutoGPT. It's a subclass of `BaseAgent` and has all the [Built-in Components](./built-in-components.md). `Agent` implements the essential abstract methods from `BaseAgent`: `propose_action` and `execute`.

## Building your own Agent

The easiest way to build your own agent is to extend the `Agent` class and add additional components. By doing this you can reuse the existing components and the default logic for executing [⚙️ Protocols](./protocols.md).

```py
class MyComponent(AgentComponent):
    pass


class MyAgent(Agent):
    def __init__(
        self,
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        # Add your custom component
        self.my_component = MyComponent()
```

For more customization, you can override `propose_action` and `execute`, or even subclass `BaseAgent` directly. This way you have full control over the agent's components and behavior. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt/autogpt/agents/agent.py) for more details.
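For instance, a minimal sketch of such an override might look like this (a hypothetical `MyLoggingAgent`, reusing the `MyAgent` class from the snippet above; `OneShotAgentActionProposal` is the proposal type that `Agent.propose_action` returns):

```py
class MyLoggingAgent(MyAgent):
    async def propose_action(self) -> OneShotAgentActionProposal:
        # Reuse the default proposal logic from Agent, then inspect the result
        proposal = await super().propose_action()
        # Log which tool the agent wants to invoke before it is executed
        print(f"Proposed tool: {proposal.use_tool.name}")
        return proposal
```

Because `execute` is left untouched, the default execution pipeline still runs; only the proposal step gains the extra logging.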
@@ -1,9 +0,0 @@
from .agent import Agent
from .agent_manager import AgentManager
from .prompt_strategies.one_shot import OneShotAgentActionProposal

__all__ = [
    "AgentManager",
    "Agent",
    "OneShotAgentActionProposal",
]
@@ -1,313 +0,0 @@
from __future__ import annotations

import inspect
import logging
from typing import TYPE_CHECKING, Any, ClassVar, Optional

import sentry_sdk
from forge.agent.base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from forge.agent.protocols import (
    AfterExecute,
    AfterParse,
    CommandProvider,
    DirectiveProvider,
    MessageProvider,
)
from forge.command.command import Command
from forge.components.action_history import (
    ActionHistoryComponent,
    EpisodicActionHistory,
)
from forge.components.action_history.action_history import ActionHistoryConfiguration
from forge.components.code_executor.code_executor import (
    CodeExecutorComponent,
    CodeExecutorConfiguration,
)
from forge.components.context.context import AgentContext, ContextComponent
from forge.components.file_manager import FileManagerComponent
from forge.components.git_operations import GitOperationsComponent
from forge.components.image_gen import ImageGeneratorComponent
from forge.components.system import SystemComponent
from forge.components.user_interaction import UserInteractionComponent
from forge.components.watchdog import WatchdogComponent
from forge.components.web import WebSearchComponent, WebSeleniumComponent
from forge.file_storage.base import FileStorage
from forge.llm.prompting.schema import ChatPrompt
from forge.llm.prompting.utils import dump_prompt
from forge.llm.providers import (
    AssistantFunctionCall,
    ChatMessage,
    ChatModelResponse,
    MultiProvider,
)
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import (
    ActionErrorResult,
    ActionInterruptedByHuman,
    ActionResult,
    ActionSuccessResult,
)
from forge.models.config import Configurable
from forge.utils.exceptions import (
    AgentException,
    AgentTerminated,
    CommandExecutionError,
    UnknownCommandError,
)
from pydantic import Field

from .prompt_strategies.one_shot import (
    OneShotAgentActionProposal,
    OneShotAgentPromptStrategy,
)

if TYPE_CHECKING:
    from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentConfiguration(BaseAgentConfiguration):
    pass


class AgentSettings(BaseAgentSettings):
    config: AgentConfiguration = Field(  # type: ignore
        default_factory=AgentConfiguration
    )

    history: EpisodicActionHistory[OneShotAgentActionProposal] = Field(
        default_factory=EpisodicActionHistory[OneShotAgentActionProposal]
    )
    """(STATE) The action history of the agent."""

    context: AgentContext = Field(default_factory=AgentContext)


class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
    default_settings: ClassVar[AgentSettings] = AgentSettings(
        name="Agent",
        description=__doc__ if __doc__ else "",
    )

    def __init__(
        self,
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
    ):
        super().__init__(settings)

        self.llm_provider = llm_provider
        prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
            deep=True
        )
        prompt_config.use_functions_api = (
            settings.config.use_functions_api
            # Anthropic currently doesn't support tools + prefilling :(
            and self.llm.provider_name != "anthropic"
        )
        self.prompt_strategy = OneShotAgentPromptStrategy(prompt_config, logger)
        self.commands: list[Command] = []

        # Components
        self.system = SystemComponent()
        self.history = (
            ActionHistoryComponent(
                settings.history,
                lambda x: self.llm_provider.count_tokens(x, self.llm.name),
                llm_provider,
                ActionHistoryConfiguration(
                    llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
                ),
            )
            .run_after(WatchdogComponent)
            .run_after(SystemComponent)
        )
        if not app_config.noninteractive_mode:
            self.user_interaction = UserInteractionComponent()
        self.file_manager = FileManagerComponent(file_storage, settings)
        self.code_executor = CodeExecutorComponent(
            self.file_manager.workspace,
            CodeExecutorConfiguration(
                docker_container_name=f"{settings.agent_id}_sandbox"
            ),
        )
        self.git_ops = GitOperationsComponent()
        self.image_gen = ImageGeneratorComponent(self.file_manager.workspace)
        self.web_search = WebSearchComponent()
        self.web_selenium = WebSeleniumComponent(
            llm_provider,
            app_config.app_data_dir,
        )
        self.context = ContextComponent(self.file_manager.workspace, settings.context)
        self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
            ContextComponent
        )

        self.event_history = settings.history
        self.app_config = app_config

    async def propose_action(self) -> OneShotAgentActionProposal:
        """Proposes the next action to execute, based on the task and current state.

        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """
        self.reset_trace()

        # Get directives
        resources = await self.run_pipeline(DirectiveProvider.get_resources)
        constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
        best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

        directives = self.state.directives.model_copy(deep=True)
        directives.resources += resources
        directives.constraints += constraints
        directives.best_practices += best_practices

        # Get commands
        self.commands = await self.run_pipeline(CommandProvider.get_commands)
        self._remove_disabled_commands()

        # Get messages
        messages = await self.run_pipeline(MessageProvider.get_messages)

        include_os_info = (
            self.code_executor.config.execute_local_commands
            if hasattr(self, "code_executor")
            else False
        )

        prompt: ChatPrompt = self.prompt_strategy.build_prompt(
            messages=messages,
            task=self.state.task,
            ai_profile=self.state.ai_profile,
            ai_directives=directives,
            commands=function_specs_from_commands(self.commands),
            include_os_info=include_os_info,
        )

        logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
        output = await self.complete_and_parse(prompt)
        self.config.cycle_count += 1

        return output

    async def complete_and_parse(
        self, prompt: ChatPrompt, exception: Optional[Exception] = None
    ) -> OneShotAgentActionProposal:
        if exception:
            prompt.messages.append(ChatMessage.system(f"Error: {exception}"))

        response: ChatModelResponse[
            OneShotAgentActionProposal
        ] = await self.llm_provider.create_chat_completion(
            prompt.messages,
            model_name=self.llm.name,
            completion_parser=self.prompt_strategy.parse_response_content,
            functions=prompt.functions,
            prefill_response=prompt.prefill_response,
        )
        result = response.parsed_result

        await self.run_pipeline(AfterParse.after_parse, result)

        return result

    async def execute(
        self,
        proposal: OneShotAgentActionProposal,
        user_feedback: str = "",
    ) -> ActionResult:
        tool = proposal.use_tool

        # Get commands
        self.commands = await self.run_pipeline(CommandProvider.get_commands)
        self._remove_disabled_commands()

        try:
            return_value = await self._execute_tool(tool)

            result = ActionSuccessResult(outputs=return_value)
        except AgentTerminated:
            raise
        except AgentException as e:
            result = ActionErrorResult.from_exception(e)
            logger.warning(f"{tool} raised an error: {e}")
            sentry_sdk.capture_exception(e)

        result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
        if result_tlength > self.send_token_limit // 3:
            result = ActionErrorResult(
                reason=f"Command {tool.name} returned too much output. "
                "Do not execute this command again with the same arguments."
            )

        await self.run_pipeline(AfterExecute.after_execute, result)

        logger.debug("\n".join(self.trace))

        return result

    async def do_not_execute(
        self, denied_proposal: OneShotAgentActionProposal, user_feedback: str
    ) -> ActionResult:
        result = ActionInterruptedByHuman(feedback=user_feedback)

        await self.run_pipeline(AfterExecute.after_execute, result)

        logger.debug("\n".join(self.trace))

        return result

    async def _execute_tool(self, tool_call: AssistantFunctionCall) -> Any:
        """Execute the command and return the result

        Args:
            tool_call (AssistantFunctionCall): The tool call to execute

        Returns:
            str: The execution result
        """
        # Execute a native command with the same name or alias, if it exists
        command = self._get_command(tool_call.name)
        try:
            result = command(**tool_call.arguments)
            if inspect.isawaitable(result):
                return await result
            return result
        except AgentException:
            raise
        except Exception as e:
            raise CommandExecutionError(str(e))

    def _get_command(self, command_name: str) -> Command:
        for command in reversed(self.commands):
            if command_name in command.names:
                return command

        raise UnknownCommandError(
            f"Cannot execute command '{command_name}': unknown command."
        )

    def _remove_disabled_commands(self) -> None:
        self.commands = [
            command
            for command in self.commands
            if not any(
                name in self.app_config.disabled_commands for name in command.names
            )
        ]

    def find_obscured_commands(self) -> list[Command]:
        seen_names = set()
        obscured_commands = []
        for command in reversed(self.commands):
            # If all of the command's names have been seen, it's obscured
            if seen_names.issuperset(command.names):
                obscured_commands.append(command)
            else:
                seen_names.update(command.names)
        return list(reversed(obscured_commands))
@@ -1,46 +0,0 @@
from __future__ import annotations

import uuid
from pathlib import Path

from forge.file_storage.base import FileStorage

from autogpt.agents.agent import AgentSettings


class AgentManager:
    def __init__(self, file_storage: FileStorage):
        self.file_manager = file_storage.clone_with_subroot("agents")

    @staticmethod
    def generate_id(agent_name: str) -> str:
        """Generate a unique ID for an agent given agent name."""
        unique_id = str(uuid.uuid4())[:8]
        return f"{agent_name}-{unique_id}"

    def list_agents(self) -> list[str]:
        """Return all agent directories within storage."""
        agent_dirs: list[str] = []
        for file_path in self.file_manager.list_files():
            if len(file_path.parts) == 2 and file_path.name == "state.json":
                agent_dirs.append(file_path.parent.name)
        return agent_dirs

    def get_agent_dir(self, agent_id: str) -> Path:
        """Return the directory of the agent with the given ID."""
        assert len(agent_id) > 0
        agent_dir: Path | None = None
        if self.file_manager.exists(agent_id):
            agent_dir = self.file_manager.root / agent_id
        else:
            raise FileNotFoundError(f"No agent with ID '{agent_id}'")
        return agent_dir

    def load_agent_state(self, agent_id: str) -> AgentSettings:
        """Load the state of the agent with the given ID."""
        state_file_path = Path(agent_id) / "state.json"
        if not self.file_manager.exists(state_file_path):
            raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")

        state = self.file_manager.read_file(state_file_path)
        return AgentSettings.parse_raw(state)
@@ -1,281 +0,0 @@
from __future__ import annotations

import json
import platform
import re
from logging import Logger

import distro
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.json.parsing import extract_dict_from_json
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.prompting.utils import format_numbered_list
from forge.llm.providers.schema import (
    AssistantChatMessage,
    ChatMessage,
    CompletionModelFunction,
)
from forge.models.action import ActionProposal
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
from forge.models.utils import ModelWithSummary
from forge.utils.exceptions import InvalidAgentResponseError
from pydantic import Field

_RESPONSE_INTERFACE_NAME = "AssistantResponse"


class AssistantThoughts(ModelWithSummary):
    observations: str = Field(
        description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        return self.text


class OneShotAgentActionProposal(ActionProposal):
    thoughts: AssistantThoughts  # type: ignore


class OneShotAgentPromptConfiguration(SystemConfiguration):
    DEFAULT_BODY_TEMPLATE: str = (
        "## Constraints\n"
        "You operate within the following constraints:\n"
        "{constraints}\n"
        "\n"
        "## Resources\n"
        "You can leverage access to the following resources:\n"
        "{resources}\n"
        "\n"
        "## Commands\n"
        "These are the ONLY commands you can use."
        " Any action you perform must be possible through one of these commands:\n"
        "{commands}\n"
        "\n"
        "## Best practices\n"
        "{best_practices}"
    )

    DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = (
        "Determine exactly one command to use next based on the given goals "
        "and the progress you have made so far, "
        "and respond using the JSON schema specified previously:"
    )

    body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE)
    choose_action_instruction: str = UserConfigurable(
        default=DEFAULT_CHOOSE_ACTION_INSTRUCTION
    )
    use_functions_api: bool = UserConfigurable(default=False)

    #########
    # State #
    #########
    # progress_summaries: dict[tuple[int, int], str] = Field(
    #     default_factory=lambda: {(0, 0): ""}
    # )


class OneShotAgentPromptStrategy(PromptStrategy):
    default_configuration: OneShotAgentPromptConfiguration = (
        OneShotAgentPromptConfiguration()
    )

    def __init__(
        self,
        configuration: OneShotAgentPromptConfiguration,
        logger: Logger,
    ):
        self.config = configuration
        self.response_schema = JSONSchema.from_dict(
            OneShotAgentActionProposal.model_json_schema()
        )
        self.logger = logger

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
        self,
        *,
        messages: list[ChatMessage],
        task: str,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
        **extras,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history
        3. `cycle_instruction`
        """
        system_prompt, response_prefill = self.build_system_prompt(
            ai_profile=ai_profile,
            ai_directives=ai_directives,
            commands=commands,
            include_os_info=include_os_info,
        )

        final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction)

        return ChatPrompt(
            messages=[
                ChatMessage.system(system_prompt),
                ChatMessage.user(f'"""{task}"""'),
                *messages,
                final_instruction_msg,
            ],
            prefill_response=response_prefill,
            functions=commands if self.config.use_functions_api else [],
        )

    def build_system_prompt(
        self,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
    ) -> tuple[str, str]:
        """
        Builds the system prompt.

        Returns:
            str: The system prompt body
            str: The desired start for the LLM's response; used to steer the output
        """
        response_fmt_instruction, response_prefill = self.response_format_instruction(
            self.config.use_functions_api
        )
        system_prompt_parts = (
            self._generate_intro_prompt(ai_profile)
            + (self._generate_os_info() if include_os_info else [])
            + [
                self.config.body_template.format(
                    constraints=format_numbered_list(ai_directives.constraints),
                    resources=format_numbered_list(ai_directives.resources),
                    commands=self._generate_commands_list(commands),
                    best_practices=format_numbered_list(ai_directives.best_practices),
                )
            ]
            + [
                "## Your Task\n"
                "The user will specify a task for you to execute, in triple quotes,"
                " in the next message. Your job is to complete the task while following"
                " your directives as given above, and terminate when your task is done."
            ]
            + ["## RESPONSE FORMAT\n" + response_fmt_instruction]
        )

        # Join non-empty parts together into paragraph format
        return (
            "\n\n".join(filter(None, system_prompt_parts)).strip("\n"),
            response_prefill,
        )

    def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
        response_schema = self.response_schema.model_copy(deep=True)
        assert response_schema.properties
        if use_functions_api and "use_tool" in response_schema.properties:
            del response_schema.properties["use_tool"]

        # Unindent for performance
        response_format = re.sub(
            r"\n\s+",
            "\n",
            response_schema.to_typescript_object_interface(_RESPONSE_INTERFACE_NAME),
        )
        response_prefill = f'{{\n    "{list(response_schema.properties.keys())[0]}":'

        return (
            (
                f"YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
                f"{response_format}"
                + ("\n\nYOU MUST ALSO INVOKE A TOOL!" if use_functions_api else "")
            ),
            response_prefill,
        )

    def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

    def _generate_os_info(self) -> list[str]:
        """Generates the OS information part of the prompt.

        Params:
            config (Config): The configuration object.

        Returns:
            str: The OS information part of the prompt.
        """
        os_name = platform.system()
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )
        return [f"The OS you are running on is: {os_info}"]

    def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
        """Lists the commands available to the agent.

        Params:
            agent: The agent for which the commands are being listed.

        Returns:
            str: A string containing a numbered list of commands.
        """
        try:
            return format_numbered_list([cmd.fmt_line() for cmd in commands])
        except AttributeError:
            self.logger.warning(f"Formatting commands failed. {commands}")
            raise

    def parse_response_content(
        self,
        response: AssistantChatMessage,
    ) -> OneShotAgentActionProposal:
        if not response.content:
            raise InvalidAgentResponseError("Assistant response has no text content")

        self.logger.debug(
            "LLM response content:"
            + (
                f"\n{response.content}"
                if "\n" in response.content
                else f" '{response.content}'"
            )
        )
        assistant_reply_dict = extract_dict_from_json(response.content)
        self.logger.debug(
            "Parsing object extracted from LLM response:\n"
            f"{json.dumps(assistant_reply_dict, indent=4)}"
        )
        if self.config.use_functions_api:
            if not response.tool_calls:
                raise InvalidAgentResponseError("Assistant did not use a tool")
            assistant_reply_dict["use_tool"] = response.tool_calls[0].function

        parsed_response = OneShotAgentActionProposal.model_validate(
            assistant_reply_dict
        )
        parsed_response.raw_message = response.copy()
        return parsed_response
@@ -1,6 +0,0 @@
from dotenv import load_dotenv

# Load the user's .env file into environment variables
load_dotenv(verbose=True, override=True)

del load_dotenv
@@ -1,479 +0,0 @@
import logging
import os
import pathlib
from collections import defaultdict
from io import BytesIO
from uuid import uuid4

import orjson
from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from forge.agent_protocol.api_router import base_router
from forge.agent_protocol.database import AgentDB
from forge.agent_protocol.middlewares import AgentMiddleware
from forge.agent_protocol.models import (
    Artifact,
    Step,
    StepRequestBody,
    Task,
    TaskArtifactsListResponse,
    TaskListResponse,
    TaskRequestBody,
    TaskStepsListResponse,
)
from forge.file_storage import FileStorage
from forge.llm.providers import ModelProviderBudget, MultiProvider
from forge.models.action import ActionErrorResult, ActionSuccessResult
from forge.utils.const import ASK_COMMAND, FINISH_COMMAND
from forge.utils.exceptions import AgentFinished, NotFoundError
from hypercorn.asyncio import serve as hypercorn_serve
from hypercorn.config import Config as HypercornConfig
from sentry_sdk import set_user

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.config import AppConfig
from autogpt.app.utils import is_port_free

logger = logging.getLogger(__name__)


class AgentProtocolServer:
    _task_budgets: dict[str, ModelProviderBudget]

    def __init__(
        self,
        app_config: AppConfig,
        database: AgentDB,
        file_storage: FileStorage,
        llm_provider: MultiProvider,
    ):
        self.app_config = app_config
        self.db = database
        self.file_storage = file_storage
        self.llm_provider = llm_provider
        self.agent_manager = AgentManager(file_storage)
        self._task_budgets = defaultdict(ModelProviderBudget)

    async def start(self, port: int = 8000, router: APIRouter = base_router):
        """Start the agent server."""
        logger.debug("Starting the agent server...")
        if not is_port_free(port):
            logger.error(f"Port {port} is already in use.")
            logger.info(
                "You can specify a port by either setting the AP_SERVER_PORT "
                "environment variable or defining AP_SERVER_PORT in the .env file."
            )
            return

        config = HypercornConfig()
        config.bind = [f"localhost:{port}"]
        app = FastAPI(
            title="AutoGPT Server",
            description="Forked from AutoGPT Forge; "
            "Modified version of The Agent Protocol.",
            version="v0.4",
        )

        # Configure CORS middleware
        default_origins = [f"http://localhost:{port}"]  # Default only local access
        configured_origins = [
            origin
            for origin in os.getenv("AP_SERVER_CORS_ALLOWED_ORIGINS", "").split(",")
            if origin  # Empty list if not configured
        ]
        origins = configured_origins or default_origins

        app.add_middleware(
            CORSMiddleware,
            allow_origins=origins,
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        app.include_router(router, prefix="/ap/v1")
        script_dir = os.path.dirname(os.path.realpath(__file__))
        frontend_path = (
            pathlib.Path(script_dir).joinpath("../../../frontend/build/web").resolve()
        )

        if os.path.exists(frontend_path):
            app.mount("/app", StaticFiles(directory=frontend_path), name="app")

            @app.get("/", include_in_schema=False)
            async def root():
                return RedirectResponse(url="/app/index.html", status_code=307)

        else:
            logger.warning(
                f"Frontend not found. {frontend_path} does not exist. "
                "The frontend will not be available."
            )

        # Used to access the methods on this class from API route handlers
        app.add_middleware(AgentMiddleware, agent=self)

        config.loglevel = "ERROR"
        config.bind = [f"0.0.0.0:{port}"]

        logger.info(f"AutoGPT server starting on http://localhost:{port}")
        await hypercorn_serve(app, config)  # type: ignore

    async def create_task(self, task_request: TaskRequestBody) -> Task:
        """
        Create a task for the agent.
        """
        if user_id := (task_request.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        task = await self.db.create_task(
            input=task_request.input,
            additional_input=task_request.additional_input,
        )
        # TODO: re-evaluate performance benefit of task-oriented profiles
        # logger.debug(f"Creating agent for task: '{task.input}'")
        # task_agent = await generate_agent_for_task(
        task_agent = create_agent(
            agent_id=task_agent_id(task.task_id),
            task=task.input,
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )
        await task_agent.file_manager.save_state()

        return task

    async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
        """
        List all tasks that the agent has created.
        """
        logger.debug("Listing all tasks...")
        tasks, pagination = await self.db.list_tasks(page, pageSize)
        response = TaskListResponse(tasks=tasks, pagination=pagination)
        return response

    async def get_task(self, task_id: str) -> Task:
        """
        Get a task by ID.
        """
        logger.debug(f"Getting task with ID: {task_id}...")
        task = await self.db.get_task(task_id)
        return task

    async def list_steps(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskStepsListResponse:
        """
        List the IDs of all steps that the task has created.
        """
        logger.debug(f"Listing all steps created by task with ID: {task_id}...")
        steps, pagination = await self.db.list_steps(task_id, page, pageSize)
        response = TaskStepsListResponse(steps=steps, pagination=pagination)
        return response

    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
        """Create a step for the task."""
        logger.debug(f"Creating a step for task with ID: {task_id}...")

        # Restore Agent instance
        task = await self.get_task(task_id)
        agent = configure_agent_with_state(
            state=self.agent_manager.load_agent_state(task_agent_id(task_id)),
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )

        if user_id := (task.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        # According to the Agent Protocol spec, the first execute_step request contains
        # the same task input as the parent create_task request.
        # To prevent this from interfering with the agent's process, we ignore the input
        # of this first step request, and just generate the first step proposal.
        is_init_step = not bool(agent.event_history)
        last_proposal, tool_result = None, None
        execute_approved = False

        # HACK: only for compatibility with AGBenchmark
        if step_request.input == "y":
            step_request.input = ""

        user_input = step_request.input if not is_init_step else ""

        if (
            not is_init_step
            and agent.event_history.current_episode
            and not agent.event_history.current_episode.result
        ):
            last_proposal = agent.event_history.current_episode.action
            execute_approved = not user_input

            logger.debug(
                f"Agent proposed command {last_proposal.use_tool}."
                f" User input/feedback: {repr(user_input)}"
            )

        # Save step request
        step = await self.db.create_step(
            task_id=task_id,
            input=step_request,
            is_last=(
                last_proposal is not None
                and last_proposal.use_tool.name == FINISH_COMMAND
                and execute_approved
            ),
        )
        agent.llm_provider = self._get_task_llm_provider(task, step.step_id)

        # Execute previously proposed action
        if last_proposal:
            agent.file_manager.workspace.on_write_file = (
                lambda path: self._on_agent_write_file(
                    task=task, step=step, relative_path=path
                )
            )

            if last_proposal.use_tool.name == ASK_COMMAND:
                tool_result = ActionSuccessResult(outputs=user_input)
                agent.event_history.register_result(tool_result)
            elif execute_approved:
                step = await self.db.update_step(
                    task_id=task_id,
                    step_id=step.step_id,
                    status="running",
                )

                try:
                    # Execute previously proposed action
                    tool_result = await agent.execute(last_proposal)
                except AgentFinished:
                    additional_output = {}
                    task_total_cost = agent.llm_provider.get_incurred_cost()
                    if task_total_cost > 0:
                        additional_output["task_total_cost"] = task_total_cost
                        logger.info(
                            f"Total LLM cost for task {task_id}: "
                            f"${round(task_total_cost, 2)}"
                        )

                    step = await self.db.update_step(
                        task_id=task_id,
                        step_id=step.step_id,
                        output=last_proposal.use_tool.arguments["reason"],
                        additional_output=additional_output,
                    )
                    await agent.file_manager.save_state()
                    return step
            else:
                assert user_input
                tool_result = await agent.do_not_execute(last_proposal, user_input)

        # Propose next action
        try:
            assistant_response = await agent.propose_action()
            next_tool_to_use = assistant_response.use_tool
            logger.debug(f"AI output: {assistant_response.thoughts}")
        except Exception as e:
            step = await self.db.update_step(
                task_id=task_id,
                step_id=step.step_id,
                status="completed",
                output=f"An error occurred while proposing the next action: {e}",
            )
            return step

        # Format step output
        output = (
            (
                f"`{last_proposal.use_tool}` returned:"
                + ("\n\n" if "\n" in str(tool_result) else " ")
                + f"{tool_result}\n\n"
            )
            if last_proposal and last_proposal.use_tool.name != ASK_COMMAND
            else ""
        )
        output += f"{assistant_response.thoughts.speak}\n\n"
        output += (
            f"Next Command: {next_tool_to_use}"
            if next_tool_to_use.name != ASK_COMMAND
            else next_tool_to_use.arguments["question"]
        )

        additional_output = {
            **(
                {
                    "last_action": {
                        "name": last_proposal.use_tool.name,
                        "args": last_proposal.use_tool.arguments,
                        "result": (
                            ""
                            if tool_result is None
                            else (
                                orjson.loads(tool_result.model_dump_json())
                                if not isinstance(tool_result, ActionErrorResult)
                                else {
                                    "error": str(tool_result.error),
                                    "reason": tool_result.reason,
                                }
                            )
                        ),
                    },
                }
                if last_proposal and tool_result
                else {}
            ),
            **assistant_response.model_dump(),
        }

        task_cumulative_cost = agent.llm_provider.get_incurred_cost()
        if task_cumulative_cost > 0:
            additional_output["task_cumulative_cost"] = task_cumulative_cost
            logger.debug(
                f"Running total LLM cost for task {task_id}: "
                f"${round(task_cumulative_cost, 3)}"
            )

        step = await self.db.update_step(
            task_id=task_id,
            step_id=step.step_id,
            status="completed",
            output=output,
            additional_output=additional_output,
        )

        await agent.file_manager.save_state()
        return step

    async def _on_agent_write_file(
        self, task: Task, step: Step, relative_path: pathlib.Path
    ) -> None:
        """
        Creates an Artifact for the written file, or updates the Artifact if it exists.
        """
        if relative_path.is_absolute():
            raise ValueError(f"File path '{relative_path}' is not relative")
        for a in task.artifacts or []:
            if a.relative_path == str(relative_path):
                logger.debug(f"Updating Artifact after writing to existing file: {a}")
                if not a.agent_created:
                    await self.db.update_artifact(a.artifact_id, agent_created=True)
                break
        else:
            logger.debug(f"Creating Artifact for new file '{relative_path}'")
            await self.db.create_artifact(
                task_id=step.task_id,
                step_id=step.step_id,
                file_name=relative_path.parts[-1],
                agent_created=True,
                relative_path=str(relative_path),
            )

    async def get_step(self, task_id: str, step_id: str) -> Step:
        """
        Get a step by ID.
        """
        step = await self.db.get_step(task_id, step_id)
        return step

    async def list_artifacts(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskArtifactsListResponse:
        """
        List the artifacts that the task has created.
        """
        artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
        return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)

    async def create_artifact(
        self, task_id: str, file: UploadFile, relative_path: str
    ) -> Artifact:
        """
        Create an artifact for the task.
        """
        file_name = file.filename or str(uuid4())
        data = b""
        while contents := file.file.read(1024 * 1024):
            data += contents
        # Check if relative path ends with filename
        if relative_path.endswith(file_name):
            file_path = relative_path
        else:
            file_path = os.path.join(relative_path, file_name)

        workspace = self._get_task_agent_file_workspace(task_id)
        await workspace.write_file(file_path, data)

        artifact = await self.db.create_artifact(
            task_id=task_id,
            file_name=file_name,
            relative_path=relative_path,
            agent_created=False,
        )
        return artifact

    async def get_artifact(self, task_id: str, artifact_id: str) -> StreamingResponse:
        """
        Download a task artifact by ID.
        """
        try:
            workspace = self._get_task_agent_file_workspace(task_id)
            artifact = await self.db.get_artifact(artifact_id)
            if artifact.file_name not in artifact.relative_path:
                file_path = os.path.join(artifact.relative_path, artifact.file_name)
|
||||
else:
|
||||
file_path = artifact.relative_path
|
||||
retrieved_artifact = workspace.read_file(file_path, binary=True)
|
||||
except NotFoundError:
|
||||
raise
|
||||
except FileNotFoundError:
|
||||
raise
|
||||
|
||||
return StreamingResponse(
|
||||
BytesIO(retrieved_artifact),
|
||||
media_type="application/octet-stream",
|
||||
headers={
|
||||
"Content-Disposition": f'attachment; filename="{artifact.file_name}"'
|
||||
},
|
||||
)
|
||||
|
||||
def _get_task_agent_file_workspace(self, task_id: str | int) -> FileStorage:
|
||||
agent_id = task_agent_id(task_id)
|
||||
return self.file_storage.clone_with_subroot(f"agents/{agent_id}/workspace")
|
||||
|
||||
def _get_task_llm_provider(self, task: Task, step_id: str = "") -> MultiProvider:
|
||||
"""
|
||||
Configures the LLM provider with headers to link outgoing requests to the task.
|
||||
"""
|
||||
task_llm_budget = self._task_budgets[task.task_id]
|
||||
|
||||
task_llm_provider_config = self.llm_provider._configuration.model_copy(
|
||||
deep=True
|
||||
)
|
||||
_extra_request_headers = task_llm_provider_config.extra_request_headers
|
||||
_extra_request_headers["AP-TaskID"] = task.task_id
|
||||
if step_id:
|
||||
_extra_request_headers["AP-StepID"] = step_id
|
||||
if task.additional_input and (user_id := task.additional_input.get("user_id")):
|
||||
_extra_request_headers["AutoGPT-UserID"] = user_id
|
||||
|
||||
settings = self.llm_provider._settings.model_copy()
|
||||
settings.budget = task_llm_budget
|
||||
settings.configuration = task_llm_provider_config
|
||||
task_llm_provider = self.llm_provider.__class__(
|
||||
settings=settings,
|
||||
logger=logger.getChild(
|
||||
f"Task-{task.task_id}_{self.llm_provider.__class__.__name__}"
|
||||
),
|
||||
)
|
||||
self._task_budgets[task.task_id] = task_llm_provider._budget # type: ignore
|
||||
|
||||
return task_llm_provider
|
||||
|
||||
|
||||
def task_agent_id(task_id: str | int) -> str:
|
||||
return f"AutoGPT-{task_id}"
|
||||
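# Illustrative client sketch for the step loop above: a minimal driver using the
# standard Agent Protocol v1 routes, assuming the `serve` command is listening on
# localhost:8000 (the AP_SERVER_PORT default). The task input is a made-up example.
import requests

BASE = "http://localhost:8000/ap/v1/agent"

task = requests.post(f"{BASE}/tasks", json={"input": "Write a haiku to haiku.txt"}).json()

# An empty step input authorises the previously proposed command (execute_approved
# above); any other text is passed through as user feedback.
step = requests.post(f"{BASE}/tasks/{task['task_id']}/steps", json={"input": ""}).json()
print(step["output"], step["is_last"])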
@@ -1,216 +0,0 @@
"""Main script for the autogpt package."""
from logging import _nameToLevel as logLevelMap
from pathlib import Path
from typing import Optional

import click
from forge.logging.config import LogFormatName

from .telemetry import setup_telemetry


@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx: click.Context):
    setup_telemetry()

    # Invoke `run` by default
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)


@cli.command()
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--constraint",
    type=str,
    multiple=True,
    help=(
        "Add or override AI constraints to include in the prompt;"
        " may be used multiple times to pass multiple constraints"
    ),
)
@click.option(
    "--resource",
    type=str,
    multiple=True,
    help=(
        "Add or override AI resources to include in the prompt;"
        " may be used multiple times to pass multiple resources"
    ),
)
@click.option(
    "--best-practice",
    type=str,
    multiple=True,
    help=(
        "Add or override AI best practices to include in the prompt;"
        " may be used multiple times to pass multiple best practices"
    ),
)
@click.option(
    "--override-directives",
    is_flag=True,
    help=(
        "If specified, --constraint, --resource and --best-practice will override"
        " the AI's directives instead of being appended to them"
    ),
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--component-config-file",
    help="Path to a json configuration file",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),
)
def run(
    continuous: bool,
    continuous_limit: Optional[int],
    speak: bool,
    install_plugin_deps: bool,
    skip_news: bool,
    skip_reprompt: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    resource: tuple[str],
    constraint: tuple[str],
    best_practice: tuple[str],
    override_directives: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
    component_config_file: Optional[Path],
) -> None:
    """
    Sets up and runs an agent, based on the task specified by the user, or resumes an
    existing agent.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt

    run_auto_gpt(
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        speak=speak,
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        skip_news=skip_news,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
        resources=list(resource),
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
        component_config_file=component_config_file,
    )


@cli.command()
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
    install_plugin_deps: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
) -> None:
    """
    Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for
    every task.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt_server

    run_auto_gpt_server(
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        install_plugin_deps=install_plugin_deps,
    )


if __name__ == "__main__":
    cli()
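# A quick, non-interactive way to exercise the command group above is click's
# built-in test runner; a minimal sketch, assuming the package is installed so
# that `autogpt.app.cli` is importable.
from click.testing import CliRunner

from autogpt.app.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["run", "--help"])  # parses options without starting an agent
assert result.exit_code == 0
print(result.output)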
@@ -1,221 +0,0 @@
"""Configuration class to store the state of bools for different scripts access."""
from __future__ import annotations

import logging
import os
import re
from pathlib import Path
from typing import Optional, Union

import forge
from forge.config.base import BaseConfig
from forge.llm.providers import CHAT_MODELS, ModelName
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
from forge.logging.config import LoggingConfig
from forge.models.config import Configurable, UserConfigurable
from pydantic import SecretStr, ValidationInfo, field_validator

logger = logging.getLogger(__name__)

PROJECT_ROOT = Path(forge.__file__).parent.parent
AZURE_CONFIG_FILE = Path("azure.yaml")

GPT_4_MODEL = OpenAIModelName.GPT4
GPT_3_MODEL = OpenAIModelName.GPT3


class AppConfig(BaseConfig):
    name: str = "Auto-GPT configuration"
    description: str = "Default configuration for the Auto-GPT application."

    ########################
    # Application Settings #
    ########################
    project_root: Path = PROJECT_ROOT
    app_data_dir: Path = project_root / "data"
    skip_news: bool = False
    skip_reprompt: bool = False
    authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
    exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
    noninteractive_mode: bool = False
    logging: LoggingConfig = LoggingConfig()
    component_config_file: Optional[Path] = UserConfigurable(
        default=None, from_env="COMPONENT_CONFIG_FILE"
    )

    ##########################
    # Agent Control Settings #
    ##########################
    # Model configuration
    fast_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT3,
        from_env="FAST_LLM",
    )
    smart_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT4_TURBO,
        from_env="SMART_LLM",
    )
    temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE")
    openai_functions: bool = UserConfigurable(
        default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
    )
    embedding_model: str = UserConfigurable(
        default="text-embedding-3-small", from_env="EMBEDDING_MODEL"
    )

    # Run loop configuration
    continuous_mode: bool = False
    continuous_limit: int = 0

    ############
    # Commands #
    ############
    # General
    disabled_commands: list[str] = UserConfigurable(
        default_factory=list,
        from_env=lambda: _safe_split(os.getenv("DISABLED_COMMANDS")),
    )

    # File ops
    restrict_to_workspace: bool = UserConfigurable(
        default=True,
        from_env=lambda: os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True",
    )

    ###############
    # Credentials #
    ###############
    # OpenAI
    openai_credentials: Optional[OpenAICredentials] = None
    azure_config_file: Optional[Path] = UserConfigurable(
        default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
    )

    @field_validator("openai_functions")
    def validate_openai_functions(cls, value: bool, info: ValidationInfo):
        if value:
            smart_llm = info.data["smart_llm"]
            assert CHAT_MODELS[smart_llm].has_function_call_api, (
                f"Model {smart_llm} does not support tool calling. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )
        return value


class ConfigBuilder(Configurable[AppConfig]):
    default_settings = AppConfig()

    @classmethod
    def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> AppConfig:
        """Initialize the Config class"""

        config = cls.build_agent_configuration()
        config.project_root = project_root

        # Make relative paths absolute
        for k in {
            "azure_config_file",  # TODO: move from project root
        }:
            setattr(config, k, project_root / getattr(config, k))

        if (
            config.openai_credentials
            and config.openai_credentials.api_type == SecretStr("azure")
            and (config_file := config.azure_config_file)
        ):
            config.openai_credentials.load_azure_config(config_file)

        return config


async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
    """
    Check if API keys (if required) are set for the configured SMART_LLM and FAST_LLM.
    """
    from forge.llm.providers.anthropic import AnthropicModelName
    from forge.llm.providers.groq import GroqModelName
    from pydantic import ValidationError

    if set((config.smart_llm, config.fast_llm)).intersection(AnthropicModelName):
        from forge.llm.providers.anthropic import AnthropicCredentials

        try:
            credentials = AnthropicCredentials.from_env()
        except ValidationError as e:
            if "api_key" in str(e):
                logger.error(
                    "Set your Anthropic API key in .env or as an environment variable"
                )
                logger.info(
                    "For further instructions: "
                    "https://docs.agpt.co/autogpt/setup/#anthropic"
                )

            raise ValueError("Anthropic is unavailable: can't load credentials") from e

        key_pattern = r"^sk-ant-api03-[\w\-]{95}"

        # If key is set, but it looks invalid
        if not re.search(key_pattern, credentials.api_key.get_secret_value()):
            logger.warning(
                "Possibly invalid Anthropic API key! "
                f"Configured Anthropic API key does not match pattern '{key_pattern}'. "
                "If this is a valid key, please report this warning to the maintainers."
            )

    if set((config.smart_llm, config.fast_llm)).intersection(GroqModelName):
        from forge.llm.providers.groq import GroqProvider
        from groq import AuthenticationError

        try:
            groq = GroqProvider()
            await groq.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error("Set your Groq API key in .env or as an environment variable")
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The Groq API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: invalid API key") from e

    if set((config.smart_llm, config.fast_llm)).intersection(OpenAIModelName):
        from forge.llm.providers.openai import OpenAIProvider
        from openai import AuthenticationError

        try:
            openai = OpenAIProvider()
            await openai.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error(
                "Set your OpenAI API key in .env or as an environment variable"
            )
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The OpenAI API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: invalid API key") from e


def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
    """Split a string by a separator. Return an empty list if the string is None."""
    if s is None:
        return []
    return s.split(sep)
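# Illustrative sketch of the env-driven configuration above: UserConfigurable
# fields pick their values up from the named environment variables. The model
# and command names below are example values, not recommendations.
import os

from autogpt.app.config import ConfigBuilder

os.environ["SMART_LLM"] = "gpt-4-turbo"  # read via UserConfigurable(from_env="SMART_LLM")
os.environ["DISABLED_COMMANDS"] = "execute_shell,execute_python"  # split by _safe_split

config = ConfigBuilder.build_config_from_env()
print(config.smart_llm, config.disabled_commands, config.restrict_to_workspace)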
@@ -1,83 +0,0 @@
"""Configurator module."""
from __future__ import annotations

import logging
from typing import Literal, Optional

import click
from forge.llm.providers import ModelName, MultiProvider

from autogpt.app.config import GPT_3_MODEL, AppConfig

logger = logging.getLogger(__name__)


async def apply_overrides_to_config(
    config: AppConfig,
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    skip_news: bool = False,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        config (AppConfig): The config object to update.
        continuous (bool): Whether to run in continuous mode.
        continuous_limit (int): The number of times to run in continuous mode.
        skip_reprompt (bool): Whether to skip the re-prompting messages on start.
        skip_news (bool): Whether to suppress the output of latest news on startup.
    """
    config.continuous_mode = False

    if continuous:
        logger.warning(
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True

        if continuous_limit:
            config.continuous_limit = continuous_limit

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    # Check availability of configured LLMs; fall back to another LLM if unavailable
    config.fast_llm, config.smart_llm = await check_models(
        (config.fast_llm, "fast_llm"), (config.smart_llm, "smart_llm")
    )

    if skip_reprompt:
        config.skip_reprompt = True

    if skip_news:
        config.skip_news = True


async def check_models(
    *models: tuple[ModelName, Literal["smart_llm", "fast_llm"]]
) -> tuple[ModelName, ...]:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    multi_provider = MultiProvider()
    available_models = await multi_provider.get_available_chat_models()

    checked_models: list[ModelName] = []
    for model, model_type in models:
        if any(model == m.name for m in available_models):
            checked_models.append(model)
        else:
            logger.warning(
                f"You don't have access to {model}. "
                f"Setting {model_type} to {GPT_3_MODEL}."
            )
            checked_models.append(GPT_3_MODEL)

    return tuple(checked_models)
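# Minimal usage sketch for the override helper above, equivalent to
# `autogpt run --continuous -l 5`. Assumes valid LLM credentials in the
# environment, since check_models() queries the providers.
import asyncio

from autogpt.app.config import ConfigBuilder
from autogpt.app.configurator import apply_overrides_to_config


async def main() -> None:
    config = ConfigBuilder.build_config_from_env()
    await apply_overrides_to_config(config, continuous=True, continuous_limit=5)
    print(config.continuous_mode, config.continuous_limit, config.smart_llm)


asyncio.run(main())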
@@ -1,19 +0,0 @@
import logging

import click

logger = logging.getLogger(__name__)


def clean_input(prompt: str = ""):
    try:
        # Ask for input; just pressing Enter returns the empty-string default
        logger.debug("Asking user via keyboard...")

        return click.prompt(
            text=prompt, prompt_suffix=" ", default="", show_default=False
        )
    except KeyboardInterrupt:
        logger.info("You interrupted AutoGPT")
        logger.info("Quitting...")
        exit(0)
@@ -1,774 +0,0 @@
"""
The application entry point. Can be invoked by a CLI or any other front end application.
"""

import enum
import logging
import math
import os
import re
import signal
import sys
from pathlib import Path
from types import FrameType
from typing import TYPE_CHECKING, Optional

from colorama import Fore, Style
from forge.agent_protocol.database import AgentDB
from forge.components.code_executor.code_executor import (
    is_docker_available,
    we_are_running_in_a_docker_container,
)
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.file_storage import FileStorageBackendName, get_storage
from forge.llm.providers import MultiProvider
from forge.logging.config import configure_logging
from forge.logging.utils import print_attribute, speak
from forge.models.action import ActionInterruptedByHuman, ActionProposal
from forge.models.utils import ModelWithSummary
from forge.utils.const import FINISH_COMMAND
from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
from autogpt.app.config import (
    AppConfig,
    ConfigBuilder,
    assert_config_has_required_llm_api_keys,
)

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent

from .configurator import apply_overrides_to_config
from .input import clean_input
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
from .spinner import Spinner
from .utils import (
    coroutine,
    get_legal_warning,
    markdown_to_ansi_style,
    print_git_branch_info,
    print_motd,
    print_python_version_info,
)


@coroutine
async def run_auto_gpt(
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    speak: bool = False,
    debug: bool = False,
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    skip_news: bool = False,
    install_plugin_deps: bool = False,
    override_ai_name: Optional[str] = None,
    override_ai_role: Optional[str] = None,
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
    override_directives: bool = False,
    component_config_file: Optional[Path] = None,
):
    # Set up configuration
    config = ConfigBuilder.build_config_from_env()
    # Storage
    local = config.file_storage_backend == FileStorageBackendName.LOCAL
    restrict_to_root = not local or config.restrict_to_workspace
    file_storage = get_storage(
        config.file_storage_backend,
        root_path=Path("data"),
        restrict_to_root=restrict_to_root,
    )
    file_storage.initialize()

    # Set up logging module
    if speak:
        config.tts_config.speak_mode = True
    configure_logging(
        debug=debug,
        level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        config=config.logging,
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)

    await apply_overrides_to_config(
        config=config,
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        skip_news=skip_news,
    )

    llm_provider = _configure_llm_provider(config)

    logger = logging.getLogger(__name__)

    if config.continuous_mode:
        for line in get_legal_warning().split("\n"):
            logger.warning(
                extra={
                    "title": "LEGAL:",
                    "title_color": Fore.RED,
                    "preserve_color": True,
                },
                msg=markdown_to_ansi_style(line),
            )

    if not config.skip_news:
        print_motd(logger)
        print_git_branch_info(logger)
        print_python_version_info(logger)
        print_attribute("Smart LLM", config.smart_llm)
        print_attribute("Fast LLM", config.fast_llm)
        if config.continuous_mode:
            print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
            if continuous_limit:
                print_attribute("Continuous Limit", config.continuous_limit)
        if config.tts_config.speak_mode:
            print_attribute("Speak Mode", "ENABLED")
        if we_are_running_in_a_docker_container() or is_docker_available():
            print_attribute("Code Execution", "ENABLED")
        else:
            print_attribute(
                "Code Execution",
                "DISABLED (Docker unavailable)",
                title_color=Fore.YELLOW,
            )

    # Let user choose an existing agent to run
    agent_manager = AgentManager(file_storage)
    existing_agents = agent_manager.list_agents()
    load_existing_agent = ""
    if existing_agents:
        print(
            "Existing agents\n---------------\n"
            + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1))
        )
        load_existing_agent = clean_input(
            "Enter the number or name of the agent to run,"
            " or hit enter to create a new one:",
        )
        if re.match(r"^\d+$", load_existing_agent.strip()) and 0 < int(
            load_existing_agent
        ) <= len(existing_agents):
            load_existing_agent = existing_agents[int(load_existing_agent) - 1]

        if load_existing_agent != "" and load_existing_agent not in existing_agents:
            logger.info(
                f"Unknown agent '{load_existing_agent}', "
                f"creating a new one instead.",
                extra={"color": Fore.YELLOW},
            )
            load_existing_agent = ""

    # Either load existing or set up new agent state
    agent = None
    agent_state = None

    ############################
    # Resume an Existing Agent #
    ############################
    if load_existing_agent:
        agent_state = None
        while True:
            answer = clean_input("Resume? [Y/n]")
            if answer == "" or answer.lower() == "y":
                agent_state = agent_manager.load_agent_state(load_existing_agent)
                break
            elif answer.lower() == "n":
                break

    if agent_state:
        agent = configure_agent_with_state(
            state=agent_state,
            app_config=config,
            file_storage=file_storage,
            llm_provider=llm_provider,
        )
        apply_overrides_to_ai_settings(
            ai_profile=agent.state.ai_profile,
            directives=agent.state.directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        if (
            (current_episode := agent.event_history.current_episode)
            and current_episode.action.use_tool.name == FINISH_COMMAND
            and not current_episode.result
        ):
            # Agent was resumed after `finish` -> rewrite result of `finish` action
            finish_reason = current_episode.action.use_tool.arguments["reason"]
            print(f"Agent previously self-terminated; reason: '{finish_reason}'")
            new_assignment = clean_input(
                "Please give a follow-up question or assignment:"
            )
            agent.event_history.register_result(
                ActionInterruptedByHuman(feedback=new_assignment)
            )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            ai_profile, ai_directives = await interactively_revise_ai_settings(
                ai_profile=agent.state.ai_profile,
                directives=agent.state.directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

    ######################
    # Set up a new Agent #
    ######################
    if not agent:
        task = ""
        while task.strip() == "":
            task = clean_input(
                "Enter the task that you want AutoGPT to execute,"
                " with as much detail as possible:",
            )

        ai_profile = AIProfile()
        additional_ai_directives = AIDirectives()
        apply_overrides_to_ai_settings(
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            (
                ai_profile,
                additional_ai_directives,
            ) = await interactively_revise_ai_settings(
                ai_profile=ai_profile,
                directives=additional_ai_directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

        agent = create_agent(
            agent_id=agent_manager.generate_id(ai_profile.ai_name),
            task=task,
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            app_config=config,
            file_storage=file_storage,
            llm_provider=llm_provider,
        )

        file_manager = agent.file_manager

        if file_manager and not agent.config.allow_fs_access:
            logger.info(
                f"{Fore.YELLOW}"
                "NOTE: All files/directories created by this agent can be found "
                f"inside its workspace at:{Fore.RESET} {file_manager.workspace.root}",
                extra={"preserve_color": True},
            )

        # TODO: re-evaluate performance benefit of task-oriented profiles
        # # Concurrently generate a custom profile for the agent and apply it once done
        # def update_agent_directives(
        #     task: asyncio.Task[tuple[AIProfile, AIDirectives]]
        # ):
        #     logger.debug(f"Updating AIProfile: {task.result()[0]}")
        #     logger.debug(f"Adding AIDirectives: {task.result()[1]}")
        #     agent.state.ai_profile = task.result()[0]
        #     agent.state.directives = agent.state.directives + task.result()[1]

        # asyncio.create_task(
        #     generate_agent_profile_for_task(
        #         task, app_config=config, llm_provider=llm_provider
        #     )
        # ).add_done_callback(update_agent_directives)

    # Load component configuration from file
    if _config_file := component_config_file or config.component_config_file:
        try:
            logger.info(f"Loading component configuration from {_config_file}")
            agent.load_component_configs(_config_file.read_text())
        except Exception as e:
            logger.error(f"Could not load component configuration: {e}")

    #################
    # Run the Agent #
    #################
    try:
        await run_interaction_loop(agent)
    except AgentTerminated:
        agent_id = agent.state.agent_id
        logger.info(f"Saving state of {agent_id}...")

        # Allow user to Save As other ID
        save_as_id = clean_input(
            f"Press enter to save as '{agent_id}',"
            " or enter a different ID to save to:",
        )
        # TODO: allow many-to-one relations of agents and workspaces
        await agent.file_manager.save_state(
            save_as_id.strip() if not save_as_id.isspace() else None
        )


@coroutine
async def run_auto_gpt_server(
    debug: bool = False,
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    install_plugin_deps: bool = False,
):
    from .agent_protocol_server import AgentProtocolServer

    config = ConfigBuilder.build_config_from_env()
    # Storage
    local = config.file_storage_backend == FileStorageBackendName.LOCAL
    restrict_to_root = not local or config.restrict_to_workspace
    file_storage = get_storage(
        config.file_storage_backend,
        root_path=Path("data"),
        restrict_to_root=restrict_to_root,
    )
    file_storage.initialize()

    # Set up logging module
    configure_logging(
        debug=debug,
        level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        config=config.logging,
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)

    await apply_overrides_to_config(
        config=config,
    )

    llm_provider = _configure_llm_provider(config)

    # Set up & start server
    database = AgentDB(
        database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"),
        debug_enabled=debug,
    )
    port: int = int(os.getenv("AP_SERVER_PORT", default=8000))
    server = AgentProtocolServer(
        app_config=config,
        database=database,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )
    await server.start(port=port)

    logging.getLogger().info(
        f"Total OpenAI session cost: "
        f"${round(sum(b.total_cost for b in server._task_budgets.values()), 2)}"
    )


def _configure_llm_provider(config: AppConfig) -> MultiProvider:
    multi_provider = MultiProvider()
    for model in [config.smart_llm, config.fast_llm]:
        # Ensure model providers for configured LLMs are available
        multi_provider.get_model_provider(model)
    return multi_provider


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | float:
    # Translate the continuous_mode/continuous_limit config into a cycle_budget
    # (the maximum number of cycles to run without checking in with the user)
    # and a count of cycles_remaining before we check in.
    if continuous_mode:
        cycle_budget = continuous_limit if continuous_limit else math.inf
    else:
        cycle_budget = 1

    return cycle_budget
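# Worked cases for the translation above (illustrative):
#   continuous_mode=False           -> cycle_budget = 1 (check in after every command)
#   continuous_mode=True, limit=5   -> cycle_budget = 5
#   continuous_mode=True, limit=0   -> cycle_budget = math.inf (never check in)
assert _get_cycle_budget(False, 0) == 1
assert _get_cycle_budget(True, 5) == 5
assert _get_cycle_budget(True, 0) == math.inf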
class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    EXIT = "EXIT"
    TEXT = "TEXT"


async def run_interaction_loop(
    agent: "Agent",
) -> None:
    """Run the main interaction loop for the agent.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    app_config = agent.app_config
    ai_profile = agent.state.ai_profile
    logger = logging.getLogger(__name__)

    cycle_budget = cycles_remaining = _get_cycle_budget(
        app_config.continuous_mode, app_config.continuous_limit
    )
    spinner = Spinner(
        "Thinking...", plain_output=app_config.logging.plain_console_output
    )
    stop_reason = None

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        nonlocal cycle_budget, cycles_remaining, spinner, stop_reason
        if stop_reason:
            logger.error("Quitting immediately...")
            sys.exit()
        if cycles_remaining in [0, 1]:
            logger.warning("Interrupt signal received: shutting down gracefully.")
            logger.warning(
                "Press Ctrl+C again if you want to stop AutoGPT immediately."
            )
            stop_reason = AgentTerminated("Interrupt signal received")
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.error(
                "Interrupt signal received: stopping continuous command execution."
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    def handle_stop_signal() -> None:
        if stop_reason:
            raise stop_reason

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)

    #########################
    # Application Main Loop #
    #########################

    # Keep track of consecutive failures of the agent
    consecutive_failures = 0

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        handle_stop_signal()
        # Have the agent determine the next action to take.
        if not (_ep := agent.event_history.current_episode) or _ep.result:
            with spinner:
                try:
                    action_proposal = await agent.propose_action()
                except InvalidAgentResponseError as e:
                    logger.warning(f"The agent's thoughts could not be parsed: {e}")
                    consecutive_failures += 1
                    if consecutive_failures >= 3:
                        logger.error(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row. Terminating..."
                        )
                        raise AgentTerminated(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row."
                        )
                    continue
        else:
            action_proposal = _ep.action

        consecutive_failures = 0

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(
            ai_profile,
            action_proposal,
            speak_mode=app_config.tts_config.speak_mode,
        )

        ##################
        # Get user input #
        ##################
        handle_stop_signal()
        if cycles_remaining == 1:  # Last cycle
            feedback_type, feedback, new_cycles_remaining = await get_user_feedback(
                app_config,
                ai_profile,
            )

            if feedback_type == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.info(
                            f"The cycle budget is {cycle_budget}.",
                            extra={
                                "title": "RESUMING CONTINUOUS EXECUTION",
                                "title_color": Fore.MAGENTA,
                            },
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.info(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    extra={"color": Fore.MAGENTA},
                )
            elif feedback_type == UserFeedback.EXIT:
                logger.warning("Exiting...")
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                pass
        else:
            feedback = ""
            # First log new-line so user can differentiate sections better in console
            print()
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                print_attribute(
                    "AUTHORIZED_COMMANDS_LEFT", cycles_remaining, title_color=Fore.CYAN
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if not feedback:
            cycles_remaining -= 1

        if not action_proposal.use_tool:
            continue

        handle_stop_signal()

        if not feedback:
            result = await agent.execute(action_proposal)
        else:
            result = await agent.do_not_execute(action_proposal, feedback)

        if result.status == "success":
            logger.info(result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW})
        elif result.status == "error":
            logger.warning(
                f"Command {action_proposal.use_tool.name} returned an error: "
                f"{result.error or result.reason}"
            )


def update_user(
    ai_profile: AIProfile,
    action_proposal: "ActionProposal",
    speak_mode: bool = False,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        ai_profile: The AI's personality/profile.
        action_proposal: The proposed action, including the assistant's thoughts.
        speak_mode: Whether to speak the thoughts out loud.
    """
    logger = logging.getLogger(__name__)

    print_assistant_thoughts(
        ai_name=ai_profile.ai_name,
        thoughts=action_proposal.thoughts,
        speak_mode=speak_mode,
    )

    if speak_mode:
        speak(f"I want to execute {action_proposal.use_tool.name}")

    # First log new-line so user can differentiate sections better in console
    print()
    safe_tool_name = remove_ansi_escape(action_proposal.use_tool.name)
    logger.info(
        f"COMMAND = {Fore.CYAN}{safe_tool_name}{Style.RESET_ALL} "
        f"ARGUMENTS = {Fore.CYAN}{action_proposal.use_tool.arguments}{Style.RESET_ALL}",
        extra={
            "title": "NEXT ACTION:",
            "title_color": Fore.CYAN,
            "preserve_color": True,
        },
    )


async def get_user_feedback(
    config: AppConfig,
    ai_profile: AIProfile,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
        ai_profile: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    logger = logging.getLogger(__name__)

    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_profile.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        console_input = clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warning("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                user_feedback = UserFeedback.AUTHORIZE
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
            except ValueError:
                user_feedback = None  # reset so the loop re-prompts on a bad count
                logger.warning(
                    f"Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining
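# For reference, with the default authorise_key "y" and exit_key "n", the parser
# above maps console input to return values like so (illustrative trace):
#   "y"       -> (UserFeedback.AUTHORIZE, "", None)
#   "y -3"    -> (UserFeedback.AUTHORIZE, "", 3)       # three continuous cycles
#   "n"       -> (UserFeedback.EXIT, "", None)
#   "try X"   -> (UserFeedback.TEXT, "try X", None)    # free-text feedback for the agent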
def print_assistant_thoughts(
    ai_name: str,
    thoughts: str | ModelWithSummary | AssistantThoughts,
    speak_mode: bool = False,
) -> None:
    logger = logging.getLogger(__name__)

    thoughts_text = remove_ansi_escape(
        thoughts.text
        if isinstance(thoughts, AssistantThoughts)
        else thoughts.summary()
        if isinstance(thoughts, ModelWithSummary)
        else thoughts
    )
    print_attribute(
        f"{ai_name.upper()} THOUGHTS", thoughts_text, title_color=Fore.YELLOW
    )

    if isinstance(thoughts, AssistantThoughts):
        print_attribute(
            "REASONING", remove_ansi_escape(thoughts.reasoning), title_color=Fore.YELLOW
        )
        if assistant_thoughts_plan := remove_ansi_escape(
            "\n".join(f"- {p}" for p in thoughts.plan)
        ):
            print_attribute("PLAN", "", title_color=Fore.YELLOW)
            # If it's a list, join it into a string
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.info(
                    line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}
                )
        print_attribute(
            "CRITICISM",
            remove_ansi_escape(thoughts.self_criticism),
            title_color=Fore.YELLOW,
        )

        # Speak the assistant's thoughts
        if assistant_thoughts_speak := remove_ansi_escape(thoughts.speak):
            if speak_mode:
                speak(assistant_thoughts_speak)
            else:
                print_attribute(
                    "SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW
                )
    else:
        speak(thoughts_text)


def remove_ansi_escape(s: str) -> str:
    return s.replace("\x1B", "")
@@ -1,203 +0,0 @@
|
||||
"""Set up the AI and its goals"""
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.logging.utils import print_attribute
|
||||
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
from .input import clean_input
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def apply_overrides_to_ai_settings(
|
||||
ai_profile: AIProfile,
|
||||
directives: AIDirectives,
|
||||
override_name: Optional[str] = "",
|
||||
override_role: Optional[str] = "",
|
||||
replace_directives: bool = False,
|
||||
resources: Optional[list[str]] = None,
|
||||
constraints: Optional[list[str]] = None,
|
||||
best_practices: Optional[list[str]] = None,
|
||||
):
|
||||
if override_name:
|
||||
ai_profile.ai_name = override_name
|
||||
if override_role:
|
||||
ai_profile.ai_role = override_role
|
||||
|
||||
if replace_directives:
|
||||
if resources:
|
||||
directives.resources = resources
|
||||
if constraints:
|
||||
directives.constraints = constraints
|
||||
if best_practices:
|
||||
directives.best_practices = best_practices
|
||||
else:
|
||||
if resources:
|
||||
directives.resources += resources
|
||||
if constraints:
|
||||
directives.constraints += constraints
|
||||
if best_practices:
|
||||
directives.best_practices += best_practices
|
||||
|
||||
|
||||
async def interactively_revise_ai_settings(
|
||||
ai_profile: AIProfile,
|
||||
directives: AIDirectives,
|
||||
app_config: AppConfig,
|
||||
):
|
||||
"""Interactively revise the AI settings.
|
||||
|
||||
Args:
|
||||
ai_profile (AIConfig): The current AI profile.
|
||||
ai_directives (AIDirectives): The current AI directives.
|
||||
app_config (Config): The application configuration.
|
||||
|
||||
Returns:
|
||||
AIConfig: The revised AI settings.
|
||||
"""
|
||||
logger = logging.getLogger("revise_ai_profile")
|
||||
|
||||
revised = False
|
||||
|
||||
while True:
|
||||
# Print the current AI configuration
|
||||
print_ai_settings(
|
||||
title="Current AI Settings" if not revised else "Revised AI Settings",
|
||||
ai_profile=ai_profile,
|
||||
directives=directives,
|
||||
logger=logger,
|
||||
)
|
||||
|
||||
if (
|
||||
clean_input("Continue with these settings? [Y/n]").lower()
|
||||
or app_config.authorise_key
|
||||
) == app_config.authorise_key:
|
||||
break
|
||||
|
||||
# Ask for revised ai_profile
|
||||
ai_profile.ai_name = (
|
||||
clean_input("Enter AI name (or press enter to keep current):")
|
||||
or ai_profile.ai_name
|
||||
)
|
||||
ai_profile.ai_role = (
|
||||
clean_input("Enter new AI role (or press enter to keep current):")
|
||||
or ai_profile.ai_role
|
||||
)
|
||||
|
||||
# Revise constraints
|
||||
i = 0
|
||||
while i < len(directives.constraints):
|
||||
constraint = directives.constraints[i]
|
||||
print_attribute(f"Constraint {i+1}:", f'"{constraint}"')
|
||||
new_constraint = (
|
||||
clean_input(
|
||||
f"Enter new constraint {i+1}"
|
||||
" (press enter to keep current, or '-' to remove):",
|
||||
)
|
||||
or constraint
|
||||
)
|
||||
|
||||
if new_constraint == "-":
|
||||
directives.constraints.remove(constraint)
|
||||
continue
|
||||
elif new_constraint:
|
||||
directives.constraints[i] = new_constraint
|
||||
|
||||
i += 1
|
||||
|
||||
# Add new constraints
|
||||
while True:
|
||||
new_constraint = clean_input(
|
||||
"Press enter to finish, or enter a constraint to add:",
|
||||
)
|
||||
if not new_constraint:
|
||||
break
|
||||
directives.constraints.append(new_constraint)
|
||||
|
||||
# Revise resources
|
||||
i = 0
|
||||
while i < len(directives.resources):
|
||||
resource = directives.resources[i]
|
||||
print_attribute(f"Resource {i+1}:", f'"{resource}"')
|
||||
new_resource = (
|
||||
clean_input(
|
||||
f"Enter new resource {i+1}"
|
||||
" (press enter to keep current, or '-' to remove):",
|
||||
)
|
||||
or resource
|
||||
)
|
||||
if new_resource == "-":
|
||||
directives.resources.remove(resource)
|
||||
continue
|
||||
elif new_resource:
|
||||
directives.resources[i] = new_resource
|
||||
|
||||
i += 1
|
||||
|
||||
# Add new resources
|
||||
while True:
|
||||
new_resource = clean_input(
|
||||
"Press enter to finish, or enter a resource to add:",
|
||||
)
|
||||
if not new_resource:
|
||||
break
|
||||
directives.resources.append(new_resource)
|
||||
|
||||
# Revise best practices
|
||||
i = 0
|
||||
while i < len(directives.best_practices):
|
||||
best_practice = directives.best_practices[i]
|
||||
print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"')
|
||||
new_best_practice = (
|
||||
clean_input(
|
||||
f"Enter new best practice {i+1}"
|
||||
" (press enter to keep current, or '-' to remove):",
|
||||
)
|
||||
or best_practice
|
||||
)
|
||||
if new_best_practice == "-":
|
||||
directives.best_practices.remove(best_practice)
|
||||
continue
|
||||
elif new_best_practice:
|
||||
directives.best_practices[i] = new_best_practice
|
||||
|
||||
i += 1
|
||||
|
||||
# Add new best practices
|
||||
while True:
|
||||
new_best_practice = clean_input(
|
||||
"Press enter to finish, or add a best practice to add:",
|
||||
            )
            if not new_best_practice:
                break
            directives.best_practices.append(new_best_practice)

        revised = True

    return ai_profile, directives


def print_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    logger: logging.Logger,
    title: str = "AI Settings",
):
    print_attribute(title, "")
    print_attribute("-" * len(title), "")
    print_attribute("Name :", ai_profile.ai_name)
    print_attribute("Role :", ai_profile.ai_role)

    print_attribute("Constraints:", "" if directives.constraints else "(none)")
    for constraint in directives.constraints:
        logger.info(f"- {constraint}")
    print_attribute("Resources:", "" if directives.resources else "(none)")
    for resource in directives.resources:
        logger.info(f"- {resource}")
    print_attribute("Best practices:", "" if directives.best_practices else "(none)")
    for best_practice in directives.best_practices:
        logger.info(f"- {best_practice}")
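A minimal usage sketch of the printer above (the wrapper function and logger name are illustrative additions, not part of this diff):

    import logging

    def show_revised_settings(ai_profile: AIProfile, directives: AIDirectives) -> None:
        # Hypothetical helper: echo the profile back after interactive revision.
        print_ai_settings(
            ai_profile, directives, logging.getLogger(__name__), title="Revised AI Settings"
        )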
@@ -1,64 +0,0 @@
import os

import click
from colorama import Fore, Style

from .utils import (
    env_file_exists,
    get_git_user_email,
    set_env_config_value,
    vcs_state_diverges_from_master,
)


def setup_telemetry() -> None:
    if os.getenv("TELEMETRY_OPT_IN") is None:
        # If no .env file is present, don't bother asking to enable telemetry,
        # to prevent repeated asking in non-persistent environments.
        if not env_file_exists():
            return

        allow_telemetry = click.prompt(
            f"""
{Style.BRIGHT}❓ Do you want to enable telemetry? ❓{Style.NORMAL}
This means AutoGPT will send diagnostic data to the core development team when something
goes wrong, and will help us to diagnose and fix problems earlier and faster. It also
allows us to collect basic performance data, which helps us find bottlenecks and other
things that slow down the application.

By entering 'yes', you confirm that you have read and agree to our Privacy Policy,
which is available here:
https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984

Please enter 'yes' or 'no'""",
            type=bool,
        )
        set_env_config_value("TELEMETRY_OPT_IN", "true" if allow_telemetry else "false")
        click.echo(
            f"❤️ Thank you! Telemetry is {Fore.GREEN}enabled{Fore.RESET}."
            if allow_telemetry
            else f"👍 Telemetry is {Fore.RED}disabled{Fore.RESET}."
        )
        click.echo(
            "💡 If you ever change your mind, you can change 'TELEMETRY_OPT_IN' in .env"
        )
        click.echo()

    if os.getenv("TELEMETRY_OPT_IN", "").lower() == "true":
        _setup_sentry()


def _setup_sentry() -> None:
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://dc266f2f7a2381194d1c0fa36dff67d8@o4505260022104064.ingest.sentry.io/4506739844710400",  # noqa
        enable_tracing=True,
        environment=os.getenv(
            "TELEMETRY_ENVIRONMENT",
            "production" if not vcs_state_diverges_from_master() else "dev",
        ),
    )

    # Allow Sentry to distinguish between users
    sentry_sdk.set_user({"email": get_git_user_email(), "ip_address": "{{auto}}"})
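For context, a minimal sketch of where setup_telemetry() would run (the entry point is an assumption; the real call site is not shown in this diff):

    def main() -> None:
        # Prompt once at startup; later runs read TELEMETRY_OPT_IN from .env
        # instead of asking again.
        setup_telemetry()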
@@ -1,247 +0,0 @@
import asyncio
import contextlib
import functools
import logging
import os
import re
import socket
import sys
from pathlib import Path
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar, cast

import requests
from colorama import Fore, Style
from git import InvalidGitRepositoryError, Repo

P = ParamSpec("P")
T = TypeVar("T")

logger = logging.getLogger(__name__)


def get_bulletin_from_web():
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpt/BULLETIN.md"  # noqa: E501
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except InvalidGitRepositoryError:
        return ""


def vcs_state_diverges_from_master() -> bool:
    """
    Returns whether a git repo is present and contains changes that are not in `master`.
    """
    paths_we_care_about = "autogpt/autogpt/**/*.py"
    try:
        repo = Repo(search_parent_directories=True)

        # Check for uncommitted changes in the specified path
        uncommitted_changes = repo.index.diff(None, paths=paths_we_care_about)
        if uncommitted_changes:
            return True

        # Find OG AutoGPT remote
        for remote in repo.remotes:
            if remote.url.endswith(
                tuple(
                    # All permutations of old/new repo name and HTTP(S)/Git URLs
                    f"{prefix}{path}"
                    for prefix in ("://github.com/", "git@github.com:")
                    for path in (
                        f"Significant-Gravitas/{n}.git" for n in ("AutoGPT", "Auto-GPT")
                    )
                )
            ):
                og_remote = remote
                break
        else:
            # Original AutoGPT remote is not configured: assume local codebase diverges
            return True

        master_branch = og_remote.refs.master
        with contextlib.suppress(StopIteration):
            next(repo.iter_commits(f"HEAD..{master_branch}", paths=paths_we_care_about))
            # Local repo is one or more commits ahead of OG AutoGPT master branch
            return True

        # Relevant part of the codebase is on master
        return False
    except InvalidGitRepositoryError:
        # No git repo present: assume codebase is a clean download
        return False


def get_git_user_email() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        return cast(str, repo.config_reader().get_value("user", "email", default=""))
    except InvalidGitRepositoryError:
        return ""


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open(
            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
        ).read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to AutoGPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest AutoGPT News and feature updates!\n"
            "If you don't wish to see this message, you "
            "can run AutoGPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news


def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)
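# Illustrative example (added for clarity; not part of the original file):
# markdown_to_ansi_style("# News\nSee the *latest* updates.") renders the
# heading bright and cyan with the leading "# " stripped, and wraps *latest*
# in Style.BRIGHT / Style.NORMAL instead of asterisks.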


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""  # noqa: E501
    return legal_text


def print_motd(logger: logging.Logger):
    motd, is_new_motd = get_latest_bulletin()
    if motd:
        motd = markdown_to_ansi_style(motd)
        for motd_line in motd.split("\n"):
            logger.info(
                extra={
                    "title": "NEWS:",
                    "title_color": Fore.GREEN,
                    "preserve_color": True,
                },
                msg=motd_line,
            )
        if is_new_motd:
            input(
                Fore.MAGENTA
                + Style.BRIGHT
                + "NEWS: Bulletin was updated! Press Enter to continue..."
                + Style.RESET_ALL
            )


def print_git_branch_info(logger: logging.Logger):
    git_branch = get_current_git_branch()
    if git_branch and git_branch != "master":
        logger.warning(
            f"You are running on `{git_branch}` branch"
            " - this is not a supported branch."
        )


def print_python_version_info(logger: logging.Logger):
    if sys.version_info < (3, 10):
        logger.error(
            "WARNING: You are running on an older version of Python. "
            "Some people have observed problems with certain "
            "parts of AutoGPT with this version. "
            "Please consider upgrading to Python 3.10 or higher.",
        )


ENV_FILE_PATH = Path(__file__).parent.parent.parent / ".env"


def env_file_exists() -> bool:
    return ENV_FILE_PATH.is_file()


def set_env_config_value(key: str, value: str) -> None:
    """Sets the specified env variable and updates it in .env as well"""
    os.environ[key] = value

    with ENV_FILE_PATH.open("r+") as file:
        lines = file.readlines()
        file.seek(0)
        key_already_in_file = False
        for line in lines:
            if re.match(rf"^(?:# )?{key}=.*$", line):
                file.write(f"{key}={value}\n")
                key_already_in_file = True
            else:
                file.write(line)

        if not key_already_in_file:
            file.write(f"{key}={value}\n")

        file.truncate()
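# Illustrative usage (the key/value pair is an example, not from this diff):
#
#     set_env_config_value("TELEMETRY_OPT_IN", "false")
#
# This updates os.environ for the current process and rewrites the matching
# (possibly commented-out) line in .env, appending one if the key is absent.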


def is_port_free(port: int, host: str = "127.0.0.1"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind((host, port))  # Try to bind to the port
            return True  # If successful, the port is free
        except OSError:
            return False  # If failed, the port is likely in use
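# Illustrative usage (the port number is arbitrary):
#
#     if not is_port_free(8000):
#         raise RuntimeError("Port 8000 is already in use")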


def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    @functools.wraps(f)
    def wrapper(*args: P.args, **kwargs: P.kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper
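A short sketch of the coroutine decorator in use; it lets an async function be called like a regular synchronous one, e.g. as a CLI entry point (the function below is illustrative):

    @coroutine
    async def run_agent() -> None:
        # The wrapper calls asyncio.run() internally, so synchronous code can
        # invoke run_agent() directly.
        ...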
@@ -1,7 +0,0 @@
azure_api_type: azure
azure_api_version: api-version-for-azure
azure_endpoint: your-azure-openai-endpoint
azure_model_map:
  gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure
  gpt-4-turbo-preview: gpt4-deployment-id-for-azure
  text-embedding-3-small: embedding-deployment-id-for-azure
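Each azure_model_map entry pairs an OpenAI model name with the Azure deployment that serves it. A minimal sketch of reading such a file (the filename and the use of PyYAML are assumptions; AutoGPT's own config loader is not shown in this diff):

    import yaml

    with open("azure.yaml") as f:
        azure_conf = yaml.safe_load(f)
    deployment = azure_conf["azure_model_map"]["gpt-4-turbo-preview"]
    # -> "gpt4-deployment-id-for-azure"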
autogpt/command_decorator.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import functools
from typing import Any, Callable, Optional, TypedDict

from autogpt.config import Config
from autogpt.models.command import Command, CommandParameter

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class CommandParameterSpec(TypedDict):
    type: str
    description: str
    required: bool


def command(
    name: str,
    description: str,
    parameters: dict[str, CommandParameterSpec],
    enabled: bool | Callable[[Config], bool] = True,
    disabled_reason: Optional[str] = None,
    aliases: list[str] = [],
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        typed_parameters = [
            CommandParameter(
                name=param_name,
                description=parameter.get("description"),
                type=parameter.get("type", "string"),
                required=parameter.get("required", False),
            )
            for param_name, parameter in parameters.items()
        ]
        cmd = Command(
            name=name,
            description=description,
            method=func,
            parameters=typed_parameters,
            enabled=enabled,
            disabled_reason=disabled_reason,
            aliases=aliases,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
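Illustrative usage of the decorator above (the command itself is a made-up example, not part of this diff):

    @command(
        name="say_hello",
        description="Greet the given person.",
        parameters={
            "name": {
                "type": "string",
                "description": "Who to greet",
                "required": True,
            },
        },
    )
    def say_hello(name: str) -> str:
        return f"Hello, {name}!"

The wrapped function keeps its original behavior; the Command metadata is attached as wrapper.command, and the AUTO_GPT_COMMAND_IDENTIFIER attribute lets a registry recognize it.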
autogpt/commands/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
COMMAND_CATEGORIES = [
    "autogpt.commands.execute_code",
    "autogpt.commands.file_operations",
    "autogpt.commands.web_search",
    "autogpt.commands.web_selenium",
    "autogpt.commands.task_statuses",
]
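Each entry is a dotted module path. A minimal sketch of how a registry could load them (an assumption about usage; the actual registry code is not part of this diff):

    import importlib

    # Import each category module so the commands it defines (via @command)
    # become available for registration.
    modules = [importlib.import_module(path) for path in COMMAND_CATEGORIES]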
Some files were not shown because too many files have changed in this diff.