mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 16:48:06 -05:00

Compare commits: 22 commits, updated-do... self-feedb...

Commits:
1a609f8cd9
8b82421b9c
75cc71f8d3
f287282e8c
2a93aff512
6d1653b84f
a7816b8c79
21913c4733
9d9c66d50f
a00a7a2bd0
d6cb10432b
0bea5e38a4
88b2d5fb2d
f1032926cc
e7ad51ce42
a3522223d9
4e3035efe4
a8cbf51489
317361da8c
991bc77e0b
83357f6c2f
acf48d2d4d
.devcontainer/Dockerfile
@@ -10,4 +10,4 @@ RUN apt-get update && apt-get install -y \
RUN apt-get install -y curl jq wget git

# Declare working directory
WORKDIR /workspace/AutoGPT
WORKDIR /workspace/Auto-GPT
.devcontainer/devcontainer.json
@@ -1,18 +1,17 @@
{
  "dockerComposeFile": "./docker-compose.yml",
  "service": "auto-gpt",
  "workspaceFolder": "/workspace/AutoGPT",
  "workspaceFolder": "/workspace/Auto-GPT",
  "shutdownAction": "stopCompose",
  "features": {
    "ghcr.io/devcontainers/features/common-utils:2": {
      "installZsh": "true",
      "username": "vscode",
      "userUid": "1000",
      "userGid": "1000",
      "userUid": "6942",
      "userGid": "6942",
      "upgradePackages": "true"
    },
    "ghcr.io/devcontainers/features/desktop-lite:1": {},
    "ghcr.io/devcontainers/features/github-cli:1": {},
    "ghcr.io/devcontainers/features/python:1": "none",
    "ghcr.io/devcontainers/features/node:1": "none",
    "ghcr.io/devcontainers/features/git:1": {
@@ -26,31 +25,16 @@
    "vscode": {
      // Set *default* container specific settings.json values on container create.
      "settings": {
        "python.defaultInterpreterPath": "/usr/local/bin/python",
        "python.testing.pytestEnabled": true,
        "python.testing.unittestEnabled": false
      },
      "extensions": [
        "ms-python.python",
        "VisualStudioExptTeam.vscodeintellicode",
        "ms-python.vscode-pylance",
        "ms-python.black-formatter",
        "ms-python.isort",
        "GitHub.vscode-pull-request-github",
        "GitHub.copilot",
        "github.vscode-github-actions"
      ]
        "python.defaultInterpreterPath": "/usr/local/bin/python"
      }
    }
  },
  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  // "forwardPorts": [],

  // Use 'postCreateCommand' to run commands after the container is created.
  // "postCreateCommand": "poetry install",
  // "postCreateCommand": "pip3 install --user -r requirements.txt",

  // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
  "remoteUser": "vscode",

  // Add the freshly containerized repo to the list of safe repositories
  "postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
  "remoteUser": "vscode"
}
.devcontainer/docker-compose.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
# To boot the app run the following:
# docker-compose run auto-gpt
version: '3.9'

services:
  auto-gpt:
    depends_on:
      - redis
    build:
      dockerfile: .devcontainer/Dockerfile
      context: ../
    tty: true
    environment:
      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
      REDIS_HOST: ${REDIS_HOST:-redis}
    volumes:
      - ../:/workspace/Auto-GPT
  redis:
    image: 'redis/redis-stack-server:latest'
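The header comment above gives the boot command; a minimal usage sketch run from the repository root (assumes Docker and Docker Compose are installed; paths in the file resolve relative to the compose file, so the build context `../` is the repo root):

# Build and run the agent service plus its redis dependency:
docker-compose -f .devcontainer/docker-compose.yml run auto-gpt
# Compose V2 spelling of the same command:
docker compose -f .devcontainer/docker-compose.yml run auto-gpt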
.dockerignore
@@ -1,40 +1,8 @@
# Ignore everything by default, selectively add things to context
*

# AutoGPT
!autogpt/autogpt/
!autogpt/pyproject.toml
!autogpt/poetry.lock
!autogpt/README.md
!autogpt/tests/

# Benchmark
!benchmark/agbenchmark/
!benchmark/pyproject.toml
!benchmark/poetry.lock
!benchmark/README.md

# Forge
!forge/forge/
!forge/pyproject.toml
!forge/poetry.lock
!forge/README.md

# Frontend
!frontend/build/web/

# rnd
!rnd/

# Explicitly re-ignore some folders
.*
**/__pycache__
# rnd
rnd/autogpt_builder/.next/
rnd/autogpt_builder/node_modules
rnd/autogpt_builder/.env.example
rnd/autogpt_builder/.env.local
rnd/autogpt_server/.env
rnd/autogpt_server/.venv/
*.template
*.yaml
*.yml

rnd/market/.env
*.md
*.png
!BULLETIN.md
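Allow-list ignore files like the one above are easy to get wrong; a quick way to inspect what actually reaches the build context is a throwaway image that lists it (a sketch, not part of this change; assumes BuildKit and a POSIX shell):

docker build --progress=plain --no-cache -f- . <<'EOF'
# Throwaway image whose only job is to print the files Docker copied into the context
FROM busybox
COPY . /ctx
RUN find /ctx
EOF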
.env.template (new file, 214 lines)
@@ -0,0 +1,214 @@
################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y
## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
# USE_AZURE=False

### AZURE
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured - also works with Zilliz)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### PINECONE
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
## MILVUS_USERNAME - username for your Milvus database
## MILVUS_PASSWORD - password for your Milvus database
## MILVUS_SECURE - True to enable TLS. (Default: False)
## Setting MILVUS_ADDR to a `https://` URL will override this setting.
## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=localhost:19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
##   DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle
# IMAGE_SIZE=256

### HUGGINGFACE
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

### HUGGINGFACE
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
##   Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
# BROWSE_CHUNK_MAX_LENGTH=3000
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

### GOOGLE
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### STREAMELEMENTS
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### ELEVENLABS
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### TWITTER API
################################################################################

# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=

################################################################################
### ALLOWLISTED PLUGINS
################################################################################

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=

################################################################################
### CHAT PLUGIN SETTINGS
################################################################################
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False
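A template like this is normally copied and then filled in by hand; a minimal sketch of that workflow (assumes a POSIX shell; the values you edit in are your own keys):

# Create a working config from the template, then edit in real values:
cp .env.template .env
# Load it into the current shell for a quick local test run:
set -a; . ./.env; set +a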
.envrc
@@ -1,4 +1,4 @@
# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use AutoGPT.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.

[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
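direnv only loads an .envrc after the user approves it once, which is the permission grant the first comment refers to; a sketch of that one-time step (assumes direnv is installed and hooked into the shell):

# Approve this directory's .envrc once; direnv then loads it automatically on cd:
direnv allow .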
.flake8 (2 changes)
@@ -1,5 +1,6 @@
[flake8]
max-line-length = 88
select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
    .tox,
    __pycache__,
@@ -9,4 +10,3 @@ exclude =
    .venv/*,
    reports/*,
    dist/*,
    data/*,
.gitattributes (11 changes)
@@ -1,10 +1,5 @@
frontend/build/** linguist-generated

**/poetry.lock linguist-generated

docs/_javascript/** linguist-vendored

# Exclude VCR cassettes from stats
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
tests/**/cassettes/**.y*ml linguist-generated

* text=auto
# Mark documentation as such
docs/**.md linguist-documentation
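The effect of attribute rules like these can be verified per path with git's attribute lookup; a small sketch (run inside the repository; the second path is illustrative):

# Ask git which attributes apply to a given file:
git check-attr linguist-generated -- autogpt/poetry.lock
git check-attr linguist-documentation -- docs/index.md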
.github/CODEOWNERS (deleted, 7 lines)
@@ -1,7 +0,0 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
rnd/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins
.github/FUNDING.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

github: Torantulino
.github/ISSUE_TEMPLATE/1.bug.yml (188 changes)
@@ -1,5 +1,5 @@
name: Bug report 🐛
description: Create a bug report for AutoGPT.
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
  - type: markdown
@@ -8,53 +8,45 @@ body:
        ### ⚠️ Before you continue
        * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
        * If you need help, you can ask in the [discussions] section or in [#tech-support]
        * **Thoroughly search the [existing issues] before creating a new one**
        * Read our [wiki page on Contributing]
        * **Throughly search the [existing issues] before creating a new one**

        [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
        [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
        [discord]: https://discord.gg/autogpt
        [discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
        [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
        [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
        [existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
        [wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing

        [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
  - type: checkboxes
    attributes:
      label: ⚠️ Search for existing issues first ⚠️
      description: >
        Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
        Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
        to see if an issue already exists for the same problem.
      options:
        - label: I have searched the existing issues, and there is no existing issue for my problem
          required: true

  - type: markdown
    attributes:
      value: |
        Please confirm that the issue you have is described well and precise in the title above ⬆️.
        A good rule of thumb: What would you type if you were searching for the issue?

        For example:
        BAD - my AutoGPT keeps looping
        GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.

        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
        The less time we spend here, the more time we can spend building AutoGPT.

        Please help us help you by following these steps:
        - Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
          newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
        - Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
        - Provide relevant info:
          - Provide commit-hash (`git rev-parse HEAD` gets it) if possible
          - If it's a pip/packages issue, mention this in the title and provide pip version, python version
          - If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
        Please provide a searchable summary of the issue in the title above ⬆️.

        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
        The less time we spend here, the more time we spend building AutoGPT.

        Please help us help you:
        - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
        - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
        - Search for existing issues, "add comment" is tidier than "new issue"
        - Ask on our Discord (https://discord.gg/autogpt)
        - Provide relevant info:
          - Provide commit-hash (`git rev-parse HEAD` gets it)
          - If it's a pip/packages issue, provide pip version, python version
          - If it's a crash, provide traceback.
  - type: dropdown
    attributes:
      label: Which Operating System are you using?
      description: >
        Please select the operating system you were using to run AutoGPT when this problem occurred.
        Please select the operating system you were using to run Auto-GPT when this problem occurred.
      options:
        - Windows
        - Linux
@@ -62,112 +54,78 @@ body:
        - Docker
        - Devcontainer / Codespace
        - Windows Subsystem for Linux (WSL)
        - Other
        - Other (Please specify in your problem)
    validations:
      required: true
    nested_fields:
      - type: text
        attributes:
          label: Specify the system
          description: Please specify the system you are working on.

  - type: dropdown
    attributes:
      label: Which version of AutoGPT are you using?
      label: Which version of Auto-GPT are you using?
      description: |
        Please select which version of AutoGPT you were using when this issue occurred.
        If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
        **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
        If installed with git you can run `git branch` to see which version of AutoGPT you are running.
        Please select which version of Auto-GPT you were using when this issue occurred.
        If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
        **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
        If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
      options:
        - Latest Release
        - Stable (branch)
        - Master (branch)
    validations:
      required: true

  - type: dropdown
    attributes:
      label: What LLM Provider do you use?
      label: GPT-3 or GPT-4?
      description: >
        If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
        the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
        If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
        the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
      options:
        - Azure
        - Groq
        - Anthropic
        - Llamafile
        - Other (detail in issue)
        - GPT-3.5
        - GPT-4
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Which area covers your issue best?
      description: >
        Select the area related to the issue you are reporting.
      options:
        - Installation and setup
        - Memory
        - Performance
        - Prompt
        - Commands
        - Plugins
        - AI Model Limitations
        - Challenges
        - Documentation
        - Logging
        - Agents
        - Other
    validations:
      required: true
    autolabels: true
    nested_fields:
      - type: text
        attributes:
          label: Specify the area
          description: Please specify the area you think is best related to the issue.

  - type: input
    attributes:
      label: What commit or version are you using?
      description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Describe your issue.
      description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
    validations:
      required: true

  #Following are optional file content uploads
  - type: markdown
      label: Steps to reproduce 🕹
      description: |
        **⚠️ Issues that we can't reproduce will be closed.**
  - type: textarea
    attributes:
      label: Current behavior 😯
      description: Describe what happens instead of the expected behavior.
  - type: textarea
    attributes:
      label: Expected behavior 🤔
      description: Describe what should happen.
  - type: textarea
    attributes:
      label: Your prompt 📝
      description: >
        If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
      value: |
        ⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
        ```yaml
        # Paste your prompt here
        ```
  - type: textarea
    attributes:
      label: Your Logs 📒
      description: |
        Please include the log showing your error and the command that caused it, if applicable.
        You can copy it from your terminal or from `logs/activity.log`.
        This will help us understand your issue better!

        "The log files are located in the folder 'logs' inside the main AutoGPT folder."

  - type: textarea
    attributes:
      label: Upload Activity Log Content
      description: |
        Upload the activity log content, this can help us understand the issue better.
        To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
        ⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
        any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
    validations:
      required: false

  - type: textarea
    attributes:
      label: Upload Error Log Content
      description: |
        Upload the error log content, this will help us understand the issue better.
        To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
        ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
        any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
    validations:
      required: false
        <details>
        <summary><i>Example</i></summary>
        ```log
        INFO  NEXT ACTION:  COMMAND = execute_shell  ARGUMENTS = {'command_line': 'some_command'}
        INFO  -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
        Traceback (most recent call last):
          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
            self._interpret_response_line(
          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
            raise self.handle_error_response(
        openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
        ```
        </details>
      value: |
        ```log
        <insert your logs here>
        ```
.github/ISSUE_TEMPLATE/2.feature.yml (9 changes)
@@ -1,16 +1,17 @@
name: Feature request 🚀
description: Suggest a new idea for AutoGPT!
description: Suggest a new idea for Auto-GPT.
labels: ['status: needs triage']
body:
  - type: markdown
    attributes:
      value: |
        First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
        Please provide a searchable summary of the issue in the title above ⬆️.

        Thanks for contributing by creating an issue! ❤️
  - type: checkboxes
    attributes:
      label: Duplicates
      description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
      description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
      options:
        - label: I have searched the existing issues
          required: true
@@ -25,4 +26,4 @@ body:
  - type: textarea
    attributes:
      label: Motivation 🔦
      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
.github/PULL_REQUEST_TEMPLATE.md (59 changes)
@@ -1,23 +1,40 @@
### Background

<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->


### Testing 🔍
> [!NOTE]
Only for the new autogpt platform, currently in rnd/

<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->

- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly
<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:

Focus on a single, specific change.
Do not include any unrelated or "extra" modifications.
Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg

By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->

### Background
<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->

### Changes
<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->

### Documentation
<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->

### Test Plan
<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->

### PR Quality Checklist
- [ ] My pull request is atomic and focuses on a single change.
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
.github/labeler.yml (deleted, 27 lines)
@@ -1,27 +0,0 @@
AutoGPT Agent:
- changed-files:
  - any-glob-to-any-file: autogpt/**

Forge:
- changed-files:
  - any-glob-to-any-file: forge/**

Benchmark:
- changed-files:
  - any-glob-to-any-file: benchmark/**

Frontend:
- changed-files:
  - any-glob-to-any-file: frontend/**

documentation:
- changed-files:
  - any-glob-to-any-file: docs/**

Builder:
- changed-files:
  - any-glob-to-any-file: rnd/autogpt_builder/**

Server:
- changed-files:
  - any-glob-to-any-file: rnd/autogpt_server/**
.github/workflows/autogpt-builder-ci.yml (deleted, 41 lines)
@@ -1,41 +0,0 @@
name: AutoGPT Builder CI

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'rnd/autogpt_builder/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'rnd/autogpt_builder/**'

defaults:
  run:
    shell: bash
    working-directory: rnd/autogpt_builder

jobs:

  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '21'

      - name: Install dependencies
        run: |
          npm install

      - name: Check formatting with Prettier
        run: |
          npx prettier --check .

      - name: Run lint
        run: |
          npm run lint
.github/workflows/autogpt-ci.yml (deleted, 138 lines)
@@ -1,138 +0,0 @@
name: AutoGPT CI

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/autogpt-ci.yml'
      - 'autogpt/**'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/autogpt-ci.yml'
      - 'autogpt/**'

concurrency:
  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: '.'
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Configure git user Auto-GPT-Bot
        run: |
          git config --global user.name "Auto-GPT-Bot"
          git config --global user.email "github-bot@agpt.co"

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            tests/unit tests/integration
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: autogpt/logs/
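For reference, the test step in the workflow above can be approximated locally; a sketch (assumes Poetry is installed and an OPENAI_API_KEY is exported; the MinIO-backed S3 variables are optional):

# Run the same suite the CI job ran, from the repo root:
cd autogpt
poetry install
poetry run pytest -vv \
  --cov=autogpt --cov-branch --cov-report term-missing \
  --numprocesses=logical --durations=10 \
  tests/unit tests/integration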
.github/workflows/autogpt-docker-ci.yml (deleted, 162 lines)
@@ -1,162 +0,0 @@
name: AutoGPT Docker CI

on:
  push:
    branches: [ master, development ]
    paths:
      - '.github/workflows/autogpt-docker-ci.yml'
      - 'autogpt/**'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/autogpt-docker-ci.yml'
      - 'autogpt/**'

concurrency:
  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

defaults:
  run:
    working-directory: autogpt

env:
  IMAGE_NAME: auto-gpt
  DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
  DEV_IMAGE_TAG: latest-dev

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - if: runner.debug
        run: |
          ls -al
          du -hs *

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        with:
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          tags: ${{ env.IMAGE_NAME }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref}}

          build_type: ${{ matrix.build-type }}

          prod_branch: master
          dev_branch: development
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.event.after }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

          new_commits_json: ${{ toJSON(github.event.commits) }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 10

    services:
      minio:
        image: minio/minio:edge-cicd
        options: >
          --name=minio
          --health-interval=10s --health-timeout=5s --health-retries=3
          --health-cmd="curl -f http://localhost:9000/minio/health/live"

    steps:
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          submodules: true

      - if: github.event_name == 'push'
        name: Log in to Docker hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        with:
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=dev # include pytest
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-dev
          cache-to: type=gha,scope=autogpt-docker-dev,mode=max

      - id: test
        name: Run tests
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: http://minio:9000
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
        run: |
          set +e
          docker run --env CI --env OPENAI_API_KEY \
            --network container:minio \
            --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
            --entrypoint poetry ${{ env.IMAGE_NAME }} run \
            pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
              --numprocesses=4 --durations=10 \
              tests/unit tests/integration 2>&1 | tee test_output.txt

          test_failure=${PIPESTATUS[0]}

          cat << $EOF >> $GITHUB_STEP_SUMMARY
          # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
          \`\`\`
          $(cat test_output.txt)
          \`\`\`
          $EOF

          exit $test_failure

      - if: github.event_name == 'push' && github.ref_name == 'master'
        name: Push image to Docker Hub
        run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
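The "Run tests" step above relies on a subtle bash idiom: piping through `tee` makes the pipeline's exit status that of `tee`, so the test result has to be read from `PIPESTATUS`. A standalone sketch of the pattern (bash only; `some_test_command` is a placeholder):

set +e                                   # don't abort the script on test failure
some_test_command 2>&1 | tee output.txt  # capture output while still streaming it
rc=${PIPESTATUS[0]}                      # exit code of some_test_command, not of tee
set -e
exit $rc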
.github/workflows/autogpt-infra-ci.yml (deleted, 56 lines)
@@ -1,56 +0,0 @@
name: AutoGPT Builder Infra

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'rnd/infra/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'rnd/infra/**'

defaults:
  run:
    shell: bash
    working-directory: rnd/infra

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: TFLint
        uses: pauloconnor/tflint-action@v0.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tflint_path: terraform/
          tflint_recurse: true
          tflint_changed_only: false

      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.14.4

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Run chart-testing (lint)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct lint --target-branch ${{ github.event.repository.default_branch }}
.github/workflows/autogpt-server-ci.yml (deleted, 155 lines)
@@ -1,155 +0,0 @@
name: AutoGPT Server CI

on:
  push:
    branches: [master, development, ci-test*]
    paths:
      - ".github/workflows/autogpt-server-ci.yml"
      - "rnd/autogpt_server/**"
  pull_request:
    branches: [master, development, release-*]
    paths:
      - ".github/workflows/autogpt-server-ci.yml"
      - "rnd/autogpt_server/**"

concurrency:
  group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: rnd/autogpt_server

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      - name: Setup PostgreSQL
        uses: ikalnytskyi/action-setup-postgres@v6
        with:
          username: ${{ secrets.DB_USER || 'postgres' }}
          password: ${{ secrets.DB_PASS || 'postgres' }}
          database: postgres
          port: 5432
        id: postgres

      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: "."
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      - name: Run pytest with coverage
        run: |
          if [[ "${{ runner.debug }}" == "1" ]]; then
            poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
          else
            poetry run pytest -vv test
          fi
        if: success() || (failure() && steps.lint.outcome == 'failure')
        env:
          LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          DB_USER: ${{ secrets.DB_USER || 'postgres' }}
          DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
          DB_NAME: postgres
          DB_PORT: 5432
          RUN_ENV: local
          PORT: 8080
          DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}

      # - name: Upload coverage reports to Codecov
      #   uses: codecov/codecov-action@v4
      #   with:
      #     token: ${{ secrets.CODECOV_TOKEN }}
      #     flags: autogpt-server,${{ runner.os }}
.github/workflows/autogpt-server-docker.yml (deleted, 41 lines)
@@ -1,41 +0,0 @@
name: AutoGPT Server Docker Build & Push

on:
  push:
    branches: [ updated-docker-ci ]
    paths:
      - '**'

defaults:
  run:
    shell: bash
    working-directory: AutoGPT

env:
  PROJECT_ID: agpt-dev
  IMAGE_NAME: agpt-server-dev
  REGION: us-central1

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v0.2.1
        with:
          project_id: ${{ env.PROJECT_ID }}
          service_account_key: ${{ secrets.GCP_SA_KEY }}
          export_default_credentials: true

      - name: Configure Docker
        run: gcloud auth configure-docker ${{ env.REGION }}-docker.pkg.dev

      - name: Build Docker image
        run: docker build -t ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -f rnd/autogpt_server/Dockerfile .

      - name: Push Docker image
        run: docker push ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
97 .github/workflows/autogpts-benchmark.yml vendored
@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark

on:
  workflow_dispatch:
  schedule:
    - cron: '0 2 * * *'

jobs:
  benchmark:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [ autogpt ]
      fail-fast: false
    timeout-minutes: 120
    env:
      min-python-version: '3.10'
      REPORTS_BRANCH: data/benchmark-reports
      REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python -

      - name: Prepare reports folder
        run: mkdir -p ${{ env.REPORTS_FOLDER }}

      - run: poetry -C benchmark install

      - name: Benchmark ${{ matrix.agent-name }}
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}

          set +e  # Do not quit on non-zero exit codes
          poetry run agbenchmark run -N 3 \
            --test=ReadFile \
            --test=BasicRetrieval --test=RevenueRetrieval2 \
            --test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
            --test=UrlShortener --test=TicTacToe --test=Battleship \
            --test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
            --test=WebArenaTask_134 --test=WebArenaTask_163

          # Convert exit code 1 (some challenges failed) to exit code 0
          if [ $? -eq 0 ] || [ $? -eq 1 ]; then
            exit 0
          else
            exit $?
          fi
        env:
          AGENT_NAME: ${{ matrix.agent-name }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }}  # account for changed workdir

          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

      - name: Push reports to data branch
        run: |
          # BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
          rm ${{ env.REPORTS_FOLDER }}/*.json

          # Find folder with newest (untracked) report in it
          report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
            | xargs -I {} dirname {} \
            | xargs -I {} git ls-files --others --exclude-standard {} \
            | xargs -I {} dirname {} \
            | sort -u)
          json_report_file="$report_subfolder/report.json"

          # Convert JSON report to Markdown
          markdown_report_file="$report_subfolder/report.md"
          poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
          cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY

          git config --global user.name 'GitHub Actions'
          git config --global user.email 'github-actions@agpt.co'
          git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
            && git checkout ${{ env.REPORTS_BRANCH }} \
            || git checkout --orphan ${{ env.REPORTS_BRANCH }}
          git reset --hard
          git add ${{ env.REPORTS_FOLDER }}
          git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
            && git push origin ${{ env.REPORTS_BRANCH }}
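A note on the exit-code conversion in the benchmark step above: after `if [ $? -eq 0 ]` runs, `$?` holds the result of that test itself, so the `|| [ $? -eq 1 ]` branch and the trailing `exit $?` no longer see agbenchmark's real exit status. A minimal sketch of the same intent that avoids the pitfall (a hypothetical rewrite, not the workflow's actual code):

```sh
set +e                      # do not abort on non-zero exit codes
poetry run agbenchmark run -N 3 --test=ReadFile
status=$?                   # capture the exit code once, before anything clobbers $?
set -e

if [ "$status" -le 1 ]; then
  exit 0                    # exit code 1 ("some challenges failed") counts as success
else
  exit "$status"            # propagate real failures
fi
```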
71 .github/workflows/autogpts-ci.yml vendored
@@ -1,71 +0,0 @@
name: Agent smoke tests

on:
  workflow_dispatch:
  schedule:
    - cron: '0 8 * * *'
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/autogpts-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - 'run'
      - 'cli.py'
      - 'setup.py'
      - '!**/*.md'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/autogpts-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - 'run'
      - 'cli.py'
      - 'setup.py'
      - '!**/*.md'

jobs:
  serve-agent-protocol:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [ autogpt ]
      fail-fast: false
    timeout-minutes: 20
    env:
      min-python-version: '3.10'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        working-directory: ./${{ matrix.agent-name }}/
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Run regression tests
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}
          poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
          poetry run agbenchmark --test=WriteFile
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          AGENT_NAME: ${{ matrix.agent-name }}
          REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          HELICONE_CACHE_ENABLED: false
          HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
          REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
          TELEMETRY_ENVIRONMENT: autogpt-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
169 .github/workflows/benchmark-ci.yml vendored
@@ -1,169 +0,0 @@
name: AGBenchmark CI

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - 'benchmark/**'
      - .github/workflows/benchmark-ci.yml
      - '!benchmark/reports/**'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - 'benchmark/**'
      - '!benchmark/reports/**'
      - .github/workflows/benchmark-ci.yml

concurrency:
  group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash

env:
  min-python-version: '3.10'

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
    defaults:
      run:
        shell: bash
        working-directory: benchmark
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            tests
        env:
          CI: true
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: agbenchmark,${{ runner.os }}

  self-test-with-agent:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [ forge ]
      fail-fast: false
    timeout-minutes: 20
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Run regression tests
        working-directory: .
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}

          set +e  # Ignore non-zero exit codes and continue execution
          echo "Running the following command: poetry run agbenchmark --maintain --mock"
          poetry run agbenchmark --maintain --mock
          EXIT_CODE=$?
          set -e  # Stop ignoring non-zero exit codes
          # Check if the exit code was 5, and if so, exit with 0 instead
          if [ $EXIT_CODE -eq 5 ]; then
            echo "regression_tests.json is empty."
          fi

          echo "Running the following command: poetry run agbenchmark --mock"
          poetry run agbenchmark --mock

          echo "Running the following command: poetry run agbenchmark --mock --category=data"
          poetry run agbenchmark --mock --category=data

          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
          poetry run agbenchmark --mock --category=coding

          echo "Running the following command: poetry run agbenchmark --test=WriteFile"
          poetry run agbenchmark --test=WriteFile
          cd ../benchmark
          poetry install
          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
          export BUILD_SKILL_TREE=true

          poetry run agbenchmark --mock

          CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
          if [ ! -z "$CHANGED" ]; then
            echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
            echo "$CHANGED"
            exit 1
          else
            echo "No unstaged changes."
          fi
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
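The `concurrency.group` expression in this workflow (and the others below) uses the `a && b || c` idiom because GitHub Actions expressions have no ternary operator; it falls through to `c` whenever `a` or `b` is falsy. Roughly what it computes, as a shell sketch using the real Actions environment variables (`PR_NUMBER` is a hypothetical stand-in for `github.event.pull_request.number`):

```sh
# Shell approximation of the concurrency-group expression above.
if [ -n "${GITHUB_HEAD_REF:-}" ]; then
  # pull_request run: one group per event type + PR number
  group="benchmark-ci-${GITHUB_EVENT_NAME}-${PR_NUMBER}"
else
  # push run: one group per commit
  group="benchmark-ci-${GITHUB_SHA}"
fi
echo "concurrency group: $group"
```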
55 .github/workflows/benchmark_publish_package.yml vendored
@@ -1,55 +0,0 @@
name: Publish to PyPI

on:
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.8

      - name: Install Poetry
        working-directory: ./benchmark/
        run: |
          curl -sSL https://install.python-poetry.org | python3 -
          echo "$HOME/.poetry/bin" >> $GITHUB_PATH

      - name: Build project for distribution
        working-directory: ./benchmark/
        run: poetry build

      - name: Install dependencies
        working-directory: ./benchmark/
        run: poetry install

      - name: Check Version
        working-directory: ./benchmark/
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT

      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "benchmark/dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: false
          tag: agbenchmark-v${{ steps.check-version.outputs.version }}
          commit: master

      - name: Build and publish
        working-directory: ./benchmark/
        run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
31 .github/workflows/benchmarks.yml vendored Normal file
@@ -0,0 +1,31 @@
name: Run Benchmarks

on:
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest

    env:
      python-version: '3.10'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python ${{ env.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: benchmark
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py
77 .github/workflows/ci.yml vendored Normal file
@@ -0,0 +1,77 @@
name: Python CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

concurrency:
  group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
  lint:
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Lint with flake8
        run: flake8

      - name: Check black formatting
        run: black . --check
        if: success() || failure()

      - name: Check isort formatting
        run: isort . --check
        if: success() || failure()

  test:
    permissions:
      # Gives the action the necessary permissions for publishing new
      # comments in pull requests.
      pull-requests: write
      # Gives the action the necessary permissions for pushing data to the
      # python-coverage-comment-action branch, and for editing existing
      # comments (to avoid publishing multiple comments in the same PR)
      contents: write
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11"]

    steps:
      - name: Check out repository
        uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run unittest tests with coverage
        run: |
          pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
34 .github/workflows/close-stale-issues.yml vendored
@@ -1,34 +0,0 @@
name: 'Close stale issues'
on:
  schedule:
    - cron: '30 1 * * *'
  workflow_dispatch:

permissions:
  issues: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          # operations-per-run: 5000
          stale-issue-message: >
            This issue has automatically been marked as _stale_ because it has not had
            any activity in the last 50 days. You can _unstale_ it by commenting or
            removing the label. Otherwise, this issue will be closed in 10 days.
          stale-pr-message: >
            This pull request has automatically been marked as _stale_ because it has
            not had any activity in the last 50 days. You can _unstale_ it by commenting
            or removing the label.
          close-issue-message: >
            This issue was closed automatically because it has been stale for 10 days
            with no activity.
          days-before-stale: 50
          days-before-close: 10
          # Do not touch meta issues:
          exempt-issue-labels: meta,fridge,project management
          # Do not affect pull requests:
          days-before-pr-stale: -1
          days-before-pr-close: -1
@@ -1,11 +1,11 @@
-name: Purge Auto-GPT Docker CI cache
+name: Purge Docker CI cache

 on:
   schedule:
     - cron: 20 4 * * 1,4

 env:
-  BASE_BRANCH: development
+  BASE_BRANCH: master
   IMAGE_NAME: auto-gpt

 jobs:
@@ -16,20 +16,19 @@ jobs:
       build-type: [release, dev]
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2

       - id: build
         name: Build image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v3
         with:
-          file: Dockerfile.autogpt
           build-args: BUILD_TYPE=${{ matrix.build-type }}
           load: true # save to docker images
           # use GHA cache as read-only
-          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
+          cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

       - name: Generate build report
         env:
@@ -38,10 +37,10 @@ jobs:

           build_type: ${{ matrix.build-type }}

-          prod_branch: master
-          dev_branch: development
+          prod_branch: stable
+          dev_branch: master
           repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

           current_ref: ${{ github.ref_name }}
           commit_hash: ${{ github.sha }}
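Both sides of this diff use the GitHub Actions cache backend for BuildKit layers (`type=gha`), with a distinct `scope` per build type so release and dev layers don't evict each other, and `mode=max` to also cache intermediate layers. As a rough sketch, the equivalent manual `buildx` invocation would look like this (assuming a configured buildx builder; the `gha` backend only works where the Actions cache service and its `ACTIONS_RUNTIME_TOKEN`/`ACTIONS_CACHE_URL` credentials are reachable):

```sh
# Hypothetical manual equivalent of the cached CI build above.
docker buildx build \
  --build-arg BUILD_TYPE=dev \
  --tag auto-gpt \
  --load \
  --cache-from type=gha,scope=docker-dev \
  --cache-to type=gha,scope=docker-dev,mode=max \
  .
```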
115 .github/workflows/docker-ci.yml vendored Normal file
@@ -0,0 +1,115 @@
name: Docker CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

concurrency:
  group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

env:
  IMAGE_NAME: auto-gpt

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - if: runner.debug
        run: |
          ls -al
          du -hs *

      - id: build
        name: Build image
        uses: docker/build-push-action@v3
        with:
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          tags: ${{ env.IMAGE_NAME }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=docker-${{ matrix.build-type }}
          cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref}}

          build_type: ${{ matrix.build-type }}

          prod_branch: stable
          dev_branch: master
          repository: ${{ github.repository }}
          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.event.after }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

          new_commits_json: ${{ toJSON(github.event.commits) }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true

  # Docker setup needs fixing before this is going to work: #1843
  test:
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - id: build
        name: Build image
        uses: docker/build-push-action@v3
        with:
          build-args: BUILD_TYPE=dev # include pytest
          tags: ${{ env.IMAGE_NAME }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=docker-dev
          cache-to: type=gha,scope=docker-dev,mode=max

      - id: test
        name: Run tests
        env:
          CI: true
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          set +e
          test_output=$(
            docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
              pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
          )
          test_failure=$?

          echo "$test_output"

          cat << $EOF >> $GITHUB_STEP_SUMMARY
          # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
          \`\`\`
          $test_output
          \`\`\`
          $EOF
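The `cat << $EOF` in the test step above is deliberate: bash performs quote removal but no parameter expansion on a here-document delimiter, so the delimiter is the literal string `$EOF`, while variables like `$test_output` inside the body still expand. A standalone demonstration of the same trick:

```sh
# The delimiter after << is taken literally ('$EOF'), but variables inside the
# here-document are still expanded; escaped backticks render a Markdown fence.
test_output="3 passed, 1 failed"   # hypothetical captured pytest output
cat << $EOF
# Tests ❌
\`\`\`
$test_output
\`\`\`
$EOF
```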
@@ -1,4 +1,4 @@
-name: AutoGPT Docker Release
+name: Docker Release

 on:
   release:
@@ -16,35 +16,31 @@ env:

 jobs:
   build:
-    if: startsWith(github.ref, 'refs/tags/autogpt-')
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Log in to Docker hub
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_PASSWORD }}

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2

       # slashes are not allowed in image tags, but can appear in git branch or tag names
       - id: sanitize_tag
         name: Sanitize image tag
-        run: |
-          tag=${raw_tag//\//-}
-          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
+        run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
         env:
           raw_tag: ${{ github.ref_name }}

       - id: build
         name: Build image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v3
         with:
-          file: Dockerfile.autogpt
           build-args: BUILD_TYPE=release
           load: true # save to docker images
           # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
@@ -52,11 +48,10 @@ jobs:
             ${{ env.IMAGE_NAME }},
             ${{ env.DEPLOY_IMAGE_NAME }}:latest,
             ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
-          labels: GIT_REVISION=${{ github.sha }}

           # cache layers in GitHub Actions cache to speed up builds
-          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
-          cache-to: type=gha,scope=autogpt-docker-release,mode=max
+          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
+          cache-to: type=gha,scope=docker-release,mode=max

       - name: Push image to Docker Hub
         run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
@@ -68,10 +63,10 @@ jobs:
         event_ref_type: ${{ github.event.ref}}
         inputs_no_cache: ${{ inputs.no_cache }}

-        prod_branch: master
-        dev_branch: development
+        prod_branch: stable
+        dev_branch: master
         repository: ${{ github.repository }}
-        base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+        base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}

         ref_type: ${{ github.ref_type }}
         current_ref: ${{ github.ref_name }}
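The `sanitize_tag` step relies on two plain bash parameter expansions, since slashes from branch or tag names are not valid in image tags. Both forms, illustrated with a hypothetical tag name:

```sh
raw_tag="autogpt-release/v1.2.3"   # hypothetical git ref name containing a slash

tag=${raw_tag//\//-}               # global substitution: every '/' becomes '-'
echo "$tag"                        # -> autogpt-release-v1.2.3

tag=${tag#autogpt-}                # strip one leading 'autogpt-' prefix, if present
echo "$tag"                        # -> release-v1.2.3
```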
37 .github/workflows/documentation-release.yml vendored Normal file
@@ -0,0 +1,37 @@
name: Docs

on:
  push:
    branches: [ stable ]
    paths:
      - 'docs/**'
      - 'mkdocs.yml'
      - '.github/workflows/documentation.yml'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

permissions:
  contents: write

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python 3
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Set up workflow cache
        uses: actions/cache@v3
        with:
          key: ${{ github.ref }}
          path: .cache

      - run: pip install mkdocs-material

      - run: mkdocs gh-deploy --force
236 .github/workflows/forge-ci.yml vendored
@@ -1,236 +0,0 @@
name: Forge CI

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: forge

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'
      #   uses: crazy-max/ghaction-setup-docker@v3

      - name: Start MinIO service (Linux)
        if: runner.os == 'Linux'
        working-directory: '.'
        run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

      - name: Start MinIO service (macOS)
        if: runner.os == 'macOS'
        working-directory: ${{ runner.temp }}
        run: |
          brew install minio/stable/minio
          mkdir data
          minio server ./data &

      # No MinIO on Windows:
      # - Windows doesn't support running Linux Docker containers
      # - It doesn't seem possible to start background processes on Windows. They are
      #   killed after the step returns.
      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        env:
          PR_BASE: ${{ github.event.pull_request.base.ref }}
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          cassette_base_branch="${PR_BASE}"
          cd tests/vcr_cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Run pytest with coverage
        run: |
          poetry run pytest -vv \
            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            forge
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: forge,${{ runner.os }}

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          if [ "${{ runner.os }}" = 'macOS' ]; then
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
          else
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
          fi

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/vcr_cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
        env:
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/vcr_cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/vcr_cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          TOKEN="${{ secrets.PAT_REVIEW }}"
          REPO="${{ github.repository }}"

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            echo $TOKEN | gh auth login --with-token
            gh issue edit $PR_NUMBER --add-label "behaviour change"
            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-logs
          path: forge/logs/
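The `setup_git_auth` step above base64-encodes `pat:<token>` into an `Authorization: Basic` header stored under `http.<server_url>/.extraheader`, which git then attaches to every HTTPS request for that host; the macOS/Linux branch exists only because GNU `base64` wraps long output unless given `-w0`, while BSD `base64` never wraps and has no `-w` flag. A stripped-down sketch with a placeholder token (run inside a git repository):

```sh
# Sketch of the git token-auth setup; TOKEN is a placeholder, never a real secret.
TOKEN="ghp_exampletoken"
config_key="http.https://github.com/.extraheader"

case "$(uname -s)" in
  Darwin) base64_pat=$(echo -n "pat:${TOKEN}" | base64) ;;      # BSD base64: no -w flag
  *)      base64_pat=$(echo -n "pat:${TOKEN}" | base64 -w0) ;;  # GNU base64: disable wrapping
esac

git config "$config_key" "Authorization: Basic $base64_pat"
# ...and the cleanup performed by the "Post Set up git token auth" step:
git config --unset-all "$config_key"
```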
60 .github/workflows/frontend-ci.yml vendored
@@ -1,60 +0,0 @@
name: Frontend CI/CD

on:
  push:
    branches:
      - master
      - development
      - 'ci-test*' # This will match any branch that starts with "ci-test"
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'
  pull_request:
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-ci.yml'

jobs:
  build:
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-latest
    env:
      BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2
        with:
          flutter-version: '3.13.2'

      - name: Build Flutter to Web
        run: |
          cd frontend
          flutter build web --base-href /app/

      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
      #   if: github.event_name == 'push'
      #   run: |
      #     git config --local user.email "action@github.com"
      #     git config --local user.name "GitHub Action"
      #     git add frontend/build/web
      #     git checkout -B ${{ env.BUILD_BRANCH }}
      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
      #     git push -f origin ${{ env.BUILD_BRANCH }}

      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
        if: github.event_name == 'push'
        uses: peter-evans/create-pull-request@v6
        with:
          add-paths: frontend/build/web
          base: ${{ github.ref_name }}
          branch: ${{ env.BUILD_BRANCH }}
          delete-branch: true
          title: "Update frontend build in `${{ github.ref_name }}`"
          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
          commit-message: "Update frontend build based on commit ${{ github.sha }}"
133 .github/workflows/hackathon.yml vendored
@@ -1,133 +0,0 @@
name: Hackathon

on:
  workflow_dispatch:
    inputs:
      agents:
        description: "Agents to run (comma-separated)"
        required: false
        default: "autogpt" # Default agents if none are specified

jobs:
  matrix-setup:
    runs-on: ubuntu-latest
    # Service containers to run with `matrix-setup`
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      env-name: ${{ steps.set-matrix.outputs.env-name }}
    steps:
      - id: set-matrix
        run: |
          if [ "${{ github.event_name }}" == "schedule" ]; then
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::[ 'irrelevant']"
          elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
            matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::$matrix_string"
          else
            echo "::set-output name=env-name::testing"
            echo "::set-output name=matrix::[ 'irrelevant' ]"
          fi

  tests:
    environment:
      name: "${{ needs.matrix-setup.outputs.env-name }}"
    needs: matrix-setup
    env:
      min-python-version: "3.10"
    name: "${{ matrix.agent-name }}"
    runs-on: ubuntu-latest
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    timeout-minutes: 50
    strategy:
      fail-fast: false
      matrix:
        agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
    steps:
      - name: Print Environment Name
        run: |
          echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"

      - name: Check Docker Container
        id: check
        run: docker ps

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version: v18.15

      - name: Run benchmark
        run: |
          link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
          branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
          git clone "$link" -b "$branch" "$AGENT_NAME"
          cd $AGENT_NAME
          cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
          ./run agent start $AGENT_NAME
          cd ../benchmark
          poetry install
          poetry run agbenchmark --no-dep
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
          SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
          WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
          WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
          AGENT_NAME: ${{ matrix.agent-name }}
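The `set-matrix` step above still emits outputs via `::set-output`, a workflow command GitHub deprecated in late 2022; on current runners the same outputs are written to the `$GITHUB_OUTPUT` file instead. A hedged rewrite of the `workflow_dispatch` branch (`AGENTS_INPUT` is a stand-in for the `${{ github.event.inputs.agents }}` interpolation):

```sh
# Hypothetical $GITHUB_OUTPUT-based version of the matrix-setup logic above.
AGENTS_INPUT="autogpt,forge"

IFS=',' read -ra matrix_array <<< "$AGENTS_INPUT"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"

{
  echo "env-name=production"
  echo "matrix=$matrix_string"
} >> "$GITHUB_OUTPUT"
```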
25 .github/workflows/pr-label.yml vendored
@@ -3,10 +3,7 @@ name: "Pull Request auto-label"
 on:
   # So that PRs touching the same files as the push are updated
   push:
-    branches: [ master, development, release-* ]
-    paths-ignore:
-      - 'forge/tests/vcr_cassettes'
-      - 'benchmark/reports/**'
+    branches: [ master ]
   # So that the `dirtyLabel` is removed if conflicts are resolved
   # We recommend `pull_request_target` so that github secrets are available.
   # In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -48,19 +45,11 @@ jobs:
       s_label: 'size/s'
       s_max_size: 10
       m_label: 'size/m'
-      m_max_size: 100
+      m_max_size: 50
       l_label: 'size/l'
-      l_max_size: 500
+      l_max_size: 200
       xl_label: 'size/xl'
-      message_if_xl:
-
-  scope:
-    if: ${{ github.event_name == 'pull_request_target' }}
-    permissions:
-      contents: read
-      pull-requests: write
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/labeler@v5
-        with:
-          sync-labels: true
+      message_if_xl: >
+        This PR exceeds the recommended size of 200 lines.
+        Please make sure you are NOT addressing multiple issues with one PR.
+        Note this PR might be rejected due to its size
151 .github/workflows/python-checks.yml vendored
@@ -1,151 +0,0 @@
name: Python checks

on:
  push:
    branches: [ master, development, ci-test* ]
    paths:
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/lint-ci.yml'
      - 'autogpt/**'
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash

jobs:
  get-changed-parts:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - id: changes-in
        name: Determine affected subprojects
        uses: dorny/paths-filter@v3
        with:
          filters: |
            autogpt:
              - autogpt/autogpt/**
              - autogpt/tests/**
              - autogpt/poetry.lock
            forge:
              - forge/forge/**
              - forge/tests/**
              - forge/poetry.lock
            benchmark:
              - benchmark/agbenchmark/**
              - benchmark/tests/**
              - benchmark/poetry.lock
    outputs:
      changed-parts: ${{ steps.changes-in.outputs.changes }}

  lint:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C ${{ matrix.sub-package }} install

      # Lint

      - name: Lint (isort)
        run: poetry run isort --check .
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Black)
        if: success() || failure()
        run: poetry run black --check .
        working-directory: ${{ matrix.sub-package }}

      - name: Lint (Flake8)
        if: success() || failure()
        run: poetry run flake8 .
        working-directory: ${{ matrix.sub-package }}

  types:
    needs: get-changed-parts
    runs-on: ubuntu-latest
    env:
      min-python-version: "3.10"

    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
      fail-fast: false

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python3 -

      # Install dependencies

      - name: Install Python dependencies
        run: poetry -C ${{ matrix.sub-package }} install

      # Typecheck

      - name: Typecheck
        if: success() || failure()
        run: poetry run pyright
        working-directory: ${{ matrix.sub-package }}
20 .github/workflows/repo-stats.yml vendored
@@ -1,20 +0,0 @@
name: github-repo-stats

on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 23 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: github-repo-stats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        # Use latest release.
        uses: jgehrcke/github-repo-stats@HEAD
        with:
          ghtoken: ${{ secrets.ghrs_github_api_token }}
28 .github/workflows/sponsors_readme.yml vendored Normal file
@@ -0,0 +1,28 @@
name: Generate Sponsors README

on:
  workflow_dispatch:
  schedule:
    - cron: '0 */12 * * *'

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3

      - name: Generate Sponsors 💖
        uses: JamesIves/github-sponsors-readme-action@v1
        with:
          token: ${{ secrets.README_UPDATER_PAT }}
          file: 'README.md'
          minimum: 2500
          maximum: 99999

      - name: Deploy to GitHub Pages 🚀
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          branch: master
          folder: '.'
          token: ${{ secrets.README_UPDATER_PAT }}
31 .github/workflows/workflow-checker.yml vendored
@@ -1,31 +0,0 @@
name: PR Status Checker
on:
  pull_request:
    types: [opened, synchronize, reopened]

jobs:
  status-check:
    name: Check PR Status
    runs-on: ubuntu-latest
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
      #   with:
      #     fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests
      - name: Check PR Status
        run: |
          echo "Current directory before running Python script:"
          pwd
          echo "Attempting to run Python script:"
          python check_actions_status.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
34 .gitignore vendored
@@ -1,21 +1,26 @@
## Original ignores
.github_access_token
autogpt/keys.py
autogpt/*.json
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
/logs
logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT

# Byte-compiled / optimized / DLL files
__pycache__/
@@ -27,11 +32,14 @@ __pycache__/

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
@@ -154,20 +162,4 @@ vicuna-*
openai/

# news
CURRENT_BULLETIN.md

# AgBenchmark
agbenchmark/reports/

# Nodejs
package-lock.json


# Allow for locally private items
# private
pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
rnd/autogpt_server/settings.py
CURRENT_BULLETIN.md
6 .gitmodules vendored
@@ -1,6 +0,0 @@
[submodule "forge/tests/vcr_cassettes"]
	path = forge/tests/vcr_cassettes
	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "rnd/supabase"]
	path = rnd/supabase
	url = https://github.com/supabase/supabase.git
10 .isort.cfg Normal file
@@ -0,0 +1,10 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
||||
@@ -1,6 +0,0 @@
|
||||
[pr_reviewer]
|
||||
num_code_suggestions=0
|
||||
|
||||
[pr_code_suggestions]
|
||||
commitable_code_suggestions=false
|
||||
num_code_suggestions=0
|
||||
@@ -3,125 +3,30 @@ repos:
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
        args: ["--maxkb=500"]
      - id: fix-byte-order-marker
        args: ['--maxkb=500']
      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort-autogpt
        name: Lint (isort) - AutoGPT
        entry: poetry -C autogpt run isort
        files: ^autogpt/
        types: [file, python]
        language: system

      - id: isort-forge
        name: Lint (isort) - Forge
        entry: poetry -C forge run isort
        files: ^forge/
        types: [file, python]
        language: system

      - id: isort-benchmark
        name: Lint (isort) - Benchmark
        entry: poetry -C benchmark run isort
        files: ^benchmark/
        types: [file, python]
        language: system

  - repo: https://github.com/psf/black
    rev: 23.12.1
    # Black has sensible defaults, doesn't need package context, and ignores
    # everything in .gitignore, so it works fine without any config or arguments.
    hooks:
      - id: black
        name: Lint (Black)
      - id: isort
        language_version: python3.10

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
    # To have flake8 load the config of the individual subprojects, we have to call
    # them separately.
  - repo: https://github.com/psf/black
    rev: 23.3.0
    hooks:
      - id: flake8
        name: Lint (Flake8) - AutoGPT
        alias: flake8-autogpt
        files: ^autogpt/(autogpt|scripts|tests)/
        args: [--config=autogpt/.flake8]

      - id: flake8
        name: Lint (Flake8) - Forge
        alias: flake8-forge
        files: ^forge/(forge|tests)/
        args: [--config=forge/.flake8]

      - id: flake8
        name: Lint (Flake8) - Benchmark
        alias: flake8-benchmark
        files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=benchmark/.flake8]

  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT
        alias: pyright-autogpt
        entry: poetry -C autogpt run pyright
        args: [-p, autogpt, autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Forge
        alias: pyright-forge
        entry: poetry -C forge run pyright
        args: [-p, forge, forge]
        files: ^forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Benchmark
        alias: pyright-benchmark
        entry: poetry -C benchmark run pyright
        args: [-p, benchmark, benchmark]
        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false
      - id: black
        language_version: python3.10

  - repo: local
    hooks:
      - id: pytest-autogpt
        name: Run tests - AutoGPT (excl. slow tests)
        entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest-forge
        name: Run tests - Forge (excl. slow tests)
        entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^forge/(forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

      - id: pytest-benchmark
        name: Run tests - Benchmark
        entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
      - id: pytest-check
        name: pytest-check
        entry: pytest --cov=autogpt --without-integration --without-slow-integration
        language: system
        pass_filenames: false
        always_run: true
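The `repo: local` hooks in this config use `language: system`, so pre-commit does not build an isolated virtualenv for them; it runs the `entry` command verbatim in the developer's own environment, which is why these hooks can rely on the project's Poetry installs. The `pytest-check` hook, for instance, amounts to running the following by hand (flags as in the config; `--without-integration`/`--without-slow-integration` come from the pytest-integration plugin):

```sh
pytest --cov=autogpt --without-integration --without-slow-integration
```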
61 .vscode/all-projects.code-workspace vendored
@@ -1,61 +0,0 @@
{
  "folders": [
    {
      "name": "autogpt",
      "path": "../autogpt"
    },
    {
      "name": "benchmark",
      "path": "../benchmark"
    },
    {
      "name": "docs",
      "path": "../docs"
    },
    {
      "name": "forge",
      "path": "../forge"
    },
    {
      "name": "frontend",
      "path": "../frontend"
    },
    {
      "name": "autogpt_server",
      "path": "../rnd/autogpt_server"
    },
    {
      "name": "autogpt_builder",
      "path": "../rnd/autogpt_builder"
    },
    {
      "name": "market",
      "path": "../rnd/market"
    },
    {
      "name": "lib",
      "path": "../rnd/autogpt_libs"
    },
    {
      "name": "infra",
      "path": "../rnd/infra"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}
9
BULLETIN.md
Normal file
@@ -0,0 +1,9 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag.

# INCLUDED COMMAND 'send_tweet' IS DEPRECATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins

## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.
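
For example, a bind mount that previously targeted the old workdir should now point at `/app` (the image name and host path below are illustrative):

```shell
# before: docker run -it -v $PWD/auto_gpt_workspace:/home/appuser/auto_gpt_workspace auto-gpt
docker run -it -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace auto-gpt
```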
21
CITATION.cff
@@ -1,21 +0,0 @@
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: AutoGPT
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - name: Significant Gravitas
    website: 'https://agpt.co'
repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
url: 'https://agpt.co'
abstract: >-
  A collection of tools and experimental open-source attempts to make GPT-4 fully
  autonomous.
keywords:
  - AI
  - Agent
license: MIT
182
CLI-USAGE.md
@@ -1,182 +0,0 @@
## CLI Documentation

This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000.

### 1. Entry Point for the CLI

Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.

```sh
./run
```

**Output**:

```
Usage: cli.py [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  agent      Commands to create, start and stop agents
  benchmark  Commands to start the benchmark and list tests and categories
  setup      Installs dependencies needed for your system.
```

If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:

```sh
./run COMMAND --help
```

This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.

### 2. Setup Command

```sh
./run setup
```

**Output**:

```
Setup initiated
Installation has been completed.
```

This command initializes the setup of the project.

### 3. Agents Commands

**a. List All Agents**

```sh
./run agent list
```

**Output**:

```
Available agents: 🤖
	🐙 forge
	🐙 autogpt
```

Lists all the available agents.

**b. Create a New Agent**

```sh
./run agent create my_agent
```

**Output**:

```
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
```

Creates a new agent named 'my_agent'.

**c. Start an Agent**

```sh
./run agent start my_agent
```

**Output**:

```
... (ASCII Art representing the agent startup)
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
```

Starts 'my_agent' and displays startup ASCII art and logs.

**d. Stop an Agent**

```sh
./run agent stop
```

**Output**:

```
Agent stopped
```

Stops the running agent.

### 4. Benchmark Commands

**a. List Benchmark Categories**

```sh
./run benchmark categories list
```

**Output**:

```
Available categories: 📚
	📖 code
	📖 safety
	📖 memory
	... (and so on)
```

Lists all available benchmark categories.

**b. List Benchmark Tests**

```sh
./run benchmark tests list
```

**Output**:

```
Available tests: 📚
	📖 interface
		🔬 Search - TestSearch
		🔬 Write File - TestWriteFile
	... (and so on)
```

Lists all available benchmark tests.

**c. Show Details of a Benchmark Test**

```sh
./run benchmark tests details TestWriteFile
```

**Output**:

```
TestWriteFile
-------------

	Category: interface
	Task: Write the word 'Washington' to a .txt file
	... (and other details)
```

Displays the details of the 'TestWriteFile' benchmark test.

**d. Start Benchmark for the Agent**

```sh
./run benchmark start my_agent
```

**Output**:

```
(more details about the testing process shown while the tests are running)
============= 13 failed, 1 passed in 0.97s =============
```

Displays the results of the benchmark tests on 'my_agent'.
@@ -1,12 +1,12 @@
# Code of Conduct for AutoGPT
# Code of Conduct for Auto-GPT

## 1. Purpose

The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.

## 2. Scope

This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.

## 3. Our Standards

@@ -36,5 +36,4 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont

## 6. Contact

If you have any questions or concerns, please contact the project maintainers on Discord:
https://discord.gg/autogpt
If you have any questions or concerns, please contact the project maintainers.
153
CONTRIBUTING.md
@@ -1,38 +1,129 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
# Contributing to Auto-GPT

Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.

[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
This document provides guidelines and best practices to help you contribute effectively.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
   [todo's][roadmap]!
   * We highly recommend posting your idea and discussing it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines].
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
   preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
   [contribution guide] for options.
## Code of Conduct

[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines
By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.

If you wish to get involved with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md

In fact, why not just look through the whole wiki (it's only a few pages) and
hop on our Discord. See you there! :-)
## 📢 A Quick Word
Right now we will not be accepting any contributions that add non-essential commands to Auto-GPT.

❤️ & 🔆
The team @ AutoGPT
https://discord.gg/autogpt
However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).

## Getting Started

1. Fork the repository and clone your fork.
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
3. Make your changes in the new branch.
4. Test your changes thoroughly.
5. Commit and push your changes to your fork.
6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
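
In shell terms, steps 1–5 of the list above look roughly like this (your GitHub username, the branch name, and the commit message are placeholders):

```shell
git clone https://github.com/<your-username>/Auto-GPT.git
cd Auto-GPT
git checkout -b fix-bug-123      # descriptive branch name
# ... make and test your changes ...
git add -p                       # stage the relevant changes
git commit -m "Fix bug 123"
git push -u origin fix-bug-123   # then open a PR from your fork
```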

## How to Contribute

### Reporting Bugs

If you find a bug in the project, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A description of the problem, including steps to reproduce the issue.
- Any relevant logs, screenshots, or other supporting information.

### Suggesting Enhancements

If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
- Any relevant examples, mockups, or supporting information.

### Submitting Pull Requests

When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status).
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
- You should not include any unrelated or "extra" small tweaks or changes.

## Style Guidelines

### Code Formatting

We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.

To format your code, run the following commands in the project's root directory:

```bash
python -m black .
python -m isort .
```

Or if you have these tools installed globally:
```bash
black .
isort .
```
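
To verify formatting without modifying any files (handy right before pushing), both tools offer check-only modes:

```bash
black --check --diff .   # list files that would be reformatted
isort --check-only .     # list files with mis-sorted imports
```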

### Pre-Commit Hooks

We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:
```bash
pip install pre-commit
```

Run the following command in the project's root directory to install the pre-commit hooks:
```bash
pre-commit install
```

Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
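
You can also trigger all hooks manually at any time, e.g. to check the entire tree before your first commit:

```bash
pre-commit run --all-files
```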

If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.

Happy coding, and once again, thank you for your contributions!

Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts

## Testing your changes

If you add or change code, make sure the updated code is covered by tests.
To increase coverage if necessary, [write tests using pytest].

For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).

[write tests using pytest]: https://realpython.com/pytest-python-testing/

### API-dependent tests

To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
requests and matching responses in so-called *cassettes*, allowing us to run the tests
in CI without needing actual API access.

When changes cause a test prompt to be generated differently, it will likely miss the
cache and make a request to the API, updating the cassette with the new request+response.
*Be sure to include the updated cassette in your PR!*

When you run Pytest locally:

- If there is no prompt change, you will not consume API tokens because no new OpenAI calls are required.
- If the prompt changes in a way that makes the cassettes unusable:
  - If no API key is set, the test fails: it requires a new cassette. So, add an API key to `.env`.
  - If the API key is present, the tests will make a real call to OpenAI.
    - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR.
    - If the test is unsuccessful:
      - Either your change made Auto-GPT less capable, in which case you have to change your code,
      - or the test might be poorly written, in which case you can make suggestions to change the test.

In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
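
A minimal re-recording workflow might look like the following; the exact cassette location is an assumption, so check where VCRpy stores them in this repo before committing:

```bash
echo 'OPENAI_API_KEY=sk-...' >> .env   # a real key is needed to record new cassettes
pytest tests/                          # cache misses are re-recorded against the live API
git add tests/**/cassettes             # assumed cassette path; include updates in your PR
git commit -m "Update VCR cassettes for changed prompts"
```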
40
Dockerfile
Normal file
@@ -0,0 +1,40 @@
# 'dev' or 'release' container build
ARG BUILD_TYPE=dev

# Use an official Python base image from the Docker Hub
FROM python:3.10-slim AS autogpt-base

# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver firefox-esr \
    ca-certificates

# Install utilities
RUN apt-get install -y curl jq wget git

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# Install the required python packages globally
ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY . ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
    pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt

FROM autogpt-${BUILD_TYPE} AS auto-gpt
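
Because the final stage is selected via the `BUILD_TYPE` build argument, either flavor can be built from the repository root, for example:

```shell
docker build -t auto-gpt .                                 # dev image (default)
docker build -t auto-gpt --build-arg BUILD_TYPE=release .  # slimmer release image
```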
@@ -1,61 +0,0 @@
# 'dev' or 'release' container build
ARG BUILD_TYPE=dev

# Use an official Python base image from the Docker Hub
FROM python:3.10-slim AS autogpt-base

# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver ca-certificates gcc \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install utilities
RUN apt-get update && apt-get install -y \
    curl jq wget git \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    POETRY_HOME="/opt/poetry" \
    POETRY_VIRTUALENVS_PATH="/venv" \
    POETRY_VIRTUALENVS_IN_PROJECT=0 \
    POETRY_NO_INTERACTION=1

# Install and configure Poetry
RUN curl -sSL https://install.python-poetry.org | python3 -
ENV PATH="$POETRY_HOME/bin:$PATH"
RUN poetry config installer.max-workers 10

WORKDIR /app/autogpt
COPY autogpt/pyproject.toml autogpt/poetry.lock ./

# Include forge so it can be used as a path dependency
COPY forge/ ../forge

# Include frontend
COPY frontend/ ../frontend

# Set the entrypoint
ENTRYPOINT ["poetry", "run", "autogpt"]
CMD []

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN poetry install --no-cache --no-root \
    && rm -rf $(poetry env info --path)/src
ONBUILD COPY autogpt/ ./

# release build -> include bare minimum
FROM autogpt-base as autogpt-release
RUN poetry install --no-cache --no-root --without dev \
    && rm -rf $(poetry env info --path)/src
ONBUILD COPY autogpt/autogpt/ ./autogpt
ONBUILD COPY autogpt/scripts/ ./scripts
ONBUILD COPY autogpt/plugins/ ./plugins
ONBUILD COPY autogpt/README.md ./README.md
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS autogpt
RUN poetry install --only-root
@@ -1,173 +0,0 @@
# Quickstart Guide

> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here

Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.

## System Requirements

This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).

## Getting Setup
1. **Fork the Repository**
   To fork the repository, follow these steps:
   - Navigate to the main page of the repository.

   

   - In the top-right corner of the page, click Fork.

   

   - On the next page, select your GitHub account to create the fork.
   - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.

2. **Clone the Repository**
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   - Open your terminal.
   - Navigate to the directory where you want to clone the repository.
   - Run the git clone command for the fork you just created.

   

   - Then open your project in your IDE.

   

3. **Set up the Project**
   Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
   It can be accessed by running the `run` command by typing `./run` in the terminal.

   The first command you need to use is `./run setup`. This will guide you through setting up your system.
   Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:

   

### For Windows Users

If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.

#### Update WSL
Run the following command in PowerShell or Command Prompt. It will:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
4. Download and install the Ubuntu Linux distribution (a reboot may be required).

```shell
wsl --install
```

For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).

#### Resolve FileNotFoundError or "No such file or directory" Errors
When you run `./run setup` and encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux-style line endings (LF - Line Feed).

To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here's how to install and run `dos2unix` on the script:

```shell
sudo apt update
sudo apt install dos2unix
dos2unix ./run
```

After executing the above commands, running `./run setup` should work successfully.

#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.

You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:



## Creating Your Agent

After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.

Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose

Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`



## Running your Agent

Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`

This starts the agent on the URL: `http://localhost:8000/`



The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.



Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.



When you have finished with your agent or just need to restart it, use Ctrl+C to end the session. Then, you can re-run the start command.

If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
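
If the stop command is ever unavailable, the equivalent manual step is to kill whatever is listening on port 8000 using standard Unix tooling (not a project command):

```shell
lsof -ti :8000 | xargs kill
```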

## Benchmarking your Agent

The benchmarking system can also be accessed using the CLI:

```bash
agpt % ./run benchmark
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...

  Commands to start the benchmark and list tests and categories

Options:
  --help  Show this message and exit.

Commands:
  categories  Benchmark categories group command
  start       Starts the benchmark command
  tests       Benchmark tests group command
agpt % ./run benchmark categories
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...

  Benchmark categories group command

Options:
  --help  Show this message and exit.

Commands:
  list  List benchmark categories command
agpt % ./run benchmark tests
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...

  Benchmark tests group command

Options:
  --help  Show this message and exit.

Commands:
  details  Benchmark test details command
  list     List benchmark tests command
```

The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
```bash
./run benchmark categories list
# And what tests are available with
./run benchmark tests list
```



Finally, you can run the benchmark with

```bash
./run benchmark start YOUR_AGENT_NAME
```
66
SECURITY.md
@@ -1,66 +0,0 @@
# Security Policy

- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
  - [Restrict Workspace](#restrict-workspace)
  - [Untrusted inputs](#untrusted-inputs)
  - [Data privacy](#data-privacy)
  - [Untrusted environments or networks](#untrusted-environments-or-networks)
  - [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)

## Using AutoGPT Securely

### Restrict Workspace

Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.

Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing) to mitigate some of these risks.
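
Concretely, the switch lives in your `.env` file; shown here with its safe default (set it to False only if you accept the risk):

```shell
# keep file operations confined to the agent workspace (default)
RESTRICT_TO_WORKSPACE=True
```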

### Untrusted inputs

When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.

For maximum security when handling untrusted inputs, you may need to employ the following:

* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
  * Validation: Enforce strict rules on allowed characters and data types.
  * Filtering: Remove potentially malicious scripts or code fragments.
  * Encoding: Convert special characters into safe representations.
  * Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).

### Data privacy

To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.

### Untrusted environments or networks

Since AutoGPT performs network calls to the OpenAI API, it is important to always run it in trusted environments and networks. Running it in untrusted environments can expose your API key to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.

However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.

### Multi-Tenant environments

If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact with or access each other's data.

The primary areas of concern are tenant isolation, resource allocation, model sharing, and hardware attacks.

- Tenant Isolation: you must make sure that tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important, because you not only prevent unauthorized access to data, but also prevent malicious users or tenants from sending prompts to execute under another tenant's identity.

- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.

- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.

- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side-channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.

## Reporting a Vulnerability

Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities of AutoGPT.

However, if you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).

A team of volunteers maintains this project on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.
@@ -1,23 +0,0 @@
This page is a list of issues you could encounter along with their fixes.

# Forge
**Poetry configuration invalid**

The poetry configuration is invalid:
- Additional properties are not allowed ('group' was unexpected)

<img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">

*Solution*

Update Poetry.

**Pydantic Validation Error**

Remove your SQLite `agent.db` file; the error is probably caused by some of your data not complying with the new spec (we will create migrations soon to avoid this problem).
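
For instance, assuming the database sits in your working directory (the path is a guess; locate your own `agent.db` first):

```shell
rm agent.db   # delete the stale SQLite agent database
```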

# Benchmark
TODO

# Frontend
TODO
Binary file not shown.
Binary file not shown. (before: 1.1 MiB)
Binary file not shown. (before: 49 KiB)
@@ -1,12 +0,0 @@
# To boot the app run the following:
# docker-compose run auto-gpt
version: '3.9'

services:
  auto-gpt:
    build:
      dockerfile: .devcontainer/Dockerfile
      context: ../
    tty: true
    volumes:
      - ../:/workspace/AutoGPT
@@ -1,179 +0,0 @@
################################################################################
### AutoGPT - GENERAL SETTINGS
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# OPENAI_API_KEY=

## ANTHROPIC_API_KEY - Anthropic API Key (Example: sk-ant-api03-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# ANTHROPIC_API_KEY=

## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# GROQ_API_KEY=

## LLAMAFILE_API_BASE - Llamafile API base URL
# LLAMAFILE_API_BASE=http://localhost:8080/v1

## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true

## COMPONENT_CONFIG_FILE - Path to the json config file (Default: None)
# COMPONENT_CONFIG_FILE=

### Workspace ###

## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
# RESTRICT_TO_WORKSPACE=True

## DISABLED_COMMANDS - The comma separated list of commands that are disabled (Default: None)
# DISABLED_COMMANDS=

## FILE_STORAGE_BACKEND - Choose a storage backend for contents
## Options: local, gcs, s3
# FILE_STORAGE_BACKEND=local

## STORAGE_BUCKET - GCS/S3 Bucket to store contents in
# STORAGE_BUCKET=autogpt

## GCS Credentials
# see https://cloud.google.com/storage/docs/authentication#libauth

## AWS/S3 Credentials
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html

## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here.
# S3_ENDPOINT_URL=

### Miscellaneous ###

## AUTHORISE_COMMAND_KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

## EXIT_KEY - Key to exit AutoGPT
# EXIT_KEY=n

################################################################################
### LLM PROVIDER
################################################################################

## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

# OPENAI_API_TYPE=
# OPENAI_API_VERSION=

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## Note: this feature is only supported by OpenAI's newer models.
# OPENAI_FUNCTIONS=False

## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=

## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml

# AZURE_OPENAI_AD_TOKEN=
# AZURE_OPENAI_ENDPOINT=

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4-turbo)
# SMART_LLM=gpt-4-turbo

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Huggingface (IMAGE_PROVIDER=huggingface)

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
# SD_WEBUI_AUTH=

################################################################################
### GITHUB
################################################################################

## GITHUB_API_KEY - Github API key / PAT (Default: None)
# GITHUB_API_KEY=

## GITHUB_USERNAME - Github username (Default: None)
# GITHUB_USERNAME=

################################################################################
### WEB BROWSING
################################################################################

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=

################################################################################
### TEXT TO SPEECH PROVIDER
################################################################################

## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
## Options: gtts, streamelements, elevenlabs, macos
# TEXT_TO_SPEECH_PROVIDER=gtts

## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian

## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=

## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=

################################################################################
### LOGGING
################################################################################

## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly.
## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
# LOG_LEVEL=INFO

## LOG_FORMAT - The format in which to log messages to the console (and log files).
## Options: simple, debug, structured_google_cloud
# LOG_FORMAT=simple

## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately.
## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud.
# LOG_FILE_FORMAT=simple

## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False)
# PLAIN_OUTPUT=False

################################################################################
### Agent Protocol Server Settings
################################################################################
## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000)
## AP_SERVER_DB_URL - Specifies what connection url the agent protocol database will connect to (Default: Internal SQLite)
## AP_SERVER_CORS_ALLOWED_ORIGINS - Comma separated list of allowed origins for CORS. (Default: http://localhost:{AP_SERVER_PORT})
# AP_SERVER_PORT=8000
# AP_SERVER_DB_URL=sqlite:///data/ap_server.db
# AP_SERVER_CORS_ALLOWED_ORIGINS=
@@ -1,14 +0,0 @@
[flake8]
max-line-length = 88
# Ignore rules that conflict with Black code style
extend-ignore = E203, W503
exclude =
    .git,
    __pycache__/,
    *.pyc,
    .pytest_cache/,
    venv*/,
    .venv/,
    data/,
    logs/,
    tests/unit/data/,
167
autogpt/.gitignore
vendored
@@ -1,167 +0,0 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
*.mpeg
.env
azure.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
/logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT
data/*

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
/plugins/*
plugins_config.yaml
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/
site/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.direnv/
.env
.venv
env/
venv*/
ENV/
env.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
llama-*
vicuna-*

# mac
.DS_Store

openai/

# news
CURRENT_BULLETIN.md

# Nodejs
package-lock.json
package.json

# Keep
!.keep
3
autogpt/.vscode/settings.json
vendored
@@ -1,3 +0,0 @@
{
    "python.analysis.typeCheckingMode": "basic",
}
@@ -1,13 +0,0 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co/autogpt.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing.

# v0.5.0 RELEASE HIGHLIGHTS! 🚀🚀
# -------------------------------
Cloud-readiness, a new UI, support for the newest Agent Protocol version, and much more:
*v0.5.0 is our biggest release yet!*

Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/AutoGPT/releases.
@@ -1,160 +0,0 @@
# AutoGPT: An Autonomous GPT-4 Experiment

[📖 **Documentation**][docs] | [🚀 **Contributing**](../../CONTRIBUTING.md)

AutoGPT is an experimental open-source application showcasing the capabilities of modern Large Language Models. This program, driven by GPT-4, chains together LLM "thoughts" to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, AutoGPT pushes the boundaries of what is possible with AI.

<h2 align="center"> Demo April 16th 2023 </h2>

https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4

Demo made by <a href=https://twitter.com/BlakeWerlinger>Blake Werlinger</a>

## 🚀 Features

- 🔌 Agent Protocol ([docs](https://agentprotocol.ai))
- 💻 Easy to use UI
- 🌐 Internet access for searches and information gathering
- 🧠 Powered by a mix of GPT-4 and GPT-3.5 Turbo
- 🔗 Access to popular websites and platforms
- 🗃️ File generation and editing capabilities
- 🔌 Extensibility with Plugins
<!-- - 💾 Long-term and short-term memory management -->

## Setting up AutoGPT
1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
2. Copy `.env.template` to `.env` and set `OPENAI_API_KEY`
3. Make sure you have Poetry [installed](https://python-poetry.org/docs/#installation)

For more ways to run AutoGPT, more detailed instructions, and more configuration options,
see the [setup guide][docs/setup].

## Running AutoGPT
The CLI should be self-documenting:
```shell
$ ./autogpt.sh --help
Usage: python -m autogpt [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  run    Sets up and runs an agent, based on the task specified by the...
  serve  Starts an Agent Protocol compliant AutoGPT server, which creates...
```
When run without a sub-command, it will default to `run` for legacy reasons.

<details>
<summary>
<code>$ ./autogpt.sh run --help</code>
</summary>

The `run` sub-command starts AutoGPT with the legacy CLI interface:

```shell
$ ./autogpt.sh run --help
Usage: python -m autogpt run [OPTIONS]

  Sets up and runs an agent, based on the task specified by the user, or
  resumes an existing agent.

Options:
  -c, --continuous                Enable Continuous Mode
  -y, --skip-reprompt             Skips the re-prompting messages at the
                                  beginning of the script
  -l, --continuous-limit INTEGER  Defines the number of times to run in
                                  continuous mode
  --speak                         Enable Speak Mode
  --debug                         Enable Debug Mode
  --skip-news                     Specifies whether to suppress the output of
                                  latest news on startup.
  --install-plugin-deps           Installs external dependencies for 3rd party
                                  plugins.
  --ai-name TEXT                  AI name override
  --ai-role TEXT                  AI role override
  --constraint TEXT               Add or override AI constraints to include in
                                  the prompt; may be used multiple times to
                                  pass multiple constraints
  --resource TEXT                 Add or override AI resources to include in
                                  the prompt; may be used multiple times to
                                  pass multiple resources
  --best-practice TEXT            Add or override AI best practices to include
                                  in the prompt; may be used multiple times to
                                  pass multiple best practices
  --override-directives           If specified, --constraint, --resource and
                                  --best-practice will override the AI's
                                  directives instead of being appended to them
  --component-config-file TEXT    Path to the json configuration file.
  --help                          Show this message and exit.
```
</details>

<details>
<summary>
<code>$ ./autogpt.sh serve --help</code>
</summary>

The `serve` sub-command starts AutoGPT wrapped in an Agent Protocol server:

```shell
$ ./autogpt.sh serve --help
Usage: python -m autogpt serve [OPTIONS]

  Starts an Agent Protocol compliant AutoGPT server, which creates a custom
  agent for every task.

Options:
  --debug                Enable Debug Mode
  --install-plugin-deps  Installs external dependencies for 3rd party
                         plugins.
  --help                 Show this message and exit.
```
</details>

With `serve`, the application exposes an Agent Protocol compliant API and serves a frontend,
by default on `http://localhost:8000`.

For more comprehensive instructions, see the [user guide][docs/usage].

[docs]: https://docs.agpt.co/autogpt
[docs/setup]: https://docs.agpt.co/autogpt/setup
[docs/usage]: https://docs.agpt.co/autogpt/usage
[docs/plugins]: https://docs.agpt.co/autogpt/plugins

## 📚 Resources
* 📔 AutoGPT [project wiki](https://github.com/Significant-Gravitas/AutoGPT/wiki)
* 🧮 AutoGPT [project kanban](https://github.com/orgs/Significant-Gravitas/projects/1)
* 🌃 AutoGPT [roadmap](https://github.com/orgs/Significant-Gravitas/projects/2)

## ⚠️ Limitations

This experiment aims to showcase the potential of GPT-4 but comes with some limitations:

1. Not a polished application or product, just an experiment
2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
3. Quite expensive to run, so set and monitor your API key limits with OpenAI!

## 🛡 Disclaimer

This project, AutoGPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.

The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by AutoGPT.

**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.

As an autonomous experiment, AutoGPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.

By using AutoGPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.

---

In Q2 of 2023, AutoGPT became the fastest growing open-source project in history. Now that the dust has settled, we're committed to continued sustainable development and growth of the project.

<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
<img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
</a>
</p>
@@ -2,6 +2,13 @@ import os
import random
import sys

from dotenv import load_dotenv

if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
    print("Setting random seed to 42")
    random.seed(42)

# Load the user's .env file into environment variables
load_dotenv(verbose=True, override=True)

del load_dotenv
5
autogpt/__main__.py
Normal file
@@ -0,0 +1,5 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli

if __name__ == "__main__":
    autogpt.cli.main()
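
This makes the package executable as a module, so the assistant can be launched with:

```shell
python -m autogpt
```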
3
autogpt/agbenchmark_config/.gitignore
vendored
@@ -1,3 +0,0 @@
logs/
reports/
temp_folder/
@@ -1,143 +0,0 @@
#!/usr/bin/env python3

import json
import logging
import re
import sys
from collections import defaultdict
from pathlib import Path

from tabulate import tabulate

info = "-v" in sys.argv
debug = "-vv" in sys.argv
granular = "--granular" in sys.argv

logging.basicConfig(
    level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING
)
logger = logging.getLogger(__name__)

# Get a list of all JSON files in the directory
report_files = [
    report_file
    for dir in (Path(__file__).parent / "reports").iterdir()
    if re.match(r"^\d{8}T\d{6}_", dir.name)
    and (report_file := dir / "report.json").is_file()
]

labels = list[str]()
runs_per_label = defaultdict[str, int](lambda: 0)
suite_names = list[str]()
test_names = list[str]()

# Create a dictionary to store grouped success values by suffix and test
grouped_success_values = defaultdict[str, list[str]](list[str])

# Loop through each JSON file to collect suffixes and success values
for report_file in sorted(report_files):
    with open(report_file) as f:
        logger.info(f"Loading {report_file}...")

        data = json.load(f)
        if "tests" in data:
            test_tree = data["tests"]
            label = data["agent_git_commit_sha"].rsplit("/", 1)[1][:7]  # commit hash
        else:
            # Benchmark run still in progress
            test_tree = data
            label = report_file.parent.name.split("_", 1)[1]
            logger.info(f"Run '{label}' seems to be in progress")

    runs_per_label[label] += 1

    def process_test(test_name: str, test_data: dict):
        result_group = grouped_success_values[f"{label}|{test_name}"]

        if "tests" in test_data:
            logger.debug(f"{test_name} is a test suite")

            # Test suite
            suite_attempted = any(
                test["metrics"]["attempted"] for test in test_data["tests"].values()
            )
            logger.debug(f"suite_attempted: {suite_attempted}")
            if not suite_attempted:
                return

            if test_name not in test_names:
                test_names.append(test_name)

            if test_data["metrics"]["percentage"] == 0:
                result_indicator = "❌"
            else:
                highest_difficulty = test_data["metrics"]["highest_difficulty"]
                result_indicator = {
                    "interface": "🔌",
                    "novice": "🌑",
                    "basic": "🌒",
                    "intermediate": "🌓",
                    "advanced": "🌔",
                    "hard": "🌕",
                }[highest_difficulty]

            logger.debug(f"result group: {result_group}")
            logger.debug(f"runs_per_label: {runs_per_label[label]}")
            if len(result_group) + 1 < runs_per_label[label]:
                result_group.extend(
                    ["❔"] * (runs_per_label[label] - len(result_group) - 1)
                )
            result_group.append(result_indicator)
            logger.debug(f"result group (after): {result_group}")

            if granular:
                for test_name, test in test_data["tests"].items():
                    process_test(test_name, test)
            return

        test_metrics = test_data["metrics"]
        result_indicator = "❔"

        if "attempted" not in test_metrics:
            return
        elif test_metrics["attempted"]:
            if test_name not in test_names:
                test_names.append(test_name)

            success_value = test_metrics["success"]
            result_indicator = {True: "✅", False: "❌"}[success_value]

        if len(result_group) + 1 < runs_per_label[label]:
            result_group.extend(
                [" "] * (runs_per_label[label] - len(result_group) - 1)
            )
        result_group.append(result_indicator)

    for test_name, suite in test_tree.items():
        try:
            process_test(test_name, suite)
        except KeyError:
            print(f"{test_name}.metrics: {suite['metrics']}")
            raise

    if label not in labels:
        labels.append(label)

# Create headers
headers = ["Test Name"] + list(labels)

# Prepare data for tabulation
table_data = list[list[str]]()
for test_name in test_names:
    row = [test_name]
    for label in labels:
        results = grouped_success_values.get(f"{label}|{test_name}", ["❔"])
        if len(results) < runs_per_label[label]:
            results.extend(["❔"] * (runs_per_label[label] - len(results)))
        if len(results) > 1 and all(r == "❔" for r in results):
            results.clear()
        row.append(" ".join(results))
    table_data.append(row)

# Print tabulated data
print(tabulate(table_data, headers=headers, tablefmt="grid"))
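For reference, the parser above implies a per-run report shaped roughly like the following; the field names come from the parsing code, while the values are invented:

```py
# Sketch of reports/<timestamp>_<label>/report.json as consumed by the script above
example_report = {
    # first 7 chars of the last path segment become the column label
    "agent_git_commit_sha": "https://github.com/owner/repo/tree/0123abcdef",
    "tests": {
        "TestWriteFile": {
            "metrics": {"attempted": True, "success": True},  # renders as ✅
        },
        "TestSomeSuite": {  # a suite: carries a nested "tests" mapping
            "tests": {
                "TestStep1": {"metrics": {"attempted": True, "success": False}},
            },
            "metrics": {"percentage": 0, "highest_difficulty": "basic"},  # renders as ❌
        },
    },
}
```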
@@ -1,8 +0,0 @@
{
  "workspace": {
    "input": "agbenchmark_config/workspace",
    "output": "agbenchmark_config/workspace"
  },
  "entry_path": "agbenchmark.benchmarks",
  "host": "http://localhost:8000"
}
4
autogpt/agent/__init__.py
Normal file
@@ -0,0 +1,4 @@
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager

__all__ = ["Agent", "AgentManager"]
352
autogpt/agent/agent.py
Normal file
@@ -0,0 +1,352 @@
from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.config import Config
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input, send_chat_message_to_user
from autogpt.workspace import Workspace


class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines everything
            the AI needs to know to achieve its task successfully.
            Currently, the dynamic and customizable information in the system prompt
            are ai_name, description and goals.

        triggering_prompt: The last sentence the AI will see before answering.
            For Auto-GPT, this prompt is:
            Determine which next command to use, and respond using the format specified
            above:
            The triggering prompt is not part of the system prompt because between the
            system prompt and the triggering prompt we have contextual information that
            can distract the AI and make it forget that its goal is to find the next
            task to achieve.
            SYSTEM PROMPT
            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
            TRIGGERING PROMPT

        The triggering prompt reminds the AI about its short-term meta task
        (defining the next task).
    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        self.summary_memory = (
            "I was created."  # Initial memory necessary to avoid hallucination
        )
        self.last_memory_index = 0
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        user_input = ""

        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                send_chat_message_to_user(
                    f"Continuous Limit Reached: \n {cfg.continuous_limit}"
                )
                break
            send_chat_message_to_user("Thinking... \n")
            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")

                    send_chat_message_to_user("Thinking... \n")
                    arguments = self._resolve_pathlike_command_args(arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                send_chat_message_to_user(
                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
                    f"ARGUMENTS = {arguments}"
                )
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}...",
                    flush=True,
                )
                while True:
                    console_input = ""
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
                        console_input = clean_input(
                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )

                        self_feedback_resp = self.get_self_feedback(
                            self.full_message_history,
                            assistant_reply_json,
                            cfg.fast_llm_model,
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        else:
                            user_input = self_feedback_resp
                            command_name = "human_feedback"
                        break
                    elif console_input.lower().strip() == "":
                        print("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print(
                                f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif user_input == "EXIT":
                    send_chat_message_to_user("Exiting...")
                    print("Exiting...", flush=True)
                    break
            else:
                # Print command
                send_chat_message_to_user(
                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
                    f"ARGUMENTS = {arguments}"
                )

                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
                    result = plugin.post_command(command_name, result)
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            # Check if there's a result from the command; append it to the message
            # history
            if result is not None:
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )

    def _resolve_pathlike_command_args(self, command_args):
        if "directory" in command_args and command_args["directory"] in {"", "/"}:
            command_args["directory"] = str(self.workspace.root)
        else:
            for pathlike in ["filename", "directory", "clone_path"]:
                if pathlike in command_args:
                    command_args[pathlike] = str(
                        self.workspace.get_path(command_args[pathlike])
                    )
        return command_args

    def get_self_feedback(
        self, full_message_history, latest_response_json, llm_model: str
    ) -> str:
        """Generates a feedback response based on the agent's latest output.

        This method takes the message history and the latest parsed response, which
        contains thought elements such as 'reasoning', 'plan', 'thoughts', and
        'criticism'. It combines these elements into a single feedback message and
        uses the create_chat_completion() function to generate a response based on
        the input message.

        Args:
            full_message_history (list): The agent's full message history.
            latest_response_json (dict): The latest parsed response, containing
                thought elements like reasoning, plan, thoughts, and criticism.
            llm_model (str): The name of the LLM to query for feedback.

        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role
        thoughts = latest_response_json.get("thoughts", {})
        command = latest_response_json.get("command", {})

        from autogpt.llm.token_counter import count_message_tokens
        import json

        # Get ~2000 tokens from the full message history
        # !!WARNING: THIS IMPLEMENTATION IS BAD - CAUSES BUG SIMILAR TO THIS: https://github.com/Significant-Gravitas/Auto-GPT/pull/3619
        trimmed_message_history = []
        for i in range(len(full_message_history) - 1, -1, -1):
            message = full_message_history[i]
            # Skip all messages from the user
            if message["role"] == "user":
                continue
            # If the message is from the assistant, remove the "thoughts" dictionary
            # from the content
            elif message["role"] == "assistant":
                try:
                    content_dict = json.loads(message["content"])
                    content_dict = content_dict.copy()
                    if "thoughts" in content_dict:
                        del content_dict["thoughts"]
                    message["content"] = json.dumps(content_dict)
                except Exception:
                    pass
            trimmed_message_history.append(message)

        feedback_prompt = f"""Below is a message from an AI agent with the role: '{ai_role}'.
Please review the provided Recent History, Agent's Plan, the Agent's proposed action and their Reasoning.

If the agent's command makes sense and the agent is on the right track, respond with the letter 'Y' followed by a space.
If the provided information is not suitable for achieving the role's objectives or a red flag is raised, please clearly and concisely tell the agent about the issue and suggest an alternative action.
"""
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        # thought = thoughts.get("thoughts", "")
        # criticism = thoughts.get("criticism", "")
        # feedback_thoughts = thought + reasoning + plan + criticism
        return create_chat_completion(
            [
                {
                    "role": "system",
                    "content": """You are AgentReviewerGPT.

Respond with Y if the agent passes your review.

Be wary of the following red flags in the agent's behaviour:
- The agent is repeating itself.
- The agent is stuck in a loop.
- The agent is using '<text>' instead of the actual text.
- The agent is using the wrong command for the situation.
- The agent is executing a python file that does not exist (it should check if the file exists and read its contents before executing it).

Notes:
+ Hardcoded paths are okay""",
                },
                {
                    "role": "user",
                    "content": f"{feedback_prompt}\n\nRecent History:\n{trimmed_message_history}\n\n\n\nAgent's Plan:\n{plan}\n\nAgent's Proposed Action:\n{command}\n\nAgent's Reasoning:\n{reasoning}",
                },
            ],
            llm_model,
        )
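The `Agent` docstring above describes a three-layer prompt. A self-contained sketch of that layout (values are placeholders; the real assembly happens in `chat_with_ai`):

```py
# Hypothetical illustration of the prompt layering described in the docstring
system_prompt = "You are ExampleGPT, an autonomous agent. GOALS: ..."
contextual_messages = [
    {"role": "system", "content": "This reminds you of these events from your past: ..."},
]
triggering_prompt = (
    "Determine which next command to use, and respond using the format specified above:"
)

messages = [
    {"role": "system", "content": system_prompt},    # identity, description, goals
    *contextual_messages,                            # memory, previous conversations
    {"role": "user", "content": triggering_prompt},  # the short-term meta task
]
```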
145
autogpt/agent/agent_manager.py
Normal file
@@ -0,0 +1,145 @@
"""Agent manager for managing GPT agents"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import Message, create_chat_completion
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
class AgentManager(metaclass=Singleton):
|
||||
"""Agent manager for managing GPT agents"""
|
||||
|
||||
def __init__(self):
|
||||
self.next_key = 0
|
||||
self.agents = {} # key, (task, full_message_history, model)
|
||||
self.cfg = Config()
|
||||
|
||||
# Create new GPT agent
|
||||
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
|
||||
|
||||
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
|
||||
"""Create a new agent and return its key
|
||||
|
||||
Args:
|
||||
task: The task to perform
|
||||
prompt: The prompt to use
|
||||
model: The model to use
|
||||
|
||||
Returns:
|
||||
The key of the new agent
|
||||
"""
|
||||
messages: List[Message] = [
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
messages.extend(iter(plugin_messages))
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = ""
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
key = self.next_key
|
||||
# This is done instead of len(agents) to make keys unique even if agents
|
||||
# are deleted
|
||||
self.next_key += 1
|
||||
|
||||
self.agents[key] = (task, messages, model)
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
|
||||
return key, agent_reply
|
||||
|
||||
def message_agent(self, key: str | int, message: str) -> str:
|
||||
"""Send a message to an agent and return its response
|
||||
|
||||
Args:
|
||||
key: The key of the agent to message
|
||||
message: The message to send to the agent
|
||||
|
||||
Returns:
|
||||
The agent's response
|
||||
"""
|
||||
task, messages, model = self.agents[int(key)]
|
||||
|
||||
# Add user message to message history before sending to agent
|
||||
messages.append({"role": "user", "content": message})
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
for plugin_message in plugin_messages:
|
||||
messages.append(plugin_message)
|
||||
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = agent_reply
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
# Update full message history
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
|
||||
return agent_reply
|
||||
|
||||
def list_agents(self) -> list[tuple[str | int, str]]:
|
||||
"""Return a list of all agents
|
||||
|
||||
Returns:
|
||||
A list of tuples of the form (key, task)
|
||||
"""
|
||||
|
||||
# Return a list of agent keys and their tasks
|
||||
return [(key, task) for key, (task, _, _) in self.agents.items()]
|
||||
|
||||
def delete_agent(self, key: str | int) -> bool:
|
||||
"""Delete an agent from the agent manager
|
||||
|
||||
Args:
|
||||
key: The key of the agent to delete
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
del self.agents[int(key)]
|
||||
return True
|
||||
except KeyError:
|
||||
return False
|
||||
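A hypothetical usage sketch of the manager above; the task, prompt, and model name are invented, and each call goes through `create_chat_completion`:

```py
manager = AgentManager()  # Singleton: repeated construction returns the same instance
key, first_reply = manager.create_agent(
    task="summarise a report",
    prompt='You are SummaryGPT. Respond with: "Acknowledged".',
    model="gpt-3.5-turbo",
)
reply = manager.message_agent(key, "Summarise the attached notes.")
print(manager.list_agents())  # e.g. [(0, "summarise a report")]
manager.delete_agent(key)
```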
254
autogpt/app.py
Normal file
@@ -0,0 +1,254 @@
""" Command and Control """
|
||||
import json
|
||||
from typing import Dict, List, NoReturn, Union
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.config import Config
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
AGENT_MANAGER = AgentManager()
|
||||
|
||||
|
||||
def is_valid_int(value: str) -> bool:
|
||||
"""Check if the value is a valid integer
|
||||
|
||||
Args:
|
||||
value (str): The value to check
|
||||
|
||||
Returns:
|
||||
bool: True if the value is a valid integer, False otherwise
|
||||
"""
|
||||
try:
|
||||
int(value)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def get_command(response_json: Dict):
|
||||
"""Parse the response and return the command name and arguments
|
||||
|
||||
Args:
|
||||
response_json (json): The response from the AI
|
||||
|
||||
Returns:
|
||||
tuple: The command name and arguments
|
||||
|
||||
Raises:
|
||||
json.decoder.JSONDecodeError: If the response is not valid JSON
|
||||
|
||||
Exception: If any other error occurs
|
||||
"""
|
||||
try:
|
||||
if "command" not in response_json:
|
||||
return "Error:", "Missing 'command' object in JSON"
|
||||
|
||||
if not isinstance(response_json, dict):
|
||||
return "Error:", f"'response_json' object is not dictionary {response_json}"
|
||||
|
||||
command = response_json["command"]
|
||||
if not isinstance(command, dict):
|
||||
return "Error:", "'command' object is not a dictionary"
|
||||
|
||||
if "name" not in command:
|
||||
return "Error:", "Missing 'name' field in 'command' object"
|
||||
|
||||
command_name = command["name"]
|
||||
|
||||
# Use an empty dictionary if 'args' field is not present in 'command' object
|
||||
arguments = command.get("args", {})
|
||||
|
||||
return command_name, arguments
|
||||
except json.decoder.JSONDecodeError:
|
||||
return "Error:", "Invalid JSON"
|
||||
# All other errors, return "Error: + error message"
|
||||
except Exception as e:
|
||||
return "Error:", str(e)
|
||||
|
||||
|
||||
def map_command_synonyms(command_name: str):
|
||||
"""Takes the original command name given by the AI, and checks if the
|
||||
string matches a list of common/known hallucinations
|
||||
"""
|
||||
synonyms = [
|
||||
("write_file", "write_to_file"),
|
||||
("create_file", "write_to_file"),
|
||||
("search", "google"),
|
||||
]
|
||||
for seen_command, actual_command_name in synonyms:
|
||||
if command_name == seen_command:
|
||||
return actual_command_name
|
||||
return command_name
|
||||
|
||||
|
||||
def execute_command(
|
||||
command_registry: CommandRegistry,
|
||||
command_name: str,
|
||||
arguments,
|
||||
prompt: PromptGenerator,
|
||||
):
|
||||
"""Execute the command and return the result
|
||||
|
||||
Args:
|
||||
command_name (str): The name of the command to execute
|
||||
arguments (dict): The arguments for the command
|
||||
|
||||
Returns:
|
||||
str: The result of the command
|
||||
"""
|
||||
try:
|
||||
cmd = command_registry.commands.get(command_name)
|
||||
|
||||
# If the command is found, call it with the provided arguments
|
||||
if cmd:
|
||||
return cmd(**arguments)
|
||||
|
||||
# TODO: Remove commands below after they are moved to the command registry.
|
||||
command_name = map_command_synonyms(command_name.lower())
|
||||
|
||||
if command_name == "memory_add":
|
||||
return get_memory(CFG).add(arguments["string"])
|
||||
|
||||
# TODO: Change these to take in a file rather than pasted code, if
|
||||
# non-file is given, return instructions "Input should be a python
|
||||
# filepath, write your code to file and try again
|
||||
elif command_name == "task_complete":
|
||||
shutdown()
|
||||
else:
|
||||
for command in prompt.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
):
|
||||
return command["function"](**arguments)
|
||||
return (
|
||||
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
|
||||
" list for available commands and only respond in the specified JSON"
|
||||
" format."
|
||||
)
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
|
||||
)
|
||||
@validate_url
|
||||
def get_text_summary(url: str, question: str) -> str:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
question (str): The question to summarize the text for
|
||||
|
||||
Returns:
|
||||
str: The summary of the text
|
||||
"""
|
||||
text = scrape_text(url)
|
||||
summary = summarize_text(url, text, question)
|
||||
return f""" "Result" : {summary}"""
|
||||
|
||||
|
||||
@command("get_hyperlinks", "Get text summary", '"url": "<url>"')
|
||||
@validate_url
|
||||
def get_hyperlinks(url: str) -> Union[str, List[str]]:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
url (str): The url to scrape
|
||||
|
||||
Returns:
|
||||
str or list: The hyperlinks on the page
|
||||
"""
|
||||
return scrape_links(url)
|
||||
|
||||
|
||||
def shutdown() -> NoReturn:
|
||||
"""Shut down the program"""
|
||||
print("Shutting down...")
|
||||
quit()
|
||||
|
||||
|
||||
@command(
|
||||
"start_agent",
|
||||
"Start GPT Agent",
|
||||
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
|
||||
)
|
||||
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
|
||||
"""Start an agent with a given name, task, and prompt
|
||||
|
||||
Args:
|
||||
name (str): The name of the agent
|
||||
task (str): The task of the agent
|
||||
prompt (str): The prompt for the agent
|
||||
model (str): The model to use for the agent
|
||||
|
||||
Returns:
|
||||
str: The response of the agent
|
||||
"""
|
||||
# Remove underscores from name
|
||||
voice_name = name.replace("_", " ")
|
||||
|
||||
first_message = f"""You are {name}. Respond with: "Acknowledged"."""
|
||||
agent_intro = f"{voice_name} here, Reporting for duty!"
|
||||
|
||||
# Create agent
|
||||
if CFG.speak_mode:
|
||||
say_text(agent_intro, 1)
|
||||
key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
|
||||
|
||||
if CFG.speak_mode:
|
||||
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
|
||||
|
||||
# Assign task (prompt), get response
|
||||
agent_response = AGENT_MANAGER.message_agent(key, prompt)
|
||||
|
||||
return f"Agent {name} created with key {key}. First response: {agent_response}"
|
||||
|
||||
|
||||
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
|
||||
def message_agent(key: str, message: str) -> str:
|
||||
"""Message an agent with a given key and message"""
|
||||
# Check if the key is a valid integer
|
||||
if is_valid_int(key):
|
||||
agent_response = AGENT_MANAGER.message_agent(int(key), message)
|
||||
else:
|
||||
return "Invalid key, must be an integer."
|
||||
|
||||
# Speak response
|
||||
if CFG.speak_mode:
|
||||
say_text(agent_response, 1)
|
||||
return agent_response
|
||||
|
||||
|
||||
@command("list_agents", "List GPT Agents", "")
|
||||
def list_agents() -> str:
|
||||
"""List all agents
|
||||
|
||||
Returns:
|
||||
str: A list of all agents
|
||||
"""
|
||||
return "List of agents:\n" + "\n".join(
|
||||
[str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
|
||||
)
|
||||
|
||||
|
||||
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
|
||||
def delete_agent(key: str) -> str:
|
||||
"""Delete an agent with a given key
|
||||
|
||||
Args:
|
||||
key (str): The key of the agent to delete
|
||||
|
||||
Returns:
|
||||
str: A message indicating whether the agent was deleted or not
|
||||
"""
|
||||
result = AGENT_MANAGER.delete_agent(key)
|
||||
return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
|
||||
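For illustration, a well-formed response that `get_command` above accepts (values are hypothetical):

```py
response_json = {
    "command": {
        "name": "get_text_summary",
        "args": {"url": "https://example.com", "question": "What is this page about?"},
    }
}
command_name, arguments = get_command(response_json)
# -> ("get_text_summary", {"url": ..., "question": ...})
```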
@@ -1,27 +0,0 @@
@echo off
setlocal enabledelayedexpansion

:FindPythonCommand
for %%A in (python3 python) do (
    where /Q %%A
    if !errorlevel! EQU 0 (
        set "PYTHON_CMD=%%A"
        goto :Found
    )
)

echo Python not found. Please install Python.
pause
exit /B 1

:Found
%PYTHON_CMD% scripts/check_requirements.py
if errorlevel 1 (
    echo
    poetry install --without dev
    echo
    echo Finished installing packages! Starting AutoGPT...
    echo
)
poetry run autogpt %*
pause
@@ -1,29 +0,0 @@
#!/usr/bin/env bash

function find_python_command() {
    if command -v python3 &> /dev/null
    then
        echo "python3"
    elif command -v python &> /dev/null
    then
        echo "python"
    else
        echo "Python not found. Please install Python."
        exit 1
    fi
}

PYTHON_CMD=$(find_python_command)

if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then
    if ! $PYTHON_CMD scripts/check_requirements.py; then
        echo
        poetry install --without dev
        echo
        echo "Finished installing packages! Starting AutoGPT..."
        echo
    fi
    poetry run autogpt "$@"
else
    echo "Python 3.10 or higher is required to run Auto GPT."
fi
@@ -1,5 +0,0 @@
"""AutoGPT: A GPT powered AI Assistant"""
import autogpt.app.cli

if __name__ == "__main__":
    autogpt.app.cli.cli()
@@ -1,108 +0,0 @@
from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig


def create_agent(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
) -> Agent:
    if not task:
        raise ValueError("No task specified for new agent")
    ai_profile = ai_profile or AIProfile()
    directives = directives or AIDirectives()

    agent = _configure_agent(
        agent_id=agent_id,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )

    return agent


def configure_agent_with_state(
    state: AgentSettings,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
    return _configure_agent(
        state=state,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )


def _configure_agent(
    app_config: AppConfig,
    llm_provider: MultiProvider,
    file_storage: FileStorage,
    agent_id: str = "",
    task: str = "",
    ai_profile: Optional[AIProfile] = None,
    directives: Optional[AIDirectives] = None,
    state: Optional[AgentSettings] = None,
) -> Agent:
    if state:
        agent_state = state
    elif agent_id and task and ai_profile and directives:
        agent_state = state or create_agent_state(
            agent_id=agent_id,
            task=task,
            ai_profile=ai_profile,
            directives=directives,
            app_config=app_config,
        )
    else:
        raise TypeError(
            "Either (state) or (agent_id, task, ai_profile, directives)"
            " must be specified"
        )

    return Agent(
        settings=agent_state,
        llm_provider=llm_provider,
        file_storage=file_storage,
        app_config=app_config,
    )


def create_agent_state(
    agent_id: str,
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
) -> AgentSettings:
    return AgentSettings(
        agent_id=agent_id,
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
        task=task,
        ai_profile=ai_profile,
        directives=directives,
        config=AgentConfiguration(
            fast_llm=app_config.fast_llm,
            smart_llm=app_config.smart_llm,
            allow_fs_access=not app_config.restrict_to_workspace,
            use_functions_api=app_config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
    )
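A sketch of the two public entry points above, assuming `app_config`, `file_storage`, `llm_provider`, and `saved_settings` already exist in the calling application:

```py
# Fresh agent from explicit parameters
fresh = create_agent(
    agent_id="DemoGPT-0000",  # illustrative ID
    task="Write a haiku about CI",
    app_config=app_config,
    file_storage=file_storage,
    llm_provider=llm_provider,
)

# Resumed agent from previously persisted AgentSettings
resumed = configure_agent_with_state(
    state=saved_settings,
    app_config=app_config,
    file_storage=file_storage,
    llm_provider=llm_provider,
)
```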
@@ -1,36 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from forge.file_storage.base import FileStorage

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.app.config import AppConfig
    from forge.llm.providers import MultiProvider

from .configurators import _configure_agent
from .profile_generator import generate_agent_profile_for_task


async def generate_agent_for_task(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
    ai_profile, task_directives = await generate_agent_profile_for_task(
        task=task,
        app_config=app_config,
        llm_provider=llm_provider,
    )
    return _configure_agent(
        agent_id=agent_id,
        task=task,
        ai_profile=ai_profile,
        directives=task_directives,
        app_config=app_config,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )
@@ -1,241 +0,0 @@
import json
import logging

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.providers import MultiProvider
from forge.llm.providers.schema import (
    AssistantChatMessage,
    ChatMessage,
    CompletionModelFunction,
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema

from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
    llm_classification: LanguageModelClassification = UserConfigurable(
        default=LanguageModelClassification.SMART_MODEL
    )
    _example_call: object = {
        "name": "create_agent",
        "arguments": {
            "name": "CMOGPT",
            "description": (
                "a professional digital marketer AI that assists Solopreneurs "
                "in growing their businesses by providing "
                "world-class expertise in solving marketing problems "
                "for SaaS, content products, agencies, and more."
            ),
            "directives": {
                "best_practices": [
                    (
                        "Engage in effective problem-solving, prioritization, "
                        "planning, and supporting execution to address your "
                        "marketing needs as your virtual "
                        "Chief Marketing Officer."
                    ),
                    (
                        "Provide specific, actionable, and concise advice to "
                        "help you make informed decisions without the use of "
                        "platitudes or overly wordy explanations."
                    ),
                    (
                        "Identify and prioritize quick wins and cost-effective "
                        "campaigns that maximize results with minimal time and "
                        "budget investment."
                    ),
                    (
                        "Proactively take the lead in guiding you and offering "
                        "suggestions when faced with unclear information or "
                        "uncertainty to ensure your marketing strategy remains "
                        "on track."
                    ),
                ],
                "constraints": [
                    "Do not suggest illegal or unethical plans or strategies.",
                    "Take reasonable budgetary limits into account.",
                ],
            },
        },
    }
    system_prompt: str = UserConfigurable(
        default=(
            "Your job is to respond to a user-defined task, given in triple quotes, by "
            "invoking the `create_agent` function to generate an autonomous agent to "
            "complete the task. "
            "You should supply a role-based name for the agent (_GPT), "
            "an informative description for what the agent does, and 1 to 5 directives "
            "in each of the categories Best Practices and Constraints, "
            "that are optimally aligned with the successful completion "
            "of its assigned task.\n"
            "\n"
            "Example Input:\n"
            '"""Help me with marketing my business"""\n\n'
            "Example Call:\n"
            "```\n"
            f"{json.dumps(_example_call, indent=4)}"
            "\n```"
        )
    )
    user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
    create_agent_function: dict = UserConfigurable(
        default=CompletionModelFunction(
            name="create_agent",
            description="Create a new autonomous AI agent to complete a given task.",
            parameters={
                "name": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description="A short role-based name for an autonomous agent.",
                    required=True,
                ),
                "description": JSONSchema(
                    type=JSONSchema.Type.STRING,
                    description=(
                        "An informative one sentence description "
                        "of what the AI agent does"
                    ),
                    required=True,
                ),
                "directives": JSONSchema(
                    type=JSONSchema.Type.OBJECT,
                    properties={
                        "best_practices": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five highly effective best practices "
                                "that are optimally aligned with the completion "
                                "of the given task"
                            ),
                            required=True,
                        ),
                        "constraints": JSONSchema(
                            type=JSONSchema.Type.ARRAY,
                            minItems=1,
                            maxItems=5,
                            items=JSONSchema(
                                type=JSONSchema.Type.STRING,
                            ),
                            description=(
                                "One to five reasonable and efficacious constraints "
                                "that are optimally aligned with the completion "
                                "of the given task"
                            ),
                            required=True,
                        ),
                    },
                    required=True,
                ),
            },
        ).model_dump()
    )


class AgentProfileGenerator(PromptStrategy):
    default_configuration: AgentProfileGeneratorConfiguration = (
        AgentProfileGeneratorConfiguration()
    )

    def __init__(
        self,
        llm_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._llm_classification = llm_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = CompletionModelFunction.model_validate(
            create_agent_function
        )

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return self._llm_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        system_message = ChatMessage.system(self._system_prompt_message)
        user_message = ChatMessage.user(
            self._user_prompt_template.format(
                user_objective=user_objective,
            )
        )
        prompt = ChatPrompt(
            messages=[system_message, user_message],
            functions=[self._create_agent_function],
        )
        return prompt

    def parse_response_content(
        self,
        response: AssistantChatMessage,
    ) -> tuple[AIProfile, AIDirectives]:
        """Parse the actual text response from the objective model.

        Args:
            response: The raw response from the objective model.

        Returns:
            The parsed response.
        """
        try:
            if not response.tool_calls:
                raise ValueError(
                    f"LLM did not call {self._create_agent_function.name} function; "
                    "agent profile creation failed"
                )
            arguments: object = response.tool_calls[0].function.arguments
            ai_profile = AIProfile(
                ai_name=arguments.get("name"),  # type: ignore
                ai_role=arguments.get("description"),  # type: ignore
            )
            ai_directives = AIDirectives(
                best_practices=arguments.get("directives", {}).get("best_practices"),
                constraints=arguments.get("directives", {}).get("constraints"),
                resources=[],
            )
        except KeyError:
            logger.debug(f"Failed to parse this response content: {response}")
            raise
        return ai_profile, ai_directives


async def generate_agent_profile_for_task(
    task: str,
    app_config: AppConfig,
    llm_provider: MultiProvider,
) -> tuple[AIProfile, AIDirectives]:
    """Generates an AIProfile and AIDirectives pair from the given task string.

    Returns:
        A tuple of the AIProfile and AIDirectives tailored to the user's input
    """
    agent_profile_generator = AgentProfileGenerator(
        **AgentProfileGenerator.default_configuration.model_dump()  # HACK
    )

    prompt = agent_profile_generator.build_prompt(task)

    # Call LLM with the string as user input
    output = await llm_provider.create_chat_completion(
        prompt.messages,
        model_name=app_config.smart_llm,
        functions=prompt.functions,
        completion_parser=agent_profile_generator.parse_response_content,
    )

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output.response}")

    return output.parsed_result
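A hypothetical driver for the generator above; the task text mirrors the example input baked into the configuration, and `app_config`/`llm_provider` are assumed to exist:

```py
# Inside an async function:
ai_profile, directives = await generate_agent_profile_for_task(
    task="Help me with marketing my business",
    app_config=app_config,
    llm_provider=llm_provider,
)
print(ai_profile.ai_name)         # e.g. "CMOGPT", as in the example call above
print(directives.best_practices)  # 1-5 strings, per the function schema
```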
@@ -1,37 +0,0 @@
# 🤖 Agents

An agent is composed of [🧩 Components](./components.md) and is responsible for executing pipelines and some additional logic. The base class for all agents is `BaseAgent`; it has the necessary logic to collect components and execute protocols.

## Important methods

`BaseAgent` provides two abstract methods needed for any agent to work properly:
1. `propose_action`: This method is responsible for proposing an action based on the current state of the agent; it returns `ThoughtProcessOutput`.
2. `execute`: This method is responsible for executing the proposed action; it returns `ActionResult`.

## AutoGPT Agent

`Agent` is the main agent provided by AutoGPT. It's a subclass of `BaseAgent` and has all the [Built-in Components](./built-in-components.md). `Agent` implements the essential abstract methods from `BaseAgent`: `propose_action` and `execute`.

## Building your own Agent

The easiest way to build your own agent is to extend the `Agent` class and add additional components. By doing this you can reuse the existing components and the default logic for executing [⚙️ Protocols](./protocols.md).

```py
class MyComponent(AgentComponent):
    pass

class MyAgent(Agent):
    def __init__(
        self,
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        # Add your custom component
        self.my_component = MyComponent()
```

For more customization, you can override `propose_action` and `execute`, or even subclass `BaseAgent` directly; this gives you full control over the agent's components and behavior, as in the sketch below. Have a look at the [implementation of Agent](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt/autogpt/agents/agent.py) for more details.
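For example, a minimal sketch of overriding `propose_action` (the subclass is hypothetical; it reuses the parent pipeline and only adds logging):

```py
class LoggingAgent(Agent):
    """Illustrative subclass: inspect every proposal before returning it."""

    async def propose_action(self) -> OneShotAgentActionProposal:
        # Run the default component pipeline, then peek at the chosen tool
        proposal = await super().propose_action()
        print(f"Proposed tool: {proposal.use_tool.name}")
        return proposal
```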
@@ -1,9 +0,0 @@
from .agent import Agent
from .agent_manager import AgentManager
from .prompt_strategies.one_shot import OneShotAgentActionProposal

__all__ = [
    "AgentManager",
    "Agent",
    "OneShotAgentActionProposal",
]
@@ -1,313 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, ClassVar, Optional
|
||||
|
||||
import sentry_sdk
|
||||
from forge.agent.base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
|
||||
from forge.agent.protocols import (
|
||||
AfterExecute,
|
||||
AfterParse,
|
||||
CommandProvider,
|
||||
DirectiveProvider,
|
||||
MessageProvider,
|
||||
)
|
||||
from forge.command.command import Command
|
||||
from forge.components.action_history import (
|
||||
ActionHistoryComponent,
|
||||
EpisodicActionHistory,
|
||||
)
|
||||
from forge.components.action_history.action_history import ActionHistoryConfiguration
|
||||
from forge.components.code_executor.code_executor import (
|
||||
CodeExecutorComponent,
|
||||
CodeExecutorConfiguration,
|
||||
)
|
||||
from forge.components.context.context import AgentContext, ContextComponent
|
||||
from forge.components.file_manager import FileManagerComponent
|
||||
from forge.components.git_operations import GitOperationsComponent
|
||||
from forge.components.image_gen import ImageGeneratorComponent
|
||||
from forge.components.system import SystemComponent
|
||||
from forge.components.user_interaction import UserInteractionComponent
|
||||
from forge.components.watchdog import WatchdogComponent
|
||||
from forge.components.web import WebSearchComponent, WebSeleniumComponent
|
||||
from forge.file_storage.base import FileStorage
|
||||
from forge.llm.prompting.schema import ChatPrompt
|
||||
from forge.llm.prompting.utils import dump_prompt
|
||||
from forge.llm.providers import (
|
||||
AssistantFunctionCall,
|
||||
ChatMessage,
|
||||
ChatModelResponse,
|
||||
MultiProvider,
|
||||
)
|
||||
from forge.llm.providers.utils import function_specs_from_commands
|
||||
from forge.models.action import (
|
||||
ActionErrorResult,
|
||||
ActionInterruptedByHuman,
|
||||
ActionResult,
|
||||
ActionSuccessResult,
|
||||
)
|
||||
from forge.models.config import Configurable
|
||||
from forge.utils.exceptions import (
|
||||
AgentException,
|
||||
AgentTerminated,
|
||||
CommandExecutionError,
|
||||
UnknownCommandError,
|
||||
)
|
||||
from pydantic import Field
|
||||
|
||||
from .prompt_strategies.one_shot import (
|
||||
OneShotAgentActionProposal,
|
||||
OneShotAgentPromptStrategy,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.app.config import AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentConfiguration(BaseAgentConfiguration):
|
||||
pass
|
||||
|
||||
|
||||
class AgentSettings(BaseAgentSettings):
|
||||
config: AgentConfiguration = Field( # type: ignore
|
||||
default_factory=AgentConfiguration
|
||||
)
|
||||
|
||||
history: EpisodicActionHistory[OneShotAgentActionProposal] = Field(
|
||||
default_factory=EpisodicActionHistory[OneShotAgentActionProposal]
|
||||
)
|
||||
"""(STATE) The action history of the agent."""
|
||||
|
||||
context: AgentContext = Field(default_factory=AgentContext)
|
||||
|
||||
|
||||
class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
|
||||
default_settings: ClassVar[AgentSettings] = AgentSettings(
|
||||
name="Agent",
|
||||
description=__doc__ if __doc__ else "",
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
settings: AgentSettings,
|
||||
llm_provider: MultiProvider,
|
||||
file_storage: FileStorage,
|
||||
app_config: AppConfig,
|
||||
):
|
||||
super().__init__(settings)
|
||||
|
||||
self.llm_provider = llm_provider
|
||||
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
|
||||
deep=True
|
||||
)
|
||||
prompt_config.use_functions_api = (
|
||||
settings.config.use_functions_api
|
||||
# Anthropic currently doesn't support tools + prefilling :(
|
||||
and self.llm.provider_name != "anthropic"
|
||||
)
|
||||
self.prompt_strategy = OneShotAgentPromptStrategy(prompt_config, logger)
|
||||
self.commands: list[Command] = []
|
||||
|
||||
# Components
|
||||
self.system = SystemComponent()
|
||||
self.history = (
|
||||
ActionHistoryComponent(
|
||||
settings.history,
|
||||
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
|
||||
llm_provider,
|
||||
ActionHistoryConfiguration(
|
||||
llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
|
||||
),
|
||||
)
|
||||
.run_after(WatchdogComponent)
|
||||
.run_after(SystemComponent)
|
||||
)
|
||||
if not app_config.noninteractive_mode:
|
||||
self.user_interaction = UserInteractionComponent()
|
||||
self.file_manager = FileManagerComponent(file_storage, settings)
|
||||
self.code_executor = CodeExecutorComponent(
|
||||
self.file_manager.workspace,
|
||||
CodeExecutorConfiguration(
|
||||
docker_container_name=f"{settings.agent_id}_sandbox"
|
||||
),
|
||||
)
|
||||
self.git_ops = GitOperationsComponent()
|
||||
self.image_gen = ImageGeneratorComponent(self.file_manager.workspace)
|
||||
self.web_search = WebSearchComponent()
|
||||
self.web_selenium = WebSeleniumComponent(
|
||||
llm_provider,
|
||||
app_config.app_data_dir,
|
||||
)
|
||||
self.context = ContextComponent(self.file_manager.workspace, settings.context)
|
||||
self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
|
||||
ContextComponent
|
||||
)
|
||||
|
||||
self.event_history = settings.history
|
||||
self.app_config = app_config
|
||||
|
||||
    async def propose_action(self) -> OneShotAgentActionProposal:
        """Proposes the next action to execute, based on the task and current state.

        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """
        self.reset_trace()

        # Get directives
        resources = await self.run_pipeline(DirectiveProvider.get_resources)
        constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
        best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

        directives = self.state.directives.model_copy(deep=True)
        directives.resources += resources
        directives.constraints += constraints
        directives.best_practices += best_practices

        # Get commands
        self.commands = await self.run_pipeline(CommandProvider.get_commands)
        self._remove_disabled_commands()

        # Get messages
        messages = await self.run_pipeline(MessageProvider.get_messages)

        include_os_info = (
            self.code_executor.config.execute_local_commands
            if hasattr(self, "code_executor")
            else False
        )

        prompt: ChatPrompt = self.prompt_strategy.build_prompt(
            messages=messages,
            task=self.state.task,
            ai_profile=self.state.ai_profile,
            ai_directives=directives,
            commands=function_specs_from_commands(self.commands),
            include_os_info=include_os_info,
        )

        logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
        output = await self.complete_and_parse(prompt)
        self.config.cycle_count += 1

        return output

    async def complete_and_parse(
        self, prompt: ChatPrompt, exception: Optional[Exception] = None
    ) -> OneShotAgentActionProposal:
        if exception:
            prompt.messages.append(ChatMessage.system(f"Error: {exception}"))

        response: ChatModelResponse[
            OneShotAgentActionProposal
        ] = await self.llm_provider.create_chat_completion(
            prompt.messages,
            model_name=self.llm.name,
            completion_parser=self.prompt_strategy.parse_response_content,
            functions=prompt.functions,
            prefill_response=prompt.prefill_response,
        )
        result = response.parsed_result

        await self.run_pipeline(AfterParse.after_parse, result)

        return result

    async def execute(
        self,
        proposal: OneShotAgentActionProposal,
        user_feedback: str = "",
    ) -> ActionResult:
        tool = proposal.use_tool

        # Get commands
        self.commands = await self.run_pipeline(CommandProvider.get_commands)
        self._remove_disabled_commands()

        try:
            return_value = await self._execute_tool(tool)

            result = ActionSuccessResult(outputs=return_value)
        except AgentTerminated:
            raise
        except AgentException as e:
            result = ActionErrorResult.from_exception(e)
            logger.warning(f"{tool} raised an error: {e}")
            sentry_sdk.capture_exception(e)

        result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
        if result_tlength > self.send_token_limit // 3:
            result = ActionErrorResult(
                reason=f"Command {tool.name} returned too much output. "
                "Do not execute this command again with the same arguments."
            )

        await self.run_pipeline(AfterExecute.after_execute, result)

        logger.debug("\n".join(self.trace))

        return result

    async def do_not_execute(
        self, denied_proposal: OneShotAgentActionProposal, user_feedback: str
    ) -> ActionResult:
        result = ActionInterruptedByHuman(feedback=user_feedback)

        await self.run_pipeline(AfterExecute.after_execute, result)

        logger.debug("\n".join(self.trace))

        return result

    async def _execute_tool(self, tool_call: AssistantFunctionCall) -> Any:
        """Execute the command and return the result

        Args:
            tool_call (AssistantFunctionCall): The tool call to execute

        Returns:
            str: The execution result
        """
        # Execute a native command with the same name or alias, if it exists
        command = self._get_command(tool_call.name)
        try:
            result = command(**tool_call.arguments)
            if inspect.isawaitable(result):
                return await result
            return result
        except AgentException:
            raise
        except Exception as e:
            raise CommandExecutionError(str(e))

    def _get_command(self, command_name: str) -> Command:
        for command in reversed(self.commands):
            if command_name in command.names:
                return command

        raise UnknownCommandError(
            f"Cannot execute command '{command_name}': unknown command."
        )

    def _remove_disabled_commands(self) -> None:
        self.commands = [
            command
            for command in self.commands
            if not any(
                name in self.app_config.disabled_commands for name in command.names
            )
        ]

    def find_obscured_commands(self) -> list[Command]:
        seen_names = set()
        obscured_commands = []
        for command in reversed(self.commands):
            # If all of the command's names have been seen, it's obscured
            if seen_names.issuperset(command.names):
                obscured_commands.append(command)
            else:
                seen_names.update(command.names)
        return list(reversed(obscured_commands))
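# --- Illustrative sketch (not part of the source above) ---
# How propose_action()/execute()/do_not_execute() compose into one agent cycle.
# `agent`, `approved`, and `feedback` are assumptions for this example.
async def run_one_cycle(agent, approved: bool, feedback: str = ""):
    proposal = await agent.propose_action()   # LLM picks the next tool to use
    if approved:
        return await agent.execute(proposal)  # run the proposed command
    return await agent.do_not_execute(proposal, feedback)  # record the rejection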
@@ -1,46 +0,0 @@
from __future__ import annotations

import uuid
from pathlib import Path

from forge.file_storage.base import FileStorage

from autogpt.agents.agent import AgentSettings


class AgentManager:
    def __init__(self, file_storage: FileStorage):
        self.file_manager = file_storage.clone_with_subroot("agents")

    @staticmethod
    def generate_id(agent_name: str) -> str:
        """Generate a unique ID for an agent, given its name."""
        unique_id = str(uuid.uuid4())[:8]
        return f"{agent_name}-{unique_id}"

    def list_agents(self) -> list[str]:
        """Return all agent directories within storage."""
        agent_dirs: list[str] = []
        for file_path in self.file_manager.list_files():
            if len(file_path.parts) == 2 and file_path.name == "state.json":
                agent_dirs.append(file_path.parent.name)
        return agent_dirs

    def get_agent_dir(self, agent_id: str) -> Path:
        """Return the directory of the agent with the given ID."""
        assert len(agent_id) > 0
        agent_dir: Path | None = None
        if self.file_manager.exists(agent_id):
            agent_dir = self.file_manager.root / agent_id
        else:
            raise FileNotFoundError(f"No agent with ID '{agent_id}'")
        return agent_dir

    def load_agent_state(self, agent_id: str) -> AgentSettings:
        """Load the state of the agent with the given ID."""
        state_file_path = Path(agent_id) / "state.json"
        if not self.file_manager.exists(state_file_path):
            raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")

        state = self.file_manager.read_file(state_file_path)
        return AgentSettings.parse_raw(state)
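# --- Illustrative sketch (not part of the source above) ---
# Typical AgentManager usage; `file_storage` is assumed to be any FileStorage
# implementation (e.g. local disk) -- an assumption for this example.
manager = AgentManager(file_storage)
new_id = AgentManager.generate_id("ResearchGPT")   # e.g. "ResearchGPT-1a2b3c4d"
for agent_id in manager.list_agents():             # dirs that contain state.json
    settings = manager.load_agent_state(agent_id)  # parsed AgentSettings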
@@ -1,281 +0,0 @@
from __future__ import annotations

import json
import platform
import re
from logging import Logger

import distro
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.json.parsing import extract_dict_from_json
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.prompting.utils import format_numbered_list
from forge.llm.providers.schema import (
    AssistantChatMessage,
    ChatMessage,
    CompletionModelFunction,
)
from forge.models.action import ActionProposal
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
from forge.models.utils import ModelWithSummary
from forge.utils.exceptions import InvalidAgentResponseError
from pydantic import Field

_RESPONSE_INTERFACE_NAME = "AssistantResponse"


class AssistantThoughts(ModelWithSummary):
    observations: str = Field(
        description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        return self.text


class OneShotAgentActionProposal(ActionProposal):
    thoughts: AssistantThoughts  # type: ignore


class OneShotAgentPromptConfiguration(SystemConfiguration):
    DEFAULT_BODY_TEMPLATE: str = (
        "## Constraints\n"
        "You operate within the following constraints:\n"
        "{constraints}\n"
        "\n"
        "## Resources\n"
        "You can leverage access to the following resources:\n"
        "{resources}\n"
        "\n"
        "## Commands\n"
        "These are the ONLY commands you can use."
        " Any action you perform must be possible through one of these commands:\n"
        "{commands}\n"
        "\n"
        "## Best practices\n"
        "{best_practices}"
    )

    DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = (
        "Determine exactly one command to use next based on the given goals "
        "and the progress you have made so far, "
        "and respond using the JSON schema specified previously:"
    )

    body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE)
    choose_action_instruction: str = UserConfigurable(
        default=DEFAULT_CHOOSE_ACTION_INSTRUCTION
    )
    use_functions_api: bool = UserConfigurable(default=False)

    #########
    # State #
    #########
    # progress_summaries: dict[tuple[int, int], str] = Field(
    #     default_factory=lambda: {(0, 0): ""}
    # )


class OneShotAgentPromptStrategy(PromptStrategy):
    default_configuration: OneShotAgentPromptConfiguration = (
        OneShotAgentPromptConfiguration()
    )

    def __init__(
        self,
        configuration: OneShotAgentPromptConfiguration,
        logger: Logger,
    ):
        self.config = configuration
        self.response_schema = JSONSchema.from_dict(
            OneShotAgentActionProposal.model_json_schema()
        )
        self.logger = logger

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
        self,
        *,
        messages: list[ChatMessage],
        task: str,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
        **extras,
    ) -> ChatPrompt:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Task description & message history
        3. `cycle_instruction`
        """
        system_prompt, response_prefill = self.build_system_prompt(
            ai_profile=ai_profile,
            ai_directives=ai_directives,
            commands=commands,
            include_os_info=include_os_info,
        )

        final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction)

        return ChatPrompt(
            messages=[
                ChatMessage.system(system_prompt),
                ChatMessage.user(f'"""{task}"""'),
                *messages,
                final_instruction_msg,
            ],
            prefill_response=response_prefill,
            functions=commands if self.config.use_functions_api else [],
        )

    def build_system_prompt(
        self,
        ai_profile: AIProfile,
        ai_directives: AIDirectives,
        commands: list[CompletionModelFunction],
        include_os_info: bool,
    ) -> tuple[str, str]:
        """
        Builds the system prompt.

        Returns:
            str: The system prompt body
            str: The desired start for the LLM's response; used to steer the output
        """
        response_fmt_instruction, response_prefill = self.response_format_instruction(
            self.config.use_functions_api
        )
        system_prompt_parts = (
            self._generate_intro_prompt(ai_profile)
            + (self._generate_os_info() if include_os_info else [])
            + [
                self.config.body_template.format(
                    constraints=format_numbered_list(ai_directives.constraints),
                    resources=format_numbered_list(ai_directives.resources),
                    commands=self._generate_commands_list(commands),
                    best_practices=format_numbered_list(ai_directives.best_practices),
                )
            ]
            + [
                "## Your Task\n"
                "The user will specify a task for you to execute, in triple quotes,"
                " in the next message. Your job is to complete the task while following"
                " your directives as given above, and terminate when your task is done."
            ]
            + ["## RESPONSE FORMAT\n" + response_fmt_instruction]
        )

        # Join non-empty parts together into paragraph format
        return (
            "\n\n".join(filter(None, system_prompt_parts)).strip("\n"),
            response_prefill,
        )

    def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
        response_schema = self.response_schema.model_copy(deep=True)
        assert response_schema.properties
        if use_functions_api and "use_tool" in response_schema.properties:
            del response_schema.properties["use_tool"]

        # Unindent the schema to save prompt tokens
        response_format = re.sub(
            r"\n\s+",
            "\n",
            response_schema.to_typescript_object_interface(_RESPONSE_INTERFACE_NAME),
        )
        response_prefill = f'{{\n "{list(response_schema.properties.keys())[0]}":'

        return (
            (
                f"YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
                f"{response_format}"
                + ("\n\nYOU MUST ALSO INVOKE A TOOL!" if use_functions_api else "")
            ),
            response_prefill,
        )

    def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
        """Generates the introduction part of the prompt.

        Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
        """
        return [
            f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

    def _generate_os_info(self) -> list[str]:
        """Generates the OS information part of the prompt.

        Returns:
            list[str]: The OS information part of the prompt.
        """
        os_name = platform.system()
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )
        return [f"The OS you are running on is: {os_info}"]

    def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
        """Lists the commands available to the agent.

        Params:
            commands: The commands that are available to the agent.

        Returns:
            str: A string containing a numbered list of commands.
        """
        try:
            return format_numbered_list([cmd.fmt_line() for cmd in commands])
        except AttributeError:
            self.logger.warning(f"Formatting commands failed. {commands}")
            raise

    def parse_response_content(
        self,
        response: AssistantChatMessage,
    ) -> OneShotAgentActionProposal:
        if not response.content:
            raise InvalidAgentResponseError("Assistant response has no text content")

        self.logger.debug(
            "LLM response content:"
            + (
                f"\n{response.content}"
                if "\n" in response.content
                else f" '{response.content}'"
            )
        )
        assistant_reply_dict = extract_dict_from_json(response.content)
        self.logger.debug(
            "Parsing object extracted from LLM response:\n"
            f"{json.dumps(assistant_reply_dict, indent=4)}"
        )
        if self.config.use_functions_api:
            if not response.tool_calls:
                raise InvalidAgentResponseError("Assistant did not use a tool")
            assistant_reply_dict["use_tool"] = response.tool_calls[0].function

        parsed_response = OneShotAgentActionProposal.model_validate(
            assistant_reply_dict
        )
        parsed_response.raw_message = response.copy()
        return parsed_response
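# --- Illustrative sketch (not part of the source above) ---
# How the prefill returned by response_format_instruction() steers the model.
# The logger name is an assumption for this example.
import logging

strategy = OneShotAgentPromptStrategy(
    OneShotAgentPromptConfiguration(), logger=logging.getLogger("demo")
)
fmt, prefill = strategy.response_format_instruction(use_functions_api=False)
# `prefill` is the literal opening of the required JSON object (e.g. '{\n "thoughts":');
# prepending it to the assistant's response forces schema-shaped output, which
# parse_response_content() then validates into a OneShotAgentActionProposal.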
@@ -1,6 +0,0 @@
from dotenv import load_dotenv

# Load the user's .env file into environment variables
load_dotenv(verbose=True, override=True)

# Remove the helper from the module namespace once it has done its job
del load_dotenv
@@ -1,479 +0,0 @@
import logging
import os
import pathlib
from collections import defaultdict
from io import BytesIO
from uuid import uuid4

import orjson
from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from forge.agent_protocol.api_router import base_router
from forge.agent_protocol.database import AgentDB
from forge.agent_protocol.middlewares import AgentMiddleware
from forge.agent_protocol.models import (
    Artifact,
    Step,
    StepRequestBody,
    Task,
    TaskArtifactsListResponse,
    TaskListResponse,
    TaskRequestBody,
    TaskStepsListResponse,
)
from forge.file_storage import FileStorage
from forge.llm.providers import ModelProviderBudget, MultiProvider
from forge.models.action import ActionErrorResult, ActionSuccessResult
from forge.utils.const import ASK_COMMAND, FINISH_COMMAND
from forge.utils.exceptions import AgentFinished, NotFoundError
from hypercorn.asyncio import serve as hypercorn_serve
from hypercorn.config import Config as HypercornConfig
from sentry_sdk import set_user

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.config import AppConfig
from autogpt.app.utils import is_port_free

logger = logging.getLogger(__name__)


class AgentProtocolServer:
    _task_budgets: dict[str, ModelProviderBudget]

    def __init__(
        self,
        app_config: AppConfig,
        database: AgentDB,
        file_storage: FileStorage,
        llm_provider: MultiProvider,
    ):
        self.app_config = app_config
        self.db = database
        self.file_storage = file_storage
        self.llm_provider = llm_provider
        self.agent_manager = AgentManager(file_storage)
        self._task_budgets = defaultdict(ModelProviderBudget)

    async def start(self, port: int = 8000, router: APIRouter = base_router):
        """Start the agent server."""
        logger.debug("Starting the agent server...")
        if not is_port_free(port):
            logger.error(f"Port {port} is already in use.")
            logger.info(
                "You can specify a port by either setting the AP_SERVER_PORT "
                "environment variable or defining AP_SERVER_PORT in the .env file."
            )
            return

        config = HypercornConfig()
        config.bind = [f"localhost:{port}"]
        app = FastAPI(
            title="AutoGPT Server",
            description="Forked from AutoGPT Forge; "
            "Modified version of The Agent Protocol.",
            version="v0.4",
        )

        # Configure CORS middleware
        default_origins = [f"http://localhost:{port}"]  # Default only local access
        configured_origins = [
            origin
            for origin in os.getenv("AP_SERVER_CORS_ALLOWED_ORIGINS", "").split(",")
            if origin  # Empty list if not configured
        ]
        origins = configured_origins or default_origins

        app.add_middleware(
            CORSMiddleware,
            allow_origins=origins,
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        app.include_router(router, prefix="/ap/v1")
        script_dir = os.path.dirname(os.path.realpath(__file__))
        frontend_path = (
            pathlib.Path(script_dir).joinpath("../../../frontend/build/web").resolve()
        )

        if os.path.exists(frontend_path):
            app.mount("/app", StaticFiles(directory=frontend_path), name="app")

            @app.get("/", include_in_schema=False)
            async def root():
                return RedirectResponse(url="/app/index.html", status_code=307)

        else:
            logger.warning(
                f"Frontend not found. {frontend_path} does not exist. "
                "The frontend will not be available."
            )

        # Used to access the methods on this class from API route handlers
        app.add_middleware(AgentMiddleware, agent=self)

        config.loglevel = "ERROR"
        # Note: this bind overrides the localhost-only bind set earlier
        config.bind = [f"0.0.0.0:{port}"]

        logger.info(f"AutoGPT server starting on http://localhost:{port}")
        await hypercorn_serve(app, config)  # type: ignore

    async def create_task(self, task_request: TaskRequestBody) -> Task:
        """
        Create a task for the agent.
        """
        if user_id := (task_request.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        task = await self.db.create_task(
            input=task_request.input,
            additional_input=task_request.additional_input,
        )
        # TODO: re-evaluate performance benefit of task-oriented profiles
        # logger.debug(f"Creating agent for task: '{task.input}'")
        # task_agent = await generate_agent_for_task(
        task_agent = create_agent(
            agent_id=task_agent_id(task.task_id),
            task=task.input,
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )
        await task_agent.file_manager.save_state()

        return task

    async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
        """
        List all tasks that the agent has created.
        """
        logger.debug("Listing all tasks...")
        tasks, pagination = await self.db.list_tasks(page, pageSize)
        response = TaskListResponse(tasks=tasks, pagination=pagination)
        return response

    async def get_task(self, task_id: str) -> Task:
        """
        Get a task by ID.
        """
        logger.debug(f"Getting task with ID: {task_id}...")
        task = await self.db.get_task(task_id)
        return task

    async def list_steps(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskStepsListResponse:
        """
        List the IDs of all steps that the task has created.
        """
        logger.debug(f"Listing all steps created by task with ID: {task_id}...")
        steps, pagination = await self.db.list_steps(task_id, page, pageSize)
        response = TaskStepsListResponse(steps=steps, pagination=pagination)
        return response

    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
        """Create a step for the task."""
        logger.debug(f"Creating a step for task with ID: {task_id}...")

        # Restore Agent instance
        task = await self.get_task(task_id)
        agent = configure_agent_with_state(
            state=self.agent_manager.load_agent_state(task_agent_id(task_id)),
            app_config=self.app_config,
            file_storage=self.file_storage,
            llm_provider=self._get_task_llm_provider(task),
        )

        if user_id := (task.additional_input or {}).get("user_id"):
            set_user({"id": user_id})

        # According to the Agent Protocol spec, the first execute_step request contains
        # the same task input as the parent create_task request.
        # To prevent this from interfering with the agent's process, we ignore the input
        # of this first step request, and just generate the first step proposal.
        is_init_step = not bool(agent.event_history)
        last_proposal, tool_result = None, None
        execute_approved = False

        # HACK: only for compatibility with AGBenchmark
        if step_request.input == "y":
            step_request.input = ""

        user_input = step_request.input if not is_init_step else ""

        if (
            not is_init_step
            and agent.event_history.current_episode
            and not agent.event_history.current_episode.result
        ):
            last_proposal = agent.event_history.current_episode.action
            execute_approved = not user_input

            logger.debug(
                f"Agent proposed command {last_proposal.use_tool}."
                f" User input/feedback: {repr(user_input)}"
            )

        # Save step request
        step = await self.db.create_step(
            task_id=task_id,
            input=step_request,
            is_last=(
                last_proposal is not None
                and last_proposal.use_tool.name == FINISH_COMMAND
                and execute_approved
            ),
        )
        agent.llm_provider = self._get_task_llm_provider(task, step.step_id)

        # Execute previously proposed action
        if last_proposal:
            agent.file_manager.workspace.on_write_file = (
                lambda path: self._on_agent_write_file(
                    task=task, step=step, relative_path=path
                )
            )

            if last_proposal.use_tool.name == ASK_COMMAND:
                tool_result = ActionSuccessResult(outputs=user_input)
                agent.event_history.register_result(tool_result)
            elif execute_approved:
                step = await self.db.update_step(
                    task_id=task_id,
                    step_id=step.step_id,
                    status="running",
                )

                try:
                    # Execute previously proposed action
                    tool_result = await agent.execute(last_proposal)
                except AgentFinished:
                    additional_output = {}
                    task_total_cost = agent.llm_provider.get_incurred_cost()
                    if task_total_cost > 0:
                        additional_output["task_total_cost"] = task_total_cost
                        logger.info(
                            f"Total LLM cost for task {task_id}: "
                            f"${round(task_total_cost, 2)}"
                        )

                    step = await self.db.update_step(
                        task_id=task_id,
                        step_id=step.step_id,
                        output=last_proposal.use_tool.arguments["reason"],
                        additional_output=additional_output,
                    )
                    await agent.file_manager.save_state()
                    return step
            else:
                assert user_input
                tool_result = await agent.do_not_execute(last_proposal, user_input)

        # Propose next action
        try:
            assistant_response = await agent.propose_action()
            next_tool_to_use = assistant_response.use_tool
            logger.debug(f"AI output: {assistant_response.thoughts}")
        except Exception as e:
            step = await self.db.update_step(
                task_id=task_id,
                step_id=step.step_id,
                status="completed",
                output=f"An error occurred while proposing the next action: {e}",
            )
            return step

        # Format step output
        output = (
            (
                f"`{last_proposal.use_tool}` returned:"
                + ("\n\n" if "\n" in str(tool_result) else " ")
                + f"{tool_result}\n\n"
            )
            if last_proposal and last_proposal.use_tool.name != ASK_COMMAND
            else ""
        )
        output += f"{assistant_response.thoughts.speak}\n\n"
        output += (
            f"Next Command: {next_tool_to_use}"
            if next_tool_to_use.name != ASK_COMMAND
            else next_tool_to_use.arguments["question"]
        )

        additional_output = {
            **(
                {
                    "last_action": {
                        "name": last_proposal.use_tool.name,
                        "args": last_proposal.use_tool.arguments,
                        "result": (
                            ""
                            if tool_result is None
                            else (
                                orjson.loads(tool_result.model_dump_json())
                                if not isinstance(tool_result, ActionErrorResult)
                                else {
                                    "error": str(tool_result.error),
                                    "reason": tool_result.reason,
                                }
                            )
                        ),
                    },
                }
                if last_proposal and tool_result
                else {}
            ),
            **assistant_response.model_dump(),
        }

        task_cumulative_cost = agent.llm_provider.get_incurred_cost()
        if task_cumulative_cost > 0:
            additional_output["task_cumulative_cost"] = task_cumulative_cost
            logger.debug(
                f"Running total LLM cost for task {task_id}: "
                f"${round(task_cumulative_cost, 3)}"
            )

        step = await self.db.update_step(
            task_id=task_id,
            step_id=step.step_id,
            status="completed",
            output=output,
            additional_output=additional_output,
        )

        await agent.file_manager.save_state()
        return step

    async def _on_agent_write_file(
        self, task: Task, step: Step, relative_path: pathlib.Path
    ) -> None:
        """
        Creates an Artifact for the written file, or updates the Artifact if it exists.
        """
        if relative_path.is_absolute():
            raise ValueError(f"File path '{relative_path}' is not relative")
        for a in task.artifacts or []:
            if a.relative_path == str(relative_path):
                logger.debug(f"Updating Artifact after writing to existing file: {a}")
                if not a.agent_created:
                    await self.db.update_artifact(a.artifact_id, agent_created=True)
                break
        else:
            # for-else: runs only when no existing artifact matched
            logger.debug(f"Creating Artifact for new file '{relative_path}'")
            await self.db.create_artifact(
                task_id=step.task_id,
                step_id=step.step_id,
                file_name=relative_path.parts[-1],
                agent_created=True,
                relative_path=str(relative_path),
            )

    async def get_step(self, task_id: str, step_id: str) -> Step:
        """
        Get a step by ID.
        """
        step = await self.db.get_step(task_id, step_id)
        return step

    async def list_artifacts(
        self, task_id: str, page: int = 1, pageSize: int = 10
    ) -> TaskArtifactsListResponse:
        """
        List the artifacts that the task has created.
        """
        artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
        return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)

    async def create_artifact(
        self, task_id: str, file: UploadFile, relative_path: str
    ) -> Artifact:
        """
        Create an artifact for the task.
        """
        file_name = file.filename or str(uuid4())
        data = b""
        # Read the upload in 1 MiB chunks
        while contents := file.file.read(1024 * 1024):
            data += contents
        # Check if relative path ends with filename
        if relative_path.endswith(file_name):
            file_path = relative_path
        else:
            file_path = os.path.join(relative_path, file_name)

        workspace = self._get_task_agent_file_workspace(task_id)
        await workspace.write_file(file_path, data)

        artifact = await self.db.create_artifact(
            task_id=task_id,
            file_name=file_name,
            relative_path=relative_path,
            agent_created=False,
        )
        return artifact

    async def get_artifact(self, task_id: str, artifact_id: str) -> StreamingResponse:
        """
        Download a task artifact by ID.
        """
        try:
            workspace = self._get_task_agent_file_workspace(task_id)
            artifact = await self.db.get_artifact(artifact_id)
            if artifact.file_name not in artifact.relative_path:
                file_path = os.path.join(artifact.relative_path, artifact.file_name)
            else:
                file_path = artifact.relative_path
            retrieved_artifact = workspace.read_file(file_path, binary=True)
        except NotFoundError:
            # Propagate lookup errors to the caller unchanged
            raise
        except FileNotFoundError:
            raise

        return StreamingResponse(
            BytesIO(retrieved_artifact),
            media_type="application/octet-stream",
            headers={
                "Content-Disposition": f'attachment; filename="{artifact.file_name}"'
            },
        )

    def _get_task_agent_file_workspace(self, task_id: str | int) -> FileStorage:
        agent_id = task_agent_id(task_id)
        return self.file_storage.clone_with_subroot(f"agents/{agent_id}/workspace")

    def _get_task_llm_provider(self, task: Task, step_id: str = "") -> MultiProvider:
        """
        Configures the LLM provider with headers to link outgoing requests to the task.
        """
        task_llm_budget = self._task_budgets[task.task_id]

        task_llm_provider_config = self.llm_provider._configuration.model_copy(
            deep=True
        )
        _extra_request_headers = task_llm_provider_config.extra_request_headers
        _extra_request_headers["AP-TaskID"] = task.task_id
        if step_id:
            _extra_request_headers["AP-StepID"] = step_id
        if task.additional_input and (user_id := task.additional_input.get("user_id")):
            _extra_request_headers["AutoGPT-UserID"] = user_id

        settings = self.llm_provider._settings.model_copy()
        settings.budget = task_llm_budget
        settings.configuration = task_llm_provider_config
        task_llm_provider = self.llm_provider.__class__(
            settings=settings,
            logger=logger.getChild(
                f"Task-{task.task_id}_{self.llm_provider.__class__.__name__}"
            ),
        )
        self._task_budgets[task.task_id] = task_llm_provider._budget  # type: ignore

        return task_llm_provider


def task_agent_id(task_id: str | int) -> str:
    return f"AutoGPT-{task_id}"
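# --- Illustrative sketch (not part of the source above) ---
# Exercising the server via the Agent Protocol routes mounted under /ap/v1.
# httpx, the payload shape, and the port are assumptions for this example.
import asyncio
import httpx

async def demo(port: int = 8000):
    async with httpx.AsyncClient(base_url=f"http://localhost:{port}/ap/v1") as client:
        task = (await client.post("/agent/tasks", json={"input": "Write a haiku"})).json()
        step = (
            await client.post(f"/agent/tasks/{task['task_id']}/steps", json={"input": ""})
        ).json()
        print(step["output"])

# asyncio.run(demo())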
@@ -1,216 +0,0 @@
"""Main script for the autogpt package."""
from logging import _nameToLevel as logLevelMap
from pathlib import Path
from typing import Optional

import click
from forge.logging.config import LogFormatName

from .telemetry import setup_telemetry


@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx: click.Context):
    setup_telemetry()

    # Invoke `run` by default
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)


@cli.command()
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--constraint",
    type=str,
    multiple=True,
    help=(
        "Add or override AI constraints to include in the prompt;"
        " may be used multiple times to pass multiple constraints"
    ),
)
@click.option(
    "--resource",
    type=str,
    multiple=True,
    help=(
        "Add or override AI resources to include in the prompt;"
        " may be used multiple times to pass multiple resources"
    ),
)
@click.option(
    "--best-practice",
    type=str,
    multiple=True,
    help=(
        "Add or override AI best practices to include in the prompt;"
        " may be used multiple times to pass multiple best practices"
    ),
)
@click.option(
    "--override-directives",
    is_flag=True,
    help=(
        "If specified, --constraint, --resource and --best-practice will override"
        " the AI's directives instead of being appended to them"
    ),
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--component-config-file",
    help="Path to a json configuration file",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),
)
def run(
    continuous: bool,
    continuous_limit: Optional[int],
    speak: bool,
    install_plugin_deps: bool,
    skip_news: bool,
    skip_reprompt: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    resource: tuple[str],
    constraint: tuple[str],
    best_practice: tuple[str],
    override_directives: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
    component_config_file: Optional[Path],
) -> None:
    """
    Sets up and runs an agent, based on the task specified by the user, or resumes an
    existing agent.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt

    run_auto_gpt(
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        speak=speak,
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        skip_news=skip_news,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
        resources=list(resource),
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
        component_config_file=component_config_file,
    )


@cli.command()
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
)
@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
@click.option(
    "--log-format",
    help=(
        "Choose a log format; defaults to 'simple'."
        " Also implies --log-file-format, unless it is specified explicitly."
        " Using the 'structured_google_cloud' format disables log file output."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--log-file-format",
    help=(
        "Override the format used for the log file output."
        " Defaults to the application's global --log-format."
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
    install_plugin_deps: bool,
    debug: bool,
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
) -> None:
    """
    Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for
    every task.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt_server

    run_auto_gpt_server(
        debug=debug,
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        install_plugin_deps=install_plugin_deps,
    )


if __name__ == "__main__":
    cli()
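# --- Illustrative sketch (not part of the source above) ---
# Invoking the CLI in-process with click's test runner; the flag values
# are just examples.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(
    cli, ["run", "--continuous", "--continuous-limit", "3", "--skip-news"]
)
print(result.output)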
@@ -1,221 +0,0 @@
"""Configuration classes that store the application's settings and state."""
from __future__ import annotations

import logging
import os
import re
from pathlib import Path
from typing import Optional, Union

import forge
from forge.config.base import BaseConfig
from forge.llm.providers import CHAT_MODELS, ModelName
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
from forge.logging.config import LoggingConfig
from forge.models.config import Configurable, UserConfigurable
from pydantic import SecretStr, ValidationInfo, field_validator

logger = logging.getLogger(__name__)

PROJECT_ROOT = Path(forge.__file__).parent.parent
AZURE_CONFIG_FILE = Path("azure.yaml")

GPT_4_MODEL = OpenAIModelName.GPT4
GPT_3_MODEL = OpenAIModelName.GPT3


class AppConfig(BaseConfig):
    name: str = "Auto-GPT configuration"
    description: str = "Default configuration for the Auto-GPT application."

    ########################
    # Application Settings #
    ########################
    project_root: Path = PROJECT_ROOT
    app_data_dir: Path = project_root / "data"
    skip_news: bool = False
    skip_reprompt: bool = False
    authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
    exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
    noninteractive_mode: bool = False
    logging: LoggingConfig = LoggingConfig()
    component_config_file: Optional[Path] = UserConfigurable(
        default=None, from_env="COMPONENT_CONFIG_FILE"
    )

    ##########################
    # Agent Control Settings #
    ##########################
    # Model configuration
    fast_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT3,
        from_env="FAST_LLM",
    )
    smart_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT4_TURBO,
        from_env="SMART_LLM",
    )
    temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE")
    openai_functions: bool = UserConfigurable(
        default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
    )
    embedding_model: str = UserConfigurable(
        default="text-embedding-3-small", from_env="EMBEDDING_MODEL"
    )

    # Run loop configuration
    continuous_mode: bool = False
    continuous_limit: int = 0

    ############
    # Commands #
    ############
    # General
    disabled_commands: list[str] = UserConfigurable(
        default_factory=list,
        from_env=lambda: _safe_split(os.getenv("DISABLED_COMMANDS")),
    )

    # File ops
    restrict_to_workspace: bool = UserConfigurable(
        default=True,
        from_env=lambda: os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True",
    )

    ###############
    # Credentials #
    ###############
    # OpenAI
    openai_credentials: Optional[OpenAICredentials] = None
    azure_config_file: Optional[Path] = UserConfigurable(
        default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
    )

    @field_validator("openai_functions")
    def validate_openai_functions(cls, value: bool, info: ValidationInfo):
        if value:
            smart_llm = info.data["smart_llm"]
            assert CHAT_MODELS[smart_llm].has_function_call_api, (
                f"Model {smart_llm} does not support tool calling. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )
        return value


class ConfigBuilder(Configurable[AppConfig]):
    default_settings = AppConfig()

    @classmethod
    def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> AppConfig:
        """Initialize the Config class"""

        config = cls.build_agent_configuration()
        config.project_root = project_root

        # Make relative paths absolute
        for k in {
            "azure_config_file",  # TODO: move from project root
        }:
            setattr(config, k, project_root / getattr(config, k))

        if (
            config.openai_credentials
            and config.openai_credentials.api_type == SecretStr("azure")
            and (config_file := config.azure_config_file)
        ):
            config.openai_credentials.load_azure_config(config_file)

        return config


async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
    """
    Check if API keys (if required) are set for the configured SMART_LLM and FAST_LLM.
    """
    from forge.llm.providers.anthropic import AnthropicModelName
    from forge.llm.providers.groq import GroqModelName
    from pydantic import ValidationError

    if set((config.smart_llm, config.fast_llm)).intersection(AnthropicModelName):
        from forge.llm.providers.anthropic import AnthropicCredentials

        try:
            credentials = AnthropicCredentials.from_env()
        except ValidationError as e:
            if "api_key" in str(e):
                logger.error(
                    "Set your Anthropic API key in .env or as an environment variable"
                )
                logger.info(
                    "For further instructions: "
                    "https://docs.agpt.co/autogpt/setup/#anthropic"
                )

            raise ValueError("Anthropic is unavailable: can't load credentials") from e

        key_pattern = r"^sk-ant-api03-[\w\-]{95}"

        # If key is set, but it looks invalid
        if not re.search(key_pattern, credentials.api_key.get_secret_value()):
            logger.warning(
                "Possibly invalid Anthropic API key! "
                f"Configured Anthropic API key does not match pattern '{key_pattern}'. "
                "If this is a valid key, please report this warning to the maintainers."
            )

    if set((config.smart_llm, config.fast_llm)).intersection(GroqModelName):
        from forge.llm.providers.groq import GroqProvider
        from groq import AuthenticationError

        try:
            groq = GroqProvider()
            await groq.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error("Set your Groq API key in .env or as an environment variable")
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The Groq API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: invalid API key") from e

    if set((config.smart_llm, config.fast_llm)).intersection(OpenAIModelName):
        from forge.llm.providers.openai import OpenAIProvider
        from openai import AuthenticationError

        try:
            openai = OpenAIProvider()
            await openai.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error(
                "Set your OpenAI API key in .env or as an environment variable"
            )
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The OpenAI API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: invalid API key") from e


def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
    """Split a string by a separator. Return an empty list if the string is None."""
    if s is None:
        return []
    return s.split(sep)
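# --- Illustrative sketch (not part of the source above) ---
# Building a config from the environment; the variable values are examples
# only and map to the UserConfigurable(from_env=...) fields above.
import os

os.environ["DISABLED_COMMANDS"] = "execute_shell,web_search"
os.environ["RESTRICT_TO_WORKSPACE"] = "True"
config = ConfigBuilder.build_config_from_env()
assert "execute_shell" in config.disabled_commands
assert config.restrict_to_workspace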
@@ -1,83 +0,0 @@
"""Configurator module."""
from __future__ import annotations

import logging
from typing import Literal, Optional

import click
from forge.llm.providers import ModelName, MultiProvider

from autogpt.app.config import GPT_3_MODEL, AppConfig

logger = logging.getLogger(__name__)


async def apply_overrides_to_config(
    config: AppConfig,
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    skip_news: bool = False,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        config (AppConfig): The config object to update.
        continuous (bool): Whether to run in continuous mode.
        continuous_limit (int): The number of times to run in continuous mode.
        skip_reprompt (bool): Whether to skip the re-prompting messages on start.
        skip_news (bool): Whether to suppress the output of latest news on startup.
    """
    config.continuous_mode = False

    if continuous:
        logger.warning(
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True

        if continuous_limit:
            config.continuous_limit = continuous_limit

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    # Check availability of configured LLMs; fall back to another LLM if unavailable
    config.fast_llm, config.smart_llm = await check_models(
        (config.fast_llm, "fast_llm"), (config.smart_llm, "smart_llm")
    )

    if skip_reprompt:
        config.skip_reprompt = True

    if skip_news:
        config.skip_news = True


async def check_models(
    *models: tuple[ModelName, Literal["smart_llm", "fast_llm"]]
) -> tuple[ModelName, ...]:
    """Check if each model is available for use; fall back to GPT_3_MODEL if not."""
    multi_provider = MultiProvider()
    available_models = await multi_provider.get_available_chat_models()

    checked_models: list[ModelName] = []
    for model, model_type in models:
        if any(model == m.name for m in available_models):
            checked_models.append(model)
        else:
            logger.warning(
                f"You don't have access to {model}. "
                f"Setting {model_type} to {GPT_3_MODEL}."
            )
            checked_models.append(GPT_3_MODEL)

    return tuple(checked_models)
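# --- Illustrative sketch (not part of the source above) ---
# Applying CLI overrides to a config built from the environment; values are
# examples only, and continuous_limit is valid only together with continuous.
import asyncio

from autogpt.app.config import ConfigBuilder

async def main():
    config = ConfigBuilder.build_config_from_env()
    await apply_overrides_to_config(config=config, continuous=True, continuous_limit=5)
    # check_models() has now replaced any inaccessible fast_llm/smart_llm
    # with GPT_3_MODEL.

asyncio.run(main())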
@@ -1,19 +0,0 @@
import logging

import click

logger = logging.getLogger(__name__)


def clean_input(prompt: str = ""):
    try:
        # Ask for input; just pressing Enter returns the empty-string default
        logger.debug("Asking user via keyboard...")

        return click.prompt(
            text=prompt, prompt_suffix=" ", default="", show_default=False
        )
    except KeyboardInterrupt:
        logger.info("You interrupted AutoGPT")
        logger.info("Quitting...")
        exit(0)
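# --- Illustrative sketch (not part of the source above) ---
# Callers treat the empty string (a bare Enter) as consent; this mirrors the
# resume prompt in app.main and is only an example.
answer = clean_input("Resume? [Y/n]")
if answer == "" or answer.lower() == "y":
    pass  # proceed with the default action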
@@ -1,774 +0,0 @@
|
||||
"""
|
||||
The application entry point. Can be invoked by a CLI or any other front end application.
|
||||
"""
|
||||
|
||||
import enum
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from types import FrameType
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from colorama import Fore, Style
|
||||
from forge.agent_protocol.database import AgentDB
|
||||
from forge.components.code_executor.code_executor import (
|
||||
is_docker_available,
|
||||
we_are_running_in_a_docker_container,
|
||||
)
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.file_storage import FileStorageBackendName, get_storage
|
||||
from forge.llm.providers import MultiProvider
|
||||
from forge.logging.config import configure_logging
|
||||
from forge.logging.utils import print_attribute, speak
|
||||
from forge.models.action import ActionInterruptedByHuman, ActionProposal
|
||||
from forge.models.utils import ModelWithSummary
|
||||
from forge.utils.const import FINISH_COMMAND
|
||||
from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError
|
||||
|
||||
from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
|
||||
from autogpt.agents.agent_manager import AgentManager
|
||||
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
|
||||
from autogpt.app.config import (
|
||||
AppConfig,
|
||||
ConfigBuilder,
|
||||
assert_config_has_required_llm_api_keys,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.agent import Agent
|
||||
|
||||
from .configurator import apply_overrides_to_config
|
||||
from .input import clean_input
|
||||
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
|
||||
from .spinner import Spinner
|
||||
from .utils import (
|
||||
coroutine,
|
||||
get_legal_warning,
|
||||
markdown_to_ansi_style,
|
||||
print_git_branch_info,
|
||||
print_motd,
|
||||
print_python_version_info,
|
||||
)
|
||||
|
||||
|
||||
@coroutine
|
||||
async def run_auto_gpt(
|
||||
continuous: bool = False,
|
||||
continuous_limit: Optional[int] = None,
|
||||
skip_reprompt: bool = False,
|
||||
speak: bool = False,
|
||||
debug: bool = False,
|
||||
log_level: Optional[str] = None,
|
||||
log_format: Optional[str] = None,
|
||||
log_file_format: Optional[str] = None,
|
||||
skip_news: bool = False,
|
||||
install_plugin_deps: bool = False,
|
||||
override_ai_name: Optional[str] = None,
|
||||
override_ai_role: Optional[str] = None,
|
||||
resources: Optional[list[str]] = None,
|
||||
constraints: Optional[list[str]] = None,
|
||||
best_practices: Optional[list[str]] = None,
|
||||
override_directives: bool = False,
|
||||
component_config_file: Optional[Path] = None,
|
||||
):
|
||||
# Set up configuration
|
||||
config = ConfigBuilder.build_config_from_env()
|
||||
# Storage
|
||||
local = config.file_storage_backend == FileStorageBackendName.LOCAL
|
||||
restrict_to_root = not local or config.restrict_to_workspace
|
||||
file_storage = get_storage(
|
||||
config.file_storage_backend,
|
||||
root_path=Path("data"),
|
||||
restrict_to_root=restrict_to_root,
|
||||
)
|
||||
file_storage.initialize()
|
||||
|
||||
# Set up logging module
|
||||
if speak:
|
||||
config.tts_config.speak_mode = True
|
||||
configure_logging(
|
||||
debug=debug,
|
||||
level=log_level,
|
||||
log_format=log_format,
|
||||
log_file_format=log_file_format,
|
||||
config=config.logging,
|
||||
tts_config=config.tts_config,
|
||||
)
|
||||
|
||||
await assert_config_has_required_llm_api_keys(config)
|
||||
|
||||
await apply_overrides_to_config(
|
||||
config=config,
|
||||
continuous=continuous,
|
||||
continuous_limit=continuous_limit,
|
||||
skip_reprompt=skip_reprompt,
|
||||
skip_news=skip_news,
|
||||
)
|
||||
|
||||
llm_provider = _configure_llm_provider(config)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if config.continuous_mode:
|
||||
for line in get_legal_warning().split("\n"):
|
||||
logger.warning(
|
||||
extra={
|
||||
"title": "LEGAL:",
|
||||
"title_color": Fore.RED,
|
||||
"preserve_color": True,
|
||||
},
|
||||
msg=markdown_to_ansi_style(line),
|
||||
)
|
||||
|
||||
if not config.skip_news:
|
||||
print_motd(logger)
|
||||
print_git_branch_info(logger)
|
||||
print_python_version_info(logger)
|
||||
print_attribute("Smart LLM", config.smart_llm)
|
||||
print_attribute("Fast LLM", config.fast_llm)
|
||||
if config.continuous_mode:
|
||||
print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
|
||||
if continuous_limit:
|
||||
print_attribute("Continuous Limit", config.continuous_limit)
|
||||
if config.tts_config.speak_mode:
|
||||
print_attribute("Speak Mode", "ENABLED")
|
||||
if we_are_running_in_a_docker_container() or is_docker_available():
|
||||
print_attribute("Code Execution", "ENABLED")
|
||||
else:
|
||||
print_attribute(
|
||||
"Code Execution",
|
||||
"DISABLED (Docker unavailable)",
|
||||
title_color=Fore.YELLOW,
|
||||
)
|
||||
|
||||
# Let user choose an existing agent to run
|
||||
agent_manager = AgentManager(file_storage)
|
||||
existing_agents = agent_manager.list_agents()
|
||||
load_existing_agent = ""
|
||||
if existing_agents:
|
||||
print(
|
||||
"Existing agents\n---------------\n"
|
||||
+ "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1))
|
||||
)
|
||||
load_existing_agent = clean_input(
|
||||
"Enter the number or name of the agent to run,"
|
||||
" or hit enter to create a new one:",
|
||||
)
        if re.match(r"^\d+$", load_existing_agent.strip()) and 0 < int(
            load_existing_agent
        ) <= len(existing_agents):
            load_existing_agent = existing_agents[int(load_existing_agent) - 1]

    if load_existing_agent != "" and load_existing_agent not in existing_agents:
        logger.info(
            f"Unknown agent '{load_existing_agent}', "
            f"creating a new one instead.",
            extra={"color": Fore.YELLOW},
        )
        load_existing_agent = ""

    # Either load existing or set up new agent state
    agent = None
    agent_state = None

    ############################
    # Resume an Existing Agent #
    ############################
    if load_existing_agent:
        agent_state = None
        while True:
            answer = clean_input("Resume? [Y/n]")
            if answer == "" or answer.lower() == "y":
                agent_state = agent_manager.load_agent_state(load_existing_agent)
                break
            elif answer.lower() == "n":
                break

    if agent_state:
        agent = configure_agent_with_state(
            state=agent_state,
            app_config=config,
            file_storage=file_storage,
            llm_provider=llm_provider,
        )
        apply_overrides_to_ai_settings(
            ai_profile=agent.state.ai_profile,
            directives=agent.state.directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        if (
            (current_episode := agent.event_history.current_episode)
            and current_episode.action.use_tool.name == FINISH_COMMAND
            and not current_episode.result
        ):
            # Agent was resumed after `finish` -> rewrite result of `finish` action
            finish_reason = current_episode.action.use_tool.arguments["reason"]
            print(f"Agent previously self-terminated; reason: '{finish_reason}'")
            new_assignment = clean_input(
                "Please give a follow-up question or assignment:"
            )
            agent.event_history.register_result(
                ActionInterruptedByHuman(feedback=new_assignment)
            )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            ai_profile, ai_directives = await interactively_revise_ai_settings(
                ai_profile=agent.state.ai_profile,
                directives=agent.state.directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

    ######################
    # Set up a new Agent #
    ######################
    if not agent:
        task = ""
        while task.strip() == "":
            task = clean_input(
                "Enter the task that you want AutoGPT to execute,"
                " with as much detail as possible:",
            )

        ai_profile = AIProfile()
        additional_ai_directives = AIDirectives()
        apply_overrides_to_ai_settings(
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            override_name=override_ai_name,
            override_role=override_ai_role,
            resources=resources,
            constraints=constraints,
            best_practices=best_practices,
            replace_directives=override_directives,
        )

        # If any of these are specified as arguments,
        # assume the user doesn't want to revise them
        if not any(
            [
                override_ai_name,
                override_ai_role,
                resources,
                constraints,
                best_practices,
            ]
        ):
            (
                ai_profile,
                additional_ai_directives,
            ) = await interactively_revise_ai_settings(
                ai_profile=ai_profile,
                directives=additional_ai_directives,
                app_config=config,
            )
        else:
            logger.info("AI config overrides specified through CLI; skipping revision")

        agent = create_agent(
            agent_id=agent_manager.generate_id(ai_profile.ai_name),
            task=task,
            ai_profile=ai_profile,
            directives=additional_ai_directives,
            app_config=config,
            file_storage=file_storage,
            llm_provider=llm_provider,
        )

        file_manager = agent.file_manager

        if file_manager and not agent.config.allow_fs_access:
            logger.info(
                f"{Fore.YELLOW}"
                "NOTE: All files/directories created by this agent can be found "
                f"inside its workspace at:{Fore.RESET} {file_manager.workspace.root}",
                extra={"preserve_color": True},
            )

        # TODO: re-evaluate performance benefit of task-oriented profiles
        # # Concurrently generate a custom profile for the agent and apply it once done
        # def update_agent_directives(
        #     task: asyncio.Task[tuple[AIProfile, AIDirectives]]
        # ):
        #     logger.debug(f"Updating AIProfile: {task.result()[0]}")
        #     logger.debug(f"Adding AIDirectives: {task.result()[1]}")
        #     agent.state.ai_profile = task.result()[0]
        #     agent.state.directives = agent.state.directives + task.result()[1]

        # asyncio.create_task(
        #     generate_agent_profile_for_task(
        #         task, app_config=config, llm_provider=llm_provider
        #     )
        # ).add_done_callback(update_agent_directives)

    # Load component configuration from file
    if _config_file := component_config_file or config.component_config_file:
        try:
            logger.info(f"Loading component configuration from {_config_file}")
            agent.load_component_configs(_config_file.read_text())
        except Exception as e:
            logger.error(f"Could not load component configuration: {e}")

    #################
    # Run the Agent #
    #################
    try:
        await run_interaction_loop(agent)
    except AgentTerminated:
        agent_id = agent.state.agent_id
        logger.info(f"Saving state of {agent_id}...")

        # Allow user to Save As other ID
        save_as_id = clean_input(
            f"Press enter to save as '{agent_id}',"
            " or enter a different ID to save to:",
        )
        # TODO: allow many-to-one relations of agents and workspaces
        await agent.file_manager.save_state(
            save_as_id.strip() if not save_as_id.isspace() else None
        )


@coroutine
async def run_auto_gpt_server(
    debug: bool = False,
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    install_plugin_deps: bool = False,
):
    from .agent_protocol_server import AgentProtocolServer

    config = ConfigBuilder.build_config_from_env()
    # Storage
    local = config.file_storage_backend == FileStorageBackendName.LOCAL
    restrict_to_root = not local or config.restrict_to_workspace
    file_storage = get_storage(
        config.file_storage_backend,
        root_path=Path("data"),
        restrict_to_root=restrict_to_root,
    )
    file_storage.initialize()

    # Set up logging module
    configure_logging(
        debug=debug,
        level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        config=config.logging,
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)

    await apply_overrides_to_config(
        config=config,
    )

    llm_provider = _configure_llm_provider(config)

    # Set up & start server
    database = AgentDB(
        database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"),
        debug_enabled=debug,
    )
    port: int = int(os.getenv("AP_SERVER_PORT", default=8000))
    server = AgentProtocolServer(
        app_config=config,
        database=database,
        file_storage=file_storage,
        llm_provider=llm_provider,
    )
    await server.start(port=port)

    logging.getLogger().info(
        f"Total OpenAI session cost: "
        f"${round(sum(b.total_cost for b in server._task_budgets.values()), 2)}"
    )


def _configure_llm_provider(config: AppConfig) -> MultiProvider:
    multi_provider = MultiProvider()
    for model in [config.smart_llm, config.fast_llm]:
        # Ensure model providers for configured LLMs are available
        multi_provider.get_model_provider(model)
    return multi_provider
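
# NOTE: the return value of get_model_provider() is discarded above; judging by
# the inline comment, the call only forces provider initialization so that a
# misconfigured model fails fast at startup rather than mid-run.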


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | float:
    # Translate from the continuous_mode/continuous_limit config
    # to a cycle_budget (maximum number of cycles to run without checking in with the
    # user) and a count of cycles_remaining before we check in.
    if continuous_mode:
        cycle_budget = continuous_limit if continuous_limit else math.inf
    else:
        cycle_budget = 1

    return cycle_budget
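
# For illustration: _get_cycle_budget(False, 0) == 1 (check in with the user on
# every cycle), _get_cycle_budget(True, 10) == 10, and
# _get_cycle_budget(True, 0) == math.inf (fully unattended).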


class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    EXIT = "EXIT"
    TEXT = "TEXT"


async def run_interaction_loop(
    agent: "Agent",
) -> None:
    """Run the main interaction loop for the agent.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    app_config = agent.app_config
    ai_profile = agent.state.ai_profile
    logger = logging.getLogger(__name__)

    cycle_budget = cycles_remaining = _get_cycle_budget(
        app_config.continuous_mode, app_config.continuous_limit
    )
    spinner = Spinner(
        "Thinking...", plain_output=app_config.logging.plain_console_output
    )
    stop_reason = None

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        nonlocal cycle_budget, cycles_remaining, spinner, stop_reason
        if stop_reason:
            logger.error("Quitting immediately...")
            sys.exit()
        if cycles_remaining in [0, 1]:
            logger.warning("Interrupt signal received: shutting down gracefully.")
            logger.warning(
                "Press Ctrl+C again if you want to stop AutoGPT immediately."
            )
            stop_reason = AgentTerminated("Interrupt signal received")
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.error(
                "Interrupt signal received: stopping continuous command execution."
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    def handle_stop_signal() -> None:
        if stop_reason:
            raise stop_reason

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)
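    # In short: during a continuous run, the first Ctrl+C only stops continuous
    # execution (cycles_remaining = 1); when already single-stepping or shutting
    # down, Ctrl+C terminates gracefully, and another Ctrl+C during shutdown
    # exits immediately.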

    #########################
    # Application Main Loop #
    #########################

    # Keep track of consecutive failures of the agent
    consecutive_failures = 0

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        handle_stop_signal()
        # Have the agent determine the next action to take.
        if not (_ep := agent.event_history.current_episode) or _ep.result:
            with spinner:
                try:
                    action_proposal = await agent.propose_action()
                except InvalidAgentResponseError as e:
                    logger.warning(f"The agent's thoughts could not be parsed: {e}")
                    consecutive_failures += 1
                    if consecutive_failures >= 3:
                        logger.error(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row. Terminating..."
                        )
                        raise AgentTerminated(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row."
                        )
                    continue
        else:
            action_proposal = _ep.action

        consecutive_failures = 0

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(
            ai_profile,
            action_proposal,
            speak_mode=app_config.tts_config.speak_mode,
        )

        ##################
        # Get user input #
        ##################
        handle_stop_signal()
        if cycles_remaining == 1:  # Last cycle
            feedback_type, feedback, new_cycles_remaining = await get_user_feedback(
                app_config,
                ai_profile,
            )

            if feedback_type == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.info(
                            f"The cycle budget is {cycle_budget}.",
                            extra={
                                "title": "RESUMING CONTINUOUS EXECUTION",
                                "title_color": Fore.MAGENTA,
                            },
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.info(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    extra={"color": Fore.MAGENTA},
                )
            elif feedback_type == UserFeedback.EXIT:
                logger.warning("Exiting...")
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                pass
        else:
            feedback = ""
            # First log new-line so user can differentiate sections better in console
            print()
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                print_attribute(
                    "AUTHORIZED_COMMANDS_LEFT", cycles_remaining, title_color=Fore.CYAN
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if not feedback:
            cycles_remaining -= 1

        if not action_proposal.use_tool:
            continue

        handle_stop_signal()

        if not feedback:
            result = await agent.execute(action_proposal)
        else:
            result = await agent.do_not_execute(action_proposal, feedback)

        if result.status == "success":
            logger.info(result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW})
        elif result.status == "error":
            logger.warning(
                f"Command {action_proposal.use_tool.name} returned an error: "
                f"{result.error or result.reason}"
            )


def update_user(
    ai_profile: AIProfile,
    action_proposal: "ActionProposal",
    speak_mode: bool = False,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        ai_profile: The AI's personality/profile.
        action_proposal: The proposed action, with the agent's thoughts and the
            command (tool) to be executed next.
        speak_mode: Whether to speak the thoughts out loud.
    """
    logger = logging.getLogger(__name__)

    print_assistant_thoughts(
        ai_name=ai_profile.ai_name,
        thoughts=action_proposal.thoughts,
        speak_mode=speak_mode,
    )

    if speak_mode:
        speak(f"I want to execute {action_proposal.use_tool.name}")

    # First log new-line so user can differentiate sections better in console
    print()
    safe_tool_name = remove_ansi_escape(action_proposal.use_tool.name)
    logger.info(
        f"COMMAND = {Fore.CYAN}{safe_tool_name}{Style.RESET_ALL} "
        f"ARGUMENTS = {Fore.CYAN}{action_proposal.use_tool.arguments}{Style.RESET_ALL}",
        extra={
            "title": "NEXT ACTION:",
            "title_color": Fore.CYAN,
            "preserve_color": True,
        },
    )


async def get_user_feedback(
    config: AppConfig,
    ai_profile: AIProfile,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
        ai_profile: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    logger = logging.getLogger(__name__)

    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_profile.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        console_input = clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warning("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                user_feedback = UserFeedback.AUTHORIZE
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
            except ValueError:
                logger.warning(
                    f"Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining
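
# Example outcomes, assuming the default authorise_key "y" and exit_key "n"
# (both are user-configurable):
#   "y"    -> (UserFeedback.AUTHORIZE, "", None)
#   "y -5" -> (UserFeedback.AUTHORIZE, "", 5)
#   "n"    -> (UserFeedback.EXIT, "", None)
#   other  -> (UserFeedback.TEXT, <the input>, None)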


def print_assistant_thoughts(
    ai_name: str,
    thoughts: str | ModelWithSummary | AssistantThoughts,
    speak_mode: bool = False,
) -> None:
    logger = logging.getLogger(__name__)

    thoughts_text = remove_ansi_escape(
        thoughts.text
        if isinstance(thoughts, AssistantThoughts)
        else thoughts.summary()
        if isinstance(thoughts, ModelWithSummary)
        else thoughts
    )
    print_attribute(
        f"{ai_name.upper()} THOUGHTS", thoughts_text, title_color=Fore.YELLOW
    )

    if isinstance(thoughts, AssistantThoughts):
        print_attribute(
            "REASONING", remove_ansi_escape(thoughts.reasoning), title_color=Fore.YELLOW
        )
        if assistant_thoughts_plan := remove_ansi_escape(
            "\n".join(f"- {p}" for p in thoughts.plan)
        ):
            print_attribute("PLAN", "", title_color=Fore.YELLOW)
            # If it's a list, join it into a string
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.info(
                    line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}
                )
        print_attribute(
            "CRITICISM",
            remove_ansi_escape(thoughts.self_criticism),
            title_color=Fore.YELLOW,
        )

        # Speak the assistant's thoughts
        if assistant_thoughts_speak := remove_ansi_escape(thoughts.speak):
            if speak_mode:
                speak(assistant_thoughts_speak)
            else:
                print_attribute(
                    "SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW
                )
    else:
        speak(thoughts_text)


def remove_ansi_escape(s: str) -> str:
    return s.replace("\x1B", "")
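
# Dropping the ESC (\x1B) byte is enough to neutralize ANSI escape sequences,
# so model-generated text cannot inject colors or cursor movement into the
# console output.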
@@ -1,203 +0,0 @@
"""Set up the AI and its goals"""
import logging
from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.logging.utils import print_attribute

from autogpt.app.config import AppConfig

from .input import clean_input

logger = logging.getLogger(__name__)


def apply_overrides_to_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    override_name: Optional[str] = "",
    override_role: Optional[str] = "",
    replace_directives: bool = False,
    resources: Optional[list[str]] = None,
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
):
    if override_name:
        ai_profile.ai_name = override_name
    if override_role:
        ai_profile.ai_role = override_role

    if replace_directives:
        if resources:
            directives.resources = resources
        if constraints:
            directives.constraints = constraints
        if best_practices:
            directives.best_practices = best_practices
    else:
        if resources:
            directives.resources += resources
        if constraints:
            directives.constraints += constraints
        if best_practices:
            directives.best_practices += best_practices
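
# Example: append a constraint to the existing directives (with
# replace_directives=True, the given lists would replace the current ones):
#
#   apply_overrides_to_ai_settings(
#       ai_profile, directives, constraints=["Do not access the network"]
#   )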


async def interactively_revise_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
):
    """Interactively revise the AI settings.

    Args:
        ai_profile (AIProfile): The current AI profile.
        directives (AIDirectives): The current AI directives.
        app_config (AppConfig): The application configuration.

    Returns:
        tuple[AIProfile, AIDirectives]: The revised AI profile and directives.
    """
    logger = logging.getLogger("revise_ai_profile")

    revised = False

    while True:
        # Print the current AI configuration
        print_ai_settings(
            title="Current AI Settings" if not revised else "Revised AI Settings",
            ai_profile=ai_profile,
            directives=directives,
            logger=logger,
        )

        if (
            clean_input("Continue with these settings? [Y/n]").lower()
            or app_config.authorise_key
        ) == app_config.authorise_key:
            break

        # Ask for revised ai_profile
        ai_profile.ai_name = (
            clean_input("Enter AI name (or press enter to keep current):")
            or ai_profile.ai_name
        )
        ai_profile.ai_role = (
            clean_input("Enter new AI role (or press enter to keep current):")
            or ai_profile.ai_role
        )

        # Revise constraints
        i = 0
        while i < len(directives.constraints):
            constraint = directives.constraints[i]
            print_attribute(f"Constraint {i+1}:", f'"{constraint}"')
            new_constraint = (
                clean_input(
                    f"Enter new constraint {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or constraint
            )

            if new_constraint == "-":
                directives.constraints.remove(constraint)
                continue
            elif new_constraint:
                directives.constraints[i] = new_constraint

            i += 1

        # Add new constraints
        while True:
            new_constraint = clean_input(
                "Press enter to finish, or enter a constraint to add:",
            )
            if not new_constraint:
                break
            directives.constraints.append(new_constraint)

        # Revise resources
        i = 0
        while i < len(directives.resources):
            resource = directives.resources[i]
            print_attribute(f"Resource {i+1}:", f'"{resource}"')
            new_resource = (
                clean_input(
                    f"Enter new resource {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or resource
            )
            if new_resource == "-":
                directives.resources.remove(resource)
                continue
            elif new_resource:
                directives.resources[i] = new_resource

            i += 1

        # Add new resources
        while True:
            new_resource = clean_input(
                "Press enter to finish, or enter a resource to add:",
            )
            if not new_resource:
                break
            directives.resources.append(new_resource)

        # Revise best practices
        i = 0
        while i < len(directives.best_practices):
            best_practice = directives.best_practices[i]
            print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"')
            new_best_practice = (
                clean_input(
                    f"Enter new best practice {i+1}"
                    " (press enter to keep current, or '-' to remove):",
                )
                or best_practice
            )
            if new_best_practice == "-":
                directives.best_practices.remove(best_practice)
                continue
            elif new_best_practice:
                directives.best_practices[i] = new_best_practice

            i += 1

        # Add new best practices
        while True:
            new_best_practice = clean_input(
                "Press enter to finish, or enter a best practice to add:",
            )
            if not new_best_practice:
                break
            directives.best_practices.append(new_best_practice)

        revised = True

    return ai_profile, directives


def print_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    logger: logging.Logger,
    title: str = "AI Settings",
):
    print_attribute(title, "")
    print_attribute("-" * len(title), "")
    print_attribute("Name :", ai_profile.ai_name)
    print_attribute("Role :", ai_profile.ai_role)

    print_attribute("Constraints:", "" if directives.constraints else "(none)")
    for constraint in directives.constraints:
        logger.info(f"- {constraint}")
    print_attribute("Resources:", "" if directives.resources else "(none)")
    for resource in directives.resources:
        logger.info(f"- {resource}")
    print_attribute("Best practices:", "" if directives.best_practices else "(none)")
    for best_practice in directives.best_practices:
        logger.info(f"- {best_practice}")
@@ -1,64 +0,0 @@
import os

import click
from colorama import Fore, Style

from .utils import (
    env_file_exists,
    get_git_user_email,
    set_env_config_value,
    vcs_state_diverges_from_master,
)


def setup_telemetry() -> None:
    if os.getenv("TELEMETRY_OPT_IN") is None:
        # If no .env file is present, don't bother asking to enable telemetry,
        # to prevent repeated asking in non-persistent environments.
        if not env_file_exists():
            return

        allow_telemetry = click.prompt(
            f"""
{Style.BRIGHT}❓ Do you want to enable telemetry? ❓{Style.NORMAL}
This means AutoGPT will send diagnostic data to the core development team when something
goes wrong, and will help us to diagnose and fix problems earlier and faster. It also
allows us to collect basic performance data, which helps us find bottlenecks and other
things that slow down the application.

By entering 'yes', you confirm that you have read and agree to our Privacy Policy,
which is available here:
https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984

Please enter 'yes' or 'no'""",
            type=bool,
        )
        set_env_config_value("TELEMETRY_OPT_IN", "true" if allow_telemetry else "false")
        click.echo(
            f"❤️ Thank you! Telemetry is {Fore.GREEN}enabled{Fore.RESET}."
            if allow_telemetry
            else f"👍 Telemetry is {Fore.RED}disabled{Fore.RESET}."
        )
        click.echo(
            "💡 If you ever change your mind, you can change 'TELEMETRY_OPT_IN' in .env"
        )
        click.echo()

    if os.getenv("TELEMETRY_OPT_IN", "").lower() == "true":
        _setup_sentry()


def _setup_sentry() -> None:
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://dc266f2f7a2381194d1c0fa36dff67d8@o4505260022104064.ingest.sentry.io/4506739844710400",  # noqa
        enable_tracing=True,
        environment=os.getenv(
            "TELEMETRY_ENVIRONMENT",
            "production" if not vcs_state_diverges_from_master() else "dev",
        ),
    )

    # Allow Sentry to distinguish between users
    sentry_sdk.set_user({"email": get_git_user_email(), "ip_address": "{{auto}}"})
@@ -1,247 +0,0 @@
import asyncio
import contextlib
import functools
import logging
import os
import re
import socket
import sys
from pathlib import Path
from typing import Any, Callable, Coroutine, ParamSpec, TypeVar, cast

import requests
from colorama import Fore, Style
from git import InvalidGitRepositoryError, Repo

P = ParamSpec("P")
T = TypeVar("T")

logger = logging.getLogger(__name__)


def get_bulletin_from_web():
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpt/BULLETIN.md"  # noqa: E501
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except InvalidGitRepositoryError:
        return ""


def vcs_state_diverges_from_master() -> bool:
    """
    Returns whether a git repo is present and contains changes that are not in `master`.
    """
    paths_we_care_about = "autogpt/autogpt/**/*.py"
    try:
        repo = Repo(search_parent_directories=True)

        # Check for uncommitted changes in the specified path
        uncommitted_changes = repo.index.diff(None, paths=paths_we_care_about)
        if uncommitted_changes:
            return True

        # Find OG AutoGPT remote
        for remote in repo.remotes:
            if remote.url.endswith(
                tuple(
                    # All permutations of old/new repo name and HTTP(S)/Git URLs
                    f"{prefix}{path}"
                    for prefix in ("://github.com/", "git@github.com:")
                    for path in (
                        f"Significant-Gravitas/{n}.git" for n in ("AutoGPT", "Auto-GPT")
                    )
                )
            ):
                og_remote = remote
                break
        else:
            # Original AutoGPT remote is not configured: assume local codebase diverges
            return True

        master_branch = og_remote.refs.master
        with contextlib.suppress(StopIteration):
            next(repo.iter_commits(f"HEAD..{master_branch}", paths=paths_we_care_about))
            # Local repo is one or more commits ahead of OG AutoGPT master branch
            return True

        # Relevant part of the codebase is on master
        return False
    except InvalidGitRepositoryError:
        # No git repo present: assume codebase is a clean download
        return False


def get_git_user_email() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        return cast(str, repo.config_reader().get_value("user", "email", default=""))
    except InvalidGitRepositoryError:
        return ""


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        current_bulletin = open(
            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
        ).read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to AutoGPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest AutoGPT News and feature updates!\n"
            "If you don't wish to see this message, you "
            "can run AutoGPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news


def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""  # noqa: E501
    return legal_text


def print_motd(logger: logging.Logger):
    motd, is_new_motd = get_latest_bulletin()
    if motd:
        motd = markdown_to_ansi_style(motd)
        for motd_line in motd.split("\n"):
            logger.info(
                extra={
                    "title": "NEWS:",
                    "title_color": Fore.GREEN,
                    "preserve_color": True,
                },
                msg=motd_line,
            )
        if is_new_motd:
            input(
                Fore.MAGENTA
                + Style.BRIGHT
                + "NEWS: Bulletin was updated! Press Enter to continue..."
                + Style.RESET_ALL
            )


def print_git_branch_info(logger: logging.Logger):
    git_branch = get_current_git_branch()
    if git_branch and git_branch != "master":
        logger.warning(
            f"You are running on `{git_branch}` branch"
            " - this is not a supported branch."
        )


def print_python_version_info(logger: logging.Logger):
    if sys.version_info < (3, 10):
        logger.error(
            "WARNING: You are running on an older version of Python. "
            "Some people have observed problems with certain "
            "parts of AutoGPT with this version. "
            "Please consider upgrading to Python 3.10 or higher.",
        )


ENV_FILE_PATH = Path(__file__).parent.parent.parent / ".env"


def env_file_exists() -> bool:
    return ENV_FILE_PATH.is_file()


def set_env_config_value(key: str, value: str) -> None:
    """Sets the specified env variable and updates it in .env as well"""
    os.environ[key] = value

    with ENV_FILE_PATH.open("r+") as file:
        lines = file.readlines()
        file.seek(0)
        key_already_in_file = False
        for line in lines:
            if re.match(rf"^(?:# )?{key}=.*$", line):
                file.write(f"{key}={value}\n")
                key_already_in_file = True
            else:
                file.write(line)

        if not key_already_in_file:
            file.write(f"{key}={value}\n")

        file.truncate()
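
# For example, set_env_config_value("TELEMETRY_OPT_IN", "true") updates
# os.environ and rewrites .env in place, also replacing a commented-out
# "# TELEMETRY_OPT_IN=..." line if one is present.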


def is_port_free(port: int, host: str = "127.0.0.1"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind((host, port))  # Try to bind to the port
            return True  # If successful, the port is free
        except OSError:
            return False  # If failed, the port is likely in use


def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]:
    @functools.wraps(f)
    def wrapper(*args: P.args, **kwargs: P.kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper
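
# Usage: wrap an async entrypoint so a synchronous caller (e.g. a Click
# command) can invoke it directly, as run_auto_gpt_server does earlier in this
# diff:
#
#   @coroutine
#   async def run_auto_gpt_server(...): ...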
@@ -1,7 +0,0 @@
azure_api_type: azure
azure_api_version: api-version-for-azure
azure_endpoint: your-azure-openai-endpoint
azure_model_map:
  gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure
  gpt-4-turbo-preview: gpt4-deployment-id-for-azure
  text-embedding-3-small: embedding-deployment-id-for-azure
109
autogpt/cli.py
Normal file
@@ -0,0 +1,109 @@
"""Main script for the autogpt package."""
import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-settings",
    "-C",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="Specifies whether to suppress the output of latest news on startup.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
) -> None:
    """
    Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4 and pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.main import run_auto_gpt

    if ctx.invoked_subcommand is None:
        run_auto_gpt(
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
            skip_news,
            workspace_directory,
            install_plugin_deps,
        )


if __name__ == "__main__":
    main()
Some files were not shown because too many files have changed in this diff