mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 00:28:31 -05:00

Compare commits: 22 commits, v0.4.7...summary_me
| Author | SHA1 | Date |
|---|---|---|
|  | cb6214e647 |  |
|  | 8b82421b9c |  |
|  | 75cc71f8d3 |  |
|  | f287282e8c |  |
|  | 2a93aff512 |  |
|  | 6d1653b84f |  |
|  | a7816b8c79 |  |
|  | 21913c4733 |  |
|  | 9d9c66d50f |  |
|  | a00a7a2bd0 |  |
|  | d6cb10432b |  |
|  | 0bea5e38a4 |  |
|  | 88b2d5fb2d |  |
|  | f1032926cc |  |
|  | e7ad51ce42 |  |
|  | a3522223d9 |  |
|  | 4e3035efe4 |  |
|  | a8cbf51489 |  |
|  | 317361da8c |  |
|  | 991bc77e0b |  |
|  | 83357f6c2f |  |
|  | acf48d2d4d |  |
@@ -7,12 +7,11 @@
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": "true",
"username": "vscode",
"userUid": "1000",
"userGid": "1000",
"userUid": "6942",
"userGid": "6942",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
@@ -26,20 +25,8 @@
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"python.defaultInterpreterPath": "/usr/local/bin/python",
"python.testing.pytestEnabled": true,
"python.testing.unittestEnabled": false
},
"extensions": [
"ms-python.python",
"VisualStudioExptTeam.vscodeintellicode",
"ms-python.vscode-pylance",
"ms-python.black-formatter",
"ms-python.isort",
"GitHub.vscode-pull-request-github",
"GitHub.copilot",
"github.vscode-github-actions"
]
"python.defaultInterpreterPath": "/usr/local/bin/python"
}
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -49,8 +36,5 @@
// "postCreateCommand": "pip3 install --user -r requirements.txt",

// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode",

// Add the freshly containerized repo to the list of safe repositories
"postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
}
"remoteUser": "vscode"
}
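The combined `postCreateCommand` in this hunk chains two steps: marking the bind-mounted checkout as a safe git directory and installing dependencies. A minimal sketch of the equivalent manual commands inside the container, using the workspace path from the hunk above:

```bash
# git refuses to operate on a repo owned by another user unless it is marked safe;
# a bind-mounted checkout typically has a different owner than the container user
git config --global --add safe.directory /workspace/Auto-GPT

# Install the project's Python dependencies for the container user
pip3 install --user -r requirements.txt
```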
@@ -4,9 +4,16 @@ version: '3.9'

services:
  auto-gpt:
    depends_on:
      - redis
    build:
      dockerfile: .devcontainer/Dockerfile
      context: ../
    tty: true
    environment:
      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
      REDIS_HOST: ${REDIS_HOST:-redis}
    volumes:
      - ../:/workspace/Auto-GPT
  redis:
    image: 'redis/redis-stack-server:latest'
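In the compose file above, `depends_on` starts the Redis memory backend before the agent container, and `MEMORY_BACKEND`/`REDIS_HOST` default to `redis`. A rough sketch of driving it by hand; the `-f` path is an assumption based on the `.devcontainer/Dockerfile` reference in the hunk:

```bash
# Start the agent; compose brings up the redis dependency first
docker compose -f .devcontainer/docker-compose.yml up -d auto-gpt

# Override the memory backend for a single run
MEMORY_BACKEND=json_file docker compose -f .devcontainer/docker-compose.yml up -d auto-gpt
```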
@@ -2,7 +2,6 @@
*.template
*.yaml
*.yml
!prompt_settings.yaml

*.md
*.png
252 .env.template
@@ -1,212 +1,214 @@
# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co

################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
# OPENAI_FUNCTIONS=False

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
# PLAIN_OUTPUT=False

## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
# DISABLED_COMMAND_CATEGORIES=

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0

## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=

## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml

### AZURE
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4)
# SMART_LLM=gpt-4
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-ada-002

################################################################################
### SHELL EXECUTION
################################################################################

## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
# SHELL_COMMAND_CONTROL=denylist

## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su

## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
# SHELL_ALLOWLIST=
### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### General

## MEMORY_BACKEND - Memory backend type
# MEMORY_BACKEND=json_file

## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured - also works with Zilliz)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### Redis
### PINECONE
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_HOST=localhost

## REDIS_PORT - Redis port (Default: 6379)
# REDIS_PORT=6379

## REDIS_PASSWORD - Redis password (Default: "")
# REDIS_PASSWORD=

## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
## MILVUS_USERNAME - username for your Milvus database
## MILVUS_PASSWORD - password for your Milvus database
## MILVUS_SECURE - True to enable TLS. (Default: False)
## Setting MILVUS_ADDR to a `https://` URL will override this setting.
## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=localhost:19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Common

## IMAGE_PROVIDER - Image provider (Default: dalle)
### OPEN AI
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle

## IMAGE_SIZE - Image size (Default: 256)
# IMAGE_SIZE=256

### Huggingface (IMAGE_PROVIDER=huggingface)

### HUGGINGFACE
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=

## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
# SD_WEBUI_URL=http://localhost:7860
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
# AUDIO_TO_TEXT_PROVIDER=huggingface

## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
### HUGGINGFACE
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
################################################################################

## GITHUB_API_KEY - Github API key / PAT (Default: None)
# GITHUB_API_KEY=

## GITHUB_USERNAME - Github username (Default: None)
# GITHUB_USERNAME=
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True

## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
# BROWSE_CHUNK_MAX_LENGTH=3000

## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
### GOOGLE
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TEXT TO SPEECH PROVIDER
### TTS PROVIDER
################################################################################

## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
# TEXT_TO_SPEECH_PROVIDER=gtts
### MAC OS
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian
### STREAMELEMENTS
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=

## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=
### ELEVENLABS
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### CHAT MESSAGES
### TWITTER API
################################################################################

## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=

################################################################################
### ALLOWLISTED PLUGINS
################################################################################

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=

################################################################################
### CHAT PLUGIN SETTINGS
################################################################################
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False
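The template above is not read directly: the usual setup is to copy it to `.env` (which the `.gitignore` hunk further down keeps out of version control) and set the required key. A minimal sketch; the editor invocation is just illustrative:

```bash
cp .env.template .env
# Set OPENAI_API_KEY (required); most other settings are optional and commented out
"${EDITOR:-nano}" .env   # replace "your-openai-api-key" with a real key
```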
2 .gitattributes vendored
@@ -1,5 +1,5 @@
# Exclude VCR cassettes from stats
tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated
tests/**/cassettes/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation
2 .github/CODEOWNERS vendored
@@ -1,2 +0,0 @@
.github/workflows/ @Significant-Gravitas/maintainers
autogpt/core @collijk
153 .github/ISSUE_TEMPLATE/1.bug.yml vendored
@@ -8,16 +8,14 @@ body:
### ⚠️ Before you continue
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
* If you need help, you can ask in the [discussions] section or in [#tech-support]
* **Thoroughly search the [existing issues] before creating a new one**
* Read our [wiki page on Contributing]
* **Throughly search the [existing issues] before creating a new one**

[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing

- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
@@ -27,29 +25,23 @@ body:
options:
- label: I have searched the existing issues, and there is no existing issue for my problem
required: true

- type: markdown
attributes:
value: |
Please confirm that the issue you have is described well and precise in the title above ⬆️.
A good rule of thumb: What would you type if you were searching for the issue?

For example:
BAD - my auto-gpt keeps looping
GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.

⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we can spend building AutoGPT.

Please help us help you by following these steps:
- Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
- Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
- Provide relevant info:
- Provide commit-hash (`git rev-parse HEAD` gets it) if possible
- If it's a pip/packages issue, mention this in the title and provide pip version, python version
- If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
Please provide a searchable summary of the issue in the title above ⬆️.

⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we spend building AutoGPT.

Please help us help you:
- Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
- Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
- Search for existing issues, "add comment" is tidier than "new issue"
- Ask on our Discord (https://discord.gg/autogpt)
- Provide relevant info:
- Provide commit-hash (`git rev-parse HEAD` gets it)
- If it's a pip/packages issue, provide pip version, python version
- If it's a crash, provide traceback.
- type: dropdown
attributes:
label: Which Operating System are you using?
@@ -62,15 +54,9 @@ body:
- Docker
- Devcontainer / Codespace
- Windows Subsystem for Linux (WSL)
- Other
- Other (Please specify in your problem)
validations:
required: true
nested_fields:
- type: text
attributes:
label: Specify the system
description: Please specify the system you are working on.

- type: dropdown
attributes:
label: Which version of Auto-GPT are you using?
@@ -85,80 +71,61 @@ body:
- Master (branch)
validations:
required: true

- type: dropdown
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
label: GPT-3 or GPT-4?
description: >
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
- GPT-4(32k)
validations:
required: true

- type: dropdown
- type: textarea
attributes:
label: Which area covers your issue best?
label: Steps to reproduce 🕹
description: |
**⚠️ Issues that we can't reproduce will be closed.**
- type: textarea
attributes:
label: Current behavior 😯
description: Describe what happens instead of the expected behavior.
- type: textarea
attributes:
label: Expected behavior 🤔
description: Describe what should happen.
- type: textarea
attributes:
label: Your prompt 📝
description: >
Select the area related to the issue you are reporting.
options:
- Installation and setup
- Memory
- Performance
- Prompt
- Commands
- Plugins
- AI Model Limitations
- Challenges
- Documentation
- Logging
- Agents
- Other
validations:
required: true
autolabels: true
nested_fields:
- type: text
attributes:
label: Specify the area
description: Please specify the area you think is best related to the issue.

- type: textarea
attributes:
label: Describe your issue.
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
validations:
required: true

#Following are optional file content uploads
- type: markdown
attributes:
If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
value: |
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
```yaml
# Paste your prompt here
```
- type: textarea
attributes:
label: Your Logs 📒
description: |
Please include the log showing your error and the command that caused it, if applicable.
You can copy it from your terminal or from `logs/activity.log`.
This will help us understand your issue better!

"The log files are located in the folder 'logs' inside the main auto-gpt folder."

- type: textarea
attributes:
label: Upload Activity Log Content
description: |
Upload the activity log content, this can help us understand the issue better.
To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false

- type: textarea
attributes:
label: Upload Error Log Content
description: |
Upload the error log content, this will help us understand the issue better.
To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false
<details>
<summary><i>Example</i></summary>
```log
INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
Traceback (most recent call last):
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
self._interpret_response_line(
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
```
</details>
value: |
```log
<insert your logs here>
```
7 .github/ISSUE_TEMPLATE/2.feature.yml vendored
@@ -1,12 +1,13 @@
name: Feature request 🚀
description: Suggest a new idea for Auto-GPT!
description: Suggest a new idea for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
Please provide a searchable summary of the issue in the title above ⬆️.

Thanks for contributing by creating an issue! ❤️
- type: checkboxes
attributes:
label: Duplicates
@@ -25,4 +26,4 @@ body:
- type: textarea
attributes:
label: Motivation 🔦
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
11 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -14,8 +14,6 @@ Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg

Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)

By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->

### Background
@@ -35,14 +33,7 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
- [ ] I have run the following commands against my code to ensure it passes our linters:
```shell
black .
isort .
mypy
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
```
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
82 .github/workflows/benchmarks.yml vendored
@@ -1,73 +1,31 @@
name: Benchmarks
name: Run Benchmarks

on:
schedule:
- cron: '0 8 * * *'
workflow_dispatch:

jobs:
Benchmark:
name: ${{ matrix.config.task-name }}
build:
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
config:
- python-version: "3.10"
task: "tests/challenges"
task-name: "Mandatory Tasks"
- python-version: "3.10"
task: "--beat-challenges -ra tests/challenges"
task-name: "Challenging Tasks"

env:
python-version: '3.10'

steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
ref: master
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python ${{ matrix.config.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.config.python-version }}
- name: Set up Python ${{ env.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run pytest with coverage
run: |
rm -rf tests/Auto-GPT-test-cassettes
pytest -n auto --record-mode=all ${{ matrix.config.task }}
env:
CI: true
PROXY: ${{ secrets.PROXY }}
AGENT_MODE: ${{ secrets.AGENT_MODE }}
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
PLAIN_OUTPUT: True

- name: Upload logs as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs-${{ matrix.config.task-name }}
path: logs/

- name: Upload cassettes as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: cassettes-${{ matrix.config.task-name }}
path: tests/Auto-GPT-test-cassettes/
- name: benchmark
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py
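One side of this hunk drives the challenge suite through pytest with two matrix task strings. A sketch of reproducing both matrix entries locally, with the task arguments taken verbatim from the hunk (the API key placeholder is hypothetical; CI supplies credentials via secrets):

```bash
export OPENAI_API_KEY=your-openai-api-key   # hypothetical placeholder

# "Mandatory Tasks" matrix entry
pytest -n auto --record-mode=all tests/challenges

# "Challenging Tasks" matrix entry
pytest -n auto --record-mode=all --beat-challenges -ra tests/challenges
```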
206 .github/workflows/ci.yml vendored
@@ -2,24 +2,16 @@ name: Python CI

on:
push:
branches: [ master, ci-test* ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
branches: [ master ]
pull_request:
branches: [ stable, master, release-* ]
pull_request_target:
branches: [ master, release-*, ci-test* ]
branches: [ master ]

concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
lint:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

runs-on: ubuntu-latest
env:
min-python-version: "3.10"
@@ -27,26 +19,12 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -63,19 +41,7 @@ jobs:
run: isort . --check
if: success() || failure()

- name: Check mypy formatting
run: mypy
if: success() || failure()

- name: Check for unused imports and pass statements
run: |
cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)

test:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

permissions:
# Gives the action the necessary permissions for publishing new
# comments in pull requests.
@@ -85,177 +51,27 @@ jobs:
# comments (to avoid publishing multiple comments in the same PR)
contents: write
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
python-version: ["3.10"]
python-version: ["3.10", "3.11"]

steps:
- name: Checkout repository
- name: Check out repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: true

- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"

- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
cd tests/Auto-GPT-test-cassettes

if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi

if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch

git checkout $cassette_branch

# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install Python dependencies
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run pytest with coverage
- name: Run unittest tests with coverage
run: |
pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration tests/challenges
python tests/challenges/utils/build_current_score.py
env:
CI: true
PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }}
AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }}
OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }}
PLAIN_OUTPUT: True
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)

git config "$config_key" \
"Authorization: Basic $base64_pat"

cd tests/Auto-GPT-test-cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"

echo "config_key=$config_key" >> $GITHUB_OUTPUT

- name: Push updated challenge scores
if: github.event_name == 'push'
run: |
score_file="tests/challenges/current_score.json"

if ! git diff --quiet $score_file; then
git add $score_file
git commit -m "Update challenge scores"
git push origin HEAD:${{ github.ref_name }}
else
echo "The challenge scores didn't change."
fi

- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || success() || failure()
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
else
cassette_branch="${{ github.ref_name }}"
fi

cd tests/Auto-GPT-test-cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/Auto-GPT-test-cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi

- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
TOKEN=${{ secrets.PAT_REVIEW }}
REPO=${{ github.repository }}

if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
curl -X POST \
-H "Authorization: Bearer $TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
-d '{"labels":["behaviour change"]}'

echo $TOKEN | gh auth login --with-token
gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi

- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs
path: logs/
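Both versions of the `concurrency.group` expression in this hunk follow the same pattern: pull requests share one cancellation queue per PR, while pushes get one queue per commit. A rough shell rendering of the `pr-{0}` variant; `HEAD_REF`, `PR_NUMBER`, and `SHA` are hypothetical stand-ins for the `github` context fields:

```bash
# Mirrors: format('ci-{0}', github.head_ref && format('pr-{0}', ...number) || github.sha)
if [ -n "$HEAD_REF" ]; then
  group="ci-pr-$PR_NUMBER"   # all runs for one PR share a group, so stale runs get cancelled
else
  group="ci-$SHA"            # pushes never share a group, so nothing is cancelled
fi
echo "$group"
```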
81 .github/workflows/docker-ci.yml vendored
@@ -3,11 +3,8 @@ name: Docker CI
on:
push:
branches: [ master ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
pull_request:
branches: [ master, release-*, stable ]
branches: [ master ]

concurrency:
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
@@ -73,52 +70,46 @@
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

# Docker setup needs fixing before this is going to work: #1843
test:
runs-on: ubuntu-latest
timeout-minutes: 10
needs: build
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
submodules: true
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max

- id: test
name: Run tests
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
--numprocesses=4 --durations=10 \
tests/unit tests/integration 2>&1
)
test_failure=$?
- id: test
name: Run tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
)
test_failure=$?

echo "$test_output"
echo "$test_output"

cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF

exit $test_failure
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF
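A rough local equivalent of the build-and-test pair in this workflow; the image tag is a hypothetical stand-in for `env.IMAGE_NAME`, and the pytest flags are the ones from the longer `test` step variant above:

```bash
# Build the dev image (BUILD_TYPE=dev bakes pytest into the image)
docker build --build-arg BUILD_TYPE=dev -t autogpt-dev .

# Run the same test selection the workflow uses
docker run --env CI=true --env OPENAI_API_KEY --entrypoint python autogpt-dev -m \
  pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
  --numprocesses=4 --durations=10 tests/unit tests/integration
```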
12 .github/workflows/pr-label.yml vendored
@@ -3,10 +3,7 @@ name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, release-* ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
branches: [ master ]
# So that the `dirtyLabel` is removed if conflicts are resolve
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -48,10 +45,11 @@ jobs:
s_label: 'size/s'
s_max_size: 10
m_label: 'size/m'
m_max_size: 100
m_max_size: 50
l_label: 'size/l'
l_max_size: 500
l_max_size: 200
xl_label: 'size/xl'
message_if_xl: >
This PR exceeds the recommended size of 500 lines.
This PR exceeds the recommended size of 200 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size
28 .github/workflows/sponsors_readme.yml vendored Normal file
@@ -0,0 +1,28 @@
name: Generate Sponsors README

on:
  workflow_dispatch:
  schedule:
    - cron: '0 */12 * * *'

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3

      - name: Generate Sponsors 💖
        uses: JamesIves/github-sponsors-readme-action@v1
        with:
          token: ${{ secrets.README_UPDATER_PAT }}
          file: 'README.md'
          minimum: 2500
          maximum: 99999

      - name: Deploy to GitHub Pages 🚀
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          branch: master
          folder: '.'
          token: ${{ secrets.README_UPDATER_PAT }}
22 .gitignore vendored
@@ -1,6 +1,11 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
@@ -12,11 +17,10 @@ last_run_ai_settings.yaml
auto-gpt.json
log.txt
log-ingestion.txt
/logs
logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT

# Byte-compiled / optimized / DLL files
__pycache__/
@@ -31,8 +35,7 @@ __pycache__/
build/
develop-eggs/
dist/
/plugins/
plugins_config.yaml
plugins/
downloads/
eggs/
.eggs/
@@ -159,11 +162,4 @@ vicuna-*
openai/

# news
CURRENT_BULLETIN.md

# AgBenchmark
agbenchmark/reports/

# Nodejs
package-lock.json
package.json
CURRENT_BULLETIN.md
4 .gitmodules vendored
@@ -1,4 +0,0 @@
[submodule "tests/Auto-GPT-test-cassettes"]
    path = tests/Auto-GPT-test-cassettes
    url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
    branch = master
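The `-1,4 +0,0` hunk header shows the cassette submodule entry being deleted outright. For reference, a sketch of how such a removal is typically performed in a working copy (standard git commands, not taken from this diff):

```bash
# Unregister the submodule, then remove its gitlink and .gitmodules entry
git submodule deinit -f tests/Auto-GPT-test-cassettes
git rm -f tests/Auto-GPT-test-cassettes
git commit -m "Remove Auto-GPT-test-cassettes submodule"
```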
@@ -22,21 +22,11 @@ repos:
- id: black
language_version: python3.10

- repo: https://github.com/pre-commit/mirrors-mypy
rev: 'v1.3.0'
hooks:
- id: mypy

- repo: local
hooks:
- id: autoflake
name: autoflake
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
language: python
types: [ python ]
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt tests/unit
entry: pytest --cov=autogpt --without-integration --without-slow-integration
language: system
pass_filenames: false
always_run: true
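With the mypy, autoflake, and pytest-check hooks that appear in this hunk, the whole chain can be exercised without making a commit (standard pre-commit usage; installation is covered in the CONTRIBUTING.md hunk below):

```bash
# Run every configured hook against the whole tree once
pre-commit run --all-files
```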
24 BULLETIN.md
@@ -1,21 +1,9 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag

# v0.4.7 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release introduces initial REST API support, powered by e2b's agent
protocol SDK (https://github.com/e2b-dev/agent-protocol#sdk).
# INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins

It also includes improvements to prompt generation and support
for our new benchmarking tool, Auto-GPT-Benchmarks
(https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks).
## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.

We've also moved our documentation to Material Theme, at https://docs.agpt.co.

As usual, we've squashed a few bugs and made some under-the-hood improvements.

Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/Auto-GPT/releases.
150
CONTRIBUTING.md
150
CONTRIBUTING.md
@@ -1,14 +1,148 @@
-We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
-
-We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?
-
-However the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.
-
-If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.
-
-If you wish to involve with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
-
-In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).
-
-❤️ & 🔆
-The team @ Auto-GPT
+# Contributing to Auto-GPT
+
+First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
+
+This document provides guidelines and best practices to help you contribute effectively.
+
+## Code of Conduct
+
+By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.
+
+[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md
+
+## 📢 A Quick Word
+Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.
+
+However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
+Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
+
+## Getting Started
+
+1. Fork the repository and clone your fork.
+2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
+3. Make your changes in the new branch.
+4. Test your changes thoroughly.
+5. Commit and push your changes to your fork.
+6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
+
+## How to Contribute
+
+### Reporting Bugs
+
+If you find a bug in the project, please create an issue on GitHub with the following information:
+
+- A clear, descriptive title for the issue.
+- A description of the problem, including steps to reproduce the issue.
+- Any relevant logs, screenshots, or other supporting information.
+
+### Suggesting Enhancements
+
+If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:
+
+- A clear, descriptive title for the issue.
+- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
+- Any relevant examples, mockups, or supporting information.
+
+### Submitting Pull Requests
+
+When submitting a pull request, please ensure that your changes meet the following criteria:
+
+- Your pull request should be atomic and focus on a single change.
+- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status).
+- You should have thoroughly tested your changes with multiple different prompts.
+- You should have considered potential risks and mitigations for your changes.
+- You should have documented your changes clearly and comprehensively.
+- You should not include any unrelated or "extra" small tweaks or changes.
+
+## Style Guidelines
+
+### Code Formatting
+
+We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.
+
+To format your code, run the following commands in the project's root directory:
+
+```bash
+python -m black .
+python -m isort .
+```
+
+Or if you have these tools installed globally:
+```bash
+black .
+isort .
+```
+
+### Pre-Commit Hooks
+
+We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:
+
+Install the pre-commit package using pip:
+```bash
+pip install pre-commit
+```
+
+Run the following command in the project's root directory to install the pre-commit hooks:
+```bash
+pre-commit install
+```
+
+Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
+
+If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.
+
+Happy coding, and once again, thank you for your contributions!
+
+Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
+https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts
+
+## Testing your changes
+
+If you add or change code, make sure the updated code is covered by tests.
+To increase coverage if necessary, [write tests using pytest].
+
+For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).
+
+[write tests using pytest]: https://realpython.com/pytest-python-testing/
+
+### API-dependent tests
+
+To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
+requests and matching responses in so-called *cassettes*, allowing us to run the tests
+in CI without needing actual API access.
+
+When changes cause a test prompt to be generated differently, it will likely miss the
+cache and make a request to the API, updating the cassette with the new request+response.
+*Be sure to include the updated cassette in your PR!*
+
+When you run Pytest locally:
+
+- If no prompt change: you will not consume API tokens because there are no new OpenAI calls required.
+- If the prompt changes in a way that the cassettes are not reusable:
+  - If no API key is present, the test fails; it requires a new cassette, so add an API key to .env.
+  - If the API key is present, the tests will make a real call to OpenAI.
+    - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR.
+    - If the test is unsuccessful:
+      - Either: your change made Auto-GPT less capable; in that case, you have to change your code.
+      - Or: the test might be poorly written; in that case, you can make suggestions to change the test.
+
+In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
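To make the record/replay flow above concrete, here is a minimal, hedged sketch of a VCRpy-backed test. The cassette directory, test name, and use of the legacy `openai.ChatCompletion` client are illustrative assumptions, not the repo's actual fixtures (Auto-GPT wires VCR up through shared pytest fixtures instead).

```python
# A minimal sketch, assuming `pip install vcrpy openai` and a valid
# OPENAI_API_KEY in the environment for the first (recording) run.
import openai
import vcr

my_vcr = vcr.VCR(cassette_library_dir="tests/cassettes")


@my_vcr.use_cassette("test_hello.yaml")
def test_hello():
    # First run: the real HTTP request/response pair is recorded into
    # tests/cassettes/test_hello.yaml. Subsequent runs (e.g. in CI)
    # replay the cassette, so no API tokens are consumed.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    assert response["choices"][0]["message"]["content"]
```

If your change alters the outgoing request (for example, a prompt change), the cassette no longer matches and VCR records a fresh one; that new file is what you commit alongside your PR.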
+### Community Challenges
+Challenges are goals we need Auto-GPT to achieve.
+To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on.
+- a challenge is new if level_currently_beaten is None
+- a challenge is in progress if level_currently_beaten is greater than or equal to 1
+- a challenge is beaten if level_currently_beaten == max_level
+
+Here is an example of how to run the memory challenge A and attempt to beat level 3:
+
+pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3
+
+To beat a challenge, you're not allowed to change anything in the tests folder; you have to add code in the autogpt folder.
+
+Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
+Don't hesitate to delete the cassettes associated with the challenge you're working on if you need to. Otherwise it will keep replaying the last run.
+
+Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.
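The level rules above map directly onto a tiny helper. The sketch below is illustrative only, not code from the repo; `level_currently_beaten` and `max_level` come from each challenge's definition.

```python
# Illustrative only: encodes the "new / in progress / beaten" rules above.
from typing import Optional


def challenge_status(level_currently_beaten: Optional[int], max_level: int) -> str:
    if level_currently_beaten is None:
        return "new"
    if level_currently_beaten >= max_level:
        return "beaten"
    return "in progress"


assert challenge_status(None, 3) == "new"
assert challenge_status(1, 3) == "in progress"
assert challenge_status(3, 3) == "beaten"
```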
14	Dockerfile
@@ -6,13 +6,11 @@ FROM python:3.10-slim AS autogpt-base

 # Install browsers
 RUN apt-get update && apt-get install -y \
-    chromium-driver firefox-esr ca-certificates \
-    && apt-get clean && rm -rf /var/lib/apt/lists/*
+    chromium-driver firefox-esr \
+    ca-certificates

 # Install utilities
-RUN apt-get update && apt-get install -y \
-    curl jq wget git \
-    && apt-get clean && rm -rf /var/lib/apt/lists/*
+RUN apt-get install -y curl jq wget git

 # Set environment variables
 ENV PIP_NO_CACHE_DIR=yes \
@@ -24,7 +22,7 @@ ENV PATH="$PATH:/root/.local/bin"
 COPY requirements.txt .

 # Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]
+ENTRYPOINT ["python", "-m", "autogpt"]

 # dev build -> include everything
 FROM autogpt-base as autogpt-dev
@@ -38,9 +36,5 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d
 	pip install --no-cache-dir -r requirements.txt
 WORKDIR /app
 ONBUILD COPY autogpt/ ./autogpt
+ONBUILD COPY scripts/ ./scripts
-ONBUILD COPY plugins/ ./plugins
-ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
-ONBUILD RUN mkdir ./data

 FROM autogpt-${BUILD_TYPE} AS auto-gpt
@@ -1,54 +0,0 @@
import os
import sys
from pathlib import Path
from typing import Tuple

from autogpt.agents import Agent
from autogpt.app.main import run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace

PROJECT_DIR = Path().resolve()


def run_specific_agent(task, continuous_mode=False) -> Tuple[str, int]:
    agent = bootstrap_agent(task, continuous_mode)
    run_interaction_loop(agent)


def bootstrap_agent(task, continuous_mode) -> Agent:
    config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
    config.debug_mode = True
    config.continuous_mode = continuous_mode
    config.temperature = 0
    config.plain_output = True
    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
    config.memory_backend = "no_memory"
    config.workspace_path = Workspace.init_workspace_directory(config)
    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
    ai_config = AIConfig(
        ai_name="Auto-GPT",
        ai_role="a multi-purpose AI assistant.",
        ai_goals=[task],
    )
    ai_config.command_registry = command_registry
    return Agent(
        memory=get_memory(config),
        command_registry=command_registry,
        ai_config=ai_config,
        config=config,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    )


if __name__ == "__main__":
    # The first argument is the script name itself, second is the task
    if len(sys.argv) != 2:
        print("Usage: python script.py <task>")
        sys.exit(1)
    task = sys.argv[1]
    run_specific_agent(task, continuous_mode=True)
@@ -1,4 +0,0 @@
{
  "workspace": "auto_gpt_workspace",
  "entry_path": "agbenchmark.benchmarks"
}
@@ -1,24 +0,0 @@
{
  "TestBasicCodeGeneration": {
    "difficulty": "basic",
    "dependencies": [
      "TestWriteFile"
    ],
    "data_path": "agbenchmark/challenges/code/d3"
  },
  "TestBasicMemory": {
    "difficulty": "basic",
    "data_path": "agbenchmark/challenges/memory/m1"
  },
  "TestReadFile": {
    "difficulty": "basic",
    "dependencies": [
      "TestWriteFile"
    ],
    "data_path": "agbenchmark/challenges/interface/read_file"
  },
  "TestWriteFile": {
    "dependencies": [],
    "data_path": "agbenchmark/challenges/interface/write_file"
  }
}
@@ -1,5 +1,5 @@
 """Auto-GPT: A GPT powered AI Assistant"""
-import autogpt.app.cli
+import autogpt.cli

 if __name__ == "__main__":
-    autogpt.app.cli.main()
+    autogpt.cli.main()
4	autogpt/agent/__init__.py	Normal file
@@ -0,0 +1,4 @@
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager

__all__ = ["Agent", "AgentManager"]
290	autogpt/agent/agent.py	Normal file
@@ -0,0 +1,290 @@
from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.config import Config
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
from autogpt.workspace import Workspace


class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines everything
          the AI needs to know to achieve its task successfully.
          Currently, the dynamic and customizable information in the system prompt are
          ai_name, description and goals.

        triggering_prompt: The last sentence the AI will see before answering.
            For Auto-GPT, this prompt is:
            Determine which next command to use, and respond using the format specified
            above:
            The triggering prompt is not part of the system prompt because between the
            system prompt and the triggering prompt we have contextual information
            that can distract the AI and make it forget that its goal is to find the
            next task to achieve.
            SYSTEM PROMPT
            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
            TRIGGERING PROMPT

        The triggering prompt reminds the AI about its short term meta task
        (defining the next task)
    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        self.summary_memory = (
            "I was created."  # Initial memory necessary to avoid hallucination
        )
        self.last_memory_index = 0
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        user_input = ""

        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break
            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")

                    arguments = self._resolve_pathlike_command_args(arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

                logger.info(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}..."
                )
                while True:
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
                        console_input = clean_input(
                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        else:
                            user_input = self_feedback_resp
                        break
                    elif console_input.lower().strip() == "":
                        logger.warn("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            logger.warn(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif user_input == "EXIT":
                    logger.info("Exiting...")
                    break
            else:
                # Print command
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
                    result = plugin.post_command(command_name, result)
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            # Check if there's a result from the command; append it to the message
            # history
            if result is not None:
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )

    def _resolve_pathlike_command_args(self, command_args):
        if "directory" in command_args and command_args["directory"] in {"", "/"}:
            command_args["directory"] = str(self.workspace.root)
        else:
            for pathlike in ["filename", "directory", "clone_path"]:
                if pathlike in command_args:
                    command_args[pathlike] = str(
                        self.workspace.get_path(command_args[pathlike])
                    )
        return command_args

    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as
        'reasoning', 'plan', 'thoughts', and 'criticism'. It combines these elements
        into a single feedback message and uses the create_chat_completion() function
        to generate a response based on the input message.

        Args:
            thoughts (dict): A dictionary containing thought elements like reasoning,
            plan, thoughts, and criticism.

        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role

        feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        thought = thoughts.get("thoughts", "")
        criticism = thoughts.get("criticism", "")
        feedback_thoughts = thought + reasoning + plan + criticism
        return create_chat_completion(
            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
            llm_model,
        )
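The authorization branch of the loop above implements a small console grammar. The sketch below restates it in isolation; it assumes the default `authorise_key` of 'y' and `exit_key` of 'n' from the configuration, and is an illustration rather than the loop's actual code.

```python
# Self-contained sketch of the console inputs the interaction loop accepts,
# assuming the default authorise_key "y" and exit_key "n".
def classify_console_input(console_input: str) -> str:
    text = console_input.lower().strip()
    if text == "y":
        return "authorise one command"
    if text.startswith("y -"):
        try:
            n = abs(int(console_input.split(" ")[1]))
        except (IndexError, ValueError):
            return "invalid: expected 'y -N'"
        return f"authorise {n} continuous commands"
    if text == "s":
        return "verify thoughts via self-feedback"
    if text == "n":
        return "exit"
    if text == "":
        return "invalid: empty input"
    return "treated as human feedback for the agent"


assert classify_console_input("y -5") == "authorise 5 continuous commands"
assert classify_console_input("try another file") == "treated as human feedback for the agent"
```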
145	autogpt/agent/agent_manager.py	Normal file
@@ -0,0 +1,145 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations

from typing import List

from autogpt.config.config import Config
from autogpt.llm import Message, create_chat_completion
from autogpt.singleton import Singleton


class AgentManager(metaclass=Singleton):
    """Agent manager for managing GPT agents"""

    def __init__(self):
        self.next_key = 0
        self.agents = {}  # key, (task, full_message_history, model)
        self.cfg = Config()

    # Create new GPT agent
    # TODO: Centralise use of create_chat_completion() to globally enforce token limit

    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
        """Create a new agent and return its key

        Args:
            task: The task to perform
            prompt: The prompt to use
            model: The model to use

        Returns:
            The key of the new agent and its first reply
        """
        messages: List[Message] = [
            {"role": "user", "content": prompt},
        ]
        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                messages.extend(iter(plugin_messages))
        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = ""
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"

        if plugins_reply and plugins_reply != "":
            messages.append({"role": "assistant", "content": plugins_reply})
        key = self.next_key
        # This is done instead of len(agents) to make keys unique even if agents
        # are deleted
        self.next_key += 1

        self.agents[key] = (task, messages, model)

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return key, agent_reply

    def message_agent(self, key: str | int, message: str) -> str:
        """Send a message to an agent and return its response

        Args:
            key: The key of the agent to message
            message: The message to send to the agent

        Returns:
            The agent's response
        """
        task, messages, model = self.agents[int(key)]

        # Add user message to message history before sending to agent
        messages.append({"role": "user", "content": message})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                for plugin_message in plugin_messages:
                    messages.append(plugin_message)

        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = agent_reply
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        # Update full message history
        if plugins_reply and plugins_reply != "":
            messages.append({"role": "assistant", "content": plugins_reply})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return agent_reply

    def list_agents(self) -> list[tuple[str | int, str]]:
        """Return a list of all agents

        Returns:
            A list of tuples of the form (key, task)
        """

        # Return a list of agent keys and their tasks
        return [(key, task) for key, (task, _, _) in self.agents.items()]

    def delete_agent(self, key: str | int) -> bool:
        """Delete an agent from the agent manager

        Args:
            key: The key of the agent to delete

        Returns:
            True if successful, False otherwise
        """

        try:
            del self.agents[int(key)]
            return True
        except KeyError:
            return False
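For orientation, here is a hedged sketch of driving this manager directly. It assumes the repo is installed and an OpenAI API key is configured; the task, prompt, and model strings are illustrative, not values used by the project.

```python
# A sketch only: create_chat_completion() will make a real API call,
# so this requires a configured OpenAI API key.
from autogpt.agent import AgentManager

manager = AgentManager()  # Singleton: repeated calls return the same instance

key, first_reply = manager.create_agent(
    task="research task",
    prompt="You are ResearchGPT. Respond with: Acknowledged.",
    model="gpt-3.5-turbo",
)
print(manager.list_agents())  # -> [(0, "research task")], keys start at 0

reply = manager.message_agent(key, "List three sources on topic X.")
assert manager.delete_agent(key)       # True on success
assert not manager.delete_agent(key)   # False once the key is gone
```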
@@ -1,4 +0,0 @@
from .agent import Agent
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName

__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]
@@ -1,306 +0,0 @@
from __future__ import annotations

import json
import time
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config
    from autogpt.llm.base import ChatModelResponse, ChatSequence
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
    CURRENT_CONTEXT_FILE_NAME,
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.workspace import Workspace

from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName


class Agent(BaseAgent):
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        memory: VectorMemory,
        triggering_prompt: str,
        config: Config,
        cycle_budget: Optional[int] = None,
    ):
        super().__init__(
            ai_config=ai_config,
            command_registry=command_registry,
            config=config,
            default_cycle_instruction=triggering_prompt,
            cycle_budget=cycle_budget,
        )

        self.memory = memory
        """VectorMemoryProvider used to manage the agent's context (TODO)"""

        self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
        """Workspace that the agent has access to, e.g. for reading/writing files."""

        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        """Timestamp the agent was created; only used for structured debug logging."""

        self.log_cycle_handler = LogCycleHandler()
        """LogCycleHandler for structured debug logging."""

    def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
        if kwargs.get("prepend_messages") is None:
            kwargs["prepend_messages"] = []

        # Clock
        kwargs["prepend_messages"].append(
            Message("system", f"The current time and date is {time.strftime('%c')}"),
        )

        # Add budget information (if any) to prompt
        api_manager = ApiManager()
        if api_manager.get_total_budget() > 0.0:
            remaining_budget = (
                api_manager.get_total_budget() - api_manager.get_total_cost()
            )
            if remaining_budget < 0:
                remaining_budget = 0

            budget_msg = Message(
                "system",
                f"Your remaining API budget is ${remaining_budget:.3f}"
                + (
                    " BUDGET EXCEEDED! SHUT DOWN!\n\n"
                    if remaining_budget == 0
                    else " Budget very nearly exceeded! Shut down gracefully!\n\n"
                    if remaining_budget < 0.005
                    else " Budget nearly exceeded. Finish up.\n\n"
                    if remaining_budget < 0.01
                    else ""
                ),
            )
            logger.debug(budget_msg)

            if kwargs.get("append_messages") is None:
                kwargs["append_messages"] = []
            kwargs["append_messages"].append(budget_msg)

        return super().construct_base_prompt(*args, **kwargs)

    def on_before_think(self, *args, **kwargs) -> ChatSequence:
        prompt = super().on_before_think(*args, **kwargs)

        self.log_cycle_handler.log_count_within_cycle = 0
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            self.history.raw(),
            FULL_MESSAGE_HISTORY_FILE_NAME,
        )
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            prompt.raw(),
            CURRENT_CONTEXT_FILE_NAME,
        )
        return prompt

    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        # Execute command
        if command_name is not None and command_name.lower().startswith("error"):
            result = f"Could not execute command: {command_name}{command_args}"
        elif command_name == "human_feedback":
            result = f"Human feedback: {user_input}"
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.created_at,
                self.cycle_count,
                user_input,
                USER_INPUT_FILE_NAME,
            )

        else:
            for plugin in self.config.plugins:
                if not plugin.can_handle_pre_command():
                    continue
                command_name, arguments = plugin.pre_command(command_name, command_args)
            command_result = execute_command(
                command_name=command_name,
                arguments=command_args,
                agent=self,
            )
            result = f"Command {command_name} returned: " f"{command_result}"

            result_tlength = count_string_tokens(str(command_result), self.llm.name)
            memory_tlength = count_string_tokens(
                str(self.history.summary_message()), self.llm.name
            )
            if result_tlength + memory_tlength > self.send_token_limit:
                result = f"Failure: command {command_name} returned too much output. \
                    Do not execute this command again with the same arguments."

            for plugin in self.config.plugins:
                if not plugin.can_handle_post_command():
                    continue
                result = plugin.post_command(command_name, result)
        # Check if there's a result from the command; append it to the message history
        if result is None:
            self.history.add("system", "Unable to execute command", "action_result")
        else:
            self.history.add("system", result, "action_result")

        return result

    def parse_and_process_response(
        self, llm_response: ChatModelResponse, *args, **kwargs
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        if not llm_response.content:
            raise SyntaxError("Assistant response has no text content")

        assistant_reply_dict = extract_dict_from_response(llm_response.content)

        valid, errors = validate_dict(assistant_reply_dict, self.config)
        if not valid:
            raise SyntaxError(
                "Validation of response failed:\n  "
                + ";\n  ".join([str(e) for e in errors])
            )

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_planning():
                continue
            assistant_reply_dict = plugin.post_planning(assistant_reply_dict)

        response = None, None, assistant_reply_dict

        # Print Assistant thoughts
        if assistant_reply_dict != {}:
            # Get command name and arguments
            try:
                command_name, arguments = extract_command(
                    assistant_reply_dict, llm_response, self.config
                )
                response = command_name, arguments, assistant_reply_dict
            except Exception as e:
                logger.error("Error: \n", str(e))

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            assistant_reply_dict,
            NEXT_ACTION_FILE_NAME,
        )
        return response


def extract_command(
    assistant_reply_json: dict, assistant_reply: ChatModelResponse, config: Config
) -> tuple[str, dict[str, str]]:
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    if config.openai_functions:
        if assistant_reply.function_call is None:
            return "Error:", {"message": "No 'function_call' in assistant reply"}
        assistant_reply_json["command"] = {
            "name": assistant_reply.function_call.name,
            "args": json.loads(assistant_reply.function_call.arguments),
        }
    try:
        if "command" not in assistant_reply_json:
            return "Error:", {"message": "Missing 'command' object in JSON"}

        if not isinstance(assistant_reply_json, dict):
            return (
                "Error:",
                {
                    "message": f"The previous message sent was not a dictionary {assistant_reply_json}"
                },
            )

        command = assistant_reply_json["command"]
        if not isinstance(command, dict):
            return "Error:", {"message": "'command' object is not a dictionary"}

        if "name" not in command:
            return "Error:", {"message": "Missing 'name' field in 'command' object"}

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", {"message": "Invalid JSON"}
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", {"message": str(e)}


def execute_command(
    command_name: str,
    arguments: dict[str, str],
    agent: Agent,
) -> Any:
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        agent (Agent): The agent that is executing the command

    Returns:
        str: The result of the command
    """
    try:
        # Execute a native command with the same name or alias, if it exists
        if command := agent.command_registry.get_command(command_name):
            return command(**arguments, agent=agent)

        # Handle non-native commands (e.g. from plugins)
        for command in agent.ai_config.prompt_generator.commands:
            if (
                command_name == command.label.lower()
                or command_name == command.name.lower()
            ):
                return command.function(**arguments)

        raise RuntimeError(
            f"Cannot execute '{command_name}': unknown command."
            " Do not try to use this command again."
        )
    except Exception as e:
        return f"Error: {str(e)}"
@@ -1,408 +0,0 @@
from __future__ import annotations

import re
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Literal, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config

    from autogpt.models.command_registry import CommandRegistry

from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.memory.message_history import MessageHistory
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

CommandName = str
CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]


class BaseAgent(metaclass=ABCMeta):
    """Base class for all Auto-GPT agents."""

    ThoughtProcessID = Literal["one-shot"]

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        config: Config,
        big_brain: bool = True,
        default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT,
        cycle_budget: Optional[int] = 1,
        send_token_limit: Optional[int] = None,
        summary_max_tlength: Optional[int] = None,
    ):
        self.ai_config = ai_config
        """The AIConfig or "personality" object associated with this agent."""

        self.command_registry = command_registry
        """The registry containing all commands available to the agent."""

        self.config = config
        """The applicable application configuration."""

        self.big_brain = big_brain
        """
        Whether this agent uses the configured smart LLM (default) to think,
        as opposed to the configured fast LLM.
        """

        self.default_cycle_instruction = default_cycle_instruction
        """The default instruction passed to the AI for a thinking cycle."""

        self.cycle_budget = cycle_budget
        """
        The number of cycles that the agent is allowed to run unsupervised.

        `None` for unlimited continuous execution,
        `1` to require user approval for every step,
        `0` to stop the agent.
        """

        self.cycles_remaining = cycle_budget
        """The number of cycles remaining within the `cycle_budget`."""

        self.cycle_count = 0
        """The number of cycles that the agent has run since its initialization."""

        self.system_prompt = ai_config.construct_full_prompt(config)
        """
        The system prompt sets up the AI's personality and explains its goals,
        available resources, and restrictions.
        """

        llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
        self.llm = OPEN_AI_CHAT_MODELS[llm_name]
        """The LLM that the agent uses to think."""

        self.send_token_limit = send_token_limit or self.llm.max_tokens * 3 // 4
        """
        The token limit for prompt construction. Should leave room for the completion;
        defaults to 75% of `llm.max_tokens`.
        """

        self.history = MessageHistory(
            self.llm,
            max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
        )

    def think(
        self,
        instruction: Optional[str] = None,
        thought_process_id: ThoughtProcessID = "one-shot",
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Runs the agent for one cycle.

        Params:
            instruction: The instruction to put at the end of the prompt.

        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """

        instruction = instruction or self.default_cycle_instruction

        prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id)
        prompt = self.on_before_think(prompt, thought_process_id, instruction)
        raw_response = create_chat_completion(
            prompt,
            self.config,
            functions=get_openai_command_specs(self.command_registry)
            if self.config.openai_functions
            else None,
        )
        self.cycle_count += 1

        return self.on_response(raw_response, thought_process_id, prompt, instruction)

    @abstractmethod
    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        """Executes the given command, if any, and returns the agent's response.

        Params:
            command_name: The name of the command to execute, if any.
            command_args: The arguments to pass to the command, if any.
            user_input: The user's input, if any.

        Returns:
            The results of the command.
        """
        ...

    def construct_base_prompt(
        self,
        thought_process_id: ThoughtProcessID,
        prepend_messages: list[Message] = [],
        append_messages: list[Message] = [],
        reserve_tokens: int = 0,
    ) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. `prepend_messages`
        3. Message history of the agent, truncated & prepended with running summary as needed
        4. `append_messages`

        Params:
            prepend_messages: Messages to insert between the system prompt and message history
            append_messages: Messages to insert after the message history
            reserve_tokens: Number of tokens to reserve for content that is added later
        """

        prompt = ChatSequence.for_model(
            self.llm.name,
            [Message("system", self.system_prompt)] + prepend_messages,
        )

        # Reserve tokens for messages to be appended later, if any
        reserve_tokens += self.history.max_summary_tlength
        if append_messages:
            reserve_tokens += count_message_tokens(append_messages, self.llm.name)

        # Fill message history, up to a margin of reserved_tokens.
        # Trim remaining historical messages and add them to the running summary.
        history_start_index = len(prompt)
        trimmed_history = add_history_upto_token_limit(
            prompt, self.history, self.send_token_limit - reserve_tokens
        )
        if trimmed_history:
            new_summary_msg, _ = self.history.trim_messages(list(prompt), self.config)
            prompt.insert(history_start_index, new_summary_msg)

        if append_messages:
            prompt.extend(append_messages)

        return prompt

    def construct_prompt(
        self,
        cycle_instruction: str,
        thought_process_id: ThoughtProcessID,
    ) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history of the agent, truncated & prepended with running summary as needed
        3. `cycle_instruction`

        Params:
            cycle_instruction: The final instruction for a thinking cycle
        """

        if not cycle_instruction:
            raise ValueError("No instruction given")

        cycle_instruction_msg = Message("user", cycle_instruction)
        cycle_instruction_tlength = count_message_tokens(
            cycle_instruction_msg, self.llm.name
        )

        append_messages: list[Message] = []

        response_format_instr = self.response_format_instruction(thought_process_id)
        if response_format_instr:
            append_messages.append(Message("system", response_format_instr))

        prompt = self.construct_base_prompt(
            thought_process_id,
            append_messages=append_messages,
            reserve_tokens=cycle_instruction_tlength,
        )

        # ADD user input message ("triggering prompt")
        prompt.append(cycle_instruction_msg)

        return prompt

    # This can be expanded to support multiple types of (inter)actions within an agent
    def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
        if thought_process_id != "one-shot":
            raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

        RESPONSE_FORMAT_WITH_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
            command: {
                name: string;
                args: Record<string, any>;
            };
        }
        ```"""

        RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
        }
        ```"""

        response_format = re.sub(
            r"\n\s+",
            "\n",
            RESPONSE_FORMAT_WITHOUT_COMMAND
            if self.config.openai_functions
            else RESPONSE_FORMAT_WITH_COMMAND,
        )

        use_functions = self.config.openai_functions and self.command_registry.commands
        return (
            f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
            "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
            f"{response_format}\n"
        )

    def on_before_think(
        self,
        prompt: ChatSequence,
        thought_process_id: ThoughtProcessID,
        instruction: str,
    ) -> ChatSequence:
        """Called after constructing the prompt but before executing it.

        Calls the `on_planning` hook of any enabled and capable plugins, adding their
        output to the prompt.

        Params:
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The prompt to execute
        """
        current_tokens_used = prompt.token_length
        plugin_count = len(self.config.plugins)
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_planning():
                continue
            plugin_response = plugin.on_planning(
                self.ai_config.prompt_generator, prompt.raw()
            )
            if not plugin_response or plugin_response == "":
                continue
            message_to_add = Message("system", plugin_response)
            tokens_to_add = count_message_tokens(message_to_add, self.llm.name)
            if current_tokens_used + tokens_to_add > self.send_token_limit:
                logger.debug(f"Plugin response too long, skipping: {plugin_response}")
                logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
                break
            prompt.insert(
                -1, message_to_add
            )  # HACK: assumes cycle instruction to be at the end
            current_tokens_used += tokens_to_add
        return prompt

    def on_response(
        self,
        llm_response: ChatModelResponse,
        thought_process_id: ThoughtProcessID,
        prompt: ChatSequence,
        instruction: str,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Called upon receiving a response from the chat model.

        Adds the last/newest message in the prompt and the response to `history`,
        and calls `self.parse_and_process_response()` to do the rest.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """

        # Save assistant reply to message history
        self.history.append(prompt[-1])
        self.history.add(
            "assistant", llm_response.content, "ai_response"
        )  # FIXME: support function calls

        try:
            return self.parse_and_process_response(
                llm_response, thought_process_id, prompt, instruction
            )
        except SyntaxError as e:
            logger.error(f"Response could not be parsed: {e}")
            # TODO: tune this message
            self.history.add(
                "system",
                f"Your response could not be parsed: {e}"
                "\n\nRemember to only respond using the specified format above!",
            )
            return None, None, {}

        # TODO: update memory/context

    @abstractmethod
    def parse_and_process_response(
        self,
        llm_response: ChatModelResponse,
        thought_process_id: ThoughtProcessID,
        prompt: ChatSequence,
        instruction: str,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Validate, parse & process the LLM's response.

        Must be implemented by derivative classes: no base implementation is provided,
        since the implementation depends on the role of the derivative Agent.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """
        pass


def add_history_upto_token_limit(
    prompt: ChatSequence, history: MessageHistory, t_limit: int
) -> list[Message]:
    current_prompt_length = prompt.token_length
    insertion_index = len(prompt)
    limit_reached = False
    trimmed_messages: list[Message] = []
    for cycle in reversed(list(history.per_cycle())):
        messages_to_add = [msg for msg in cycle if msg is not None]
        tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
        if current_prompt_length + tokens_to_add > t_limit:
            limit_reached = True

        if not limit_reached:
            # Add the most recent message to the start of the chain,
            # after the system prompts.
            prompt.insert(insertion_index, *messages_to_add)
            current_prompt_length += tokens_to_add
        else:
            trimmed_messages = messages_to_add + trimmed_messages

    return trimmed_messages
255
autogpt/app.py
Normal file
255
autogpt/app.py
Normal file
@@ -0,0 +1,255 @@
|
||||
""" Command and Control """
|
||||
import json
|
||||
from typing import Dict, List, NoReturn, Union
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
AGENT_MANAGER = AgentManager()
|
||||
|
||||
|
||||
def is_valid_int(value: str) -> bool:
|
||||
"""Check if the value is a valid integer
|
||||
|
||||
Args:
|
||||
value (str): The value to check
|
||||
|
||||
Returns:
|
||||
bool: True if the value is a valid integer, False otherwise
|
||||
"""
|
||||
try:
|
||||
int(value)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def get_command(response_json: Dict):
|
||||
"""Parse the response and return the command name and arguments
|
||||
|
||||
Args:
|
||||
response_json (json): The response from the AI
|
||||
|
||||
Returns:
|
||||
tuple: The command name and arguments
|
||||
|
||||
Raises:
|
||||
json.decoder.JSONDecodeError: If the response is not valid JSON
|
||||
|
||||
Exception: If any other error occurs
|
||||
"""
|
||||
try:
|
||||
if "command" not in response_json:
|
||||
return "Error:", "Missing 'command' object in JSON"
|
||||
|
||||
if not isinstance(response_json, dict):
|
||||
return "Error:", f"'response_json' object is not dictionary {response_json}"
|
||||
|
||||
command = response_json["command"]
|
||||
if not isinstance(command, dict):
|
||||
return "Error:", "'command' object is not a dictionary"
|
||||
|
||||
if "name" not in command:
|
||||
return "Error:", "Missing 'name' field in 'command' object"
|
||||
|
||||
command_name = command["name"]
|
||||
|
||||
# Use an empty dictionary if 'args' field is not present in 'command' object
|
||||
arguments = command.get("args", {})
|
||||
|
||||
return command_name, arguments
|
||||
except json.decoder.JSONDecodeError:
|
||||
return "Error:", "Invalid JSON"
|
||||
# All other errors, return "Error: + error message"
|
||||
except Exception as e:
|
||||
return "Error:", str(e)
|
||||
def map_command_synonyms(command_name: str):
    """Takes the original command name given by the AI, and checks if the
    string matches a list of common/known hallucinations
    """
    synonyms = [
        ("write_file", "write_to_file"),
        ("create_file", "write_to_file"),
        ("search", "google"),
    ]
    for seen_command, actual_command_name in synonyms:
        if command_name == seen_command:
            return actual_command_name
    return command_name


def execute_command(
    command_registry: CommandRegistry,
    command_name: str,
    arguments,
    prompt: PromptGenerator,
):
    """Execute the command and return the result

    Args:
        command_registry (CommandRegistry): The registry holding the available commands
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        prompt (PromptGenerator): The prompt generator holding any legacy commands

    Returns:
        str: The result of the command
    """
    try:
        cmd = command_registry.commands.get(command_name)

        # If the command is found, call it with the provided arguments
        if cmd:
            return cmd(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])

        # TODO: Change these to take in a file rather than pasted code; if a
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "task_complete":
            shutdown()
        else:
            for command in prompt.commands:
                if (
                    command_name == command["label"].lower()
                    or command_name == command["name"].lower()
                ):
                    return command["function"](**arguments)
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    except Exception as e:
        return f"Error: {str(e)}"
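A sketch of dispatching through execute_command, assuming a registry populated at startup and a PromptGenerator built elsewhere (the command name and arguments are illustrative):

result = execute_command(
    command_registry=command_registry,  # filled via CommandRegistry elsewhere
    command_name="write_to_file",
    arguments={"filename": "out.txt", "text": "hello"},
    prompt=prompt_generator,  # hypothetical PromptGenerator instance
)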
@command(
    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str) -> str:
    """Get a summary of the text found at the given URL

    Args:
        url (str): The url to scrape
        question (str): The question to summarize the text for

    Returns:
        str: The summary of the text
    """
    text = scrape_text(url)
    summary = summarize_text(url, text, question)
    return f""" "Result" : {summary}"""


@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Get the hyperlinks found on the page at the given URL

    Args:
        url (str): The url to scrape

    Returns:
        str or list: The hyperlinks on the page
    """
    return scrape_links(url)
def shutdown() -> NoReturn:
    """Shut down the program"""
    logger.info("Shutting down...")
    quit()


@command(
    "start_agent",
    "Start GPT Agent",
    '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
    """Start an agent with a given name, task, and prompt

    Args:
        name (str): The name of the agent
        task (str): The task of the agent
        prompt (str): The prompt for the agent
        model (str): The model to use for the agent

    Returns:
        str: The response of the agent
    """
    # Remove underscores from name
    voice_name = name.replace("_", " ")

    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if CFG.speak_mode:
        say_text(agent_intro, 1)
    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)

    if CFG.speak_mode:
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = AGENT_MANAGER.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str) -> str:
    """Message an agent with a given key and message"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = AGENT_MANAGER.message_agent(int(key), message)
    else:
        return "Invalid key, must be an integer."

    # Speak response
    if CFG.speak_mode:
        say_text(agent_response, 1)
    return agent_response


@command("list_agents", "List GPT Agents", "")
def list_agents() -> str:
    """List all agents

    Returns:
        str: A list of all agents
    """
    return "List of agents:\n" + "\n".join(
        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
    )


@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str) -> str:
    """Delete an agent with a given key

    Args:
        key (str): The key of the agent to delete

    Returns:
        str: A message indicating whether the agent was deleted or not
    """
    result = AGENT_MANAGER.delete_agent(key)
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
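Taken together, these commands give the main agent a simple sub-agent lifecycle; a sketch with illustrative names and task text (the key "0" assumes it is the first key the manager issues):

print(start_agent("researcher", "Summarize a topic", "Find sources on X"))
print(message_agent("0", "What did you find?"))
print(list_agents())
print(delete_agent("0"))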
@@ -1,574 +0,0 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import enum
import logging
import math
import signal
import sys
from pathlib import Path
from types import FrameType
from typing import Optional

from colorama import Fore, Style

from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.app.spinner import Spinner
from autogpt.app.utils import (
    clean_input,
    get_current_git_branch,
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies


def run_auto_gpt(
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    working_directory: Path,
    workspace_directory: str | Path,
    install_plugin_deps: bool,
    ai_name: Optional[str] = None,
    ai_role: Optional[str] = None,
    ai_goals: tuple[str] = tuple(),
):
    # Configure logging before we do anything else.
    logger.set_level(logging.DEBUG if debug else logging.INFO)

    config = ConfigBuilder.build_config_from_env(workdir=working_directory)

    # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
    # or import it directly.
    logger.config = config

    # TODO: fill in llm values here
    check_openai_api_key(config)

    create_config(
        config,
        continuous,
        continuous_limit,
        ai_settings,
        prompt_settings,
        skip_reprompt,
        speak,
        debug,
        gpt3only,
        gpt4only,
        memory_type,
        browser_name,
        allow_downloads,
        skip_news,
    )

    if config.continuous_mode:
        for line in get_legal_warning().split("\n"):
            logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)

    if not config.skip_news:
        motd, is_new_motd = get_latest_bulletin()
        if motd:
            motd = markdown_to_ansi_style(motd)
            for motd_line in motd.split("\n"):
                logger.info(motd_line, "NEWS:", Fore.GREEN)
            if is_new_motd and not config.chat_messages_enabled:
                input(
                    Fore.MAGENTA
                    + Style.BRIGHT
                    + "NEWS: Bulletin was updated! Press Enter to continue..."
                    + Style.RESET_ALL
                )

        git_branch = get_current_git_branch()
        if git_branch and git_branch != "stable":
            logger.typewriter_log(
                "WARNING: ",
                Fore.RED,
                f"You are running on `{git_branch}` branch "
                "- this is not a supported branch.",
            )
        if sys.version_info < (3, 10):
            logger.typewriter_log(
                "WARNING: ",
                Fore.RED,
                "You are running on an older version of Python. "
                "Some people have observed problems with certain "
                "parts of Auto-GPT with this version. "
                "Please consider upgrading to Python 3.10 or higher.",
            )

    if install_plugin_deps:
        install_plugin_dependencies()

    # TODO: have this directory live outside the repository (e.g. in a user's
    #   home directory) and have it come in as a command line argument or part of
    #   the env file.
    config.workspace_path = Workspace.init_workspace_directory(
        config, workspace_directory
    )

    # HACK: doing this here to collect some globals that depend on the workspace.
    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)

    config.plugins = scan_plugins(config, config.debug_mode)

    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)

    ai_config = construct_main_ai_config(
        config,
        name=ai_name,
        role=ai_role,
        goals=ai_goals,
    )
    ai_config.command_registry = command_registry
    # print(prompt)

    # add chat plugins capable of report to logger
    if config.chat_messages_enabled:
        for plugin in config.plugins:
            if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
                logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
                logger.chat_plugins.append(plugin)

    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(config)
    memory.clear()
    logger.typewriter_log(
        "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)

    agent = Agent(
        memory=memory,
        command_registry=command_registry,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
        ai_config=ai_config,
        config=config,
    )

    run_interaction_loop(agent)


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | float:
    # Translate from the continuous_mode/continuous_limit config
    # to a cycle_budget (maximum number of cycles to run without checking in with the
    # user) and a count of cycles_remaining before we check in.
    if continuous_mode:
        cycle_budget = continuous_limit if continuous_limit else math.inf
    else:
        cycle_budget = 1

    return cycle_budget
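The budget mapping is small enough to show exhaustively (a sketch; math.inf stands in for "run without checking in"):

import math

assert _get_cycle_budget(continuous_mode=False, continuous_limit=0) == 1
assert _get_cycle_budget(continuous_mode=True, continuous_limit=5) == 5
assert _get_cycle_budget(continuous_mode=True, continuous_limit=0) == math.inf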
class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    EXIT = "EXIT"
    TEXT = "TEXT"


def run_interaction_loop(
    agent: Agent,
) -> None:
    """Run the main interaction loop for the agent.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    config = agent.config
    ai_config = agent.ai_config
    logger.debug(f"{ai_config.ai_name} System Prompt: {agent.system_prompt}")

    cycle_budget = cycles_remaining = _get_cycle_budget(
        config.continuous_mode, config.continuous_limit
    )
    spinner = Spinner("Thinking...", plain_output=config.plain_output)

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        nonlocal cycle_budget, cycles_remaining, spinner
        if cycles_remaining in [0, 1, math.inf]:
            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution "
                "immediately.",
                Fore.RED,
            )
            sys.exit()
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution.",
                Fore.RED,
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)

    #########################
    # Application Main Loop #
    #########################

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        # Have the agent determine the next action to take.
        with spinner:
            command_name, command_args, assistant_reply_dict = agent.think()

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(config, ai_config, command_name, command_args, assistant_reply_dict)

        ##################
        # Get user input #
        ##################
        if cycles_remaining == 1:  # Last cycle
            user_feedback, user_input, new_cycles_remaining = get_user_feedback(
                config,
                ai_config,
            )

            if user_feedback == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.typewriter_log(
                            "RESUMING CONTINUOUS EXECUTION: ",
                            Fore.MAGENTA,
                            f"The cycle budget is {cycle_budget}.",
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "",
                )
            elif user_feedback == UserFeedback.EXIT:
                logger.typewriter_log("Exiting...", Fore.YELLOW)
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                command_name = "human_feedback"
        else:
            user_input = None
            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                logger.typewriter_log(
                    "AUTHORISED COMMANDS LEFT: ", Fore.CYAN, f"{cycles_remaining}"
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if command_name != "human_feedback":
            cycles_remaining -= 1
        result = agent.execute(command_name, command_args, user_input)

        if result is not None:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
        else:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
def update_user(
    config: Config,
    ai_config: AIConfig,
    command_name: CommandName | None,
    command_args: CommandArgs | None,
    assistant_reply_dict: AgentThoughts,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.
        command_name: The name of the command to execute.
        command_args: The arguments for the command.
        assistant_reply_dict: The assistant's reply.
    """

    print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)

    if command_name is not None:
        if command_name.lower().startswith("error"):
            logger.typewriter_log(
                "ERROR: ",
                Fore.RED,
                "The Agent failed to select an action. "
                f"Error message: {command_name}",
            )
        else:
            if config.speak_mode:
                say_text(f"I want to execute {command_name}", config)

            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
                f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
            )
    else:
        logger.typewriter_log(
            "NO ACTION SELECTED: ",
            Fore.RED,
            "The Agent failed to select an action.",
        )
def get_user_feedback(
    config: Config,
    ai_config: AIConfig,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_config.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        if config.chat_messages_enabled:
            console_input = clean_input(config, "Waiting for your response...")
        else:
            console_input = clean_input(
                config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
            )

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warn("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                user_feedback = UserFeedback.AUTHORIZE
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
            except ValueError:
                logger.warn(
                    "Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining
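The accepted console inputs follow a small grammar; a pure restatement of the parsing rules as a sketch, assuming the default authorise key "y" and exit key "n":

def parse_console_input(s: str) -> tuple[str, int | None]:
    s = s.lower().strip()
    if s == "y":
        return ("AUTHORIZE", None)
    if s == "":
        raise ValueError("empty input just re-prompts in the real loop")
    if s.startswith("y -"):
        return ("AUTHORIZE", abs(int(s.split(" ")[1])))  # e.g. "y -5" -> five cycles
    if s in ("n", "exit"):
        return ("EXIT", None)
    return ("TEXT", None)  # anything else is passed through as feedback text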
def construct_main_ai_config(
    config: Config,
    name: Optional[str] = None,
    role: Optional[str] = None,
    goals: tuple[str] = tuple(),
) -> AIConfig:
    """Construct the main AI configuration, applying any name/role/goal overrides

    Returns:
        AIConfig: The loaded or newly created AI configuration
    """
    ai_config = AIConfig.load(config.workdir / config.ai_settings_file)

    # Apply overrides
    if name:
        ai_config.ai_name = name
    if role:
        ai_config.ai_role = role
    if goals:
        ai_config.ai_goals = list(goals)

    if (
        all([name, role, goals])
        or (
            config.skip_reprompt
            and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
        )
    ):
        logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
        logger.typewriter_log(
            "API Budget:",
            Fore.GREEN,
            "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
        )
    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {ai_config.ai_name}?",
            speak_text=True,
        )
        should_continue = clean_input(
            config,
            f"""Continue with the last settings?
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
        )
        if should_continue.lower() == config.exit_key:
            ai_config = AIConfig()

    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
        ai_config = prompt_user(config)
        ai_config.save(config.workdir / config.ai_settings_file)

    if config.restrict_to_workspace:
        logger.typewriter_log(
            "NOTE: All files/directories created by this agent can be found inside its workspace at:",
            Fore.YELLOW,
            f"{config.workspace_path}",
        )
    # set the total api budget
    api_manager = ApiManager()
    api_manager.set_total_budget(ai_config.api_budget)

    # Agent Created, print message
    logger.typewriter_log(
        ai_config.ai_name,
        Fore.LIGHTBLUE_EX,
        "has been created with the following details:",
        speak_text=True,
    )

    # Print the ai_config details
    # Name
    logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
    # Role
    logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
    # Goals
    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
    for goal in ai_config.ai_goals:
        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return ai_config
def print_assistant_thoughts(
    ai_name: str,
    assistant_reply_json_valid: dict,
    config: Config,
) -> None:
    from autogpt.speech import say_text

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
    assistant_thoughts_speak = None
    assistant_thoughts_criticism = None

    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
    if assistant_thoughts:
        assistant_thoughts_reasoning = remove_ansi_escape(
            assistant_thoughts.get("reasoning", "")
        )
        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
        assistant_thoughts_criticism = remove_ansi_escape(
            assistant_thoughts.get("criticism", "")
        )
        assistant_thoughts_speak = remove_ansi_escape(
            assistant_thoughts.get("speak", "")
        )
    logger.typewriter_log(
        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
    )
    logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
    if assistant_thoughts_plan:
        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
        # If it's a list, join it into a string
        if isinstance(assistant_thoughts_plan, list):
            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
        elif isinstance(assistant_thoughts_plan, dict):
            assistant_thoughts_plan = str(assistant_thoughts_plan)

        # Split the input_string using the newline character and dashes
        lines = assistant_thoughts_plan.split("\n")
        for line in lines:
            line = line.lstrip("- ")
            logger.typewriter_log("- ", Fore.GREEN, line.strip())
    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
    # Speak the assistant's thoughts
    if assistant_thoughts_speak:
        if config.speak_mode:
            say_text(assistant_thoughts_speak, config)
        else:
            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")


def remove_ansi_escape(s: str) -> str:
    return s.replace("\x1B", "")
@@ -1,239 +0,0 @@
"""Set up the AI and its goals"""
import re
from typing import Optional

from colorama import Fore, Style
from jinja2 import Template

from autogpt.app import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.utils import create_chat_completion
from autogpt.logs import logger
from autogpt.prompts.default_prompts import (
    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_USER_DESIRE_PROMPT,
)


def prompt_user(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """Prompt the user for input

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    ai_config_template_provided = ai_config_template is not None and any(
        [
            ai_config_template.ai_goals,
            ai_config_template.ai_name,
            ai_config_template.ai_role,
        ]
    )

    user_desire = ""
    if not ai_config_template_provided:
        # Get user desire if command line overrides have not been passed in
        logger.typewriter_log(
            "Create an AI-Assistant:",
            Fore.GREEN,
            "input '--manual' to enter manual mode.",
            speak_text=True,
        )

        user_desire = utils.clean_input(
            config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
        )

    if user_desire.strip() == "":
        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

    # If user desire contains "--manual" or we have overridden any of the AI configuration
    if "--manual" in user_desire or ai_config_template_provided:
        logger.typewriter_log(
            "Manual Mode Selected",
            Fore.GREEN,
            speak_text=True,
        )
        return generate_aiconfig_manual(config, ai_config_template)

    else:
        try:
            return generate_aiconfig_automatic(user_desire, config)
        except Exception as e:
            logger.typewriter_log(
                "Unable to automatically generate AI Config based on user desire.",
                Fore.RED,
                "Falling back to manual mode.",
                speak_text=True,
            )
            logger.debug(f"Error during AIConfig generation: {e}")

            return generate_aiconfig_manual(config)
def generate_aiconfig_manual(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

    This function guides the user through a series of prompts to collect the necessary information to create
    an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
    goals. If the user does not provide a value for any of the fields, default values will be used.

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """

    # Manual Setup Intro
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

    if ai_config_template and ai_config_template.ai_name:
        ai_name = ai_config_template.ai_name
    else:
        ai_name = ""
        # Get AI Name from User
        logger.typewriter_log(
            "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
        )
        ai_name = utils.clean_input(config, "AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    if ai_config_template and ai_config_template.ai_role:
        ai_role = ai_config_template.ai_role
    else:
        # Get AI Role from User
        logger.typewriter_log(
            "Describe your AI's role: ",
            Fore.GREEN,
            "For example, 'an AI designed to autonomously develop and run businesses with"
            " the sole goal of increasing your net worth.'",
        )
        ai_role = utils.clean_input(config, f"{ai_name} is: ")
    if ai_role == "":
        ai_role = (
            "an AI designed to autonomously develop and run businesses with the"
            " sole goal of increasing your net worth."
        )

    if ai_config_template and ai_config_template.ai_goals:
        ai_goals = ai_config_template.ai_goals
    else:
        # Enter up to 5 goals for the AI
        logger.typewriter_log(
            "Enter up to 5 goals for your AI: ",
            Fore.GREEN,
            "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
            " multiple businesses autonomously",
        )
        logger.info("Enter nothing to load defaults; leaving a goal blank finishes the list.")
        ai_goals = []
        for i in range(5):
            ai_goal = utils.clean_input(
                config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
            )
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
    if not ai_goals:
        ai_goals = [
            "Increase net worth",
            "Grow Twitter Account",
            "Develop and manage multiple businesses autonomously",
        ]

    # Get API Budget from User
    logger.typewriter_log(
        "Enter your budget for API calls: ",
        Fore.GREEN,
        "For example: $1.50",
    )
    logger.info("Enter nothing to let the AI run without monetary limit")
    api_budget_input = utils.clean_input(
        config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
    )
    if api_budget_input == "":
        api_budget = 0.0
    else:
        try:
            api_budget = float(api_budget_input.replace("$", ""))
        except ValueError:
            logger.typewriter_log(
                "Invalid budget input. Setting budget to unlimited.", Fore.RED
            )
            api_budget = 0.0

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)
def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
    prompt_ai_config_automatic = Template(
        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
    ).render(user_prompt=user_prompt)
    # Call LLM with the string as user input
    output = create_chat_completion(
        ChatSequence.for_model(
            config.fast_llm,
            [
                Message("system", system_prompt),
                Message("user", prompt_ai_config_automatic),
            ],
        ),
        config,
    ).content

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    api_budget = 0.0  # TODO: parse api budget using a regular expression

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)
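generate_aiconfig_automatic assumes the model answers in a fixed Name/Description/Goals layout; a sketch of that shape and how the regexes pick it apart (the output text below is fabricated for illustration):

import re

output = (
    "Name: Chef-GPT\n"
    "Description: an AI that plans meals\n"
    "Goals:\n"
    "- Find a recipe\n"
    "- Write a shopping list\n"
)
name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
goals = re.findall(r"(?<=\n)-\s*(.*)", output)
# name == "Chef-GPT"; goals == ["Find a recipe", "Write a shopping list"]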
@@ -1,147 +0,0 @@
import os
import re

import requests
from colorama import Fore, Style
from git.repo import Repo
from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory

from autogpt.config import Config
from autogpt.logs import logger

session = PromptSession(history=InMemoryHistory())


def clean_input(config: Config, prompt: str = "", talk=False):
    try:
        if config.chat_messages_enabled:
            for plugin in config.plugins:
                if not hasattr(plugin, "can_handle_user_input"):
                    continue
                if not plugin.can_handle_user_input(user_input=prompt):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if not plugin_response:
                    continue
                if plugin_response.lower() in [
                    "yes",
                    "yeah",
                    "y",
                    "ok",
                    "okay",
                    "sure",
                    "alright",
                ]:
                    return config.authorise_key
                elif plugin_response.lower() in [
                    "no",
                    "nope",
                    "n",
                    "negative",
                ]:
                    return config.exit_key
                return plugin_response

        # ask for input, default when just pressing Enter is y
        logger.info("Asking user via keyboard...")

        # handle_sigint must be set to False so that the signal handler in
        # autogpt/main.py can be employed properly. This refers to
        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
        answer = session.prompt(ANSI(prompt), handle_sigint=False)
        return answer
    except KeyboardInterrupt:
        logger.info("You interrupted Auto-GPT")
        logger.info("Quitting...")
        exit(0)
def get_bulletin_from_web():
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except Exception:
        return ""
def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        with open("data/CURRENT_BULLETIN.md", "r", encoding="utf-8") as bulletin_file:
            current_bulletin = bulletin_file.read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
            "If you don't wish to see this message, you "
            "can run Auto-GPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as bulletin_file:
            bulletin_file.write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news
def markdown_to_ansi_style(markdown: str):
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)
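A sketch of the conversion on a two-line input (the ANSI codes are shown symbolically in the comments):

styled = markdown_to_ansi_style("# Header\nSome *important* note")
# line 1 -> Style.BRIGHT + Fore.CYAN + "Header" + Style.RESET_ALL
# line 2 -> "Some " + Style.BRIGHT + "important" + Style.NORMAL + " note" + Style.RESET_ALL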
def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
"""
    return legal_text
@@ -1,7 +1,4 @@
"""Main script for the autogpt package."""
from pathlib import Path
from typing import Optional

import click


@@ -16,15 +13,7 @@ import click
@click.option(
    "--ai-settings",
    "-C",
    help=(
        "Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
        " root directory. Will also automatically skip the re-prompt."
    ),
)
@click.option(
    "--prompt-settings",
    "-P",
    help="Specifies which prompt_settings.yaml file to use.",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "-l",
@@ -71,29 +60,12 @@ import click
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--ai-goal",
    type=str,
    multiple=True,
    help="AI goal override; may be used multiple times to pass multiple goals",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -105,9 +77,6 @@ def main(
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    ai_goal: tuple[str],
) -> None:
    """
    Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4, pushing the boundaries of AI.
@@ -115,31 +84,24 @@ def main(
    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.app.main import run_auto_gpt
    from autogpt.main import run_auto_gpt

    if ctx.invoked_subcommand is None:
        run_auto_gpt(
            continuous=continuous,
            continuous_limit=continuous_limit,
            ai_settings=ai_settings,
            prompt_settings=prompt_settings,
            skip_reprompt=skip_reprompt,
            speak=speak,
            debug=debug,
            gpt3only=gpt3only,
            gpt4only=gpt4only,
            memory_type=memory_type,
            browser_name=browser_name,
            allow_downloads=allow_downloads,
            skip_news=skip_news,
            working_directory=Path(
                __file__
            ).parent.parent.parent,  # TODO: make this an option
            workspace_directory=workspace_directory,
            install_plugin_deps=install_plugin_deps,
            ai_name=ai_name,
            ai_role=ai_role,
            ai_goals=ai_goal,
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
            skip_news,
            workspace_directory,
            install_plugin_deps,
        )
@@ -1,61 +0,0 @@
from __future__ import annotations

import functools
from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict

if TYPE_CHECKING:
    from autogpt.config import Config

from autogpt.models.command import Command, CommandParameter

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class CommandParameterSpec(TypedDict):
    type: str
    description: str
    required: bool


def command(
    name: str,
    description: str,
    parameters: dict[str, CommandParameterSpec],
    enabled: bool | Callable[[Config], bool] = True,
    disabled_reason: Optional[str] = None,
    aliases: list[str] = [],
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        typed_parameters = [
            CommandParameter(
                name=param_name,
                description=parameter.get("description"),
                type=parameter.get("type", "string"),
                required=parameter.get("required", False),
            )
            for param_name, parameter in parameters.items()
        ]
        cmd = Command(
            name=name,
            description=description,
            method=func,
            parameters=typed_parameters,
            enabled=enabled,
            disabled_reason=disabled_reason,
            aliases=aliases,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
@@ -1,7 +0,0 @@
COMMAND_CATEGORIES = [
    "autogpt.commands.execute_code",
    "autogpt.commands.file_operations",
    "autogpt.commands.web_search",
    "autogpt.commands.web_selenium",
    "autogpt.commands.system",
]

autogpt/commands/analyze_code.py
@@ -0,0 +1,31 @@
"""Code evaluation module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm import call_ai_function
|
||||
|
||||
|
||||
@command(
|
||||
"analyze_code",
|
||||
"Analyze Code",
|
||||
'"code": "<full_code_string>"',
|
||||
)
|
||||
def analyze_code(code: str) -> list[str]:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
|
||||
Parameters:
|
||||
code (str): Code to be evaluated.
|
||||
Returns:
|
||||
A result string from create chat completion. A list of suggestions to
|
||||
improve the code.
|
||||
"""
|
||||
|
||||
function_string = "def analyze_code(code: str) -> list[str]:"
|
||||
args = [code]
|
||||
description_string = (
|
||||
"Analyzes the given code and returns a list of suggestions for improvements."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string)
|
||||
autogpt/commands/audio_text.py
@@ -0,0 +1,61 @@
"""Commands for converting audio to text."""
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command(
|
||||
"read_audio_from_file",
|
||||
"Convert Audio to text",
|
||||
'"filename": "<filename>"',
|
||||
CFG.huggingface_audio_to_text_model,
|
||||
"Configure huggingface_audio_to_text_model.",
|
||||
)
|
||||
def read_audio_from_file(filename: str) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
Args:
|
||||
filename (str): The path to the audio file
|
||||
|
||||
Returns:
|
||||
str: The text from the audio
|
||||
"""
|
||||
with open(filename, "rb") as audio_file:
|
||||
audio = audio_file.read()
|
||||
return read_audio(audio)
|
||||
|
||||
|
||||
def read_audio(audio: bytes) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
Args:
|
||||
audio (bytes): The audio to convert
|
||||
|
||||
Returns:
|
||||
str: The text from the audio
|
||||
"""
|
||||
model = CFG.huggingface_audio_to_text_model
|
||||
api_url = f"https://api-inference.huggingface.co/models/{model}"
|
||||
api_token = CFG.huggingface_api_token
|
||||
headers = {"Authorization": f"Bearer {api_token}"}
|
||||
|
||||
if api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
|
||||
response = requests.post(
|
||||
api_url,
|
||||
headers=headers,
|
||||
data=audio,
|
||||
)
|
||||
|
||||
text = json.loads(response.content.decode("utf-8"))["text"]
|
||||
return f"The audio says: {text}"
|
||||
autogpt/commands/command.py
@@ -0,0 +1,156 @@
import functools
import importlib
import inspect
from typing import Any, Callable, Optional

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class Command:
    """A class representing a command.

    Attributes:
        name (str): The name of the command.
        description (str): A brief description of what the command does.
        signature (str): The signature of the function that the command executes.
            Defaults to the function's inspected signature.
    """

    def __init__(
        self,
        name: str,
        description: str,
        method: Callable[..., Any],
        signature: str = "",
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ):
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature if signature else str(inspect.signature(self.method))
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        if not self.enabled:
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

    def __str__(self) -> str:
        return f"{self.name}: {self.description}, args: {self.signature}"


class CommandRegistry:
    """
    The CommandRegistry class is a manager for a collection of Command objects.
    It allows the registration, modification, and retrieval of Command objects,
    as well as the scanning and loading of command plugins from a specified
    directory.
    """

    def __init__(self):
        self.commands = {}

    def _import_module(self, module_name: str) -> Any:
        return importlib.import_module(module_name)

    def _reload_module(self, module: Any) -> Any:
        return importlib.reload(module)

    def register(self, cmd: Command) -> None:
        self.commands[cmd.name] = cmd

    def unregister(self, command_name: str):
        if command_name in self.commands:
            del self.commands[command_name]
        else:
            raise KeyError(f"Command '{command_name}' not found in registry.")

    def reload_commands(self) -> None:
        """Reloads all loaded command plugins."""
        for cmd_name in self.commands:
            cmd = self.commands[cmd_name]
            module = self._import_module(cmd.__module__)
            reloaded_module = self._reload_module(module)
            if hasattr(reloaded_module, "register"):
                reloaded_module.register(self)

    def get_command(self, name: str) -> Callable[..., Any]:
        return self.commands[name]

    def call(self, command_name: str, **kwargs) -> Any:
        if command_name not in self.commands:
            raise KeyError(f"Command '{command_name}' not found in registry.")
        command = self.commands[command_name]
        return command(**kwargs)

    def command_prompt(self) -> str:
        """
        Returns a string representation of all registered `Command` objects for use in a prompt
        """
        commands_list = [
            f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
        ]
        return "\n".join(commands_list)

    def import_commands(self, module_name: str) -> None:
        """
        Imports the specified Python module containing command plugins.

        This method imports the associated module and registers any functions or
        classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
        as `Command` objects. The registered `Command` objects are then added to the
        `commands` dictionary of the `CommandRegistry` object.

        Args:
            module_name (str): The name of the module to import for command plugins.
        """

        module = importlib.import_module(module_name)

        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            # Register decorated functions
            if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
                attr, AUTO_GPT_COMMAND_IDENTIFIER
            ):
                self.register(attr.command)
            # Register command classes
            elif (
                inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
            ):
                cmd_instance = attr()
                self.register(cmd_instance)


def command(
    name: str,
    description: str,
    signature: str = "",
    enabled: bool = True,
    disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        cmd = Command(
            name=name,
            description=description,
            method=func,
            signature=signature,
            enabled=enabled,
            disabled_reason=disabled_reason,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
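A sketch of wiring these pieces together with the string-signature decorator; the command name and body are illustrative:

@command("say_hello", "Say hello", '"name": "<name>"')
def say_hello(name: str) -> str:
    return f"Hello, {name}!"

registry = CommandRegistry()
registry.register(say_hello.command)
print(registry.call("say_hello", name="World"))  # -> Hello, World!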
@@ -1,64 +0,0 @@
import functools
from pathlib import Path
from typing import Callable

from autogpt.agents.agent import Agent
from autogpt.logs import logger


def sanitize_path_arg(arg_name: str):
    def decorator(func: Callable):
        # Get position of path parameter, in case it is passed as a positional argument
        try:
            arg_index = list(func.__annotations__.keys()).index(arg_name)
        except ValueError:
            raise TypeError(
                f"Sanitized parameter '{arg_name}' absent or not annotated on function '{func.__name__}'"
            )

        # Get position of agent parameter, in case it is passed as a positional argument
        try:
            agent_arg_index = list(func.__annotations__.keys()).index("agent")
        except ValueError:
            raise TypeError(
                f"Parameter 'agent' absent or not annotated on function '{func.__name__}'"
            )

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug(f"Sanitizing arg '{arg_name}' on function '{func.__name__}'")
            logger.debug(f"Function annotations: {func.__annotations__}")

            # Get Agent from the called function's arguments
            agent = kwargs.get(
                "agent", len(args) > agent_arg_index and args[agent_arg_index]
            )
            logger.debug(f"Args: {args}")
            logger.debug(f"KWArgs: {kwargs}")
            logger.debug(f"Agent argument lifted from function call: {agent}")
            if not isinstance(agent, Agent):
                raise RuntimeError("Could not get Agent from decorated command's args")

            # Sanitize the specified path argument, if one is given
            given_path: str | Path | None = kwargs.get(
                arg_name, len(args) > arg_index and args[arg_index] or None
            )
            if given_path:
                if given_path in {"", "/"}:
                    sanitized_path = str(agent.workspace.root)
                else:
                    sanitized_path = str(agent.workspace.get_path(given_path))

                if arg_name in kwargs:
                    kwargs[arg_name] = sanitized_path
                else:
                    # args is an immutable tuple; must be converted to a list to update
                    arg_list = list(args)
                    arg_list[arg_index] = sanitized_path
                    args = tuple(arg_list)

            return func(*args, **kwargs)

        return wrapper

    return decorator
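A sketch of how a file command would opt in to this sanitization (the command body is illustrative):

@sanitize_path_arg("filename")
def read_file(filename: str, agent: Agent) -> str:
    # by this point `filename` has been resolved inside agent.workspace
    # (or replaced with the workspace root for "/")
    with open(filename, "r", encoding="utf-8") as f:
        return f.read()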
@@ -1,89 +1,20 @@
"""Commands to execute code"""

COMMAND_CATEGORY = "execute_code"
COMMAND_CATEGORY_TITLE = "Execute Code"

"""Execute code in a Docker container"""
import os
import subprocess
from pathlib import Path

import docker
from docker.errors import DockerException, ImageNotFound
from docker.models.containers import Container as DockerContainer
from docker.errors import ImageNotFound

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

from .decorators import sanitize_path_arg

ALLOWLIST_CONTROL = "allowlist"
DENYLIST_CONTROL = "denylist"
CFG = Config()


@command(
    "execute_python_code",
    "Creates a Python file and executes it",
    {
        "code": {
            "type": "string",
            "description": "The Python code to run",
            "required": True,
        },
        "name": {
            "type": "string",
            "description": "A name to be given to the Python file",
            "required": True,
        },
    },
)
def execute_python_code(code: str, name: str, agent: Agent) -> str:
    """Create and execute a Python file in a Docker container and return the STDOUT of the
    executed code. If there is any data that needs to be captured, use a print statement.

    Args:
        code (str): The Python code to run
        name (str): A name to be given to the Python file

    Returns:
        str: The STDOUT captured from the code when it ran
    """
    ai_name = agent.ai_config.ai_name
    code_dir = agent.workspace.get_path(Path(ai_name, "executed_code"))
    os.makedirs(code_dir, exist_ok=True)

    if not name.endswith(".py"):
        name = name + ".py"

    # The `name` arg is not covered by @sanitize_path_arg,
    # so sanitization must be done here to prevent path traversal.
    file_path = agent.workspace.get_path(code_dir / name)
    if not file_path.is_relative_to(code_dir):
        return "Error: 'name' argument resulted in path traversal, operation aborted"

    try:
        with open(file_path, "w+", encoding="utf-8") as f:
            f.write(code)

        return execute_python_file(str(file_path), agent)
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "execute_python_file",
    "Executes an existing Python file",
    {
        "filename": {
            "type": "string",
            "description": "The name of the file to execute",
            "required": True,
        },
    },
)
@sanitize_path_arg("filename")
def execute_python_file(filename: str, agent: Agent) -> str:
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
def execute_python_file(filename: str) -> str:
    """Execute a Python file in a Docker container and return the output

    Args:
@@ -92,36 +23,23 @@ def execute_python_file(filename: str, agent: Agent) -> str:
    Returns:
        str: The output of the file
    """
    logger.info(
        f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
    )
    logger.info(f"Executing file '{filename}'")

    if not filename.endswith(".py"):
        return "Error: Invalid file type. Only .py files are allowed."

    file_path = Path(filename)
    if not file_path.is_file():
        # Mimic the response that you get from the command line so that it's easier to identify
        return (
            f"python: can't open file '{filename}': [Errno 2] No such file or directory"
        )
    if not os.path.isfile(filename):
        return f"Error: File '{filename}' does not exist."

    if we_are_running_in_a_docker_container():
        logger.debug(
            f"Auto-GPT is running in a Docker container; executing {file_path} directly..."
        )
        result = subprocess.run(
            ["python", str(file_path)],
            capture_output=True,
            encoding="utf8",
            cwd=agent.config.workspace_path,
            f"python {filename}", capture_output=True, encoding="utf8", shell=True
        )
        if result.returncode == 0:
            return result.stdout
        else:
            return f"Error: {result.stderr}"

    logger.debug("Auto-GPT is not running in a Docker container")
    try:
        client = docker.from_env()
        # You can replace this with the desired Python image/version
@@ -130,10 +48,10 @@ def execute_python_file(filename: str, agent: Agent) -> str:
        image_name = "python:3-alpine"
        try:
            client.images.get(image_name)
            logger.debug(f"Image '{image_name}' found locally")
            logger.warn(f"Image '{image_name}' found locally")
        except ImageNotFound:
            logger.info(
                f"Image '{image_name}' not found locally, pulling from Docker Hub..."
                f"Image '{image_name}' not found locally, pulling from Docker Hub"
            )
            # Use the low-level API to stream the pull response
            low_level_client = docker.APIClient()
@@ -145,25 +63,20 @@ def execute_python_file(filename: str, agent: Agent) -> str:
                    logger.info(f"{status}: {progress}")
                elif status:
                    logger.info(status)

        logger.debug(f"Running {file_path} in a {image_name} container...")
        container: DockerContainer = client.containers.run(
        container = client.containers.run(
            image_name,
            [
                "python",
                file_path.relative_to(agent.workspace.root).as_posix(),
            ],
            f"python {Path(filename).relative_to(CFG.workspace_path)}",
            volumes={
                str(agent.config.workspace_path): {
                CFG.workspace_path: {
                    "bind": "/workspace",
                    "mode": "rw",
                    "mode": "ro",
                }
            },
            working_dir="/workspace",
            stderr=True,
            stdout=True,
            detach=True,
        )  # type: ignore
        )

        container.wait()
        logs = container.logs().decode("utf-8")
@@ -174,7 +87,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:

        return logs

    except DockerException as e:
    except docker.errors.DockerException as e:
        logger.warn(
            "Could not run the script in a container. If you haven't already, please install Docker: https://docs.docker.com/get-docker/"
        )
@@ -184,43 +97,16 @@ def execute_python_file(filename: str, agent: Agent) -> str:
        return f"Error: {str(e)}"


def validate_command(command: str, config: Config) -> bool:
    """Validate a command to ensure it is allowed

    Args:
        command (str): The command to validate
        config (Config): The config to use to validate the command

    Returns:
        bool: True if the command is allowed, False otherwise
    """
    if not command:
        return False

    command_name = command.split()[0]

    if config.shell_command_control == ALLOWLIST_CONTROL:
        return command_name in config.shell_allowlist
    else:
        return command_name not in config.shell_denylist

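A quick illustration of the allow/deny logic above, as a minimal sketch. The stub config object here is hypothetical (the real Config has many more fields); only the first word of the command line is checked:

# Hypothetical stub to exercise validate_command's two control modes
class StubConfig:
    shell_command_control = "allowlist"  # or "denylist"
    shell_allowlist = ["ls", "cat"]
    shell_denylist = ["rm", "sudo"]

assert validate_command("ls -la /tmp", StubConfig()) is True   # "ls" is allowlisted
assert validate_command("rm -rf /", StubConfig()) is False     # "rm" is not
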
@command(
    "execute_shell",
    "Executes a Shell Command, non-interactive commands only",
    {
        "command_line": {
            "type": "string",
            "description": "The command line to execute",
            "required": True,
        }
    },
    enabled=lambda config: config.execute_local_commands,
    disabled_reason="You are not allowed to run local shell commands. To execute"
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config file: .env - do not attempt to bypass the restriction.",
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str, agent: Agent) -> str:
def execute_shell(command_line: str) -> str:
    """Execute a shell command and return the output

    Args:
@@ -229,14 +115,11 @@ def execute_shell(command_line: str, agent: Agent) -> str:
    Returns:
        str: The output of the command
    """
    if not validate_command(command_line, agent.config):
        logger.info(f"Command '{command_line}' not allowed")
        return "Error: This Shell Command is not allowed."

    current_dir = Path.cwd()
    # Change dir into workspace if necessary
    if not current_dir.is_relative_to(agent.config.workspace_path):
        os.chdir(agent.config.workspace_path)
    if not current_dir.is_relative_to(CFG.workspace_path):
        os.chdir(CFG.workspace_path)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
@@ -253,20 +136,14 @@ def execute_shell(command_line: str, agent: Agent) -> str:

@command(
    "execute_shell_popen",
    "Executes a Shell Command, non-interactive commands only",
    {
        "command_line": {
            "type": "string",
            "description": "The command line to execute",
            "required": True,
        }
    },
    lambda config: config.execute_local_commands,
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line, agent: Agent) -> str:
def execute_shell_popen(command_line) -> str:
    """Execute a shell command with Popen and return an English description
    of the event and the process id

@@ -276,14 +153,11 @@ def execute_shell_popen(command_line, agent: Agent) -> str:
    Returns:
        str: Description of the fact that the process started and its id
    """
    if not validate_command(command_line, agent.config):
        logger.info(f"Command '{command_line}' not allowed")
        return "Error: This Shell Command is not allowed."

    current_dir = os.getcwd()
    # Change dir into workspace if necessary
    if agent.config.workspace_path not in current_dir:
        os.chdir(agent.config.workspace_path)
    if CFG.workspace_path not in current_dir:
        os.chdir(CFG.workspace_path)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"

@@ -1,148 +1,83 @@
"""Commands to perform operations on files"""

"""File operations for AutoGPT"""
from __future__ import annotations

COMMAND_CATEGORY = "file_operations"
COMMAND_CATEGORY_TITLE = "File Operations"

import contextlib
import hashlib
import os
import os.path
from pathlib import Path
from typing import Generator, Literal
from typing import Generator

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, VectorMemory
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

from .decorators import sanitize_path_arg
from .file_operations_utils import read_textual_file

Operation = Literal["write", "append", "delete"]
CFG = Config()


def text_checksum(text: str) -> str:
    """Get the hex checksum for the given text."""
    return hashlib.md5(text.encode("utf-8")).hexdigest()


def operations_from_log(
    log_path: str | Path,
) -> Generator[tuple[Operation, str, str | None], None, None]:
    """Parse the file operations log and yield the log entries as tuples"""
    try:
        log = open(log_path, "r", encoding="utf-8")
    except FileNotFoundError:
        return

    for line in log:
        line = line.replace("File Operation Logger", "").strip()
        if not line:
            continue
        operation, tail = line.split(": ", maxsplit=1)
        operation = operation.strip()
        if operation in ("write", "append"):
            try:
                path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
            except ValueError:
                logger.warn(f"File log entry lacks checksum: '{line}'")
                path, checksum = tail.strip(), None
            yield (operation, path, checksum)
        elif operation == "delete":
            yield (operation, tail.strip(), None)

    log.close()


def file_operations_state(log_path: str | Path) -> dict[str, str]:
    """Iterates over the operations log and returns the expected state.

    Parses a log file at config.file_logger_path to construct a dictionary that maps
    each file path written or appended to its checksum. Deleted files are removed
    from the dictionary.

    Returns:
        A dictionary mapping file paths to their checksums.

    Raises:
        FileNotFoundError: If config.file_logger_path is not found.
        ValueError: If the log file content is not in the expected format.
    """
    state = {}
    for operation, path, checksum in operations_from_log(log_path):
        if operation in ("write", "append"):
            state[path] = checksum
        elif operation == "delete":
            del state[path]
    return state

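A minimal sketch of how the log replay above resolves to a final state. The log format is "<operation>: <path> #<checksum>"; the entries and checksums here are made up:

# Hypothetical log contents for file_operations_state() to replay
log_lines = """write: notes.txt #a1b2c3
append: notes.txt #d4e5f6
write: draft.md #0f0f0f
delete: draft.md"""

# Replaying top to bottom: the latest write/append wins and delete drops
# the key, so the derived end state is {"notes.txt": "d4e5f6"}.
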
@sanitize_path_arg("filename")
def is_duplicate_operation(
    operation: Operation, filename: str, agent: Agent, checksum: str | None = None
) -> bool:
    """Check if the operation has already been performed
def check_duplicate_operation(operation: str, filename: str) -> bool:
    """Check if the operation has already been performed on the given file

    Args:
        operation: The operation to check for
        filename: The name of the file to check for
        agent: The agent
        checksum: The checksum of the contents to be written
        operation (str): The operation to check for
        filename (str): The name of the file to check for

    Returns:
        True if the operation has already been performed on the file
        bool: True if the operation has already been performed on the file
    """
    # Make the filename into a relative path if possible
    with contextlib.suppress(ValueError):
        filename = str(Path(filename).relative_to(agent.workspace.root))

    state = file_operations_state(agent.config.file_logger_path)
    if operation == "delete" and filename not in state:
        return True
    if operation == "write" and state.get(filename) == checksum:
        return True
    return False
    log_content = read_file(CFG.file_logger_path)
    log_entry = f"{operation}: {filename}\n"
    return log_entry in log_content


@sanitize_path_arg("filename")
def log_operation(
    operation: Operation, filename: str, agent: Agent, checksum: str | None = None
) -> None:
def log_operation(operation: str, filename: str) -> None:
    """Log the file operation to the file_logger.txt

    Args:
        operation: The operation to log
        filename: The name of the file the operation was performed on
        checksum: The checksum of the contents to be written
        operation (str): The operation to log
        filename (str): The name of the file the operation was performed on
    """
    # Make the filename into a relative path if possible
    with contextlib.suppress(ValueError):
        filename = str(Path(filename).relative_to(agent.workspace.root))

    log_entry = f"{operation}: {filename}"
    if checksum is not None:
        log_entry += f" #{checksum}"
    logger.debug(f"Logging file operation: {log_entry}")
    append_to_file(
        agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
    )
    log_entry = f"{operation}: {filename}\n"
    append_to_file(CFG.file_logger_path, log_entry, should_log=False)


@command(
    "read_file",
    "Read an existing file",
    {
        "filename": {
            "type": "string",
            "description": "The path of the file to read",
            "required": True,
        }
    },
)
@sanitize_path_arg("filename")
def read_file(filename: str, agent: Agent) -> str:
def split_file(
    content: str, max_length: int = 4000, overlap: int = 0
) -> Generator[str, None, None]:
    """
    Split text into chunks of a specified maximum length with a specified overlap
    between chunks.

    :param content: The input text to be split into chunks
    :param max_length: The maximum length of each chunk,
        default is 4000 (about 1k tokens)
    :param overlap: The number of overlapping characters between chunks,
        default is no overlap
    :return: A generator yielding chunks of text
    """
    start = 0
    content_length = len(content)

    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:content_length]

        # Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
        if len(chunk) <= overlap:
            break

        yield chunk
        start += max_length - overlap

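To make the chunking arithmetic above concrete, here is a small sketch with toy numbers (a 10-character string, max_length=4, overlap=1), assuming split_file is in scope:

text = "abcdefghij"
chunks = list(split_file(text, max_length=4, overlap=1))
# Window starts advance by max_length - overlap = 3 characters, and each
# chunk spans up to end + overlap - 1, yielding ["abcd", "defg", "ghij"]:
# adjacent chunks share one character, and the final leftover "j" is
# dropped because it is no longer than the overlap.
print(chunks)
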
@command("read_file", "Read file", '"filename": "<filename>"')
def read_file(filename: str) -> str:
    """Read a file and return the contents

    Args:
@@ -152,63 +87,49 @@ def read_file(filename: str, agent: Agent) -> str:
        str: The contents of the file
    """
    try:
        content = read_textual_file(filename, logger)

        # TODO: invalidate/update memory when file is edited
        file_memory = MemoryItem.from_text_file(content, filename, agent.config)
        if len(file_memory.chunks) > 1:
            return file_memory.summary

        with open(filename, "r", encoding="utf-8") as f:
            content = f.read()
        return content
    except Exception as e:
        return f"Error: {str(e)}"


def ingest_file(
    filename: str,
    memory: VectorMemory,
    filename: str, memory, max_length: int = 4000, overlap: int = 200
) -> None:
    """
    Ingest a file by reading its content, splitting it into chunks with a specified
    maximum length and overlap, and adding the chunks to the memory storage.

    Args:
        filename: The name of the file to ingest
        memory: An object with an add() method to store the chunks in memory
    :param filename: The name of the file to ingest
    :param memory: An object with an add() method to store the chunks in memory
    :param max_length: The maximum length of each chunk, default is 4000
    :param overlap: The number of overlapping characters between chunks, default is 200
    """
    try:
        logger.info(f"Ingesting file {filename}")
        logger.info(f"Working with file {filename}")
        content = read_file(filename)
        content_length = len(content)
        logger.info(f"File length: {content_length} characters")

        # TODO: differentiate between different types of files
        file_memory = MemoryItem.from_text_file(content, filename)
        logger.debug(f"Created memory: {file_memory.dump(True)}")
        memory.add(file_memory)
        chunks = list(split_file(content, max_length=max_length, overlap=overlap))

        logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
    except Exception as err:
        logger.warn(f"Error while ingesting file '{filename}': {err}")
        num_chunks = len(chunks)
        for i, chunk in enumerate(chunks):
            logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
            memory_to_add = (
                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
            )

            memory.add(memory_to_add)

        logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
    except Exception as e:
        logger.info(f"Error while ingesting file '{filename}': {str(e)}")

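A minimal usage sketch for the chunk-based ingestion path above (the older signature with max_length/overlap). The SimpleMemory stub is hypothetical; any object exposing add() works:

class SimpleMemory:
    """Hypothetical stand-in for a memory backend with an add() method."""
    def __init__(self):
        self.items = []

    def add(self, item):
        self.items.append(item)

memory = SimpleMemory()
ingest_file("notes.txt", memory, max_length=4000, overlap=200)
# Each stored item carries its provenance, e.g.
# "Filename: notes.txt\nContent part#1/N: ..."
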
@command(
    "write_to_file",
    "Writes to a file",
    {
        "filename": {
            "type": "string",
            "description": "The name of the file to write to",
            "required": True,
        },
        "text": {
            "type": "string",
            "description": "The text to write to the file",
            "required": True,
        },
    },
    aliases=["write_file", "create_file"],
)
@sanitize_path_arg("filename")
def write_to_file(filename: str, text: str, agent: Agent) -> str:
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
def write_to_file(filename: str, text: str) -> str:
    """Write text to a file

    Args:
@@ -218,24 +139,23 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
    Returns:
        str: A message indicating success or failure
    """
    checksum = text_checksum(text)
    if is_duplicate_operation("write", filename, agent, checksum):
    if check_duplicate_operation("write", filename):
        return "Error: File has already been updated."
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        with open(filename, "w", encoding="utf-8") as f:
            f.write(text)
        log_operation("write", filename, agent, checksum)
        log_operation("write", filename)
        return "File written to successfully."
    except Exception as err:
        return f"Error: {err}"
    except Exception as e:
        return f"Error: {str(e)}"


@sanitize_path_arg("filename")
def append_to_file(
    filename: str, text: str, agent: Agent, should_log: bool = True
) -> str:
@command(
    "append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
)
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
    """Append text to a file

    Args:
@@ -249,33 +169,40 @@ def append_to_file(
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        with open(filename, "a", encoding="utf-8") as f:
        with open(filename, "a") as f:
            f.write(text)

        if should_log:
            with open(filename, "r", encoding="utf-8") as f:
                checksum = text_checksum(f.read())
            log_operation("append", filename, agent, checksum=checksum)
            log_operation("append", filename)

        return "Text appended successfully."
    except Exception as err:
        return f"Error: {err}"
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "list_files",
    "Lists Files in a Directory",
    {
        "directory": {
            "type": "string",
            "description": "The directory to list files in",
            "required": True,
        }
    },
)
@sanitize_path_arg("directory")
def list_files(directory: str, agent: Agent) -> list[str]:
    """lists files in a directory recursively
@command("delete_file", "Delete file", '"filename": "<filename>"')
def delete_file(filename: str) -> str:
    """Delete a file

    Args:
        filename (str): The name of the file to delete

    Returns:
        str: A message indicating success or failure
    """
    if check_duplicate_operation("delete", filename):
        return "Error: File has already been deleted."
    try:
        os.remove(filename)
        log_operation("delete", filename)
        return "File deleted successfully."
    except Exception as e:
        return f"Error: {str(e)}"


@command("search_files", "Search Files", '"directory": "<directory>"')
def search_files(directory: str) -> list[str]:
    """Search for files in a directory

    Args:
        directory (str): The directory to search in
@@ -290,8 +217,56 @@ def list_files(directory: str, agent: Agent) -> list[str]:
            if file.startswith("."):
                continue
            relative_path = os.path.relpath(
                os.path.join(root, file), agent.config.workspace_path
                os.path.join(root, file), CFG.workspace_path
            )
            found_files.append(relative_path)

    return found_files


@command(
    "download_file",
    "Download File",
    '"url": "<url>", "filename": "<filename>"',
    CFG.allow_downloads,
    "Error: You do not have user authorization to download files locally.",
)
def download_file(url, filename):
    """Downloads a file
    Args:
        url (str): URL of the file to download
        filename (str): Filename to save the file as
    """
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
        with Spinner(message) as spinner:
            session = requests.Session()
            retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
            adapter = HTTPAdapter(max_retries=retry)
            session.mount("http://", adapter)
            session.mount("https://", adapter)

            total_size = 0
            downloaded_size = 0

            with session.get(url, allow_redirects=True, stream=True) as r:
                r.raise_for_status()
                total_size = int(r.headers.get("Content-Length", 0))
                downloaded_size = 0

                with open(filename, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                        downloaded_size += len(chunk)

                        # Update the progress message
                        progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                        spinner.update_message(f"{message} {progress}")

            return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
    except requests.HTTPError as e:
        return f"Got an HTTP Error whilst trying to download file: {e}"
    except Exception as e:
        return "Error: " + str(e)
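The retry wiring in download_file generalizes well; here is the same pattern as a standalone sketch (the helper name is ours, and the retry/backoff values simply mirror the ones above):

import requests
from requests.adapters import HTTPAdapter, Retry

def make_retrying_session(retries: int = 3, backoff: float = 1.0) -> requests.Session:
    """Session that retries transient 5xx gateway errors with exponential backoff."""
    session = requests.Session()
    retry = Retry(total=retries, backoff_factor=backoff, status_forcelist=[502, 503, 504])
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session
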
@@ -1,161 +0,0 @@
import json
import os

import charset_normalizer
import docx
import markdown
import PyPDF2
import yaml
from bs4 import BeautifulSoup
from pylatexenc.latex2text import LatexNodes2Text

from autogpt import logs
from autogpt.logs import logger


class ParserStrategy:
    def read(self, file_path: str) -> str:
        raise NotImplementedError


# Basic text file reading
class TXTParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        charset_match = charset_normalizer.from_path(file_path).best()
        logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
        return str(charset_match)


# Reading text from binary file using pdf parser
class PDFParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        parser = PyPDF2.PdfReader(file_path)
        text = ""
        for page_idx in range(len(parser.pages)):
            text += parser.pages[page_idx].extract_text()
        return text


# Reading text from binary file using docs parser
class DOCXParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        doc_file = docx.Document(file_path)
        text = ""
        for para in doc_file.paragraphs:
            text += para.text
        return text


# Reading as dictionary and returning string format
class JSONParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            data = json.load(f)
            text = str(data)
        return text


class XMLParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            soup = BeautifulSoup(f, "xml")
            text = soup.get_text()
        return text


# Reading as dictionary and returning string format
class YAMLParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
            text = str(data)
        return text


class HTMLParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            soup = BeautifulSoup(f, "html.parser")
            text = soup.get_text()
        return text


class MarkdownParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            html = markdown.markdown(f.read())
            text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))
        return text


class LaTeXParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            latex = f.read()
        text = LatexNodes2Text().latex_to_text(latex)
        return text


class FileContext:
    def __init__(self, parser: ParserStrategy, logger: logs.Logger):
        self.parser = parser
        self.logger = logger

    def set_parser(self, parser: ParserStrategy) -> None:
        self.logger.debug(f"Setting Context Parser to {parser}")
        self.parser = parser

    def read_file(self, file_path) -> str:
        self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
        return self.parser.read(file_path)


extension_to_parser = {
    ".txt": TXTParser(),
    ".csv": TXTParser(),
    ".pdf": PDFParser(),
    ".docx": DOCXParser(),
    ".json": JSONParser(),
    ".xml": XMLParser(),
    ".yaml": YAMLParser(),
    ".yml": YAMLParser(),
    ".html": HTMLParser(),
    ".htm": HTMLParser(),
    ".xhtml": HTMLParser(),
    ".md": MarkdownParser(),
    ".markdown": MarkdownParser(),
    ".tex": LaTeXParser(),
}


def is_file_binary_fn(file_path: str):
    """Given a file path, load all its content and check whether any null bytes are present

    Args:
        file_path (str): The path of the file to check

    Returns:
        bool: True if the file is binary
    """
    with open(file_path, "rb") as f:
        file_data = f.read()
    if b"\x00" in file_data:
        return True
    return False


def read_textual_file(file_path: str, logger: logs.Logger) -> str:
    if not os.path.isfile(file_path):
        raise FileNotFoundError(
            f"read_file {file_path} failed: no such file or directory"
        )
    is_binary = is_file_binary_fn(file_path)
    file_extension = os.path.splitext(file_path)[1].lower()
    parser = extension_to_parser.get(file_extension)
    if not parser:
        if is_binary:
            raise ValueError(f"Unsupported binary file format: {file_extension}")
        # fallback to txt file parser (to support script and code files loading)
        parser = TXTParser()
    file_context = FileContext(parser, logger)
    return file_context.read_file(file_path)
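A quick sketch of how the strategy dispatch above plays out end to end (the file names are hypothetical, and `logger` is the module logger already imported here):

# ".md" resolves to MarkdownParser via extension_to_parser; an extension
# without a registered parser, like ".py", is non-binary and falls back
# to TXTParser, so source files still load as plain text.
print(read_textual_file("README.md", logger))
print(read_textual_file("script.py", logger))
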
@@ -1,38 +1,22 @@
"""Commands to perform Git operations"""

COMMAND_CATEGORY = "git_operations"
COMMAND_CATEGORY_TITLE = "Git Operations"

"""Git operations for autogpt"""
from git.repo import Repo

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.url_utils.validators import validate_url

from .decorators import sanitize_path_arg
CFG = Config()


@command(
    "clone_repository",
    "Clones a Repository",
    {
        "url": {
            "type": "string",
            "description": "The URL of the repository to clone",
            "required": True,
        },
        "clone_path": {
            "type": "string",
            "description": "The path to clone the repository to",
            "required": True,
        },
    },
    lambda config: bool(config.github_username and config.github_api_key),
    "Clone Repository",
    '"url": "<repository_url>", "clone_path": "<clone_path>"',
    CFG.github_username and CFG.github_api_key,
    "Configure github_username and github_api_key.",
)
@sanitize_path_arg("clone_path")
@validate_url
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
def clone_repository(url: str, clone_path: str) -> str:
    """Clone a GitHub repository locally.

    Args:
@@ -43,11 +27,7 @@ def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
        str: The result of the clone operation.
    """
    split_url = url.split("//")
    auth_repo_url = (
        f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
            split_url
        )
    )
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
    try:
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
        return f"""Cloned {url} to {clone_path}"""

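The `"//user:key@".join(...)` trick above splices credentials into the clone URL. Concretely, with placeholder credentials:

url = "https://github.com/Significant-Gravitas/AutoGPT.git"
# url.split("//") -> ["https:", "github.com/Significant-Gravitas/AutoGPT.git"]
# joining the two pieces with "//<user>:<key>@" yields:
# "https://ghuser:ghtoken@github.com/Significant-Gravitas/AutoGPT.git"
auth_repo_url = "//ghuser:ghtoken@".join(url.split("//"))
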
@@ -1,35 +1,18 @@
"""Commands to search the web with"""

"""Google search command for Autogpt."""
from __future__ import annotations

COMMAND_CATEGORY = "web_search"
COMMAND_CATEGORY_TITLE = "Web Search"

import json
import time
from itertools import islice

from duckduckgo_search import DDGS
from duckduckgo_search import ddg

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config

DUCKDUCKGO_MAX_ATTEMPTS = 3
CFG = Config()


@command(
    "web_search",
    "Searches the web",
    {
        "query": {
            "type": "string",
            "description": "The search query",
            "required": True,
        }
    },
    aliases=["search"],
)
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Return the results of a Google search

    Args:
@@ -40,20 +23,15 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
        str: The results of the search.
    """
    search_results = []
    attempts = 0
    if not query:
        return json.dumps(search_results)

    while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
        if not query:
            return json.dumps(search_results)
    results = ddg(query, max_results=num_results)
    if not results:
        return json.dumps(search_results)

        results = DDGS().text(query)
        search_results = list(islice(results, num_results))

        if search_results:
            break

        time.sleep(1)
        attempts += 1
    for j in results:
        search_results.append(j)

    results = json.dumps(search_results, ensure_ascii=False, indent=4)
    return safe_google_results(results)
@@ -62,19 +40,11 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:

@command(
    "google",
    "Google Search",
    {
        "query": {
            "type": "string",
            "description": "The search query",
            "required": True,
        }
    },
    lambda config: bool(config.google_api_key)
    and bool(config.google_custom_search_engine_id),
    "Configure google_api_key and custom_search_engine_id.",
    aliases=["search"],
    '"query": "<query>"',
    bool(CFG.google_api_key),
    "Configure google_api_key.",
)
def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a Google search using the official Google API

    Args:
@@ -90,8 +60,8 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:

    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = agent.config.google_api_key
        custom_search_engine_id = agent.config.google_custom_search_engine_id
        api_key = CFG.google_api_key
        custom_search_engine_id = CFG.custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)
@@ -130,7 +100,7 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:

def safe_google_results(results: str | list) -> str:
    """
    Return the results of a Google search in a safe format.
    Return the results of a google search in a safe format.

    Args:
        results (str | list): The search results.
@@ -140,7 +110,7 @@ def safe_google_results(results: str | list) -> str:
    """
    if isinstance(results, list):
        safe_message = json.dumps(
            [result.encode("utf-8", "ignore").decode("utf-8") for result in results]
            [result.encode("utf-8", "ignore") for result in results]
        )
    else:
        safe_message = results.encode("utf-8", "ignore").decode("utf-8")
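To see why the encode-with-ignore round trip above matters, here is a tiny sketch (the sample string is made up; "\udcff" is a lone surrogate of the kind that can appear in scraped text and breaks naive UTF-8 encoding):

s = "r\u00e9sultat \udcff"
safe = s.encode("utf-8", "ignore").decode("utf-8")
print(safe)  # "résultat " - the unencodable character is silently dropped
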
@@ -1,11 +1,5 @@
"""Commands to generate images based on text input"""

COMMAND_CATEGORY = "text_to_image"
COMMAND_CATEGORY_TITLE = "Text to Image"

""" Image Generation Module for AutoGPT."""
import io
import json
import time
import uuid
from base64 import b64decode

@@ -13,25 +7,15 @@ import openai
import requests
from PIL import Image

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()

@command(
    "generate_image",
    "Generates an Image",
    {
        "prompt": {
            "type": "string",
            "description": "The prompt used to generate the image",
            "required": True,
        },
    },
    lambda config: bool(config.image_provider),
    "Requires an image provider to be set.",
)
def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:

@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
    """Generate an image from a prompt.

    Args:
@@ -41,21 +25,21 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
    Returns:
        str: The filename of the image
    """
    filename = agent.config.workspace_path / f"{str(uuid.uuid4())}.jpg"
    filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"

    # DALL-E
    if agent.config.image_provider == "dalle":
        return generate_image_with_dalle(prompt, filename, size, agent)
    if CFG.image_provider == "dalle":
        return generate_image_with_dalle(prompt, filename, size)
    # HuggingFace
    elif agent.config.image_provider == "huggingface":
        return generate_image_with_hf(prompt, filename, agent)
    elif CFG.image_provider == "huggingface":
        return generate_image_with_hf(prompt, filename)
    # SD WebUI
    elif agent.config.image_provider == "sdwebui":
        return generate_image_with_sd_webui(prompt, filename, agent, size)
    elif CFG.image_provider == "sdwebui":
        return generate_image_with_sd_webui(prompt, filename, size)
    return "No Image Provider Set"


def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
def generate_image_with_hf(prompt: str, filename: str) -> str:
    """Generate an image with HuggingFace's API.

    Args:
@@ -65,57 +49,35 @@ def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
    Returns:
        str: The filename of the image
    """
    API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
    if agent.config.huggingface_api_token is None:
    API_URL = (
        f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
    )
    if CFG.huggingface_api_token is None:
        raise ValueError(
            "You need to set your Hugging Face API token in the config file."
        )
    headers = {
        "Authorization": f"Bearer {agent.config.huggingface_api_token}",
        "Authorization": f"Bearer {CFG.huggingface_api_token}",
        "X-Use-Cache": "false",
    }

    retry_count = 0
    while retry_count < 10:
        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
            },
        )
    response = requests.post(
        API_URL,
        headers=headers,
        json={
            "inputs": prompt,
        },
    )

        if response.ok:
            try:
                image = Image.open(io.BytesIO(response.content))
                logger.info(f"Image Generated for prompt:{prompt}")
                image.save(filename)
                return f"Saved to disk:{filename}"
            except Exception as e:
                logger.error(e)
                break
        else:
            try:
                error = json.loads(response.text)
                if "estimated_time" in error:
                    delay = error["estimated_time"]
                    logger.debug(response.text)
                    logger.info(f"Retrying in {delay}")
                    time.sleep(delay)
                else:
                    break
            except Exception as e:
                logger.error(e)
                break
    image = Image.open(io.BytesIO(response.content))
    logger.info(f"Image Generated for prompt:{prompt}")

        retry_count += 1
    image.save(filename)

    return "Error creating image."
    return f"Saved to disk:{filename}"


def generate_image_with_dalle(
    prompt: str, filename: str, size: int, agent: Agent
) -> str:
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
    """Generate an image with DALL-E.

    Args:
@@ -140,7 +102,7 @@ def generate_image_with_dalle(
        n=1,
        size=f"{size}x{size}",
        response_format="b64_json",
        api_key=agent.config.openai_api_key,
        api_key=CFG.openai_api_key,
    )

    logger.info(f"Image Generated for prompt:{prompt}")
@@ -156,7 +118,6 @@ def generate_image_with_dalle(
def generate_image_with_sd_webui(
    prompt: str,
    filename: str,
    agent: Agent,
    size: int = 512,
    negative_prompt: str = "",
    extra: dict = {},
@@ -173,19 +134,19 @@ def generate_image_with_sd_webui(
    """
    # Create a session and set the basic auth if needed
    s = requests.Session()
    if agent.config.sd_webui_auth:
        username, password = agent.config.sd_webui_auth.split(":")
    if CFG.sd_webui_auth:
        username, password = CFG.sd_webui_auth.split(":")
        s.auth = (username, password or "")

    # Generate the images
    response = requests.post(
        f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
        f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
        json={
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "sampler_index": "DDIM",
            "steps": 20,
            "config_scale": 7.0,
            "cfg_scale": 7.0,
            "width": size,
            "height": size,
            "n_iter": 1,

autogpt/commands/improve_code.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from __future__ import annotations

import json

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "improve_code",
    "Get Improved Code",
    '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from create
    chat completion api call.

    Parameters:
        suggestions (list): A list of suggestions around what needs to be improved.
        code (str): Code to be improved.
    Returns:
        A result string from create chat completion. Improved code in response.
    """

    function_string = (
        "def generate_improved_code(suggestions: list[str], code: str) -> str:"
    )
    args = [json.dumps(suggestions), code]
    description_string = (
        "Improves the provided code based on the suggestions"
        " provided, making no other changes."
    )

    return call_ai_function(function_string, args, description_string)
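The call_ai_function pattern above amounts to prompting the LLM with a function signature, a description, and stringified arguments, then treating the model's reply as the function's return value. Conceptually it expands to something like the following (a rough sketch of the prompt shape, not the library's exact template):

# Illustrative only: the rough prompt call_ai_function assembles
prompt = (
    f"You are now the following Python function: ```# {description_string}\n"
    f"{function_string}```\n\n"
    "Only respond with your `return` value.\n"
    f"Arguments: {args}"
)
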
@@ -1,37 +0,0 @@
"""Commands to control the internal state of the program"""

from __future__ import annotations

COMMAND_CATEGORY = "system"
COMMAND_CATEGORY_TITLE = "System"

from typing import NoReturn

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command
from autogpt.logs import logger


@command(
    "goals_accomplished",
    "Goals are accomplished and there is nothing left to do",
    {
        "reason": {
            "type": "string",
            "description": "A summary to the user of how the goals were accomplished",
            "required": True,
        }
    },
)
def task_complete(reason: str, agent: Agent) -> NoReturn:
    """
    A function that takes in a string and exits the program

    Parameters:
        reason (str): A summary to the user of how the goals were accomplished.
    Returns:
        Does not return; the process exits.
    """
    logger.info(title="Shutting down...\n", message=reason)
    quit()
autogpt/commands/twitter.py (new file, 41 lines)
@@ -0,0 +1,41 @@
"""A module that contains a command to send a tweet."""
import os

import tweepy

from autogpt.commands.command import command


@command(
    "send_tweet",
    "Send Tweet",
    '"tweet_text": "<tweet_text>"',
)
def send_tweet(tweet_text: str) -> str:
    """
    A function that takes in a string and posts it as a tweet.

    Args:
        tweet_text (str): Text to be tweeted.

    Returns:
        A result from sending the tweet.
    """
    consumer_key = os.environ.get("TW_CONSUMER_KEY")
    consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
    access_token = os.environ.get("TW_ACCESS_TOKEN")
    access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
    # Authenticate to Twitter
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    # Create API object
    api = tweepy.API(auth)

    # Send tweet
    try:
        api.update_status(tweet_text)
        return "Tweet sent successfully!"
    except tweepy.TweepyException as e:
        return f"Error sending tweet: {e.reason}"
autogpt/commands/web_playwright.py (new file, 82 lines)
@@ -0,0 +1,82 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations

from autogpt.logs import logger

try:
    from playwright.sync_api import sync_playwright
except ImportError:
    logger.info(
        "Playwright not installed. Please install it with 'pip install playwright' to use."
    )
from bs4 import BeautifulSoup

from autogpt.processing.html import extract_hyperlinks, format_hyperlinks


def scrape_text(url: str) -> str:
    """Scrape text from a webpage

    Args:
        url (str): The URL to scrape text from

    Returns:
        str: The scraped text
    """
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()

        try:
            page.goto(url)
            html_content = page.content()
            soup = BeautifulSoup(html_content, "html.parser")

            for script in soup(["script", "style"]):
                script.extract()

            text = soup.get_text()
            lines = (line.strip() for line in text.splitlines())
            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
            text = "\n".join(chunk for chunk in chunks if chunk)

        except Exception as e:
            text = f"Error: {str(e)}"

        finally:
            browser.close()

    return text


def scrape_links(url: str) -> str | list[str]:
    """Scrape links from a webpage

    Args:
        url (str): The URL to scrape links from

    Returns:
        Union[str, List[str]]: The scraped links
    """
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()

        try:
            page.goto(url)
            html_content = page.content()
            soup = BeautifulSoup(html_content, "html.parser")

            for script in soup(["script", "style"]):
                script.extract()

            hyperlinks = extract_hyperlinks(soup, url)
            formatted_links = format_hyperlinks(hyperlinks)

        except Exception as e:
            formatted_links = f"Error: {str(e)}"

        finally:
            browser.close()

    return formatted_links
autogpt/commands/web_requests.py (new file, 112 lines)
@@ -0,0 +1,112 @@
"""Browse a webpage and summarize it using the LLM model"""
from __future__ import annotations

import requests
from bs4 import BeautifulSoup
from requests import Response

from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

CFG = Config()

session = requests.Session()
session.headers.update({"User-Agent": CFG.user_agent})


@validate_url
def get_response(
    url: str, timeout: int = 10
) -> tuple[None, str] | tuple[Response, None]:
    """Get the response from a URL

    Args:
        url (str): The URL to get the response from
        timeout (int): The timeout for the HTTP request

    Returns:
        tuple[None, str] | tuple[Response, None]: The response and error message

    Raises:
        ValueError: If the URL is invalid
        requests.exceptions.RequestException: If the HTTP request fails
    """
    try:
        response = session.get(url, timeout=timeout)

        # Check if the response contains an HTTP error
        if response.status_code >= 400:
            return None, f"Error: HTTP {str(response.status_code)} error"

        return response, None
    except ValueError as ve:
        # Handle invalid URL format
        return None, f"Error: {str(ve)}"

    except requests.exceptions.RequestException as re:
        # Handle exceptions related to the HTTP request
        # (e.g., connection errors, timeouts, etc.)
        return None, f"Error: {str(re)}"


def scrape_text(url: str) -> str:
    """Scrape text from a webpage

    Args:
        url (str): The URL to scrape text from

    Returns:
        str: The scraped text
    """
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"

    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = "\n".join(chunk for chunk in chunks if chunk)

    return text


def scrape_links(url: str) -> str | list[str]:
    """Scrape links from a webpage

    Args:
        url (str): The URL to scrape links from

    Returns:
        str | list[str]: The scraped links
    """
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"
    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    hyperlinks = extract_hyperlinks(soup, url)

    return format_hyperlinks(hyperlinks)


def create_message(chunk, question):
    """Create a message for the user to summarize a chunk of text"""
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, summarize the text.",
    }
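For illustration, the message dict built by create_message above slots straight into a chat-completion call; a minimal sketch with placeholder chunk and question text:

messages = [
    create_message(
        "AutoGPT is an experimental open-source autonomous agent...",
        "What is AutoGPT?",
    )
]
# e.g. passed on to the era-appropriate OpenAI chat API:
# openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
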
@@ -1,65 +1,40 @@
|
||||
"""Commands for browsing a website"""
|
||||
|
||||
"""Selenium web scraping module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from autogpt.llm.utils.token_counter import count_string_tokens
|
||||
|
||||
COMMAND_CATEGORY = "web_browse"
|
||||
COMMAND_CATEGORY_TITLE = "Web Browsing"
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
from typing import Optional
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium import webdriver
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
from selenium.webdriver.chrome.options import Options as ChromeOptions
|
||||
from selenium.webdriver.chrome.service import Service as ChromeDriverService
|
||||
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.common.options import ArgOptions as BrowserOptions
|
||||
from selenium.webdriver.edge.options import Options as EdgeOptions
|
||||
from selenium.webdriver.edge.service import Service as EdgeDriverService
|
||||
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
|
||||
from selenium.webdriver.firefox.options import Options as FirefoxOptions
|
||||
from selenium.webdriver.firefox.service import Service as GeckoDriverService
|
||||
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from selenium.webdriver.safari.options import Options as SafariOptions
|
||||
from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
|
||||
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, get_memory
|
||||
import autogpt.processing.text as summary
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
TOKENS_TO_TRIGGER_SUMMARY = 50
|
||||
LINKS_TO_RETURN = 20
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command(
    "browse_website",
    "Browses a Website",
    {
        "url": {"type": "string", "description": "The URL to visit", "required": True},
        "question": {
            "type": "string",
            "description": "What you want to find on the website",
            "required": True,
        },
    },
    "Browse Website",
    '"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
)
@validate_url
def browse_website(url: str, question: str, agent: Agent) -> str:
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
    """Browse a website and return the answer and links to the user

    Args:
@@ -67,33 +42,28 @@ def browse_website(url: str, question: str, agent: Agent) -> str:
        question (str): The question asked by the user

    Returns:
        str: The answer and links to the user and the webdriver
        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
    """
    driver = None
    try:
        driver, text = scrape_text_with_selenium(url, agent)
        add_header(driver)
        if TOKENS_TO_TRIGGER_SUMMARY < count_string_tokens(text, agent.llm.name):
            text = summarize_memorize_webpage(url, text, question, agent, driver)

        links = scrape_links_with_selenium(driver, url)

        # Limit links to LINKS_TO_RETURN
        if len(links) > LINKS_TO_RETURN:
            links = links[:LINKS_TO_RETURN]

        return f"Answer gathered from website: {text}\n\nLinks: {links}"
        driver, text = scrape_text_with_selenium(url)
    except WebDriverException as e:
        # These errors are often quite long and include lots of context.
        # Just grab the first line.
        msg = e.msg.split("\n")[0]
        return f"Error: {msg}"
    finally:
        if driver:
            close_browser(driver)
        return f"Error: {msg}", None

    add_header(driver)
    summary_text = summary.summarize_text(url, text, question, driver)
    links = scrape_links_with_selenium(driver, url)

    # Limit links to 5
    if len(links) > 5:
        links = links[:5]
    close_browser(driver)
    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver

def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
    """Scrape text from a website using selenium

    Args:
@@ -104,49 +74,44 @@ def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
    """
    logging.getLogger("selenium").setLevel(logging.CRITICAL)

    options_available: dict[str, BrowserOptions] = {
    options_available = {
        "chrome": ChromeOptions,
        "edge": EdgeOptions,
        "firefox": FirefoxOptions,
        "safari": SafariOptions,
        "firefox": FirefoxOptions,
    }

    options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
    options = options_available[CFG.selenium_web_browser]()
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )

    if agent.config.selenium_web_browser == "firefox":
        if agent.config.selenium_headless:
    if CFG.selenium_web_browser == "firefox":
        if CFG.selenium_headless:
            options.headless = True
            options.add_argument("--disable-gpu")
        driver = FirefoxDriver(
            service=GeckoDriverService(GeckoDriverManager().install()), options=options
        driver = webdriver.Firefox(
            executable_path=GeckoDriverManager().install(), options=options
        )
    elif agent.config.selenium_web_browser == "edge":
        driver = EdgeDriver(
            service=EdgeDriverService(EdgeDriverManager().install()), options=options
        )
    elif agent.config.selenium_web_browser == "safari":
    elif CFG.selenium_web_browser == "safari":
        # Requires a bit more setup on the users end
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = SafariDriver(options=options)
        driver = webdriver.Safari(options=options)
    else:
        if platform == "linux" or platform == "linux2":
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--remote-debugging-port=9222")

        options.add_argument("--no-sandbox")
        if agent.config.selenium_headless:
        if CFG.selenium_headless:
            options.add_argument("--headless=new")
            options.add_argument("--disable-gpu")

        chromium_driver_path = Path("/usr/bin/chromedriver")

        driver = ChromeDriver(
            service=ChromeDriverService(str(chromium_driver_path))
        driver = webdriver.Chrome(
            executable_path=chromium_driver_path
            if chromium_driver_path.exists()
            else ChromeDriverService(ChromeDriverManager().install()),
            else ChromeDriverManager().install(),
            options=options,
        )
    driver.get(url)
@@ -210,40 +175,4 @@ def add_header(driver: WebDriver) -> None:
    Returns:
        None
    """
    try:
        with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
            overlay_script = overlay_file.read()
        driver.execute_script(overlay_script)
    except Exception as e:
        print(f"Error executing overlay.js: {e}")


def summarize_memorize_webpage(
    url: str,
    text: str,
    question: str,
    agent: Agent,
    driver: Optional[WebDriver] = None,
) -> str:
    """Summarize text using the OpenAI API

    Args:
        url (str): The url of the text
        text (str): The text to summarize
        question (str): The question to ask the model
        driver (WebDriver): The webdriver to use to scroll the page

    Returns:
        str: The summary of the text
    """
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    logger.info(f"Text length: {text_length} characters")

    memory = get_memory(agent.config)

    new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
    memory.add(new_memory)
    return new_memory.summary
    driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())

autogpt/commands/write_tests.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations

import json

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "write_tests",
    "Write Tests",
    '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create
    chat completion api call.

    Parameters:
        focus (list): A list of suggestions around what needs to be improved.
        code (str): Code for test cases to be generated against.
    Returns:
        A result string from create chat completion. Test cases for the submitted code
        in response.
    """

    function_string = (
        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
    )
    args = [code, json.dumps(focus)]
    description_string = (
        "Generates test cases for the existing code, focusing on"
        " specific areas if required."
    )

    return call_ai_function(function_string, args, description_string)
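
For orientation, a minimal, hypothetical invocation of this command once it is registered. The sample code string and focus areas below are illustrative only, and `call_ai_function` ultimately hits the chat completion API, so a configured OpenAI key is assumed:

    from autogpt.commands.write_tests import write_tests

    # illustrative inputs, not taken from the diff above
    sample_code = "def add(a: int, b: int) -> int:\n    return a + b"
    # the focus list is JSON-serialized (json.dumps) before being handed to the AI function
    print(write_tests(sample_code, focus=["edge cases", "invalid input"]))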

@@ -1,12 +1,11 @@
"""
This module contains the configuration classes for AutoGPT.
"""
from .ai_config import AIConfig
from .config import Config, ConfigBuilder, check_openai_api_key
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config, check_openai_api_key

__all__ = [
    "check_openai_api_key",
    "AIConfig",
    "Config",
    "ConfigBuilder",
]

@@ -1,18 +1,21 @@
"""A module that contains the AIConfig class object that contains the configuration"""
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations

import os
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from typing import Any, Optional, Type

import distro
import yaml

if TYPE_CHECKING:
    from autogpt.models.command_registry import CommandRegistry
    from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.generator import PromptGenerator

from .config import Config
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")


class AIConfig:
@@ -30,7 +33,7 @@ class AIConfig:
        self,
        ai_name: str = "",
        ai_role: str = "",
        ai_goals: list[str] = [],
        ai_goals: list | None = None,
        api_budget: float = 0.0,
    ) -> None:
        """
@@ -44,29 +47,33 @@ class AIConfig:
        Returns:
            None
        """
        if ai_goals is None:
            ai_goals = []
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals
        self.api_budget = api_budget
        self.prompt_generator: PromptGenerator | None = None
        self.command_registry: CommandRegistry | None = None
        self.prompt_generator = None
        self.command_registry = None

    @staticmethod
    def load(ai_settings_file: str | Path) -> "AIConfig":
    def load(config_file: str = SAVE_FILE) -> "AIConfig":
        """
        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
        loaded from yaml file if yaml file exists, else returns class with no parameters.
        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
        yaml file if yaml file exists,
        else returns class with no parameters.

        Parameters:
            ai_settings_file (Path): The path to the config yaml file.
            config_file (int): The path to the config yaml file.
            DEFAULT: "../ai_settings.yaml"

        Returns:
            cls (object): An instance of given cls object
        """

        try:
            with open(ai_settings_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
            with open(config_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

@@ -79,15 +86,16 @@ class AIConfig:
            for goal in config_params.get("ai_goals", [])
        ]
        api_budget = config_params.get("api_budget", 0.0)

        # type: Type[AIConfig]
        return AIConfig(ai_name, ai_role, ai_goals, api_budget)

    def save(self, ai_settings_file: str | Path) -> None:
    def save(self, config_file: str = SAVE_FILE) -> None:
        """
        Saves the class parameters to the specified file yaml file path as a yaml file.

        Parameters:
            ai_settings_file (Path): The path to the config yaml file.
            config_file(str): The path to the config yaml file.
            DEFAULT: "../ai_settings.yaml"

        Returns:
            None
@@ -99,11 +107,11 @@ class AIConfig:
            "ai_goals": self.ai_goals,
            "api_budget": self.api_budget,
        }
        with open(ai_settings_file, "w", encoding="utf-8") as file:
        with open(config_file, "w", encoding="utf-8") as file:
            yaml.dump(config, file, allow_unicode=True)

    def construct_full_prompt(
        self, config: Config, prompt_generator: Optional[PromptGenerator] = None
        self, prompt_generator: Optional[PromptGenerator] = None
    ) -> str:
        """
        Returns a prompt to the user with the class information in an organized fashion.
@@ -116,28 +124,29 @@ class AIConfig:
            including the ai_name, ai_role, ai_goals, and api_budget.
        """

        prompt_start = (
            "Your decisions must always be made independently without"
            " seeking user assistance. Play to your strengths as an LLM and pursue"
            " simple strategies with no legal complications."
            ""
        )

        from autogpt.config import Config
        from autogpt.prompts.prompt import build_default_prompt_generator

        prompt_generator = prompt_generator or self.prompt_generator
        cfg = Config()
        if prompt_generator is None:
            prompt_generator = build_default_prompt_generator(config)
            prompt_generator.command_registry = self.command_registry
            self.prompt_generator = prompt_generator

        for plugin in config.plugins:
            prompt_generator = build_default_prompt_generator()
        prompt_generator.goals = self.ai_goals
        prompt_generator.name = self.ai_name
        prompt_generator.role = self.ai_role
        prompt_generator.command_registry = self.command_registry
        for plugin in cfg.plugins:
            if not plugin.can_handle_post_prompt():
                continue
            prompt_generator = plugin.post_prompt(prompt_generator)

        # Construct full prompt
        full_prompt_parts = [
            f"You are {self.ai_name}, {self.ai_role.rstrip('.')}.",
            "Your decisions must always be made independently without seeking "
            "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
        ]

        if config.execute_local_commands:
        if cfg.execute_local_commands:
            # add OS info to prompt
            os_name = platform.system()
            os_info = (
@@ -146,30 +155,14 @@ class AIConfig:
                else distro.name(pretty=True)
            )

            full_prompt_parts.append(f"The OS you are running on is: {os_info}")
            prompt_start += f"\nThe OS you are running on is: {os_info}"

        additional_constraints: list[str] = []
        # Construct full prompt
        full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n"
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"
        if self.api_budget > 0.0:
            additional_constraints.append(
                f"It takes money to let you run. "
                f"Your API budget is ${self.api_budget:.3f}"
            )

        full_prompt_parts.append(
            prompt_generator.generate_prompt_string(
                additional_constraints=additional_constraints
            )
        )

        if self.ai_goals:
            full_prompt_parts.append(
                "\n".join(
                    [
                        "## Goals",
                        "For your task, you must fulfill the following goals:",
                        *[f"{i+1}. {goal}" for i, goal in enumerate(self.ai_goals)],
                    ]
                )
            )

        return "\n\n".join(full_prompt_parts).strip("\n")
            full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
        self.prompt_generator = prompt_generator
        full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
        return full_prompt

@@ -1,409 +1,282 @@
"""Configuration class to store the state of bools for different scripts access."""
from __future__ import annotations

import contextlib
import os
import re
from pathlib import Path
from typing import Any, Dict, Optional, Union
from typing import List

import openai
import yaml
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from pydantic import Field, validator

from autogpt.core.configuration.schema import Configurable, SystemSettings
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.plugins.plugins_config import PluginsConfig

AI_SETTINGS_FILE = "ai_settings.yaml"
AZURE_CONFIG_FILE = "azure.yaml"
PLUGINS_CONFIG_FILE = "plugins_config.yaml"
PROMPT_SETTINGS_FILE = "prompt_settings.yaml"

GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
from autogpt.singleton import Singleton


class Config(SystemSettings, arbitrary_types_allowed=True):
    name: str = "Auto-GPT configuration"
    description: str = "Default configuration for the Auto-GPT application."
    ########################
    # Application Settings #
    ########################
    skip_news: bool = False
    skip_reprompt: bool = False
    authorise_key: str = "y"
    exit_key: str = "n"
    debug_mode: bool = False
    plain_output: bool = False
    chat_messages_enabled: bool = True
    # TTS configuration
    speak_mode: bool = False
    text_to_speech_provider: str = "gtts"
    streamelements_voice: str = "Brian"
    elevenlabs_voice_id: Optional[str] = None
class Config(metaclass=Singleton):
    """
    Configuration class to store the state of bools for different scripts access.
    """

    ##########################
    # Agent Control Settings #
    ##########################
    # Paths
    ai_settings_file: str = AI_SETTINGS_FILE
    prompt_settings_file: str = PROMPT_SETTINGS_FILE
    workdir: Path = None
    workspace_path: Optional[Path] = None
    file_logger_path: Optional[Path] = None
    # Model configuration
    fast_llm: str = "gpt-3.5-turbo"
    smart_llm: str = "gpt-4-0314"
    temperature: float = 0
    openai_functions: bool = False
    embedding_model: str = "text-embedding-ada-002"
    browse_spacy_language_model: str = "en_core_web_sm"
    # Run loop configuration
    continuous_mode: bool = False
    continuous_limit: int = 0

    ##########
    # Memory #
    ##########
    memory_backend: str = "json_file"
    memory_index: str = "auto-gpt-memory"
    redis_host: str = "localhost"
    redis_port: int = 6379
    redis_password: str = ""
    wipe_redis_on_start: bool = True

    ############
    # Commands #
    ############
    # General
    disabled_command_categories: list[str] = Field(default_factory=list)
    # File ops
    restrict_to_workspace: bool = True
    allow_downloads: bool = False
    # Shell commands
    shell_command_control: str = "denylist"
    execute_local_commands: bool = False
    shell_denylist: list[str] = Field(default_factory=lambda: ["sudo", "su"])
    shell_allowlist: list[str] = Field(default_factory=list)
    # Text to image
    image_provider: Optional[str] = None
    huggingface_image_model: str = "CompVis/stable-diffusion-v1-4"
    sd_webui_url: Optional[str] = "http://localhost:7860"
    image_size: int = 256
    # Audio to text
    audio_to_text_provider: str = "huggingface"
    huggingface_audio_to_text_model: Optional[str] = None
    # Web browsing
    selenium_web_browser: str = "chrome"
    selenium_headless: bool = True
    user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

    ###################
    # Plugin Settings #
    ###################
    plugins_dir: str = "plugins"
    plugins_config_file: str = PLUGINS_CONFIG_FILE
    plugins_config: PluginsConfig = Field(
        default_factory=lambda: PluginsConfig(plugins={})
    )
    plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
    plugins_allowlist: list[str] = Field(default_factory=list)
    plugins_denylist: list[str] = Field(default_factory=list)
    plugins_openai: list[str] = Field(default_factory=list)

    ###############
    # Credentials #
    ###############
    # OpenAI
    openai_api_key: Optional[str] = None
    openai_api_type: Optional[str] = None
    openai_api_base: Optional[str] = None
    openai_api_version: Optional[str] = None
    openai_organization: Optional[str] = None
    use_azure: bool = False
    azure_config_file: Optional[str] = AZURE_CONFIG_FILE
    azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
    # Elevenlabs
    elevenlabs_api_key: Optional[str] = None
    # Github
    github_api_key: Optional[str] = None
    github_username: Optional[str] = None
    # Google
    google_api_key: Optional[str] = None
    google_custom_search_engine_id: Optional[str] = None
    # Huggingface
    huggingface_api_token: Optional[str] = None
    # Stable Diffusion
    sd_webui_auth: Optional[str] = None

    @validator("plugins", each_item=True)
    def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
        assert issubclass(
            p.__class__, AutoGPTPluginTemplate
        ), f"{p} does not subclass AutoGPTPluginTemplate"
        assert (
            p.__class__.__name__ != "AutoGPTPluginTemplate"
        ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
        return p

    @validator("openai_functions")
    def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
        if v:
            smart_llm = values["smart_llm"]
            assert OPEN_AI_CHAT_MODELS[smart_llm].supports_functions, (
                f"Model {smart_llm} does not support OpenAI Functions. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )

    def get_openai_credentials(self, model: str) -> dict[str, str]:
        credentials = {
            "api_key": self.openai_api_key,
            "api_base": self.openai_api_base,
            "organization": self.openai_organization,
        }
        if self.use_azure:
            azure_credentials = self.get_azure_credentials(model)
            credentials.update(azure_credentials)
        return credentials

    def get_azure_credentials(self, model: str) -> dict[str, str]:
        """Get the kwargs for the Azure API."""

        # Fix --gpt3only and --gpt4only in combination with Azure
        fast_llm = (
            self.fast_llm
            if not (
                self.fast_llm == self.smart_llm
                and self.fast_llm.startswith(GPT_4_MODEL)
            )
            else f"not_{self.fast_llm}"
        )
        smart_llm = (
            self.smart_llm
            if not (
                self.smart_llm == self.fast_llm
                and self.smart_llm.startswith(GPT_3_MODEL)
            )
            else f"not_{self.smart_llm}"
        )

        deployment_id = {
            fast_llm: self.azure_model_to_deployment_id_map.get(
                "fast_llm_deployment_id",
                self.azure_model_to_deployment_id_map.get(
                    "fast_llm_model_deployment_id"  # backwards compatibility
                ),
            ),
            smart_llm: self.azure_model_to_deployment_id_map.get(
                "smart_llm_deployment_id",
                self.azure_model_to_deployment_id_map.get(
                    "smart_llm_model_deployment_id"  # backwards compatibility
                ),
            ),
            self.embedding_model: self.azure_model_to_deployment_id_map.get(
                "embedding_model_deployment_id"
            ),
        }.get(model, None)

        kwargs = {
            "api_type": self.openai_api_type,
            "api_base": self.openai_api_base,
            "api_version": self.openai_api_version,
        }
        if model == self.embedding_model:
            kwargs["engine"] = deployment_id
        else:
            kwargs["deployment_id"] = deployment_id
        return kwargs


class ConfigBuilder(Configurable[Config]):
    default_settings = Config()

    @classmethod
    def build_config_from_env(cls, workdir: Path) -> Config:
    def __init__(self) -> None:
        """Initialize the Config class"""
        config_dict = {
            "workdir": workdir,
            "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
            "exit_key": os.getenv("EXIT_KEY"),
            "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
            "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
            "ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE),
            "prompt_settings_file": os.getenv(
                "PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE
            ),
            "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
            "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
            "embedding_model": os.getenv("EMBEDDING_MODEL"),
            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
            "openai_api_key": os.getenv("OPENAI_API_KEY"),
            "use_azure": os.getenv("USE_AZURE") == "True",
            "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
            == "True",
            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
            == "True",
            "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
            "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
            "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
            "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
            "github_api_key": os.getenv("GITHUB_API_KEY"),
            "github_username": os.getenv("GITHUB_USERNAME"),
            "google_api_key": os.getenv("GOOGLE_API_KEY"),
            "image_provider": os.getenv("IMAGE_PROVIDER"),
            "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"),
            "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"),
            "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"),
            "huggingface_audio_to_text_model": os.getenv(
                "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
            ),
            "sd_webui_url": os.getenv("SD_WEBUI_URL"),
            "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"),
            "selenium_web_browser": os.getenv("USE_WEB_BROWSER"),
            "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True",
            "user_agent": os.getenv("USER_AGENT"),
            "memory_backend": os.getenv("MEMORY_BACKEND"),
            "memory_index": os.getenv("MEMORY_INDEX"),
            "redis_host": os.getenv("REDIS_HOST"),
            "redis_password": os.getenv("REDIS_PASSWORD"),
            "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
            "plugins_dir": os.getenv("PLUGINS_DIR"),
            "plugins_config_file": os.getenv(
                "PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE
            ),
            "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
        }
        self.workspace_path = None
        self.file_logger_path = None

        config_dict["disabled_command_categories"] = _safe_split(
            os.getenv("DISABLED_COMMAND_CATEGORIES")
        self.debug_mode = False
        self.continuous_mode = False
        self.continuous_limit = 0
        self.speak_mode = False
        self.skip_reprompt = False
        self.allow_downloads = False
        self.skip_news = False

        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
        self.exit_key = os.getenv("EXIT_KEY", "n")
        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
        self.browse_spacy_language_model = os.getenv(
            "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
        )

        config_dict["shell_denylist"] = _safe_split(
            os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "0"))
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
        )
        config_dict["shell_allowlist"] = _safe_split(
            os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
        self.restrict_to_workspace = (
            os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
        )

        config_dict["google_custom_search_engine_id"] = os.getenv(
            "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
        if self.use_azure:
            self.load_azure_config()
            openai.api_type = self.openai_api_type
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")

        self.use_mac_os_tts = False
        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"

        self.use_brian_tts = False
        self.use_brian_tts = os.getenv("USE_BRIAN_TTS")

        self.github_api_key = os.getenv("GITHUB_API_KEY")
        self.github_username = os.getenv("GITHUB_USERNAME")

        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.weaviate_host = os.getenv("WEAVIATE_HOST")
        self.weaviate_port = os.getenv("WEAVIATE_PORT")
        self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
        self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
        self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
        self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
        self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
        self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
        self.use_weaviate_embedded = (
            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
        )

        config_dict["elevenlabs_voice_id"] = os.getenv(
            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
        # milvus or zilliz cloud configuration.
        self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
        self.milvus_username = os.getenv("MILVUS_USERNAME")
        self.milvus_password = os.getenv("MILVUS_PASSWORD")
        self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
        self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.image_size = int(os.getenv("IMAGE_SIZE", 256))
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
        self.huggingface_image_model = os.getenv(
            "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
        )
        if not config_dict["text_to_speech_provider"]:
            if os.getenv("USE_MAC_OS_TTS"):
                default_tts_provider = "macos"
            elif config_dict["elevenlabs_api_key"]:
                default_tts_provider = "elevenlabs"
            elif os.getenv("USE_BRIAN_TTS"):
                default_tts_provider = "streamelements"
            else:
                default_tts_provider = "gtts"
            config_dict["text_to_speech_provider"] = default_tts_provider
        self.huggingface_audio_to_text_model = os.getenv(
            "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
        )
        self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
        self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")

        config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
        config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
        # Selenium browser settings
        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
        self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"

        with contextlib.suppress(TypeError):
            config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
        with contextlib.suppress(TypeError):
            config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
        with contextlib.suppress(TypeError):
            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))

        if config_dict["use_azure"]:
            azure_config = cls.load_azure_config(
                workdir / config_dict["azure_config_file"]
            )
            config_dict.update(azure_config)

        elif os.getenv("OPENAI_API_BASE_URL"):
            config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")

        openai_organization = os.getenv("OPENAI_ORGANIZATION")
        if openai_organization is not None:
            config_dict["openai_organization"] = openai_organization

        config_dict_without_none_values = {
            k: v for k, v in config_dict.items() if v is not None
        }

        config = cls.build_agent_configuration(config_dict_without_none_values)

        # Set secondary config variables (that depend on other config variables)

        config.plugins_config = PluginsConfig.load_config(
            config.workdir / config.plugins_config_file,
            config.plugins_denylist,
            config.plugins_allowlist,
        # User agent header to use when making HTTP requests
        # Some websites might just completely deny request with an error code if
        # no user agent was found.
        self.user_agent = os.getenv(
            "USER_AGENT",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )

        return config
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
        # Note that indexes must be created on db 0 in redis, this is not configurable.

    @classmethod
    def load_azure_config(cls, config_file: Path) -> Dict[str, str]:
        self.memory_backend = os.getenv("MEMORY_BACKEND", "local")

        self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
        self.plugins: List[AutoGPTPluginTemplate] = []
        self.plugins_openai = []

        plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
        if plugins_allowlist:
            self.plugins_allowlist = plugins_allowlist.split(",")
        else:
            self.plugins_allowlist = []
        self.plugins_denylist = []

    def get_azure_deployment_id_for_model(self, model: str) -> str:
        """
        Returns the relevant deployment id for the model specified.

        Parameters:
            model(str): The model to map to the deployment id.

        Returns:
            The matching deployment id if found, otherwise an empty string.
        """
        if model == self.fast_llm_model:
            return self.azure_model_to_deployment_id_map[
                "fast_llm_model_deployment_id"
            ]  # type: ignore
        elif model == self.smart_llm_model:
            return self.azure_model_to_deployment_id_map[
                "smart_llm_model_deployment_id"
            ]  # type: ignore
        elif model == "text-embedding-ada-002":
            return self.azure_model_to_deployment_id_map[
                "embedding_model_deployment_id"
            ]  # type: ignore
        else:
            return ""

    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")

    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
        """
        Loads the configuration parameters for Azure hosting from the specified file
        path as a yaml file.

        Parameters:
            config_file (Path): The path to the config yaml file.
            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"

        Returns:
            Dict
            None
        """
        with open(config_file) as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
            config_params = yaml.load(file, Loader=yaml.FullLoader)
        self.openai_api_type = config_params.get("azure_api_type") or "azure"
        self.openai_api_base = config_params.get("azure_api_base") or ""
        self.openai_api_version = (
            config_params.get("azure_api_version") or "2023-03-15-preview"
        )
        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})

        return {
            "openai_api_type": config_params.get("azure_api_type", "azure"),
            "openai_api_base": config_params.get("azure_api_base", ""),
            "openai_api_version": config_params.get(
                "azure_api_version", "2023-03-15-preview"
            ),
            "azure_model_to_deployment_id_map": config_params.get(
                "azure_model_map", {}
            ),
        }
    def set_continuous_mode(self, value: bool) -> None:
        """Set the continuous mode value."""
        self.continuous_mode = value

    def set_continuous_limit(self, value: int) -> None:
        """Set the continuous limit value."""
        self.continuous_limit = value

    def set_speak_mode(self, value: bool) -> None:
        """Set the speak mode value."""
        self.speak_mode = value

    def set_fast_llm_model(self, value: str) -> None:
        """Set the fast LLM model value."""
        self.fast_llm_model = value

    def set_smart_llm_model(self, value: str) -> None:
        """Set the smart LLM model value."""
        self.smart_llm_model = value

    def set_fast_token_limit(self, value: int) -> None:
        """Set the fast token limit value."""
        self.fast_token_limit = value

    def set_smart_token_limit(self, value: int) -> None:
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_browse_chunk_max_length(self, value: int) -> None:
        """Set the browse_website command chunk max length value."""
        self.browse_chunk_max_length = value

    def set_openai_api_key(self, value: str) -> None:
        """Set the OpenAI API key value."""
        self.openai_api_key = value

    def set_elevenlabs_api_key(self, value: str) -> None:
        """Set the ElevenLabs API key value."""
        self.elevenlabs_api_key = value

    def set_elevenlabs_voice_1_id(self, value: str) -> None:
        """Set the ElevenLabs Voice 1 ID value."""
        self.elevenlabs_voice_1_id = value

    def set_elevenlabs_voice_2_id(self, value: str) -> None:
        """Set the ElevenLabs Voice 2 ID value."""
        self.elevenlabs_voice_2_id = value

    def set_google_api_key(self, value: str) -> None:
        """Set the Google API key value."""
        self.google_api_key = value

    def set_custom_search_engine_id(self, value: str) -> None:
        """Set the custom search engine id value."""
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str) -> None:
        """Set the Pinecone API key value."""
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str) -> None:
        """Set the Pinecone region value."""
        self.pinecone_region = value

    def set_debug_mode(self, value: bool) -> None:
        """Set the debug mode value."""
        self.debug_mode = value

    def set_plugins(self, value: list) -> None:
        """Set the plugins value."""
        self.plugins = value

    def set_temperature(self, value: int) -> None:
        """Set the temperature value."""
        self.temperature = value

    def set_memory_backend(self, name: str) -> None:
        """Set the memory backend name."""
        self.memory_backend = name


def check_openai_api_key(config: Config) -> None:
def check_openai_api_key() -> None:
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not config.openai_api_key:
    cfg = Config()
    if not cfg.openai_api_key:
        print(
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
            + Fore.RESET
        )
        print("You can get your key from https://platform.openai.com/account/api-keys")
        openai_api_key = input(
            "If you do have the key, please enter your OpenAI API key now:\n"
        )
        key_pattern = r"^sk-\w{48}"
        openai_api_key = openai_api_key.strip()
        if re.search(key_pattern, openai_api_key):
            os.environ["OPENAI_API_KEY"] = openai_api_key
            config.openai_api_key = openai_api_key
            print(
                Fore.GREEN
                + "OpenAI API key successfully set!\n"
                + Fore.YELLOW
                + "NOTE: The API key you've set is only temporary.\n"
                + "For longer sessions, please set it in .env file"
                + Fore.RESET
            )
        else:
            print("Invalid OpenAI API key!")
            exit(1)


def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
    """Split a string by a separator. Return an empty list if the string is None."""
    if s is None:
        return []
    return s.split(sep)
        exit(1)

@@ -1,47 +0,0 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.logs import logger


class PromptConfig:
    """
    A class object that contains the configuration information for the prompt, which will be used by the prompt generator

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        performance_evaluations (list): Performance evaluation list for the prompt generator.
    """

    def __init__(self, prompt_settings_file: str) -> None:
        """
        Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
        yaml file if yaml file exists,
        else raises error.

        Parameters:
            constraints (list): Constraints list for the prompt generator.
            resources (list): Resources list for the prompt generator.
            performance_evaluations (list): Performance evaluation list for the prompt generator.
        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        with open(prompt_settings_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)

        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.best_practices = config_params.get("best_practices", [])

@@ -1,25 +1,19 @@
"""Configurator module."""
from __future__ import annotations

from typing import Literal

import click
from colorama import Back, Fore, Style

from autogpt import utils
from autogpt.config import Config
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends
from autogpt.memory import get_supported_memory_backends

CFG = Config()


def create_config(
    config: Config,
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    prompt_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -36,7 +30,6 @@ def create_config(
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        prompt_settings_file (str): The path to the prompt_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
@@ -47,13 +40,13 @@ def create_config(
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skips_news (bool): Whether to suppress the output of latest news on startup
    """
    config.debug_mode = False
    config.continuous_mode = False
    config.speak_mode = False
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)

    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        config.debug_mode = True
        CFG.set_debug_mode(True)

    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
@@ -64,13 +57,13 @@ def create_config(
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True
        CFG.set_continuous_mode(True)

        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
            config.continuous_limit = continuous_limit
            CFG.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
@@ -78,26 +71,15 @@ def create_config(

    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        config.speak_mode = True
        CFG.set_speak_mode(True)

    # Set the default LLM models
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
        config.fast_llm = GPT_3_MODEL
        config.smart_llm = GPT_3_MODEL
    elif (
        gpt4only
        and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
        == GPT_4_MODEL
    ):
        CFG.set_smart_llm_model(CFG.fast_llm_model)

    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt4only should always use gpt-4, despite user's SMART_LLM config
        config.fast_llm = GPT_4_MODEL
        config.smart_llm = GPT_4_MODEL
    else:
        config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
        config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)
        CFG.set_fast_llm_model(CFG.smart_llm_model)

    if memory_type:
        supported_memory = get_supported_memory_backends()
@@ -108,13 +90,13 @@ def create_config(
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
        else:
            config.memory_backend = chosen
            CFG.memory_backend = chosen

    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        config.skip_reprompt = True
        CFG.skip_reprompt = True

    if ai_settings_file:
        file = ai_settings_file
@@ -127,24 +109,11 @@ def create_config(
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        config.ai_settings_file = file
        config.skip_reprompt = True

    if prompt_settings_file:
        file = prompt_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
        config.prompt_settings_file = file
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True

    if browser_name:
        config.selenium_web_browser = browser_name
        CFG.selenium_web_browser = browser_name

    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@@ -159,29 +128,7 @@ def create_config(
            Fore.YELLOW,
            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
        )
        config.allow_downloads = True
        CFG.allow_downloads = True

    if skip_news:
        config.skip_news = True


def check_model(
    model_name: str,
    model_type: Literal["smart_llm", "fast_llm"],
    config: Config,
) -> str:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    openai_credentials = config.get_openai_credentials(model_name)
    api_manager = ApiManager()
    models = api_manager.get_models(**openai_credentials)

    if any(model_name in m["id"] for m in models):
        return model_name

    logger.typewriter_log(
        "WARNING: ",
        Fore.YELLOW,
        f"You do not have access to {model_name}. Setting {model_type} to "
        f"gpt-3.5-turbo.",
    )
    return "gpt-3.5-turbo"
        CFG.skip_news = True

@@ -1,272 +0,0 @@
# Re-architecture Notes

## Key Documents

- [Planned Agent Workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
- [Original Architecture Diagram](https://www.figma.com/file/fwdj44tPR7ArYtnGGUKknw/Modular-Architecture?type=whiteboard&node-id=0-1) - This is sadly well out of date at this point.
- [Kanban](https://github.com/orgs/Significant-Gravitas/projects/1/views/1?filterQuery=label%3Are-arch)

## The Motivation

The `master` branch of Auto-GPT is an organically grown amalgamation of many thoughts
and ideas about agent-driven autonomous systems. It lacks clear abstraction boundaries,
has issues of global state and poorly encapsulated state, and is generally just hard to
make effective changes to. Meanwhile, research in the field is moving fast, so we want
to be able to try new ideas quickly.

## Initial Planning

A large group of maintainers and contributors met to discuss the architectural
challenges associated with the existing codebase. Many much-desired features (building
new user interfaces, enabling project-specific agents, enabling multi-agent systems)
are bottlenecked by the global state in the system. We discussed the tradeoffs between
an incremental system transition and a big breaking version change and decided to go
for the breaking version change. We justified this by saying:

- We can maintain, in essence, the same user experience as now even with a radical
  restructuring of the codebase
- Our developer audience is struggling to use the existing codebase to build
  applications and libraries of their own, so this breaking change will largely be
  welcome.

## Primary Goals

- Separate the AutoGPT application code from the library code.
- Remove global state from the system
- Allow for multiple agents per user (with facilities for running simultaneously)
- Create a serializable representation of an Agent
- Encapsulate the core systems in abstractions with clear boundaries.

## Secondary goals

- Use existing tools to ditch any unnecessary cruft in the codebase (document loading,
  json parsing, anything easier to replace than to port).
- Bring in the [core agent loop updates](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
  being developed simultaneously by @Pwuts

# The Agent Subsystems

## Configuration

We want a lot of things from a configuration system. We lean heavily on it in the
`master` branch to allow several parts of the system to communicate with each other.
[Recent work](https://github.com/Significant-Gravitas/Auto-GPT/pull/4737) has made it
so that the config is no longer a singleton object that is materialized from the import
state, but it's still treated as a
[god object](https://en.wikipedia.org/wiki/God_object) containing all information about
the system and _critically_ allowing any system to reference configuration information
about other parts of the system.

### What we want

- It should still be reasonable to collate the entire system configuration in a
  sensible way.
- The configuration should be validatable and validated.
- The system configuration should be a _serializable_ representation of an `Agent`.
- The configuration system should provide a clear (albeit very low-level) contract
  about user-configurable aspects of the system.
- The configuration should reasonably manage default values and user-provided overrides.
- The configuration system needs to handle credentials in a reasonable way.
- The configuration should be the representation of some amount of system state, like
  api budgets and resource usage. These aspects are recorded in the configuration and
  updated by the system itself.
- Agent systems should have encapsulated views of the configuration (see the sketch
  below). E.g. the memory system should know about memory configuration but nothing
  about command configuration.
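
A minimal sketch of what such encapsulated views could look like, using pydantic-style nested settings. The class and field names here are illustrative, not the actual re-arch API:

```python
from pydantic import BaseModel


class MemoryConfig(BaseModel):
    backend: str = "json_file"
    index_name: str = "auto-gpt-memory"


class CommandConfig(BaseModel):
    disabled_categories: list[str] = []


class SystemConfig(BaseModel):
    # The full, serializable system configuration collates the pieces...
    memory: MemoryConfig = MemoryConfig()
    commands: CommandConfig = CommandConfig()


def build_memory(config: MemoryConfig) -> None:
    # ...but each subsystem only ever receives its own slice, so the
    # memory system cannot reach into command configuration.
    print(f"initializing {config.backend} backend")


build_memory(SystemConfig().memory)
```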

## Workspace

There are two ways to think about the workspace:

- The workspace is a scratch space for an agent where it can store files, write code,
  and do pretty much whatever else it likes.
- The workspace is, at any given point in time, the single source of truth for what an
  agent is. It contains the serializable state (the configuration) as well as all
  other working state (stored files, databases, memories, custom code).

In the existing system there is **one** workspace. And because the workspace holds so
much agent state, that means a user can only work with one agent at a time.

## Memory

The memory system has been under extremely active development.
See [#3536](https://github.com/Significant-Gravitas/Auto-GPT/issues/3536) and
[#4208](https://github.com/Significant-Gravitas/Auto-GPT/pull/4208) for discussion and
work in the `master` branch. The TL;DR is
that we noticed a couple of months ago that the `Agent` performed **worse** with
permanent memory than without it. Since then the knowledge storage and retrieval
system has been [redesigned](https://whimsical.com/memory-system-8Ae6x6QkjDwQAUe9eVJ6w1)
and partially implemented in the `master` branch.

## Planning/Prompt-Engineering

The planning system is the system that translates user desires/agent intentions into
language model prompts. In the course of development, it has become pretty clear
that `Planning` is the wrong name for this system.

### What we want

- It should be incredibly obvious what's being passed to a language model, when it's
  being passed, and what the language model response is. The landscape of language
  model research is developing very rapidly, so building complex abstractions between
  users/contributors and the language model interactions is going to make it very
  difficult for us to nimbly respond to new research developments.
- Prompt-engineering should ideally be exposed in a parameterizable way to users.
- We should, where possible, leverage OpenAI's new
  [function calling api](https://openai.com/blog/function-calling-and-other-api-updates)
  to get outputs in a standard machine-readable format and avoid the deep pit of
  parsing json (and fixing unparsable json); see the sketch after this list.
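
As a rough illustration of the function-calling idea, using the OpenAI Python client of this era; the function schema below is invented for the example and is not a real Auto-GPT function:

```python
import openai

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-0613",
    messages=[{"role": "user", "content": "What should the agent do next?"}],
    functions=[
        {
            "name": "propose_next_command",  # illustrative schema only
            "description": "Propose the next command for the agent to execute.",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {"type": "string"},
                    "reasoning": {"type": "string"},
                },
                "required": ["command"],
            },
        }
    ],
    function_call="auto",
)
# The arguments come back as a JSON string in a dedicated field, rather
# than having to be fished out of free-form completion text.
arguments = response["choices"][0]["message"].get("function_call", {}).get("arguments")
```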

### Planning Strategies

The [new agent workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
has many, many interaction points for language models. We would really rather not
distribute prompt templates and raw strings all through the system. The re-arch solution
is to encapsulate language model interactions into planning strategies.
These strategies are defined by:

- The `LanguageModelClassification` they use (`FAST` or `SMART`)
- A function `build_prompt` that takes strategy-specific arguments and constructs a
  `LanguageModelPrompt` (a simple container for lists of messages and functions to
  pass to the language model)
- A function `parse_response_content` that parses the response content (a dict) into a
  better-formatted dict. Contracts here are intentionally loose and will tighten once
  we have at least one other language model provider. A sketch of a strategy follows
  this list.
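
A minimal sketch of the shape a strategy takes (built against the `PromptStrategy` base class and schema types that appear later in this diff; the naming prompt itself is a made-up example):

```
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)
from autogpt.core.resource.model_providers import (
    LanguageModelMessage,
    MessageRole,
)


class NameAgent(PromptStrategy):
    """Hypothetical strategy: ask the model to name a new agent."""

    @property
    def model_classification(self) -> LanguageModelClassification:
        # A fast/cheap model is plenty for this task.
        return LanguageModelClassification.FAST_MODEL

    def build_prompt(self, user_objective: str, **kwargs) -> LanguageModelPrompt:
        return LanguageModelPrompt(
            messages=[
                LanguageModelMessage(
                    role=MessageRole.USER,
                    content=f"Propose a short name for an agent with this objective: {user_objective}",
                ),
            ],
        )

    def parse_response_content(self, response_content: dict) -> dict:
        # Loose contract for now: just surface the raw completion text.
        return {"agent_name": response_content["content"]}
```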

## Resources

Resources are kinds of services we consume from external APIs. They may have associated
credentials and costs we need to manage. Management of those credentials is implemented
as manipulation of the resource configuration. We currently have two categories of
resources:

- AI/ML model providers (including language model providers and embedding model providers, e.g. OpenAI)
- Memory providers (e.g. Pinecone, Weaviate, ChromaDB, etc.)

### What we want

- Resource abstractions should provide a common interface to different service providers
  for a particular kind of service.
- Resource abstractions should manipulate the configuration to manage their credentials
  and budget/accounting.
- Resource abstractions should be composable over an API (e.g. I should be able to make
  an OpenAI provider that is both a LanguageModelProvider and an EmbeddingModelProvider
  and use it wherever I need those services); see the sketch after this list.
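
A minimal sketch of that composability (`LanguageModelProvider` appears later in this diff; the `EmbeddingModelProvider` interface is assumed here purely to illustrate the intent):

```
import abc

from autogpt.core.resource.model_providers import LanguageModelProvider


class EmbeddingModelProvider(abc.ABC):
    """Assumed interface: the embedding counterpart to LanguageModelProvider."""

    @abc.abstractmethod
    async def create_embedding(self, text: str, **kwargs) -> list[float]:
        ...


class OpenAIProvider(LanguageModelProvider, EmbeddingModelProvider):
    """One concrete provider satisfying both resource interfaces.

    A single instance can be handed to any system that needs either
    service, e.g. the planner and the memory system can share it.
    """
```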

## Abilities

Along with planning and memory usage, abilities are one of the major augmentations of
augmented language models. They allow us to expand the scope of what language models
can do by hooking them up to code they can execute to obtain new knowledge or influence
the world.

### What we want

- Abilities should have an extremely clear interface that users can write to.
- Abilities should have an extremely clear interface that a language model can
  understand.
- Abilities should be declarative about their dependencies so the system can inject them.
- Abilities should be executable (where sensible) in an async run loop.
- Abilities should not have side effects unless those side effects are clear in
  their representation to an agent (e.g. the BrowseWeb ability shouldn't write a file,
  but the WriteFile ability can). A sketch of a side-effect-free ability follows this
  list.
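
A minimal sketch of a concrete ability against the `Ability` base class that appears later in this diff (the `CurrentTime` ability and its `storage_route` are made up for illustration):

```
import datetime
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat


class CurrentTime(Ability):
    """Hypothetical ability: report the current UTC time. No side effects."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="my_plugin.abilities.CurrentTime",  # hypothetical route
        ),
    )

    def __init__(self, logger: logging.Logger, configuration: AbilityConfiguration):
        self._logger = logger
        self._configuration = configuration

    @classmethod
    def description(cls) -> str:
        return "Get the current UTC time as an ISO-8601 string."

    @classmethod
    def arguments(cls) -> dict:
        return {}  # no arguments needed

    async def __call__(self) -> AbilityResult:
        return AbilityResult(
            ability_name=self.name(),
            ability_args={},
            success=True,
            message=datetime.datetime.utcnow().isoformat(),
        )
```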

## Plugins

Users want to add lots of features that we don't want to support as first-party.
Our solution to this is a plugin system to allow users to plug in their functionality or
to construct their agent from a public plugin marketplace. Our primary concern in the
re-arch is to build a stateless plugin service interface and a simple implementation
that can load plugins from installed packages or from zip files. Future efforts will
expand this system to allow plugins to load from a marketplace or some other kind
of service.

### What is a Plugin

"Plugin" is a bit of a garbage term. It refers to a number of things.

- New commands for the agent to execute. This is the most common usage.
- Replacements for entire subsystems like memory or language model providers.
- Application plugins that do things like send emails or communicate via WhatsApp.
- The repositories contributors create that may themselves have multiple plugins in them.

### Usage in the existing system

The current plugin system is _hook-based_. This means plugins don't correspond to
kinds of objects in the system, but rather to times in the system at which we defer
execution to them. The main advantage of this setup is that user code can hijack
pretty much any behavior of the agent by injecting code that supersedes the normal
agent execution. The disadvantages to this approach are numerous:

- We have absolutely no mechanisms to enforce any security measures because the threat
  surface is everything.
- We cannot reason about agent behavior in a cohesive way because control flow can be
  ceded to user code at pretty much any point and arbitrarily change or break the
  agent behavior.
- The interface for designing a plugin is kind of terrible and difficult to standardize.
- The hook-based implementation means we couple ourselves to a particular flow of
  control (or otherwise risk breaking plugin behavior). E.g. many of the hook targets
  in the [old workflow](https://whimsical.com/agent-workflow-VAzeKcup3SR7awpNZJKTyK)
  are not present or mean something entirely different in the
  [new workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ).
- Etc.

### What we want

- A concrete definition of a plugin that is narrow enough in scope that we can define
  it well and reason about how it will work in the system.
- A set of abstractions that let us define a plugin by its storage format and location
  (see the `PluginLocation` sketch after this list).
- A service interface that knows how to parse the plugin abstractions and turn them
  into concrete classes and objects.
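
A minimal sketch of those abstractions, using the `PluginLocation` and `SimplePluginService` types that appear later in this diff:

```
from autogpt.core.plugin.simple import (
    PluginLocation,
    PluginStorageFormat,
    SimplePluginService,
)

# A plugin is defined by how it is stored and where it lives.
location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
)

# The service resolves a location into a concrete class.
ability_class = SimplePluginService.get_plugin(location)
```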

## Some Notes on how and why we'll use OO in this project

First and foremost, Python itself is an object-oriented language. Its
underlying [data model](https://docs.python.org/3/reference/datamodel.html) is built
with object-oriented programming in mind. It offers useful tools like abstract base
classes to communicate interfaces to developers who want to, e.g., write plugins, or
help work on implementations. If we were working in a different language that offered
different tools, we'd use a different paradigm.

While many things are classes in the re-arch, they are not classes in the same way.
There are three kinds of things (roughly) that are written as classes in the re-arch:

1. **Configuration**: Auto-GPT has *a lot* of configuration. This configuration
   is *data* and we use **[Pydantic](https://docs.pydantic.dev/latest/)** to manage it, as
   Pydantic is basically the industry standard for this stuff. It provides runtime validation
   for all the configuration and allows us to easily serialize configuration both to basic
   Python types (dicts, lists, and primitives) and to JSON, which is
   important for us being able to put representations of agents
   [on the wire](https://en.wikipedia.org/wiki/Wire_protocol) for web applications and
   agent-to-agent communication. *These are essentially
   [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) rather than
   traditional classes.* (A small example follows this list.)
2. **Internal Data**: Very similar to configuration, Auto-GPT passes around boatloads
   of internal data. We are interacting with language models and language model APIs,
   which means we are handling lots of *structured* but *raw* text. Here we also
   leverage **pydantic** to both *parse* and *validate* the internal data, and to
   give us concrete types that static type checkers can validate against to
   discover problems before they show up as bugs at runtime. *These are
   essentially [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language))
   rather than traditional classes.*
3. **System Interfaces**: This is our primary traditional use of classes in the
   re-arch. We have a bunch of systems. We want many of those systems to have
   alternative implementations (e.g. via plugins). We use abstract base classes to
   define interfaces to communicate with people who might want to provide those
   plugins. We provide a single concrete implementation of most of those systems as a
   subclass of the interface. This should not be controversial.
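
A small example of such a configuration struct, using the `SystemConfiguration` base and `UserConfigurable` field marker that appear later in this diff (the budget fields are made up for illustration):

```
from autogpt.core.configuration import SystemConfiguration, UserConfigurable


class BudgetConfiguration(SystemConfiguration):
    """Hypothetical config struct: pure data, validated at runtime."""

    total_budget: float = UserConfigurable(10.0)
    warning_threshold: float = 0.8


config = BudgetConfiguration(total_budget=25.0)
print(config.dict())             # basic python types
print(config.json())             # JSON, ready to put on the wire
print(config.get_user_config())  # only the user-configurable fields
```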

The approach is consistent with
[prior](https://github.com/Significant-Gravitas/Auto-GPT/issues/2458)
[work](https://github.com/Significant-Gravitas/Auto-GPT/pull/2442) done by other
maintainers in this direction.

From an organizational standpoint, OO programming is by far the most popular programming
paradigm (especially for Python). It's the one most often taught in programming classes
and the one with the most available online training for people interested in
contributing.

Finally, and importantly, we scoped the plan and initial design of the re-arch as a
large group of maintainers and collaborators early on. The design we chose is consistent
with that scoping, and no one offered alternatives.

@@ -1,94 +0,0 @@
# Auto-GPT Core

This subpackage contains the ongoing work for the
[Auto-GPT Re-arch](https://github.com/Significant-Gravitas/Auto-GPT/issues/4770). It is
a work in progress and is not yet feature complete. In particular, it does not yet
have many of the Auto-GPT commands implemented and is pending ongoing work to
[re-incorporate vector-based memory and knowledge retrieval](https://github.com/Significant-Gravitas/Auto-GPT/issues/3536).

## [Overview](ARCHITECTURE_NOTES.md)

The Auto-GPT Re-arch is a re-implementation of the Auto-GPT agent that is designed to be more modular,
more extensible, and more maintainable than the original Auto-GPT agent. It is also designed to be
more accessible to new developers and easier to contribute to. The re-arch is a work in progress
and is not yet feature complete. It is also not yet ready for production use.

## Running the Re-arch Code

There are two client applications for Auto-GPT included.

Unlike the main version of Auto-GPT, the re-arch requires you to actually install Auto-GPT in your Python
environment to run this application. To do so, run

```
pip install -e REPOSITORY_ROOT
```

where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. The `REPOSITORY_ROOT`
is the directory that contains the `setup.py` file and is the main, top-level directory of the repository
when you clone it.
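
For example, from inside the cloned repository root this is just:

```
pip install -e .
```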

## CLI Application

:star2: **This is the reference application I'm working with for now** :star2:

The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)

You'll then need a settings file. Run

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
```

This will write a file called `default_agent_settings.yml` with all the user-modifiable
configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory
in your user directory if it doesn't exist). Your user directory is located in different places
depending on your operating system:

- On Linux, it's `/home/USERNAME`
- On Windows, it's `C:\Users\USERNAME`
- On Mac, it's `/Users/USERNAME`

At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run
the model.
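
In the generated YAML that key lives in a nested mapping, so the edit looks roughly like this (surrounding keys elided; check the file `make-settings` actually wrote for the exact layout):

```
openai:
  credentials:
    api_key: "sk-..."
```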

You can then run Auto-GPT with

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
```

to launch the interaction loop.

### CLI Web App

:warning: I am not actively developing this application. I am primarily working with the traditional CLI app
described above. It is a very good place to jump in if you have web application design experience and are
looking to get involved in the re-arch.

The second app is still a CLI, but it sets up a local webserver that the client application talks to
rather than invoking calls to the Agent library code directly. This application is essentially a sketch
at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py)
- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py)

To run, you still need to generate a default configuration. You can do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
```

It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key.

To run, do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
```

This will launch a webserver and then start the client CLI application to communicate with it.

@@ -1,4 +0,0 @@
"""The ability system provides a way to extend the functionality of the AI agent."""
from autogpt.core.ability.base import Ability, AbilityRegistry
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry

@@ -1,92 +0,0 @@
import abc
from pprint import pformat
from typing import Any, ClassVar

import inflection
from pydantic import Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.simple import LanguageModelConfiguration


class AbilityConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    # Imported here rather than at module level to avoid a circular import
    # between the ability and plugin subsystems.
    from autogpt.core.plugin.base import PluginLocation

    location: PluginLocation
    packages_required: list[str] = Field(default_factory=list)
    language_model_required: LanguageModelConfiguration = None
    memory_provider_required: bool = False
    workspace_required: bool = False


class Ability(abc.ABC):
    """A class representing an agent ability."""

    default_configuration: ClassVar[AbilityConfiguration]

    @classmethod
    def name(cls) -> str:
        """The name of the ability."""
        return inflection.underscore(cls.__name__)

    @classmethod
    @abc.abstractmethod
    def description(cls) -> str:
        """A detailed description of what the ability does."""
        ...

    @classmethod
    @abc.abstractmethod
    def arguments(cls) -> dict:
        """A dict of arguments in standard json schema format."""
        ...

    @classmethod
    def required_arguments(cls) -> list[str]:
        """A list of required arguments."""
        return []

    @abc.abstractmethod
    async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult:
        ...

    def __str__(self) -> str:
        return pformat(self.dump())

    def dump(self) -> dict:
        return {
            "name": self.name(),
            "description": self.description(),
            "parameters": {
                "type": "object",
                "properties": self.arguments(),
                "required": self.required_arguments(),
            },
        }


class AbilityRegistry(abc.ABC):
    @abc.abstractmethod
    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ...

    @abc.abstractmethod
    def list_abilities(self) -> list[str]:
        ...

    @abc.abstractmethod
    def dump_abilities(self) -> list[dict]:
        ...

    @abc.abstractmethod
    def get_ability(self, ability_name: str) -> Ability:
        ...

    @abc.abstractmethod
    async def perform(self, ability_name: str, **kwargs: Any) -> AbilityResult:
        ...

@@ -1,6 +0,0 @@
from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel

BUILTIN_ABILITIES = {
    QueryLanguageModel.name(): QueryLanguageModel,
}

@@ -1,102 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat


class CreateNewAbility(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.CreateNewAbility",
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
    ):
        self._logger = logger
        self._configuration = configuration

    @classmethod
    def description(cls) -> str:
        return "Create a new ability by writing python code."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "ability_name": {
                "type": "string",
                "description": "A meaningful and concise name for the new ability.",
            },
            "description": {
                "type": "string",
                "description": "A detailed description of the ability and its uses, including any limitations.",
            },
            "arguments": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "The name of the argument.",
                        },
                        "type": {
                            "type": "string",
                            "description": "The type of the argument. Must be a standard json schema type.",
                        },
                        "description": {
                            "type": "string",
                            "description": "A detailed description of the argument and its uses.",
                        },
                    },
                },
                "description": "A list of arguments that the ability will accept.",
            },
            "required_arguments": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The names of the arguments that are required.",
                },
                "description": "A list of the names of the arguments that are required.",
            },
            "package_requirements": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The name of the Python package that is required to execute the ability.",
                },
                "description": "A list of the names of the Python packages that are required to execute the ability.",
            },
            "code": {
                "type": "string",
                "description": "The Python code that will be executed when the ability is called.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return [
            "ability_name",
            "description",
            "arguments",
            "required_arguments",
            "package_requirements",
            "code",
        ]

    async def __call__(
        self,
        ability_name: str,
        description: str,
        arguments: list[dict],
        required_arguments: list[str],
        package_requirements: list[str],
        code: str,
    ) -> AbilityResult:
        raise NotImplementedError

@@ -1,167 +0,0 @@
import logging
import os

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.workspace import Workspace


class ReadFile(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.ReadFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @classmethod
    def description(cls) -> str:
        return "Read and parse all text from a file."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to read.",
            },
        }

    def _check_preconditions(self, filename: str) -> AbilityResult | None:
        message = ""
        try:
            # Probe for charset_normalizer, which file parsing relies on.
            import charset_normalizer  # noqa: F401
        except ImportError:
            message = "Package charset_normalizer is not installed."

        try:
            file_path = self._workspace.get_path(filename)
            if not file_path.exists():
                message = f"File {filename} does not exist."
            if not file_path.is_file():
                message = f"{filename} is not a file."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename},
                success=False,
                message=message,
                new_knowledge=None,
            )

    async def __call__(self, filename: str) -> AbilityResult:
        if result := self._check_preconditions(filename):
            return result

        from unstructured.partition.auto import partition

        file_path = self._workspace.get_path(filename)
        try:
            elements = partition(str(file_path))
            # TODO: Lots of other potentially useful information is available
            #   in the partitioned file. Consider returning more of it.
            new_knowledge = Knowledge(
                content="\n\n".join([element.text for element in elements]),
                content_type=ContentType.TEXT,
                content_metadata={"filename": filename},
            )
            success = True
            message = f"File {file_path} read successfully."
        except IOError as e:
            new_knowledge = None
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
            new_knowledge=new_knowledge,
        )


class WriteFile(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.WriteFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @classmethod
    def description(cls) -> str:
        return "Write text to a file."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to write.",
            },
            "contents": {
                "type": "string",
                "description": "The contents of the file to write.",
            },
        }

    def _check_preconditions(
        self, filename: str, contents: str
    ) -> AbilityResult | None:
        message = ""
        try:
            file_path = self._workspace.get_path(filename)
            if file_path.exists():
                message = f"File {filename} already exists."
            if not contents:
                message = f"File {filename} was not given any content."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename, "contents": contents},
                success=False,
                message=message,
                new_knowledge=None,
            )

    async def __call__(self, filename: str, contents: str) -> AbilityResult:
        if result := self._check_preconditions(filename, contents):
            return result

        file_path = self._workspace.get_path(filename)
        try:
            directory = os.path.dirname(file_path)
            os.makedirs(directory, exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(contents)
            success = True
            message = f"File {file_path} written successfully."
        except IOError as e:
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
        )

@@ -1,78 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.planning.simple import LanguageModelConfiguration
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.resource.model_providers import (
    LanguageModelMessage,
    LanguageModelProvider,
    MessageRole,
    ModelProviderName,
    OpenAIModelName,
)


class QueryLanguageModel(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
        ),
        language_model_required=LanguageModelConfiguration(
            model_name=OpenAIModelName.GPT3,
            provider_name=ModelProviderName.OPENAI,
            temperature=0.9,
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
        language_model_provider: LanguageModelProvider,
    ):
        self._logger = logger
        self._configuration = configuration
        self._language_model_provider = language_model_provider

    @classmethod
    def description(cls) -> str:
        return "Query a language model. A query should be a question and any relevant context."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "query": {
                "type": "string",
                "description": "A query for a language model. A query should contain a question and any relevant context.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return ["query"]

    async def __call__(self, query: str) -> AbilityResult:
        messages = [
            LanguageModelMessage(
                content=query,
                role=MessageRole.USER,
            ),
        ]
        model_response = await self._language_model_provider.create_language_completion(
            model_prompt=messages,
            functions=[],
            model_name=self._configuration.language_model_required.model_name,
            completion_parser=self._parse_response,
        )
        return AbilityResult(
            ability_name=self.name(),
            ability_args={"query": query},
            success=True,
            message=model_response.content["content"],
        )

    @staticmethod
    def _parse_response(response_content: dict) -> dict:
        return {"content": response_content["content"]}

@@ -1,30 +0,0 @@
import enum
from typing import Any

from pydantic import BaseModel


class ContentType(str, enum.Enum):
    # TBD what these actually are.
    TEXT = "text"
    CODE = "code"


class Knowledge(BaseModel):
    content: str
    content_type: ContentType
    content_metadata: dict[str, Any]


class AbilityResult(BaseModel):
    """The AbilityResult is a standard response struct for an ability."""

    ability_name: str
    ability_args: dict[str, str]
    success: bool
    message: str
    new_knowledge: Knowledge = None

    def summary(self) -> str:
        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
        return f"{self.ability_name}({kwargs}): {self.message}"

@@ -1,96 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
from autogpt.core.ability.builtins import BUILTIN_ABILITIES
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.plugin.simple import SimplePluginService
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
)
from autogpt.core.workspace.base import Workspace


class AbilityRegistryConfiguration(SystemConfiguration):
    """Configuration for the AbilityRegistry subsystem."""

    abilities: dict[str, AbilityConfiguration]


class AbilityRegistrySettings(SystemSettings):
    configuration: AbilityRegistryConfiguration


class SimpleAbilityRegistry(AbilityRegistry, Configurable):
    default_settings = AbilityRegistrySettings(
        name="simple_ability_registry",
        description="A simple ability registry.",
        configuration=AbilityRegistryConfiguration(
            abilities={
                ability_name: ability.default_configuration
                for ability_name, ability in BUILTIN_ABILITIES.items()
            },
        ),
    )

    def __init__(
        self,
        settings: AbilityRegistrySettings,
        logger: logging.Logger,
        memory: Memory,
        workspace: Workspace,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._memory = memory
        self._workspace = workspace
        self._model_providers = model_providers
        self._abilities = []
        for (
            ability_name,
            ability_configuration,
        ) in self._configuration.abilities.items():
            self.register_ability(ability_name, ability_configuration)

    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ability_class = SimplePluginService.get_plugin(ability_configuration.location)
        ability_args = {
            "logger": self._logger.getChild(ability_name),
            "configuration": ability_configuration,
        }
        if ability_configuration.packages_required:
            # TODO: Check packages are installed and maybe install them.
            pass
        if ability_configuration.memory_provider_required:
            ability_args["memory"] = self._memory
        if ability_configuration.workspace_required:
            ability_args["workspace"] = self._workspace
        if ability_configuration.language_model_required:
            ability_args["language_model_provider"] = self._model_providers[
                ability_configuration.language_model_required.provider_name
            ]
        ability = ability_class(**ability_args)
        self._abilities.append(ability)

    def list_abilities(self) -> list[str]:
        return [
            f"{ability.name()}: {ability.description()}" for ability in self._abilities
        ]

    def dump_abilities(self) -> list[dict]:
        return [ability.dump() for ability in self._abilities]

    def get_ability(self, ability_name: str) -> Ability:
        for ability in self._abilities:
            if ability.name() == ability_name:
                return ability
        raise ValueError(f"Ability '{ability_name}' not found.")

    async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        ability = self.get_ability(ability_name)
        return await ability(**kwargs)

@@ -1,3 +0,0 @@
"""The Agent is an autonomous entity guided by an LLM provider."""
from autogpt.core.agent.base import Agent
from autogpt.core.agent.simple import AgentSettings, SimpleAgent

@@ -1,26 +0,0 @@
import abc
import logging
from pathlib import Path


class Agent(abc.ABC):
    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        ...

    @classmethod
    @abc.abstractmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "Agent":
        ...

    @abc.abstractmethod
    async def determine_next_ability(self, *args, **kwargs):
        ...

    @abc.abstractmethod
    def __repr__(self):
        ...

@@ -1,391 +0,0 @@
import logging
from datetime import datetime
from pathlib import Path
from typing import Any

from pydantic import BaseModel

from autogpt.core.ability import (
    AbilityRegistrySettings,
    AbilityResult,
    SimpleAbilityRegistry,
)
from autogpt.core.agent.base import Agent
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory import MemorySettings, SimpleMemory
from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
from autogpt.core.plugin.simple import (
    PluginLocation,
    PluginStorageFormat,
    SimplePluginService,
)
from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings


class AgentSystems(SystemConfiguration):
    ability_registry: PluginLocation
    memory: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation


class AgentConfiguration(SystemConfiguration):
    cycle_count: int
    max_task_cycle_count: int
    creation_time: str
    name: str
    role: str
    goals: list[str]
    systems: AgentSystems


class AgentSystemSettings(SystemSettings):
    configuration: AgentConfiguration


class AgentSettings(BaseModel):
    agent: AgentSystemSettings
    ability_registry: AbilityRegistrySettings
    memory: MemorySettings
    openai_provider: OpenAISettings
    planning: PlannerSettings
    workspace: WorkspaceSettings

    def update_agent_name_and_goals(self, agent_goals: dict) -> None:
        self.agent.configuration.name = agent_goals["agent_name"]
        self.agent.configuration.role = agent_goals["agent_role"]
        self.agent.configuration.goals = agent_goals["agent_goals"]


class SimpleAgent(Agent, Configurable):
    default_settings = AgentSystemSettings(
        name="simple_agent",
        description="A simple agent.",
        configuration=AgentConfiguration(
            name="Entrepreneur-GPT",
            role=(
                "An AI designed to autonomously develop and run businesses with "
                "the sole goal of increasing your net worth."
            ),
            goals=[
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ],
            cycle_count=0,
            max_task_cycle_count=3,
            creation_time="",
            systems=AgentSystems(
                ability_registry=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.ability.SimpleAbilityRegistry",
                ),
                memory=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.memory.SimpleMemory",
                ),
                openai_provider=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.resource.model_providers.OpenAIProvider",
                ),
                planning=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.planning.SimplePlanner",
                ),
                workspace=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.workspace.SimpleWorkspace",
                ),
            ),
        ),
    )

    def __init__(
        self,
        settings: AgentSystemSettings,
        logger: logging.Logger,
        ability_registry: SimpleAbilityRegistry,
        memory: SimpleMemory,
        openai_provider: OpenAIProvider,
        planning: SimplePlanner,
        workspace: SimpleWorkspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._ability_registry = ability_registry
        self._memory = memory
        # FIXME: Need some work to make this work as a dict of providers
        #   Getting the construction of the config to work is a bit tricky
        self._openai_provider = openai_provider
        self._planning = planning
        self._workspace = workspace
        self._task_queue = []
        self._completed_tasks = []
        self._current_task = None
        self._next_ability = None

    @classmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "SimpleAgent":
        agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
        agent_args = {}

        agent_args["settings"] = agent_settings.agent
        agent_args["logger"] = logger
        agent_args["workspace"] = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger,
        )
        agent_args["openai_provider"] = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger,
        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,
            logger,
            model_providers={"openai": agent_args["openai_provider"]},
        )
        agent_args["memory"] = cls._get_system_instance(
            "memory",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
        )

        agent_args["ability_registry"] = cls._get_system_instance(
            "ability_registry",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
            memory=agent_args["memory"],
            model_providers={"openai": agent_args["openai_provider"]},
        )

        return cls(**agent_args)

    async def build_initial_plan(self) -> dict:
        plan = await self._planning.make_initial_plan(
            agent_name=self._configuration.name,
            agent_role=self._configuration.role,
            agent_goals=self._configuration.goals,
            abilities=self._ability_registry.list_abilities(),
        )
        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]

        # TODO: Should probably do a step to evaluate the quality of the generated tasks,
        #   and ensure that they have actionable ready and acceptance criteria

        self._task_queue.extend(tasks)
        self._task_queue.sort(key=lambda t: t.priority, reverse=True)
        self._task_queue[-1].context.status = TaskStatus.READY
        return plan.content

    async def determine_next_ability(self, *args, **kwargs):
        if not self._task_queue:
            return {"response": "I don't have any tasks to work on right now."}

        self._configuration.cycle_count += 1
        task = self._task_queue.pop()
        self._logger.info(f"Working on task: {task}")

        task = await self._evaluate_task_and_add_context(task)
        next_ability = await self._choose_next_ability(
            task,
            self._ability_registry.dump_abilities(),
        )
        self._current_task = task
        self._next_ability = next_ability.content
        return self._current_task, self._next_ability

    async def execute_next_ability(self, user_input: str, *args, **kwargs):
        if user_input == "y":
            ability = self._ability_registry.get_ability(
                self._next_ability["next_ability"]
            )
            ability_response = await ability(**self._next_ability["ability_arguments"])
            await self._update_tasks_and_memory(ability_response)
            if self._current_task.context.status == TaskStatus.DONE:
                self._completed_tasks.append(self._current_task)
            else:
                self._task_queue.append(self._current_task)
            self._current_task = None
            self._next_ability = None

            return ability_response.dict()
        else:
            raise NotImplementedError

    async def _evaluate_task_and_add_context(self, task: Task) -> Task:
        """Evaluate the task and add context to it."""
        if task.context.status == TaskStatus.IN_PROGRESS:
            # Nothing to do here
            return task
        else:
            self._logger.debug(f"Evaluating task {task} and adding relevant context.")
            # TODO: Look up relevant memories (need working memory system)
            # TODO: Evaluate whether there is enough information to start the task (language model call).
            task.context.enough_info = True
            task.context.status = TaskStatus.IN_PROGRESS
            return task

    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
        """Choose the next ability to use for the task."""
        self._logger.debug(f"Choosing next ability for task {task}.")
        if task.context.cycle_count > self._configuration.max_task_cycle_count:
            # Don't hit the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        elif not task.context.enough_info:
            # Don't ask the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        else:
            next_ability = await self._planning.determine_next_ability(
                task, ability_schema
            )
            return next_ability

    async def _update_tasks_and_memory(self, ability_result: AbilityResult):
        self._current_task.context.cycle_count += 1
        self._current_task.context.prior_actions.append(ability_result)
        # TODO: Summarize new knowledge
        # TODO: store knowledge and summaries in memory and in relevant tasks
        # TODO: evaluate whether the task is complete

    def __repr__(self):
        return "SimpleAgent()"

    ################################################################
    # Factory interface for agent bootstrapping and initialization #
    ################################################################

    @classmethod
    def build_user_configuration(cls) -> dict[str, Any]:
        """Build the user's configuration."""
        configuration_dict = {
            "agent": cls.get_user_config(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        for system_name, system_location in system_locations.items():
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.get_user_config()
        configuration_dict = _prune_empty_dicts(configuration_dict)
        return configuration_dict

    @classmethod
    def compile_settings(
        cls, logger: logging.Logger, user_configuration: dict
    ) -> AgentSettings:
        """Compile the user's configuration with the defaults."""
        logger.debug("Processing agent system configuration.")
        configuration_dict = {
            "agent": cls.build_agent_configuration(
                user_configuration.get("agent", {})
            ).dict(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]

        # Build up default configuration
        for system_name, system_location in system_locations.items():
            logger.debug(f"Compiling configuration for system {system_name}")
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.build_agent_configuration(
                user_configuration.get(system_name, {})
            ).dict()

        return AgentSettings.parse_obj(configuration_dict)

    @classmethod
    async def determine_agent_name_and_goals(
        cls,
        user_objective: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ) -> dict:
        logger.debug("Loading OpenAI provider.")
        provider: OpenAIProvider = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger=logger,
        )
        logger.debug("Loading agent planner.")
        agent_planner: SimplePlanner = cls._get_system_instance(
            "planning",
            agent_settings,
            logger=logger,
            model_providers={"openai": provider},
        )
        logger.debug("Determining agent name and goals.")
        model_response = await agent_planner.decide_name_and_goals(
            user_objective,
        )

        return model_response.content

    @classmethod
    def provision_agent(
        cls,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ):
        agent_settings.agent.configuration.creation_time = datetime.now().strftime(
            "%Y%m%d_%H%M%S"
        )
        workspace: SimpleWorkspace = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger=logger,
        )
        return workspace.setup_workspace(agent_settings, logger)

    @classmethod
    def _get_system_instance(
        cls,
        system_name: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
        *args,
        **kwargs,
    ):
        system_locations = agent_settings.agent.configuration.systems.dict()

        system_settings = getattr(agent_settings, system_name)
        system_class = SimplePluginService.get_plugin(system_locations[system_name])
        system_instance = system_class(
            system_settings,
            *args,
            logger=logger.getChild(system_name),
            **kwargs,
        )
        return system_instance


def _prune_empty_dicts(d: dict) -> dict:
    """
    Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves.

    Args:
        d: The dictionary to prune.

    Returns:
        The pruned dictionary.
    """
    pruned = {}
    for key, value in d.items():
        if isinstance(value, dict):
            pruned_value = _prune_empty_dicts(value)
            if pruned_value:  # if the pruned dictionary is not empty, add it to the result
                pruned[key] = pruned_value
        else:
            pruned[key] = value
    return pruned

@@ -1,7 +0,0 @@
"""The configuration encapsulates settings for all Agent subsystems."""
from autogpt.core.configuration.schema import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)

@@ -1,107 +0,0 @@
import abc
import typing
from typing import Any, Generic, TypeVar

from pydantic import BaseModel, Field


def UserConfigurable(*args, **kwargs):
    return Field(*args, **kwargs, user_configurable=True)


class SystemConfiguration(BaseModel):
    def get_user_config(self) -> dict[str, Any]:
        return _get_user_config_fields(self)

    class Config:
        extra = "forbid"
        use_enum_values = True


class SystemSettings(BaseModel):
    """A base class for all system settings."""

    name: str
    description: str

    class Config:
        extra = "forbid"
        use_enum_values = True


S = TypeVar("S", bound=SystemSettings)


class Configurable(abc.ABC, Generic[S]):
    """A base class for all configurable objects."""

    prefix: str = ""
    default_settings: typing.ClassVar[S]

    @classmethod
    def get_user_config(cls) -> dict[str, Any]:
        return _get_user_config_fields(cls.default_settings)

    @classmethod
    def build_agent_configuration(cls, configuration: dict) -> S:
        """Process the configuration for this object."""

        defaults = cls.default_settings.dict()
        final_configuration = deep_update(defaults, configuration)

        return cls.default_settings.__class__.parse_obj(final_configuration)


def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
    """
    Get the user config fields of a Pydantic model instance.

    Args:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """
    user_config_fields = {}

    for name, value in instance.__dict__.items():
        field_info = instance.__fields__[name]
        if "user_configurable" in field_info.field_info.extra:
            user_config_fields[name] = value
        elif isinstance(value, SystemConfiguration):
            user_config_fields[name] = value.get_user_config()
        elif isinstance(value, list) and all(
            isinstance(i, SystemConfiguration) for i in value
        ):
            user_config_fields[name] = [i.get_user_config() for i in value]
        elif isinstance(value, dict) and all(
            isinstance(i, SystemConfiguration) for i in value.values()
        ):
            user_config_fields[name] = {
                k: v.get_user_config() for k, v in value.items()
            }

    return user_config_fields


def deep_update(original_dict: dict, update_dict: dict) -> dict:
    """
    Recursively update a dictionary.

    Args:
        original_dict (dict): The dictionary to be updated.
        update_dict (dict): The dictionary to update with.

    Returns:
        dict: The updated dictionary.
    """
    for key, value in update_dict.items():
        if (
            key in original_dict
            and isinstance(original_dict[key], dict)
            and isinstance(value, dict)
        ):
            original_dict[key] = deep_update(original_dict[key], value)
        else:
            original_dict[key] = value
    return original_dict

@@ -1,3 +0,0 @@
"""The memory subsystem manages the Agent's long-term memory."""
from autogpt.core.memory.base import Memory
from autogpt.core.memory.simple import MemorySettings, SimpleMemory

@@ -1,13 +0,0 @@
import abc


class Memory(abc.ABC):
    pass


class MemoryItem(abc.ABC):
    pass


class MessageHistory(abc.ABC):
    pass

@@ -1,47 +0,0 @@
import json
import logging

from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.workspace import Workspace


class MemoryConfiguration(SystemConfiguration):
    pass


class MemorySettings(SystemSettings):
    configuration: MemoryConfiguration


class MessageHistory:
    def __init__(self, previous_message_history: list[str]):
        self._message_history = previous_message_history


class SimpleMemory(Memory, Configurable):
    default_settings = MemorySettings(
        name="simple_memory",
        description="A simple memory.",
        configuration=MemoryConfiguration(),
    )

    def __init__(
        self,
        settings: MemorySettings,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._message_history = self._load_message_history(workspace)

    @staticmethod
    def _load_message_history(workspace: Workspace):
        message_history_path = workspace.get_path("message_history.json")
        if message_history_path.exists():
            with message_history_path.open("r") as f:
                message_history = json.load(f)
        else:
            message_history = []
        return MessageHistory(message_history)

@@ -1,10 +0,0 @@
"""The planning system organizes the Agent's activities."""
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    LanguageModelResponse,
    Task,
    TaskStatus,
    TaskType,
)
from autogpt.core.planning.simple import PlannerSettings, SimplePlanner

@@ -1,76 +0,0 @@
import abc

from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)

# class Planner(abc.ABC):
#     """Manages the agent's planning and goal-setting by constructing language model prompts."""
#
#     @staticmethod
#     @abc.abstractmethod
#     async def decide_name_and_goals(
#         user_objective: str,
#     ) -> LanguageModelResponse:
#         """Decide the name and goals of an Agent from a user-defined objective.
#
#         Args:
#             user_objective: The user-defined objective for the agent.
#
#         Returns:
#             The agent name and goals as a response from the language model.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     async def plan(self, context: PlanningContext) -> LanguageModelResponse:
#         """Plan the next ability for the Agent.
#
#         Args:
#             context: A context object containing information about the agent's
#                 progress, result, memories, and feedback.
#
#
#         Returns:
#             The next ability the agent should take along with thoughts and reasoning.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     def reflect(
#         self,
#         context: ReflectionContext,
#     ) -> LanguageModelResponse:
#         """Reflect on a planned ability and provide self-criticism.
#
#
#         Args:
#             context: A context object containing information about the agent's
#                 reasoning, plan, thoughts, and criticism.
#
#         Returns:
#             Self-criticism about the agent's plan.
#
#         """
#         ...


class PromptStrategy(abc.ABC):
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> dict:
        ...
@@ -1,76 +0,0 @@
import enum

from pydantic import BaseModel, Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.resource.model_providers.schema import (
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProviderModelResponse,
)


class LanguageModelClassification(str, enum.Enum):
    """The LanguageModelClassification is a functional description of the model.

    This is used to determine what kind of model to use for a given prompt.
    Sometimes we prefer a faster or cheaper model to accomplish a task when
    possible.

    """

    FAST_MODEL: str = "fast_model"
    SMART_MODEL: str = "smart_model"


class LanguageModelPrompt(BaseModel):
    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])


class LanguageModelResponse(LanguageModelProviderModelResponse):
    """Standard response struct for a response from a language model."""


class TaskType(str, enum.Enum):
    RESEARCH: str = "research"
    WRITE: str = "write"
    EDIT: str = "edit"
    CODE: str = "code"
    DESIGN: str = "design"
    TEST: str = "test"
    PLAN: str = "plan"


class TaskStatus(str, enum.Enum):
    BACKLOG: str = "backlog"
    READY: str = "ready"
    IN_PROGRESS: str = "in_progress"
    DONE: str = "done"


class TaskContext(BaseModel):
    cycle_count: int = 0
    status: TaskStatus = TaskStatus.BACKLOG
    parent: "Task" = None
    prior_actions: list[AbilityResult] = Field(default_factory=list)
    memories: list = Field(default_factory=list)
    user_input: list[str] = Field(default_factory=list)
    supplementary_info: list[str] = Field(default_factory=list)
    enough_info: bool = False


class Task(BaseModel):
    objective: str
    type: str  # TaskType  FIXME: gpt does not obey the enum parameter in its schema
    priority: int
    ready_criteria: list[str]
    acceptance_criteria: list[str]
    context: TaskContext = Field(default_factory=TaskContext)


# Need to resolve the circular dependency between Task and TaskContext once both models are defined.
TaskContext.update_forward_refs()
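

# A minimal sketch (not part of the diff) showing how the circular Task /
# TaskContext reference works once update_forward_refs() has run. All field
# values are illustrative.
parent_task = Task(
    objective="Write a summary of the repository",
    type=TaskType.WRITE.value,
    priority=1,
    ready_criteria=["Repository has been cloned"],
    acceptance_criteria=["summary.md exists in the workspace"],
)
subtask = Task(
    objective="List the repository's top-level modules",
    type=TaskType.RESEARCH.value,
    priority=2,
    ready_criteria=[],
    acceptance_criteria=["Module list saved to memory"],
    context=TaskContext(parent=parent_task, status=TaskStatus.READY),
)
# The subtask can navigate back to its parent through its context.
assert subtask.context.parent.objective == parent_task.objective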
@@ -1,182 +0,0 @@
import logging
import platform
import time

import distro

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.planning import strategies
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelResponse,
    Task,
)
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
    OpenAIModelName,
)
from autogpt.core.workspace import Workspace


class LanguageModelConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    model_name: str = UserConfigurable()
    provider_name: ModelProviderName = UserConfigurable()
    temperature: float = UserConfigurable()


class PromptStrategiesConfiguration(SystemConfiguration):
    name_and_goals: strategies.NameAndGoalsConfiguration
    initial_plan: strategies.InitialPlanConfiguration
    next_ability: strategies.NextAbilityConfiguration


class PlannerConfiguration(SystemConfiguration):
    """Configuration for the Planner subsystem."""

    models: dict[LanguageModelClassification, LanguageModelConfiguration]
    prompt_strategies: PromptStrategiesConfiguration


class PlannerSettings(SystemSettings):
    """Settings for the Planner subsystem."""

    configuration: PlannerConfiguration


class SimplePlanner(Configurable):
    """Manages the agent's planning and goal-setting by constructing language model prompts."""

    default_settings = PlannerSettings(
        name="planner",
        description="Manages the agent's planning and goal-setting by constructing language model prompts.",
        configuration=PlannerConfiguration(
            models={
                LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT3,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
                LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT4,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
            },
            prompt_strategies=PromptStrategiesConfiguration(
                name_and_goals=strategies.NameAndGoals.default_configuration,
                initial_plan=strategies.InitialPlan.default_configuration,
                next_ability=strategies.NextAbility.default_configuration,
            ),
        ),
    )

    def __init__(
        self,
        settings: PlannerSettings,
        logger: logging.Logger,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
        workspace: Workspace = None,  # Workspace is not available during bootstrapping.
    ) -> None:
        self._configuration = settings.configuration
        self._logger = logger
        self._workspace = workspace

        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
        for model, model_config in self._configuration.models.items():
            self._providers[model] = model_providers[model_config.provider_name]

        self._prompt_strategies = {
            "name_and_goals": strategies.NameAndGoals(
                **self._configuration.prompt_strategies.name_and_goals.dict()
            ),
            "initial_plan": strategies.InitialPlan(
                **self._configuration.prompt_strategies.initial_plan.dict()
            ),
            "next_ability": strategies.NextAbility(
                **self._configuration.prompt_strategies.next_ability.dict()
            ),
        }

    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["name_and_goals"],
            user_objective=user_objective,
        )

    async def make_initial_plan(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
    ) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["initial_plan"],
            agent_name=agent_name,
            agent_role=agent_role,
            agent_goals=agent_goals,
            abilities=abilities,
        )

    async def determine_next_ability(
        self,
        task: Task,
        ability_schema: list[dict],
    ):
        return await self.chat_with_model(
            self._prompt_strategies["next_ability"],
            task=task,
            ability_schema=ability_schema,
        )

    async def chat_with_model(
        self,
        prompt_strategy: PromptStrategy,
        **kwargs,
    ) -> LanguageModelResponse:
        model_classification = prompt_strategy.model_classification
        model_configuration = self._configuration.models[model_classification].dict()
        self._logger.debug(f"Using model configuration: {model_configuration}")
        del model_configuration["provider_name"]
        provider = self._providers[model_classification]

        template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
        template_kwargs.update(kwargs)
        prompt = prompt_strategy.build_prompt(**template_kwargs)

        self._logger.debug(f"Using prompt:\n{prompt}\n\n")
        response = await provider.create_language_completion(
            model_prompt=prompt.messages,
            functions=prompt.functions,
            **model_configuration,
            completion_parser=prompt_strategy.parse_response_content,
        )
        return LanguageModelResponse.parse_obj(response.dict())

    def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
        provider = self._providers[strategy.model_classification]
        template_kwargs = {
            "os_info": get_os_info(),
            "api_budget": provider.get_remaining_budget(),
            "current_time": time.strftime("%c"),
        }
        return template_kwargs


def get_os_info() -> str:
    os_name = platform.system()
    os_info = (
        platform.platform(terse=True)
        if os_name != "Linux"
        else distro.name(pretty=True)
    )
    return os_info
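

# A minimal sketch (not part of the diff) of driving SimplePlanner end to end.
# The provider wiring is hypothetical; a real OpenAIProvider needs configured
# credentials, and Configurable may impose further setup not shown here.
import asyncio


async def bootstrap_demo(provider: LanguageModelProvider) -> None:
    planner = SimplePlanner(
        settings=SimplePlanner.default_settings,
        logger=logging.getLogger("planner"),
        model_providers={ModelProviderName.OPENAI: provider},
    )
    # One round trip: turn a user objective into an agent name, role, and goals.
    response = await planner.decide_name_and_goals(
        "Help me with marketing my business"
    )
    print(response.content)


# asyncio.run(bootstrap_demo(provider))  # requires a configured provider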
@@ -1,12 +0,0 @@
from autogpt.core.planning.strategies.initial_plan import (
    InitialPlan,
    InitialPlanConfiguration,
)
from autogpt.core.planning.strategies.name_and_goals import (
    NameAndGoals,
    NameAndGoalsConfiguration,
)
from autogpt.core.planning.strategies.next_ability import (
    NextAbility,
    NextAbilityConfiguration,
)
@@ -1,190 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
    TaskType,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class InitialPlanConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_plan_function: dict = UserConfigurable()


class InitialPlan(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
        "You are an expert project planner. Your responsibility is to create work plans for autonomous agents. "
        "You will be given a name, a role, and a set of goals for the agent to accomplish. Your job is to "
        "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. "
        "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined "
        "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should "
        "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. "
        "You should create as many tasks as you think is necessary to accomplish the goals.\n\n"
        "System Info:\n{system_info}"
    )

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "You are {agent_name}, {agent_role}\n"
        "Your goals are:\n"
        "{agent_goals}"
    )

    DEFAULT_CREATE_PLAN_FUNCTION = {
        "name": "create_initial_agent_plan",
        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
        "parameters": {
            "type": "object",
            "properties": {
                "task_list": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "objective": {
                                "type": "string",
                                "description": "An imperative verb phrase that succinctly describes the task.",
                            },
                            "type": {
                                "type": "string",
                                "description": "A categorization for the task.",
                                "enum": [t.value for t in TaskType],
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
                                },
                            },
                            "priority": {
                                "type": "integer",
                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
                                "minimum": 1,
                                "maximum": 10,
                            },
                            "ready_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
                                },
                            },
                        },
                        "required": [
                            "objective",
                            "type",
                            "acceptance_criteria",
                            "priority",
                            "ready_criteria",
                        ],
                    },
                },
            },
        },
    }

    default_configuration = InitialPlanConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        create_plan_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._create_plan_function = create_plan_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "agent_name": agent_name,
            "agent_role": agent_role,
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }
        template_kwargs["agent_goals"] = to_numbered_list(
            agent_goals, **template_kwargs
        )
        template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info, **template_kwargs
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        create_plan_function = LanguageModelFunction(
            json_schema=self._create_plan_function,
        )

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=[create_plan_function],
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        parsed_response["task_list"] = [
            Task.parse_obj(task) for task in parsed_response["task_list"]
        ]
        return parsed_response
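

# A minimal sketch (not part of the diff) of what parse_response_content
# receives and returns. The payload mimics the OpenAI function-call response
# shape this strategy expects; the single-quoted arguments exercise the
# literal-eval fallback in json_loads.
raw_response = {
    "function_call": {
        "name": "create_initial_agent_plan",
        "arguments": (
            "{'task_list': [{'objective': 'Draft the article outline', "
            "'type': 'plan', 'priority': 1, "
            "'ready_criteria': ['Topic is confirmed'], "
            "'acceptance_criteria': ['Outline saved to a file']}]}"
        ),
    }
}
config = InitialPlan.default_configuration
strategy = InitialPlan(**config.dict())
plan = strategy.parse_response_content(raw_response)
# Raw dicts in task_list are promoted to Task models.
assert isinstance(plan["task_list"][0], Task)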
@@ -1,139 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)
from autogpt.core.planning.strategies.utils import json_loads
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NameAndGoalsConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt: str = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_agent_function: dict = UserConfigurable()


class NameAndGoals(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT = (
        "Your job is to respond to a user-defined task by invoking the `create_agent` function "
        "to generate an autonomous agent to complete the task. You should supply a role-based "
        "name for the agent, an informative description of what the agent does, and 1 to 5 "
        "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
        "Example Input:\n"
        "Help me with marketing my business\n\n"
        "Example Function Call:\n"
        "create_agent(name='CMOGPT', "
        "description='A professional digital marketer AI that assists Solopreneurs in "
        "growing their businesses by providing world-class expertise in solving "
        "marketing problems for SaaS, content products, agencies, and more.', "
        "goals=['Engage in effective problem-solving, prioritization, planning, and "
        "supporting execution to address your marketing needs as your virtual Chief "
        "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
        "help you make informed decisions without the use of platitudes or overly "
        "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
        "campaigns that maximize results with minimal time and budget investment.', "
        "'Proactively take the lead in guiding you and offering suggestions when faced "
        "with unclear information or uncertainty to ensure your marketing strategy "
        "remains on track.'])"
    )

    DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"

    DEFAULT_CREATE_AGENT_FUNCTION = {
        "name": "create_agent",
        "description": "Create a new autonomous AI agent to complete a given task.",
        "parameters": {
            "type": "object",
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "A short role-based name for an autonomous agent.",
                },
                "agent_role": {
                    "type": "string",
                    "description": "An informative one-sentence description of what the AI agent does.",
                },
                "agent_goals": {
                    "type": "array",
                    "minItems": 1,
                    "maxItems": 5,
                    "items": {
                        "type": "string",
                    },
                    "description": (
                        "One to five highly effective goals that are optimally aligned with the completion of a "
                        "specific task. The number and complexity of the goals should correspond to the "
                        "complexity of the agent's primary objective."
                    ),
                },
            },
            "required": ["agent_name", "agent_role", "agent_goals"],
        },
    }

    default_configuration = NameAndGoalsConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = create_agent_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
        system_message = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_message,
        )
        user_message = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(
                user_objective=user_objective,
            ),
        )
        create_agent_function = LanguageModelFunction(
            json_schema=self._create_agent_function,
        )
        prompt = LanguageModelPrompt(
            messages=[system_message, user_message],
            functions=[create_agent_function],
            # TODO
            tokens_used=0,
        )
        return prompt

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        return parsed_response
@@ -1,183 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NextAbilityConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    additional_ability_arguments: dict = UserConfigurable()


class NextAbility(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "Your current task is {task_objective}.\n"
        "You have taken {cycle_count} actions on this task already. "
        "Here are the actions you have taken and their results:\n"
        "{action_history}\n\n"
        "Here is additional information that may be useful to you:\n"
        "{additional_info}\n\n"
        "Additionally, you should consider the following:\n"
        "{user_input}\n\n"
        "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n"
        "{acceptance_criteria}\n\n"
        "Please choose one of the provided functions to accomplish this task. "
        "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that "
        "you think is most appropriate for the current situation given your progress so far."
    )

    DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
        "motivation": {
            "type": "string",
            "description": "Your justification for choosing this function instead of a different one.",
        },
        "self_criticism": {
            "type": "string",
            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
        },
        "reasoning": {
            "type": "string",
            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
        },
    }

    default_configuration = NextAbilityConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        additional_ability_arguments: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._additional_ability_arguments = additional_ability_arguments

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        task: Task,
        ability_schema: list[dict],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }

        for ability in ability_schema:
            ability["parameters"]["properties"].update(
                self._additional_ability_arguments
            )
            ability["parameters"]["required"] += list(
                self._additional_ability_arguments.keys()
            )

        template_kwargs["task_objective"] = task.objective
        template_kwargs["cycle_count"] = task.context.cycle_count
        template_kwargs["action_history"] = to_numbered_list(
            [action.summary() for action in task.context.prior_actions],
            no_items_response="You have not taken any actions yet.",
            **template_kwargs,
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
            + [info for info in task.context.supplementary_info],
            no_items_response="There is no additional information available at this time.",
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )
        template_kwargs["acceptance_criteria"] = to_numbered_list(
            [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
            **template_kwargs,
        )

        template_kwargs["system_info"] = to_numbered_list(
            self._system_info,
            **template_kwargs,
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        functions = [
            LanguageModelFunction(json_schema=ability) for ability in ability_schema
        ]

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=functions,
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        function_name = response_content["function_call"]["name"]
        function_arguments = json_loads(response_content["function_call"]["arguments"])
        parsed_response = {
            "motivation": function_arguments.pop("motivation"),
            "self_criticism": function_arguments.pop("self_criticism"),
            "reasoning": function_arguments.pop("reasoning"),
            "next_ability": function_name,
            "ability_arguments": function_arguments,
        }
        return parsed_response
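

# A minimal sketch (not part of the diff) of the dict shapes flowing through
# parse_response_content. The function-call payload is illustrative.
raw = {
    "function_call": {
        "name": "read_file",
        "arguments": (
            '{"filename": "notes.txt", '
            '"motivation": "The notes likely contain the needed context.", '
            '"self_criticism": "The file may be stale.", '
            '"reasoning": "Reading is cheap and directly unblocks the task."}'
        ),
    }
}
strategy = NextAbility(**NextAbility.default_configuration.dict())
parsed = strategy.parse_response_content(raw)
# The coaching arguments are split out; only real ability args remain.
assert parsed["next_ability"] == "read_file"
assert parsed["ability_arguments"] == {"filename": "notes.txt"}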
@@ -1,27 +0,0 @@
import ast
import json


def to_numbered_list(
    items: list[str], no_items_response: str = "", **template_args
) -> str:
    if items:
        return "\n".join(
            f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
        )
    else:
        return no_items_response


def json_loads(json_str: str):
    # TODO: this is a hack function for now. Trying to see what errors show up in testing.
    #   The function API still sometimes returns JSON strings with minor issues like
    #   trailing commas or single quotes, which json.loads rejects but
    #   ast.literal_eval can usually handle.
    try:
        return json.loads(json_str)
    except json.decoder.JSONDecodeError as e:
        print(f"json decode error {e}. trying literal eval")
        return ast.literal_eval(json_str)
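

# A minimal sketch (not part of the diff) exercising both helpers.
numbered = to_numbered_list(
    ["Current OS: {os_info}", "Budget: ${api_budget:.3f}"],
    os_info="Linux",
    api_budget=9.99,
)
# -> "1. Current OS: Linux\n2. Budget: $9.990"
assert numbered.startswith("1. ")

# json_loads tolerates the single-quoted pseudo-JSON the function-calling API
# sometimes emits, falling back to ast.literal_eval.
assert json_loads("{'a': 1,}") == {"a": 1}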
@@ -1,101 +0,0 @@
# Rules of thumb:
# - Templates don't add new lines at the end of the string. This is the
#   responsibility of the consuming template.

####################
# Planner defaults #
####################


USER_OBJECTIVE = (
    "Write a wikipedia style article about the project: "
    "https://github.com/significant-gravitas/Auto-GPT"
)


ABILITIES = (
    'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
    'execute_python_file: Execute Python File, args: "filename": "<filename>"',
    'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
    'list_files: List Files in Directory, args: "directory": "<directory>"',
    'read_file: Read a file, args: "filename": "<filename>"',
    'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
    'google: Google Search, args: "query": "<query>"',
    'improve_code: Get Improved Code, args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
    'browse_website: Browse Website, args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"',
    'write_tests: Write Tests, args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
    'get_hyperlinks: Get hyperlinks, args: "url": "<url>"',
    'get_text_summary: Get text summary, args: "url": "<url>", "question": "<question>"',
    'task_complete: Task Complete (Shutdown), args: "reason": "<reason>"',
)


# Plan Prompt
# -----------


PLAN_PROMPT_CONSTRAINTS = (
    "~4000 word limit for short term memory. Your short term memory is short, so "
    "immediately save important information to files.",
    "If you are unsure how you previously did something or want to recall past "
    "events, thinking about similar events will help you remember.",
    "No user assistance",
    "Exclusively use the commands listed below e.g. command_name",
)

PLAN_PROMPT_RESOURCES = (
    "Internet access for searches and information gathering.",
    "Long-term memory management.",
    "File output.",
)

PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
    "Continuously review and analyze your actions to ensure you are performing to"
    " the best of your abilities.",
    "Constructively self-criticize your big-picture behavior constantly.",
    "Reflect on past decisions and strategies to refine your approach.",
    "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
    " the least number of steps.",
    "Write all code to a file",
)


PLAN_PROMPT_RESPONSE_DICT = {
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user",
    },
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

PLAN_PROMPT_RESPONSE_FORMAT = (
    "You should only respond in JSON format as described below\n"
    "Response Format:\n"
    "{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)

PLAN_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)

PLAN_PROMPT_MAIN = (
    "{header}\n\n"
    "GOALS:\n\n{goals}\n\n"
    "Info:\n{info}\n\n"
    "Constraints:\n{constraints}\n\n"
    "Commands:\n{commands}\n\n"
    "Resources:\n{resources}\n\n"
    "Performance Evaluations:\n{performance_evaluations}\n\n"
    "You should only respond in JSON format as described below\n"
    "Response Format:\n{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)


###########################
# Parameterized templates #
###########################
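

# A minimal sketch (not part of the diff) of rendering PLAN_PROMPT_MAIN from
# the constants above. The header and info strings are illustrative.
import json

rendered = PLAN_PROMPT_MAIN.format(
    header="You are PlannerGPT, an autonomous planning agent.",
    goals="1. " + USER_OBJECTIVE,
    info="The current time and date is Tue Jul  4 12:00:00 2023",
    constraints="\n".join(f"{i+1}. {c}" for i, c in enumerate(PLAN_PROMPT_CONSTRAINTS)),
    commands="\n".join(f"{i+1}. {c}" for i, c in enumerate(ABILITIES)),
    resources="\n".join(f"{i+1}. {r}" for i, r in enumerate(PLAN_PROMPT_RESOURCES)),
    performance_evaluations="\n".join(
        f"{i+1}. {p}" for i, p in enumerate(PLAN_PROMPT_PERFORMANCE_EVALUATIONS)
    ),
    response_json_structure=json.dumps(PLAN_PROMPT_RESPONSE_DICT, indent=4),
)
print(rendered)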
@@ -1,2 +0,0 @@
"""The plugin system allows the Agent to be extended with new functionality."""
from autogpt.core.plugin.base import PluginService
@@ -1,155 +0,0 @@
import abc
import enum
from typing import TYPE_CHECKING, Type

from pydantic import BaseModel

from autogpt.core.configuration import SystemConfiguration, UserConfigurable

if TYPE_CHECKING:
    from autogpt.core.ability import Ability, AbilityRegistry
    from autogpt.core.memory import Memory
    from autogpt.core.resource.model_providers import (
        EmbeddingModelProvider,
        LanguageModelProvider,
    )

# Expand to other types as needed. The forward-reference strings keep this
# union importable at runtime, since the referenced types are only imported
# for type checking.
PluginType = (
    Type["Ability"]  # Swappable now
    | Type["AbilityRegistry"]  # Swappable maybe never
    | Type["LanguageModelProvider"]  # Swappable soon
    | Type["EmbeddingModelProvider"]  # Swappable soon
    | Type["Memory"]  # Swappable now
    # | Type["Planner"]  # Swappable soon
)


class PluginStorageFormat(str, enum.Enum):
    """Supported plugin storage formats.

    Plugins can be stored at one of these supported locations.

    """

    INSTALLED_PACKAGE = "installed_package"  # Required now, loads system defaults
    WORKSPACE = "workspace"  # Required now
    # OPENAPI_URL = "open_api_url"  # Soon (requires some tooling we don't have yet).
    # OTHER_FILE_PATH = "other_file_path"  # Maybe later (maybe now)
    # GIT = "git"  # Maybe later (or soon)
    # PYPI = "pypi"  # Maybe later
    # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service"  # Long term solution, requires design
    # AUTO = "auto"  # Feature for later maybe, automatically find plugin.


# Installed package example
# PluginLocation(
#     storage_format='installed_package',
#     storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# Workspace example
# PluginLocation(
#     storage_format='workspace',
#     storage_route='relative/path/to/plugin.pkl'
#     OR
#     storage_route='relative/path/to/plugin.py'
# )
# Git
# PluginLocation(
#     storage_format='git',
#     Exact format TBD.
#     storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
# )
# PyPI
# PluginLocation(
#     storage_format='pypi',
#     storage_route='package_name'
# )


# A plugin storage route.
#
# This is a string that specifies where to load a plugin from
# (e.g. an import path or file path).
PluginStorageRoute = str


class PluginLocation(SystemConfiguration):
    """A plugin location.

    This is a combination of a plugin storage format and a plugin storage route.
    It is used by the PluginService to load plugins.

    """

    storage_format: PluginStorageFormat = UserConfigurable()
    storage_route: PluginStorageRoute = UserConfigurable()


class PluginMetadata(BaseModel):
    """Metadata about a plugin."""

    name: str
    description: str
    location: PluginLocation


class PluginService(abc.ABC):
    """Base class for plugin service.

    The plugin service should be stateless. This defines the interface for
    loading plugins from various storage formats.

    """

    @staticmethod
    @abc.abstractmethod
    def get_plugin(plugin_location: PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        ...

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        ...

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    @abc.abstractmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        ...
@@ -1,74 +0,0 @@
from importlib import import_module
from typing import TYPE_CHECKING

from autogpt.core.plugin.base import (
    PluginLocation,
    PluginService,
    PluginStorageFormat,
    PluginStorageRoute,
)

if TYPE_CHECKING:
    from autogpt.core.plugin.base import PluginType


class SimplePluginService(PluginService):
    @staticmethod
    def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        if isinstance(plugin_location, dict):
            plugin_location = PluginLocation.parse_obj(plugin_location)
        if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
            return SimplePluginService.load_from_workspace(
                plugin_location.storage_route
            )
        elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
            return SimplePluginService.load_from_installed_package(
                plugin_location.storage_route
            )
        else:
            raise NotImplementedError(
                f"Plugin storage format {plugin_location.storage_format} is not implemented."
            )

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        # TODO: Define an on-disk storage format and implement this.
        #   Can pull from existing zip file loading implementation
        raise NotImplementedError("Loading from file path is not implemented.")

    @staticmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        module_path, _, class_name = plugin_route.rpartition(".")
        return getattr(import_module(module_path), class_name)

    @staticmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        # TODO: Implement a discovery system for finding plugins by name from known
        #   storage locations. E.g. if we know that path_type is a file path, we can
        #   search the workspace for it. If it's an import path, we can check the core
        #   system and the auto_gpt_plugins package.
        raise NotImplementedError("Resolving plugin name to path is not implemented.")

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        plugin = SimplePluginService.load_from_file_path(plugin_route)
        return plugin

    @staticmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        plugin = SimplePluginService.load_from_import_path(plugin_route)
        return plugin
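

# A minimal sketch (not part of the diff) of loading a plugin class through
# SimplePluginService. The storage route comes from the examples in
# plugin/base.py and requires the autogpt_plugins package to be installed.
location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt_plugins.twitter.SendTwitterMessage",
)
plugin_class = SimplePluginService.get_plugin(location)
# get_plugin also accepts the equivalent dict form:
same_class = SimplePluginService.get_plugin(
    {
        "storage_format": "installed_package",
        "storage_route": "autogpt_plugins.twitter.SendTwitterMessage",
    }
)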
@@ -1,7 +0,0 @@
from autogpt.core.resource.schema import (
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)
@@ -1,44 +0,0 @@
from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_MODELS,
    OpenAIModelName,
    OpenAIProvider,
    OpenAISettings,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    MessageRole,
    ModelProvider,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderModelInfo,
    ModelProviderModelResponse,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

__all__ = [
    "ModelProvider",
    "ModelProviderName",
    "ModelProviderSettings",
    "EmbeddingModelProvider",
    "EmbeddingModelProviderModelResponse",
    "LanguageModelProvider",
    "LanguageModelProviderModelResponse",
    "LanguageModelFunction",
    "LanguageModelMessage",
    "MessageRole",
    "OpenAIModelName",
    "OPEN_AI_MODELS",
    "OpenAIProvider",
    "OpenAISettings",
]
@@ -1,373 +0,0 @@
import enum
import functools
import logging
import math
import time
from typing import Callable, ParamSpec, TypeVar

import openai
from openai.error import APIError, RateLimitError

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    UserConfigurable,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
OpenAIChatParser = Callable[[str], dict]


class OpenAIModelName(str, enum.Enum):
    ADA = "text-embedding-ada-002"
    GPT3 = "gpt-3.5-turbo-0613"
    GPT3_16K = "gpt-3.5-turbo-16k-0613"
    GPT4 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k-0613"


OPEN_AI_EMBEDDING_MODELS = {
    OpenAIModelName.ADA: EmbeddingModelProviderModelInfo(
        name=OpenAIModelName.ADA,
        service=ModelProviderService.EMBEDDING,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0004,
        completion_token_cost=0.0,
        max_tokens=8191,
        embedding_dimensions=1536,
    ),
}


OPEN_AI_LANGUAGE_MODELS = {
    OpenAIModelName.GPT3: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0015,
        completion_token_cost=0.002,
        max_tokens=4096,
    ),
    OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3_16K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.003,
        completion_token_cost=0.002,
        max_tokens=16384,
    ),
    OpenAIModelName.GPT4: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.03,
        completion_token_cost=0.06,
        max_tokens=8192,
    ),
    OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4_32K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.06,
        completion_token_cost=0.12,
        max_tokens=32768,
    ),
}


OPEN_AI_MODELS = {
    **OPEN_AI_LANGUAGE_MODELS,
    **OPEN_AI_EMBEDDING_MODELS,
}


class OpenAIConfiguration(SystemConfiguration):
    retries_per_request: int = UserConfigurable()


class OpenAIModelProviderBudget(ModelProviderBudget):
    graceful_shutdown_threshold: float = UserConfigurable()
    warning_threshold: float = UserConfigurable()


class OpenAISettings(ModelProviderSettings):
    configuration: OpenAIConfiguration
    credentials: ModelProviderCredentials
    budget: OpenAIModelProviderBudget


class OpenAIProvider(
    Configurable,
    LanguageModelProvider,
    EmbeddingModelProvider,
):
    default_settings = OpenAISettings(
        name="openai_provider",
        description="Provides access to OpenAI's API.",
        configuration=OpenAIConfiguration(
            retries_per_request=10,
        ),
        credentials=ModelProviderCredentials(),
        budget=OpenAIModelProviderBudget(
            total_budget=math.inf,
            total_cost=0.0,
            remaining_budget=math.inf,
            usage=ModelProviderUsage(
                prompt_tokens=0,
                completion_tokens=0,
                total_tokens=0,
            ),
            graceful_shutdown_threshold=0.005,
            warning_threshold=0.01,
        ),
    )

    def __init__(
        self,
        settings: OpenAISettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._credentials = settings.credentials
        self._budget = settings.budget

        self._logger = logger

        retry_handler = _OpenAIRetryHandler(
            logger=self._logger,
            num_retries=self._configuration.retries_per_request,
        )

        self._create_completion = retry_handler(_create_completion)
        self._create_embedding = retry_handler(_create_embedding)

    def get_token_limit(self, model_name: str) -> int:
        """Get the token limit for a given model."""
        return OPEN_AI_MODELS[model_name].max_tokens

    def get_remaining_budget(self) -> float:
        """Get the remaining budget."""
        return self._budget.remaining_budget

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: OpenAIModelName,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        """Create a completion using the OpenAI API."""
        completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
        response = await self._create_completion(
            messages=model_prompt,
            **completion_kwargs,
        )
        response_args = {
            "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }

        parsed_response = completion_parser(
            response.choices[0].message.to_dict_recursive()
        )
        response = LanguageModelProviderModelResponse(
            content=parsed_response, **response_args
        )
        self._budget.update_usage_and_cost(response)
        return response

    async def create_embedding(
        self,
        text: str,
        model_name: OpenAIModelName,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        """Create an embedding using the OpenAI API."""
        embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs)
        response = await self._create_embedding(text=text, **embedding_kwargs)

        response_args = {
            "model_info": OPEN_AI_EMBEDDING_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }
        response = EmbeddingModelProviderModelResponse(
            **response_args,
            embedding=embedding_parser(response.embeddings[0]),
        )
        self._budget.update_usage_and_cost(response)
        return response

    def _get_completion_kwargs(
        self,
        model_name: OpenAIModelName,
        functions: list[LanguageModelFunction],
        **kwargs,
    ) -> dict:
        """Get kwargs for completion API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the chat API call.

        """
        completion_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }
        if functions:
            completion_kwargs["functions"] = functions

        return completion_kwargs

    def _get_embedding_kwargs(
        self,
        model_name: OpenAIModelName,
        **kwargs,
    ) -> dict:
        """Get kwargs for embedding API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the embedding API call.

        """
        embedding_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }

        return embedding_kwargs

    def __repr__(self):
        return "OpenAIProvider()"


async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
    """Embed text using the OpenAI API.

    Args:
        text str: The text to embed.
        model_name str: The name of the model to use.

    Returns:
        str: The embedding.
    """
    return await openai.Embedding.acreate(
        input=[text],
        **kwargs,
    )


async def _create_completion(
    messages: list[LanguageModelMessage], *_, **kwargs
) -> openai.Completion:
    """Create a chat completion using the OpenAI API.

    Args:
        messages: The prompt to use.

    Returns:
        The completion.

    """
    messages = [message.dict() for message in messages]
    if "functions" in kwargs:
        kwargs["functions"] = [
            function.json_schema for function in kwargs["functions"]
        ]
    return await openai.ChatCompletion.acreate(
        messages=messages,
        **kwargs,
    )


_T = TypeVar("_T")
_P = ParamSpec("_P")


class _OpenAIRetryHandler:
    """Retry Handler for OpenAI API call.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """

    _retry_limit_msg = "Error: Reached rate limit, passing..."
    _api_key_error_msg = (
        "Please double check that you have setup a PAID OpenAI API Account. You can "
        "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
    )
    _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."

    def __init__(
        self,
        logger: logging.Logger,
        num_retries: int = 10,
        backoff_base: float = 2.0,
        warn_user: bool = True,
    ):
        self._logger = logger
        self._num_retries = num_retries
        self._backoff_base = backoff_base
        self._warn_user = warn_user

    def _log_rate_limit_error(self) -> None:
        self._logger.debug(self._retry_limit_msg)
        if self._warn_user:
            self._logger.warning(self._api_key_error_msg)
            self._warn_user = False

    def _backoff(self, attempt: int) -> None:
        backoff = self._backoff_base ** (attempt + 2)
        self._logger.debug(self._backoff_msg.format(backoff=backoff))
        time.sleep(backoff)

    def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
        @functools.wraps(func)
        async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            num_attempts = self._num_retries + 1  # +1 for the first attempt
            for attempt in range(1, num_attempts + 1):
                try:
                    return await func(*args, **kwargs)

                except RateLimitError:
                    if attempt == num_attempts:
                        raise
                    self._log_rate_limit_error()

                except APIError as e:
                    if (e.http_status != 502) or (attempt == num_attempts):
                        raise

                self._backoff(attempt)

        return _wrapped
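

# A minimal sketch (not part of the diff) of how _OpenAIRetryHandler wraps an
# async API call; the flaky_call coroutine is hypothetical.
import asyncio


async def flaky_call() -> str:
    return "ok"


retry_handler = _OpenAIRetryHandler(
    logger=logging.getLogger(__name__),
    num_retries=3,
)
guarded_call = retry_handler(flaky_call)
# RateLimitError and 502 APIError are retried with exponential backoff;
# any other exception propagates immediately.
assert asyncio.run(guarded_call()) == "ok"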
@@ -1,219 +0,0 @@
|
||||
import abc
|
||||
import enum
|
||||
from typing import Callable, ClassVar
|
||||
|
||||
from pydantic import BaseModel, Field, SecretStr, validator
|
||||
|
||||
from autogpt.core.configuration import UserConfigurable
|
||||
from autogpt.core.resource.schema import (
|
||||
Embedding,
|
||||
ProviderBudget,
|
||||
ProviderCredentials,
|
||||
ProviderSettings,
|
||||
ProviderUsage,
|
||||
ResourceType,
|
||||
)
|
||||
|
||||
|
||||
class ModelProviderService(str, enum.Enum):
|
||||
"""A ModelService describes what kind of service the model provides."""
|
||||
|
||||
EMBEDDING: str = "embedding"
|
||||
LANGUAGE: str = "language"
|
||||
TEXT: str = "text"
|
||||
|
||||
|
||||
class ModelProviderName(str, enum.Enum):
|
||||
OPENAI: str = "openai"
|
||||
|
||||
|
||||
class MessageRole(str, enum.Enum):
|
||||
USER = "user"
|
||||
SYSTEM = "system"
|
||||
ASSISTANT = "assistant"
|
||||
|
||||
|
||||
class LanguageModelMessage(BaseModel):
|
||||
role: MessageRole
|
||||
content: str
|
||||
|
||||
|
||||
class LanguageModelFunction(BaseModel):
|
||||
json_schema: dict
|
||||
|
||||
|
||||
class ModelProviderModelInfo(BaseModel):
|
||||
"""Struct for model information.
|
||||
|
||||
Would be lovely to eventually get this directly from APIs, but needs to be
|
||||
scraped from websites for now.
|
||||
|
||||
"""
|
||||
|
||||
name: str
|
||||
service: ModelProviderService
|
||||
provider_name: ModelProviderName
|
||||
prompt_token_cost: float = 0.0
|
||||
completion_token_cost: float = 0.0
|
||||
|
||||
|
||||
class ModelProviderModelResponse(BaseModel):
|
||||
"""Standard response struct for a response from a model."""
|
||||
|
||||
prompt_tokens_used: int
|
||||
completion_tokens_used: int
|
||||
model_info: ModelProviderModelInfo
|
||||
|
||||
|
||||
class ModelProviderCredentials(ProviderCredentials):
|
||||
"""Credentials for a model provider."""
|
||||
|
||||
api_key: SecretStr | None = UserConfigurable(default=None)
|
||||
api_type: SecretStr | None = UserConfigurable(default=None)
|
||||
api_base: SecretStr | None = UserConfigurable(default=None)
|
||||
api_version: SecretStr | None = UserConfigurable(default=None)
|
||||
deployment_id: SecretStr | None = UserConfigurable(default=None)
|
||||
|
||||
def unmasked(self) -> dict:
|
||||
return unmask(self)
|
||||
|
||||
class Config:
|
||||
extra = "ignore"
|
||||
|
||||
|
||||
def unmask(model: BaseModel):
|
||||
unmasked_fields = {}
|
||||
for field_name, field in model.__fields__.items():
|
||||
value = getattr(model, field_name)
|
||||
if isinstance(value, SecretStr):
|
||||
unmasked_fields[field_name] = value.get_secret_value()
|
||||
else:
|
||||
unmasked_fields[field_name] = value
|
||||
return unmasked_fields
|
||||
|
||||
|
||||
class ModelProviderUsage(ProviderUsage):
    """Usage for a particular model from a model provider."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens_used
        self.prompt_tokens += model_response.prompt_tokens_used
        self.total_tokens += (
            model_response.completion_tokens_used + model_response.prompt_tokens_used
        )


class ModelProviderBudget(ProviderBudget):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        model_info = model_response.model_info
        self.usage.update_usage(model_response)
        incremental_cost = (
            model_response.completion_tokens_used * model_info.completion_token_cost
            + model_response.prompt_tokens_used * model_info.prompt_token_cost
        ) / 1000.0
        self.total_cost += incremental_cost
        self.remaining_budget -= incremental_cost
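
Note that token costs are expressed per 1000 tokens, hence the final division. A worked example with hypothetical prices (not real pricing):

# prompt_token_cost = 0.03, completion_token_cost = 0.06 (USD per 1000 tokens);
# a response used 1000 prompt tokens and 500 completion tokens:
incremental_cost = (500 * 0.06 + 1000 * 0.03) / 1000.0
print(incremental_cost)  # ~0.06 USD added to total_cost, deducted from remaining_budget
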
class ModelProviderSettings(ProviderSettings):
    resource_type = ResourceType.MODEL
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget


class ModelProvider(abc.ABC):
    """A ModelProvider abstracts the details of a particular provider of models."""

    defaults: ClassVar[ModelProviderSettings]

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int:
        ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float:
        ...


####################
# Embedding Models #
####################


class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for embedding model information."""

    model_service = ModelProviderService.EMBEDDING
    embedding_dimensions: int


class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: Embedding = Field(default_factory=list)

    @classmethod
    @validator("completion_tokens_used")
    def _verify_no_completion_tokens_used(cls, v):
        if v > 0:
            raise ValueError("Embeddings should not have completion tokens used.")
        return v


class EmbeddingModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_embedding(
        self,
        text: str,
        model_name: str,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        ...


###################
# Language Models #
###################


class LanguageModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for language model information."""

    model_service = ModelProviderService.LANGUAGE
    max_tokens: int


class LanguageModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from a language model."""

    content: dict = None


class LanguageModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        ...
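
Everything below these abstract interfaces is provider-specific. A minimal sketch of a concrete provider, just to show which methods a subclass must supply (the class, the echo behavior, and the token numbers are entirely hypothetical):

class EchoModelProvider(LanguageModelProvider):
    """Toy provider that 'completes' a prompt by echoing it back."""

    def get_token_limit(self, model_name: str) -> int:
        return 4096  # hypothetical fixed context window

    def get_remaining_budget(self) -> float:
        return float("inf")  # this toy provider is free

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        # A real provider would call its API here; we just echo the prompt.
        raw = {"echo": [m.content for m in model_prompt]}
        return LanguageModelProviderModelResponse(
            prompt_tokens_used=0,
            completion_tokens_used=0,
            model_info=LanguageModelProviderModelInfo(
                name=model_name,
                service=ModelProviderService.LANGUAGE,
                provider_name=ModelProviderName.OPENAI,  # only enum member defined here
                max_tokens=4096,
            ),
            content=completion_parser(raw),
        )
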
@@ -1,57 +0,0 @@
import abc
import enum

from pydantic import SecretBytes, SecretField, SecretStr

from autogpt.core.configuration import (
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)


class ResourceType(str, enum.Enum):
    """An enumeration of resource types."""

    MODEL = "model"
    MEMORY = "memory"


class ProviderUsage(SystemConfiguration, abc.ABC):
    @abc.abstractmethod
    def update_usage(self, *args, **kwargs) -> None:
        """Update the usage of the resource."""
        ...


class ProviderBudget(SystemConfiguration):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ProviderUsage

    @abc.abstractmethod
    def update_usage_and_cost(self, *args, **kwargs) -> None:
        """Update the usage and cost of the resource."""
        ...


class ProviderCredentials(SystemConfiguration):
    """Struct for credentials."""

    class Config:
        json_encoders = {
            SecretStr: lambda v: v.get_secret_value() if v else None,
            SecretBytes: lambda v: v.get_secret_value() if v else None,
            SecretField: lambda v: v.get_secret_value() if v else None,
        }


class ProviderSettings(SystemSettings):
    resource_type: ResourceType
    credentials: ProviderCredentials | None = None
    budget: ProviderBudget | None = None


# Used both by model providers and memory providers
Embedding = list[float]
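
The json_encoders above deliberately unmask secrets on serialization, which is what lets credential settings round-trip through JSON/YAML files on disk while staying masked in reprs and logs. A small sketch, assuming pydantic v1 semantics (the subclass and token value are hypothetical):

class DemoCredentials(ProviderCredentials):
    token: SecretStr | None = None

creds = DemoCredentials(token="very-secret")
print(creds)         # token=SecretStr('**********') -- masked in the repr
print(creds.json())  # {"token": "very-secret"}      -- unmasked by json_encoders
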
@@ -1,3 +0,0 @@
"""
This module contains the runner for the v2 agent server and client.
"""
@@ -1,47 +0,0 @@
from pathlib import Path

import click
import yaml

from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@click.option(
    "--pdb",
    is_flag=True,
    help="Drop into a debugger if an error is raised.",
)
@coroutine
async def run(settings_file: str, pdb: bool) -> None:
    """Run the Auto-GPT agent."""
    click.echo("Running Auto-GPT agent...")
    settings_file = Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
    await main(settings)


if __name__ == "__main__":
    autogpt()
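
Because the entry point is a standard Click group, it can also be driven programmatically, e.g. from a test, using Click's built-in CliRunner (a sketch; the settings path is made up):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(autogpt, ["run", "--settings-file", "/tmp/settings.yml"])
print(result.output)
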
@@ -1,69 +0,0 @@
import click

from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import get_client_logger
from autogpt.core.runner.client_lib.parser import (
    parse_ability_result,
    parse_agent_name_and_goals,
    parse_agent_plan,
    parse_next_ability,
)


async def run_auto_gpt(user_configuration: dict):
    """Run the Auto-GPT CLI client."""

    client_logger = get_client_logger()
    client_logger.debug("Getting agent settings")

    agent_workspace = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    if not agent_workspace:  # We don't have an agent yet.
        #################
        # Bootstrapping #
        #################
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            client_logger,
            user_configuration,
        )

        # Step 2. Get a name and goals for the agent.
        # First we need to figure out what the user wants to do with the agent.
        # We'll do this by asking the user for a prompt.
        user_objective = click.prompt("What do you want Auto-GPT to do?")
        # Ask a language model to determine a name and goals for a suitable agent.
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            client_logger,
        )
        print(parse_agent_name_and_goals(name_and_goals))
        # Finally, update the agent settings with the name and goals.
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
        print("agent is provisioned")

    # launch agent interaction loop
    agent = SimpleAgent.from_workspace(
        agent_workspace,
        client_logger,
    )
    print("agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))
Some files were not shown because too many files have changed in this diff.