mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-12 00:28:31 -05:00

Compare commits: v0.4.4 ... summary_me (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | cb6214e647 | |
| | 8b82421b9c | |
| | 75cc71f8d3 | |
| | f287282e8c | |
| | 2a93aff512 | |
| | 6d1653b84f | |
| | a7816b8c79 | |
| | 21913c4733 | |
| | 9d9c66d50f | |
| | a00a7a2bd0 | |
| | d6cb10432b | |
| | 0bea5e38a4 | |
| | 88b2d5fb2d | |
| | f1032926cc | |
| | e7ad51ce42 | |
| | a3522223d9 | |
| | 4e3035efe4 | |
| | a8cbf51489 | |
| | 317361da8c | |
| | 991bc77e0b | |
| | 83357f6c2f | |
| | acf48d2d4d | |
.devcontainer/devcontainer.json (filename inferred from the diff content)
@@ -7,12 +7,11 @@
"ghcr.io/devcontainers/features/common-utils:2": {
    "installZsh": "true",
    "username": "vscode",
    "userUid": "1000",
    "userGid": "1000",
    "userUid": "6942",
    "userGid": "6942",
    "upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
@@ -26,20 +25,8 @@
"vscode": {
    // Set *default* container specific settings.json values on container create.
    "settings": {
        "python.defaultInterpreterPath": "/usr/local/bin/python",
        "python.testing.pytestEnabled": true,
        "python.testing.unittestEnabled": false
    },
    "extensions": [
        "ms-python.python",
        "VisualStudioExptTeam.vscodeintellicode",
        "ms-python.vscode-pylance",
        "ms-python.black-formatter",
        "ms-python.isort",
        "GitHub.vscode-pull-request-github",
        "GitHub.copilot",
        "github.vscode-github-actions"
    ]
    "python.defaultInterpreterPath": "/usr/local/bin/python"
    }
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -49,8 +36,5 @@
// "postCreateCommand": "pip3 install --user -r requirements.txt",

// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode",

// Add the freshly containerized repo to the list of safe repositories
"postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
}
"remoteUser": "vscode"
}
.devcontainer/docker-compose.yml (filename inferred from the diff content)
@@ -4,9 +4,16 @@ version: '3.9'

services:
  auto-gpt:
    depends_on:
      - redis
    build:
      dockerfile: .devcontainer/Dockerfile
      context: ../
    tty: true
    environment:
      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
      REDIS_HOST: ${REDIS_HOST:-redis}
    volumes:
      - ../:/workspace/Auto-GPT
  redis:
    image: 'redis/redis-stack-server:latest'
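For context on the environment settings above, here is a minimal sketch of how a service might consume `MEMORY_BACKEND` and `REDIS_HOST`, assuming the redis-py package; the helper and its fallback values are illustrative, not AutoGPT's actual implementation.

```python
import os

import redis  # assumes the redis-py package is installed


def connect_memory_backend() -> redis.Redis:
    """Hypothetical helper mirroring the compose defaults above."""
    backend = os.getenv("MEMORY_BACKEND", "redis")
    if backend != "redis":
        raise ValueError(f"this sketch only covers the redis backend, got: {backend}")
    return redis.Redis(
        host=os.getenv("REDIS_HOST", "redis"),  # "redis" matches the compose service name
        port=int(os.getenv("REDIS_PORT", "6379")),
    )
```

Inside the compose network, the service name `redis` resolves to the `redis-stack-server` container, which is why the compose file defaults `REDIS_HOST` to `redis`.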
.dockerignore (filename inferred from the diff content)
@@ -2,7 +2,6 @@
*.template
*.yaml
*.yml
!prompt_settings.yaml

*.md
*.png
250 .env.template
@@ -1,16 +1,10 @@
# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co

################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
@@ -19,194 +13,202 @@ OPENAI_API_KEY=your-openai-api-key
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
# the following is an example:
# OPENAI_API_BASE_URL=http://localhost:443/v1

## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
# OPENAI_FUNCTIONS=False

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

## EXIT_KEY - Key to exit AUTO-GPT
# EXIT_KEY=n

## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
# PLAIN_OUTPUT=False

## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
# DISABLED_COMMAND_CATEGORIES=

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# TEMPERATURE=0

## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
# OPENAI_ORGANIZATION=

## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml

### AZURE
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4)
# SMART_LLM=gpt-4
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-ada-002

################################################################################
### SHELL EXECUTION
################################################################################

## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
# SHELL_COMMAND_CONTROL=denylist

## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su

## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
# SHELL_ALLOWLIST=
### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### General

## MEMORY_BACKEND - Memory backend type
# MEMORY_BACKEND=json_file

## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured - also works with Zilliz)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### Redis
### PINECONE
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_HOST=localhost

## REDIS_PORT - Redis port (Default: 6379)
# REDIS_PORT=6379

## REDIS_PASSWORD - Redis password (Default: "")
# REDIS_PASSWORD=

## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
## MILVUS_USERNAME - username for your Milvus database
## MILVUS_PASSWORD - password for your Milvus database
## MILVUS_SECURE - True to enable TLS. (Default: False)
## Setting MILVUS_ADDR to a `https://` URL will override this setting.
## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=localhost:19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Common

## IMAGE_PROVIDER - Image provider (Default: dalle)
### OPEN AI
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle

## IMAGE_SIZE - Image size (Default: 256)
# IMAGE_SIZE=256

### Huggingface (IMAGE_PROVIDER=huggingface)

### HUGGINGFACE
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=

## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
# SD_WEBUI_URL=http://localhost:7860
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
# AUDIO_TO_TEXT_PROVIDER=huggingface

## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
### HUGGINGFACE
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
################################################################################

## GITHUB_API_KEY - Github API key / PAT (Default: None)
# GITHUB_API_KEY=

## GITHUB_USERNAME - Github username (Default: None)
# GITHUB_USERNAME=
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True

## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
# BROWSE_CHUNK_MAX_LENGTH=3000

## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
### GOOGLE
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TEXT TO SPEECH PROVIDER
### TTS PROVIDER
################################################################################

## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
# TEXT_TO_SPEECH_PROVIDER=gtts
### MAC OS
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
# STREAMELEMENTS_VOICE=Brian
### STREAMELEMENTS
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
# ELEVENLABS_API_KEY=

## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=
### ELEVENLABS
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### CHAT MESSAGES
### TWITTER API
################################################################################

## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=

################################################################################
### ALLOWLISTED PLUGINS
################################################################################

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=

################################################################################
### CHAT PLUGIN SETTINGS
################################################################################
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False
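A minimal sketch of how settings from a .env file created from this template are typically loaded, assuming the python-dotenv package; the variable names come from the template above, while the loading code itself is illustrative rather than AutoGPT's actual configuration module.

```python
import os

from dotenv import load_dotenv  # assumes the python-dotenv package

# Reads KEY=value pairs from ./.env into the process environment.
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")
smart_llm = os.getenv("SMART_LLM", "gpt-4")          # defaults match the template
fast_llm = os.getenv("FAST_LLM", "gpt-3.5-turbo")
temperature = float(os.getenv("TEMPERATURE", "0"))
```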
2 .gitattributes vendored
@@ -1,5 +1,5 @@
# Exclude VCR cassettes from stats
tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated
tests/**/cassettes/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation
2 .github/CODEOWNERS vendored
@@ -1,2 +0,0 @@
.github/workflows/ @Significant-Gravitas/maintainers
autogpt/core @collijk
153 .github/ISSUE_TEMPLATE/1.bug.yml vendored
@@ -8,16 +8,14 @@ body:
### ⚠️ Before you continue
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
* If you need help, you can ask in the [discussions] section or in [#tech-support]
* **Thoroughly search the [existing issues] before creating a new one**
* Read our [wiki page on Contributing]
* **Throughly search the [existing issues] before creating a new one**

[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing

- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
@@ -27,29 +25,23 @@ body:
options:
- label: I have searched the existing issues, and there is no existing issue for my problem
required: true

- type: markdown
attributes:
value: |
Please confirm that the issue you have is described well and precise in the title above ⬆️.
A good rule of thumb: What would you type if you were searching for the issue?

For example:
BAD - my auto-gpt keeps looping
GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.

⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we can spend building AutoGPT.

Please help us help you by following these steps:
- Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
- Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
- Provide relevant info:
- Provide commit-hash (`git rev-parse HEAD` gets it) if possible
- If it's a pip/packages issue, mention this in the title and provide pip version, python version
- If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
Please provide a searchable summary of the issue in the title above ⬆️.

⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we spend building AutoGPT.

Please help us help you:
- Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
- Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
- Search for existing issues, "add comment" is tidier than "new issue"
- Ask on our Discord (https://discord.gg/autogpt)
- Provide relevant info:
- Provide commit-hash (`git rev-parse HEAD` gets it)
- If it's a pip/packages issue, provide pip version, python version
- If it's a crash, provide traceback.
- type: dropdown
attributes:
label: Which Operating System are you using?
@@ -62,15 +54,9 @@ body:
- Docker
- Devcontainer / Codespace
- Windows Subsystem for Linux (WSL)
- Other
- Other (Please specify in your problem)
validations:
required: true
nested_fields:
- type: text
attributes:
label: Specify the system
description: Please specify the system you are working on.

- type: dropdown
attributes:
label: Which version of Auto-GPT are you using?
@@ -85,80 +71,61 @@ body:
- Master (branch)
validations:
required: true

- type: dropdown
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
label: GPT-3 or GPT-4?
description: >
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
- GPT-4(32k)
validations:
required: true

- type: dropdown
- type: textarea
attributes:
label: Which area covers your issue best?
label: Steps to reproduce 🕹
description: |
**⚠️ Issues that we can't reproduce will be closed.**
- type: textarea
attributes:
label: Current behavior 😯
description: Describe what happens instead of the expected behavior.
- type: textarea
attributes:
label: Expected behavior 🤔
description: Describe what should happen.
- type: textarea
attributes:
label: Your prompt 📝
description: >
Select the area related to the issue you are reporting.
options:
- Installation and setup
- Memory
- Performance
- Prompt
- Commands
- Plugins
- AI Model Limitations
- Challenges
- Documentation
- Logging
- Agents
- Other
validations:
required: true
autolabels: true
nested_fields:
- type: text
attributes:
label: Specify the area
description: Please specify the area you think is best related to the issue.

- type: textarea
attributes:
label: Describe your issue.
description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
validations:
required: true

#Following are optional file content uploads
- type: markdown
attributes:
If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
value: |
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
```yaml
# Paste your prompt here
```
- type: textarea
attributes:
label: Your Logs 📒
description: |
Please include the log showing your error and the command that caused it, if applicable.
You can copy it from your terminal or from `logs/activity.log`.
This will help us understand your issue better!

"The log files are located in the folder 'logs' inside the main auto-gpt folder."

- type: textarea
attributes:
label: Upload Activity Log Content
description: |
Upload the activity log content, this can help us understand the issue better.
To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false

- type: textarea
attributes:
label: Upload Error Log Content
description: |
Upload the error log content, this will help us understand the issue better.
To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false
<details>
<summary><i>Example</i></summary>
```log
INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
Traceback (most recent call last):
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
self._interpret_response_line(
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
```
</details>
value: |
```log
<insert your logs here>
```
7 .github/ISSUE_TEMPLATE/2.feature.yml vendored
@@ -1,12 +1,13 @@
name: Feature request 🚀
description: Suggest a new idea for Auto-GPT!
description: Suggest a new idea for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
Please provide a searchable summary of the issue in the title above ⬆️.

Thanks for contributing by creating an issue! ❤️
- type: checkboxes
attributes:
label: Duplicates
@@ -25,4 +26,4 @@ body:
- type: textarea
attributes:
label: Motivation 🔦
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
11 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -14,8 +14,6 @@ Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg

Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)

By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->

### Background
@@ -35,14 +33,7 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
- [ ] I have run the following commands against my code to ensure it passes our linters:
```shell
black .
isort .
mypy
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
```
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
82 .github/workflows/benchmarks.yml vendored
@@ -1,73 +1,31 @@
name: Benchmarks
name: Run Benchmarks

on:
schedule:
- cron: '0 8 * * *'
workflow_dispatch:

jobs:
Benchmark:
name: ${{ matrix.config.task-name }}
build:
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
config:
- python-version: "3.10"
task: "tests/challenges"
task-name: "Mandatory Tasks"
- python-version: "3.10"
task: "--beat-challenges -ra tests/challenges"
task-name: "Challenging Tasks"

env:
python-version: '3.10'

steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
ref: master
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.config.python-version }}
- name: Set up Python ${{ env.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run pytest with coverage
run: |
rm -rf tests/Auto-GPT-test-cassettes
pytest -n auto --record-mode=all ${{ matrix.config.task }}
env:
CI: true
PROXY: ${{ secrets.PROXY }}
AGENT_MODE: ${{ secrets.AGENT_MODE }}
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
PLAIN_OUTPUT: True

- name: Upload logs as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs-${{ matrix.config.task-name }}
path: logs/

- name: Upload cassettes as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: cassettes-${{ matrix.config.task-name }}
path: tests/Auto-GPT-test-cassettes/
- name: benchmark
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py
200 .github/workflows/ci.yml vendored
@@ -2,24 +2,16 @@ name: Python CI

on:
push:
branches: [ master, ci-test* ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
branches: [ master ]
pull_request:
branches: [ stable, master, release-* ]
pull_request_target:
branches: [ master, release-*, ci-test* ]
branches: [ master ]

concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
lint:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

runs-on: ubuntu-latest
env:
min-python-version: "3.10"
@@ -27,26 +19,12 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -63,19 +41,7 @@ jobs:
run: isort . --check
if: success() || failure()

- name: Check mypy formatting
run: mypy
if: success() || failure()

- name: Check for unused imports and pass statements
run: |
cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)

test:
# eliminate duplicate runs
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))

permissions:
# Gives the action the necessary permissions for publishing new
# comments in pull requests.
@@ -85,175 +51,27 @@ jobs:
# comments (to avoid publishing multiple comments in the same PR)
contents: write
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
python-version: ["3.10"]
python-version: ["3.10", "3.11"]

steps:
- name: Checkout repository
- name: Check out repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: true

- name: Configure git user Auto-GPT-Bot
run: |
git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co"

- name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
cd tests/Auto-GPT-test-cassettes

if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi

if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch

git checkout $cassette_branch

# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}

- name: Install Python dependencies
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt

- name: Run pytest with coverage
- name: Run unittest tests with coverage
run: |
pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
tests/unit tests/integration tests/challenges
python tests/challenges/utils/build_current_score.py
env:
CI: true
PROXY: ${{ secrets.PROXY }}
AGENT_MODE: ${{ secrets.AGENT_MODE }}
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
PLAIN_OUTPUT: True
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)

git config "$config_key" \
"Authorization: Basic $base64_pat"

cd tests/Auto-GPT-test-cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"

echo "config_key=$config_key" >> $GITHUB_OUTPUT

- name: Push updated challenge scores
if: github.event_name == 'push'
run: |
score_file="tests/challenges/current_score.json"

if ! git diff --quiet $score_file; then
git add $score_file
git commit -m "Update challenge scores"
git push origin HEAD:${{ github.ref_name }}
else
echo "The challenge scores didn't change."
fi

- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || success() || failure()
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
else
cassette_branch="${{ github.ref_name }}"
fi

cd tests/Auto-GPT-test-cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/Auto-GPT-test-cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi

- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
TOKEN=${{ secrets.PAT_REVIEW }}
REPO=${{ github.repository }}

if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
curl -X POST \
-H "Authorization: Bearer $TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
-d '{"labels":["behaviour change"]}'

echo $TOKEN | gh auth login --with-token
gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi

- name: Upload logs as artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: test-logs
path: logs/
79 .github/workflows/docker-ci.yml vendored
@@ -3,11 +3,8 @@ name: Docker CI
on:
push:
branches: [ master ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
pull_request:
branches: [ master, release-*, stable ]
branches: [ master ]

concurrency:
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
@@ -76,51 +73,43 @@ jobs:
# Docker setup needs fixing before this is going to work: #1843
test:
runs-on: ubuntu-latest
timeout-minutes: 30
needs: build
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max

- id: test
name: Run tests
env:
PLAIN_OUTPUT: True
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing \
tests/unit tests/integration 2>&1
)
test_failure=$?
- id: test
name: Run tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
)
test_failure=$?

echo "$test_output"
echo "$test_output"

cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF

exit $test_failure
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF
12 .github/workflows/pr-label.yml vendored
@@ -3,10 +3,7 @@ name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, release-* ]
paths-ignore:
- 'tests/Auto-GPT-test-cassettes'
- 'tests/challenges/current_score.json'
branches: [ master ]
# So that the `dirtyLabel` is removed if conflicts are resolve
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -48,10 +45,11 @@ jobs:
s_label: 'size/s'
s_max_size: 10
m_label: 'size/m'
m_max_size: 100
m_max_size: 50
l_label: 'size/l'
l_max_size: 500
l_max_size: 200
xl_label: 'size/xl'
message_if_xl: >
This PR exceeds the recommended size of 500 lines.
This PR exceeds the recommended size of 200 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size
28 .github/workflows/sponsors_readme.yml vendored (new file)
@@ -0,0 +1,28 @@
name: Generate Sponsors README

on:
  workflow_dispatch:
  schedule:
    - cron: '0 */12 * * *'

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3

      - name: Generate Sponsors 💖
        uses: JamesIves/github-sponsors-readme-action@v1
        with:
          token: ${{ secrets.README_UPDATER_PAT }}
          file: 'README.md'
          minimum: 2500
          maximum: 99999

      - name: Deploy to GitHub Pages 🚀
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          branch: master
          folder: '.'
          token: ${{ secrets.README_UPDATER_PAT }}
15 .gitignore vendored
@@ -1,7 +1,12 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
**/auto_gpt_workspace/*
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
@@ -16,7 +21,6 @@ logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT

# Byte-compiled / optimized / DLL files
__pycache__/
@@ -31,8 +35,7 @@ __pycache__/
build/
develop-eggs/
dist/
/plugins/
plugins_config.yaml
plugins/
downloads/
eggs/
.eggs/
@@ -159,4 +162,4 @@ vicuna-*
openai/

# news
CURRENT_BULLETIN.md
CURRENT_BULLETIN.md
4 .gitmodules vendored
@@ -1,4 +0,0 @@
[submodule "tests/Auto-GPT-test-cassettes"]
path = tests/Auto-GPT-test-cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
branch = master
.pre-commit-config.yaml (filename inferred from the diff content)
@@ -22,21 +22,11 @@ repos:
- id: black
language_version: python3.10

- repo: https://github.com/pre-commit/mirrors-mypy
rev: 'v1.3.0'
hooks:
- id: mypy

- repo: local
hooks:
- id: autoflake
name: autoflake
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
language: python
types: [ python ]
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt tests/unit
entry: pytest --cov=autogpt --without-integration --without-slow-integration
language: system
pass_filenames: false
always_run: true
32 BULLETIN.md
@@ -1,29 +1,9 @@
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag

# v0.4.4 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
## GPT-4 is back!
Following OpenAI's recent GPT-4 GA announcement, the SMART_LLM .env setting
now defaults to GPT-4, and Auto-GPT will use GPT-4 by default in its main loop.
# INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins

### !! High Costs Warning !! 💰💀🚨
GPT-4 costs ~20x more than GPT-3.5-turbo.
Please take note of this before using SMART_LLM. You can use `--gpt3only`
or `--gpt4only` to force the use of GPT-3.5-turbo or GPT-4, respectively,
at runtime.
## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.

## Re-arch v1 preview release!
We've released a preview version of the re-arch code, under `autogpt/core`.
This is a major milestone for us, and we're excited to continue working on it.
We look forward to your feedback. Follow the process here:
https://github.com/Significant-Gravitas/Auto-GPT/issues/4770.

## Other highlights
Other fixes include plugins regressions, Azure config and security patches.

Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.
150 CONTRIBUTING.md
@@ -1,14 +1,148 @@
|
||||
We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
|
||||
# Contributing to Auto-GPT
|
||||
|
||||
We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?
|
||||
First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
|
||||
|
||||
However the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.
|
||||
This document provides guidelines and best practices to help you contribute effectively.
|
||||
|
||||
If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.
|
||||
## Code of Conduct
|
||||
|
||||
If you wish to involve with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
|
||||
By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.
|
||||
|
||||
In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).
|
||||
[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md
|
||||
|
||||
❤️ & 🔆
|
||||
The team @ Auto-GPT
|
||||
## 📢 A Quick Word
|
||||
Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.
|
||||
|
||||
However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
|
||||
Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. Fork the repository and clone your fork.
|
||||
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
|
||||
3. Make your changes in the new branch.
|
||||
4. Test your changes thoroughly.
|
||||
5. Commit and push your changes to your fork.
|
||||
6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
|
||||
|
||||
## How to Contribute

### Reporting Bugs

If you find a bug in the project, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A description of the problem, including steps to reproduce the issue.
- Any relevant logs, screenshots, or other supporting information.

### Suggesting Enhancements

If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
- Any relevant examples, mockups, or supporting information.
### Submitting Pull Requests

When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status).
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
- You should not include any unrelated or "extra" small tweaks or changes.
## Style Guidelines

### Code Formatting

We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.

To format your code, run the following commands in the project's root directory:

```bash
python -m black .
python -m isort .
```

Or if you have these tools installed globally:

```bash
black .
isort .
```
### Pre-Commit Hooks

We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:

```bash
pip install pre-commit
```

Run the following command in the project's root directory to install the pre-commit hooks:

```bash
pre-commit install
```

Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.

Happy coding, and once again, thank you for your contributions!

Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts
## Testing your changes

If you add or change code, make sure the updated code is covered by tests.
To increase coverage if necessary, [write tests using pytest].

For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).

[write tests using pytest]: https://realpython.com/pytest-python-testing/
### API-dependent tests

To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
requests and matching responses in so-called *cassettes*, allowing us to run the tests
in CI without needing actual API access.

When changes cause a test prompt to be generated differently, it will likely miss the
cache and make a request to the API, updating the cassette with the new request+response.
*Be sure to include the updated cassette in your PR!*

When you run Pytest locally:

- If the prompt is unchanged: you will not consume API tokens, because no new OpenAI calls are required.
- If the prompt changes in a way that makes the cassettes unusable:
  - If no API key is set, the test fails: it requires a new cassette, so add an API key to `.env`.
  - If an API key is present, the tests will make a real call to OpenAI.
    - If the test succeeds, your prompt changes didn't introduce regressions. This is good; commit your updated cassettes to your PR.
    - If the test fails, then either your change made Auto-GPT less capable, in which case you have to change your code, or the test might be poorly written, in which case you can make suggestions to change the test.

In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
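As a rough sketch of the mechanism (the test body, model name, and cassette path below are invented for illustration, and the project's own suite wires VCRpy up through pytest fixtures rather than this bare decorator):

```python
import vcr

from autogpt.llm import create_chat_completion  # the real call being recorded


# First run: performs the real OpenAI request and writes the cassette.
# Later runs (and CI): replay the cassette, so no API key is consumed.
@vcr.use_cassette("tests/cassettes/test_summarize_prompt.yaml", record_mode="once")
def test_summarize_prompt():
    reply = create_chat_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Summarize: the quick brown fox"}],
    )
    assert "fox" in reply.lower()
```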

### Community Challenges

Challenges are goals we need Auto-GPT to achieve.
To pick the challenge you like, go to the `tests/integration/challenges` folder and select the area you would like to work on.

- A challenge is new if `level_currently_beaten` is `None`.
- A challenge is in progress if `level_currently_beaten` is greater than or equal to 1.
- A challenge is beaten if `level_currently_beaten` equals `max_level`.

Here is an example of how to run memory challenge A and attempt to beat level 3:

```bash
pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3
```

To beat a challenge, you're not allowed to change anything in the tests folder; you have to add code in the autogpt folder.

Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
Don't hesitate to delete the cassettes associated with the challenge you're working on if you need to. Otherwise it will keep replaying the last run.

Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.
14 Dockerfile
@@ -6,13 +6,11 @@ FROM python:3.10-slim AS autogpt-base
# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver firefox-esr ca-certificates \
    && apt-get clean && rm -rf /var/lib/apt/lists/*
    chromium-driver firefox-esr \
    ca-certificates

# Install utilities
RUN apt-get update && apt-get install -y \
    curl jq wget git \
    && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN apt-get install -y curl jq wget git

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
@@ -24,7 +22,7 @@ ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]
ENTRYPOINT ["python", "-m", "autogpt"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
@@ -38,9 +36,5 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d
    pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS auto-gpt
autogpt/agent/agent.py
@@ -1,27 +1,11 @@
import json
import signal
import sys
from datetime import datetime
from pathlib import Path

from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.llm.chat import chat_with_ai
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import count_string_tokens
from autogpt.log_cycle.log_cycle import (
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.logs import logger, print_assistant_thoughts, remove_ansi_escape
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
@@ -34,6 +18,7 @@ class Agent:
    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines everything
          the AI needs to know to achieve its task successfully.
@@ -42,7 +27,7 @@ class Agent:

        triggering_prompt: The last sentence the AI will see before answering.
            For Auto-GPT, this prompt is:
            Determine exactly one command to use, and respond using the format specified
            Determine which next command to use, and respond using the format specified
            above:
        The triggering prompt is not part of the system prompt because between the
        system prompt and the triggering
@@ -58,165 +43,136 @@ class Agent:

    def __init__(
        self,
        ai_name: str,
        memory: VectorMemory,
        next_action_count: int,
        command_registry: CommandRegistry,
        ai_config: AIConfig,
        system_prompt: str,
        triggering_prompt: str,
        workspace_directory: str | Path,
        config: Config,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        self.history = MessageHistory(self)
        self.summary_memory = (
            "I was created."  # Initial memory necessary to avoid hallucination
        )
        self.last_memory_index = 0
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.ai_config = ai_config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.cycle_count = 0
        self.log_cycle_handler = LogCycleHandler()
        self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

    def start_interaction_loop(self):
        # Avoid circular imports
        from autogpt.app import execute_command, extract_command

        # Interaction Loop
        self.cycle_count = 0
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        user_input = ""

        # Signal handler for interrupting y -N
        def signal_handler(signum, frame):
            if self.next_action_count == 0:
                sys.exit()
            else:
                print(
                    Fore.RED
                    + "Interrupt signal received. Stopping continuous command execution."
                    + Style.RESET_ALL
                )
                self.next_action_count = 0

        signal.signal(signal.SIGINT, signal_handler)

        while True:
            # Discontinue if continuous limit is reached
            self.cycle_count += 1
            self.log_cycle_handler.log_count_within_cycle = 0
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.created_at,
                self.cycle_count,
                [m.raw() for m in self.history],
                FULL_MESSAGE_HISTORY_FILE_NAME,
            )
            loop_count += 1
            if (
                self.config.continuous_mode
                and self.config.continuous_limit > 0
                and self.cycle_count > self.config.continuous_limit
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ",
                    Fore.YELLOW,
                    f"{self.config.continuous_limit}",
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break
            # Send message to AI, get response
            with Spinner("Thinking... ", plain_output=self.config.plain_output):
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
                    self.config,
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.smart_token_limit,
                    self.config.smart_llm,
                )
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            try:
                assistant_reply_json = extract_json_from_response(
                    assistant_reply.content
                )
                validate_json(assistant_reply_json, self.config)
            except json.JSONDecodeError as e:
                logger.error(f"Exception while validating assistant reply JSON: {e}")
                assistant_reply_json = {}

            for plugin in self.config.plugins:
            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(assistant_reply_json)
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, self.config
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = extract_command(
                        assistant_reply_json, assistant_reply, self.config
                    )
                    if self.config.speak_mode:
                        say_text(f"I want to execute {command_name}", self.config)
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")

                    arguments = self._resolve_pathlike_command_args(arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.created_at,
                self.cycle_count,
                assistant_reply_json,
                NEXT_ACTION_FILE_NAME,
            )

            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
                f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
            )

            if not self.config.continuous_mode and self.next_action_count == 0:
            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

                logger.info(
                    f"Enter '{self.config.authorise_key}' to authorise command, "
                    f"'{self.config.authorise_key} -N' to run N continuous commands, "
                    f"'{self.config.exit_key}' to exit program, or enter feedback for "
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, "
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}..."
                )
                while True:
                    if self.config.chat_messages_enabled:
                        console_input = clean_input(
                            self.config, "Waiting for your response..."
                        )
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
                        console_input = clean_input(
                            self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == self.config.authorise_key:
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        else:
                            user_input = self_feedback_resp
                        break
                    elif console_input.lower().strip() == "":
                        logger.warn("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(
                        f"{self.config.authorise_key} -"
                    ):
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
@@ -224,24 +180,17 @@ class Agent:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            logger.warn(
                                f"Invalid input format. Please enter '{self.config.authorise_key} -n' "
                                "where n is the number of continuous tasks."
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == self.config.exit_key:
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        self.log_cycle_handler.log_cycle(
                            self.ai_config.ai_name,
                            self.created_at,
                            self.cycle_count,
                            user_input,
                            USER_INPUT_FILE_NAME,
                        )
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
@@ -254,43 +203,37 @@ class Agent:
                    logger.info("Exiting...")
                    break
            else:
                # First log new-line so user can differentiate sections better in console
                logger.typewriter_log("\n")
                # Print authorized commands left value
                # Print command
                logger.typewriter_log(
                    f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = f"Could not execute command: {arguments}"
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            else:
                for plugin in self.config.plugins:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    command_name=command_name,
                    arguments=arguments,
                    agent=self,
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                result_tlength = count_string_tokens(
                    str(command_result), self.config.smart_llm
                )
                memory_tlength = count_string_tokens(
                    str(self.history.summary_message()), self.config.smart_llm
                )
                if result_tlength + memory_tlength + 600 > self.smart_token_limit:
                    result = f"Failure: command {command_name} returned too much output. \
                        Do not execute this command again with the same arguments."

                for plugin in self.config.plugins:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
                    result = plugin.post_command(command_name, result)
@@ -300,10 +243,12 @@ class Agent:
            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.history.add("system", result, "action_result")
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.history.add("system", "Unable to execute command", "action_result")
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )
@@ -318,3 +263,28 @@ class Agent:
                self.workspace.get_path(command_args[pathlike])
            )
        return command_args

    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as 'reasoning',
        'plan', 'thoughts', and 'criticism'. It combines these elements into a single
        feedback message and uses the create_chat_completion() function to generate a
        response based on the input message.
        Args:
            thoughts (dict): A dictionary containing thought elements like reasoning,
            plan, thoughts, and criticism.
        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role

        feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        thought = thoughts.get("thoughts", "")
        criticism = thoughts.get("criticism", "")
        feedback_thoughts = thought + reasoning + plan + criticism
        return create_chat_completion(
            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
            llm_model,
        )
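
To make the self-feedback path concrete, here is a minimal usage sketch (the `agent` instance, thought values, and model name are placeholders invented for illustration, not values from this diff):

```python
# Hypothetical thoughts dict, shaped like the assistant's JSON reply
thoughts = {
    "thoughts": "I should list the workspace files first. ",
    "reasoning": "Knowing what exists avoids overwriting prior work. ",
    "plan": "- list files\n- read the relevant one ",
    "criticism": "This may be over-planning for a simple task.",
}

feedback = agent.get_self_feedback(thoughts, "gpt-3.5-turbo")
# A reply starting with 'y' (the authorise key) lets the loop proceed as if
# the user had approved the command; anything else is fed back to the agent.
print(feedback)
```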

autogpt/agent/agent_manager.py
@@ -1,70 +1,69 @@
"""Agent manager for managing GPT agents"""
|
||||
from __future__ import annotations
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.base import ChatSequence
|
||||
from autogpt.llm.chat import Message, create_chat_completion
|
||||
from typing import List
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm import Message, create_chat_completion
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
class AgentManager(metaclass=Singleton):
|
||||
"""Agent manager for managing GPT agents"""
|
||||
|
||||
def __init__(self, config: Config):
|
||||
def __init__(self):
|
||||
self.next_key = 0
|
||||
self.agents: dict[
|
||||
int, tuple[str, list[Message], str]
|
||||
] = {} # key, (task, full_message_history, model)
|
||||
self.config = config
|
||||
self.agents = {} # key, (task, full_message_history, model)
|
||||
self.cfg = Config()
|
||||
|
||||
# Create new GPT agent
|
||||
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
|
||||
|
||||
def create_agent(
|
||||
self, task: str, creation_prompt: str, model: str
|
||||
) -> tuple[int, str]:
|
||||
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
|
||||
"""Create a new agent and return its key
|
||||
|
||||
Args:
|
||||
task: The task to perform
|
||||
creation_prompt: Prompt passed to the LLM at creation
|
||||
model: The model to use to run this agent
|
||||
prompt: The prompt to use
|
||||
model: The model to use
|
||||
|
||||
Returns:
|
||||
The key of the new agent
|
||||
"""
|
||||
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])
|
||||
|
||||
for plugin in self.config.plugins:
|
||||
messages: List[Message] = [
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction(messages.raw()):
|
||||
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
messages.extend(iter(plugin_messages))
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
prompt=messages, config=self.config
|
||||
).content
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
messages.add("assistant", agent_reply)
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = ""
|
||||
for i, plugin in enumerate(self.config.plugins):
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.add("assistant", plugins_reply)
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
key = self.next_key
|
||||
# This is done instead of len(agents) to make keys unique even if agents
|
||||
# are deleted
|
||||
self.next_key += 1
|
||||
|
||||
self.agents[key] = (task, list(messages), model)
|
||||
self.agents[key] = (task, messages, model)
|
||||
|
||||
for plugin in self.config.plugins:
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
@@ -84,34 +83,35 @@ class AgentManager(metaclass=Singleton):
|
||||
task, messages, model = self.agents[int(key)]
|
||||
|
||||
# Add user message to message history before sending to agent
|
||||
messages = ChatSequence.for_model(model, messages)
|
||||
messages.add("user", message)
|
||||
messages.append({"role": "user", "content": message})
|
||||
|
||||
for plugin in self.config.plugins:
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_pre_instruction():
|
||||
continue
|
||||
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
|
||||
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
|
||||
if plugin_messages := plugin.pre_instruction(messages):
|
||||
for plugin_message in plugin_messages:
|
||||
messages.append(plugin_message)
|
||||
|
||||
# Start GPT instance
|
||||
agent_reply = create_chat_completion(
|
||||
prompt=messages, config=self.config
|
||||
).content
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
|
||||
messages.add("assistant", agent_reply)
|
||||
messages.append({"role": "assistant", "content": agent_reply})
|
||||
|
||||
plugins_reply = agent_reply
|
||||
for i, plugin in enumerate(self.config.plugins):
|
||||
for i, plugin in enumerate(self.cfg.plugins):
|
||||
if not plugin.can_handle_on_instruction():
|
||||
continue
|
||||
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
|
||||
if plugin_result := plugin.on_instruction(messages):
|
||||
sep = "\n" if i else ""
|
||||
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
|
||||
# Update full message history
|
||||
if plugins_reply and plugins_reply != "":
|
||||
messages.add("assistant", plugins_reply)
|
||||
messages.append({"role": "assistant", "content": plugins_reply})
|
||||
|
||||
for plugin in self.config.plugins:
|
||||
for plugin in self.cfg.plugins:
|
||||
if not plugin.can_handle_post_instruction():
|
||||
continue
|
||||
agent_reply = plugin.post_instruction(agent_reply)
|
||||
|
||||
221 autogpt/app.py
@@ -1,10 +1,20 @@
""" Command and Control """
import json
from typing import Dict
from typing import Dict, List, NoReturn, Union

from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config
from autogpt.llm import ChatModelResponse
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
from autogpt.url_utils.validators import validate_url

CFG = Config()
AGENT_MANAGER = AgentManager()


def is_valid_int(value: str) -> bool:
@@ -23,15 +33,11 @@ def is_valid_int(value: str) -> bool:
    return False


def extract_command(
    assistant_reply_json: Dict, assistant_reply: ChatModelResponse, config: Config
):
def get_command(response_json: Dict):
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object
        response_json (json): The response from the AI

    Returns:
        tuple: The command name and arguments
@@ -41,24 +47,14 @@ def extract_command(

        Exception: If any other error occurs
    """
    if config.openai_functions:
        if assistant_reply.function_call is None:
            return "Error:", "No 'function_call' in assistant reply"
        assistant_reply_json["command"] = {
            "name": assistant_reply.function_call.name,
            "args": json.loads(assistant_reply.function_call.arguments),
        }
    try:
        if "command" not in assistant_reply_json:
        if "command" not in response_json:
            return "Error:", "Missing 'command' object in JSON"

        if not isinstance(assistant_reply_json, dict):
            return (
                "Error:",
                f"The previous message sent was not a dictionary {assistant_reply_json}",
            )
        if not isinstance(response_json, dict):
            return "Error:", f"'response_json' object is not dictionary {response_json}"

        command = assistant_reply_json["command"]
        command = response_json["command"]
        if not isinstance(command, dict):
            return "Error:", "'command' object is not a dictionary"

@@ -78,37 +74,182 @@ def extract_command(
        return "Error:", str(e)

def map_command_synonyms(command_name: str):
    """Takes the original command name given by the AI, and checks if the
    string matches a list of common/known hallucinations
    """
    synonyms = [
        ("write_file", "write_to_file"),
        ("create_file", "write_to_file"),
        ("search", "google"),
    ]
    for seen_command, actual_command_name in synonyms:
        if command_name == seen_command:
            return actual_command_name
    return command_name


def execute_command(
    command_registry: CommandRegistry,
    command_name: str,
    arguments: dict[str, str],
    agent: Agent,
    arguments,
    prompt: PromptGenerator,
):
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        agent (Agent): The agent that is executing the command

    Returns:
        str: The result of the command
    """
    try:
        # Execute a native command with the same name or alias, if it exists
        if command := agent.command_registry.get_command(command_name):
            return command(**arguments, agent=agent)
        cmd = command_registry.commands.get(command_name)

        # Handle non-native commands (e.g. from plugins)
        for command in agent.ai_config.prompt_generator.commands:
            if (
                command_name == command["label"].lower()
                or command_name == command["name"].lower()
            ):
                return command["function"](**arguments)
        # If the command is found, call it with the provided arguments
        if cmd:
            return cmd(**arguments)

        raise RuntimeError(
            f"Cannot execute '{command_name}': unknown command."
            " Do not try to use this command again."
        )
        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])

        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "task_complete":
            shutdown()
        else:
            for command in prompt.commands:
                if (
                    command_name == command["label"].lower()
                    or command_name == command["name"].lower()
                ):
                    return command["function"](**arguments)
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str) -> str:
    """Get a summary of the text on a webpage, focused on a question

    Args:
        url (str): The url to scrape
        question (str): The question to summarize the text for

    Returns:
        str: The summary of the text
    """
    text = scrape_text(url)
    summary = summarize_text(url, text, question)
    return f""" "Result" : {summary}"""


@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Get the hyperlinks on a webpage

    Args:
        url (str): The url to scrape

    Returns:
        str or list: The hyperlinks on the page
    """
    return scrape_links(url)


def shutdown() -> NoReturn:
    """Shut down the program"""
    logger.info("Shutting down...")
    quit()


@command(
    "start_agent",
    "Start GPT Agent",
    '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
    """Start an agent with a given name, task, and prompt

    Args:
        name (str): The name of the agent
        task (str): The task of the agent
        prompt (str): The prompt for the agent
        model (str): The model to use for the agent

    Returns:
        str: The response of the agent
    """
    # Remove underscores from name
    voice_name = name.replace("_", " ")

    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if CFG.speak_mode:
        say_text(agent_intro, 1)
    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)

    if CFG.speak_mode:
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = AGENT_MANAGER.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str) -> str:
    """Message an agent with a given key and message"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = AGENT_MANAGER.message_agent(int(key), message)
    else:
        return "Invalid key, must be an integer."

    # Speak response
    if CFG.speak_mode:
        say_text(agent_response, 1)
    return agent_response


@command("list_agents", "List GPT Agents", "")
def list_agents() -> str:
    """List all agents

    Returns:
        str: A list of all agents
    """
    return "List of agents:\n" + "\n".join(
        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
    )


@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str) -> str:
    """Delete an agent with a given key

    Args:
        key (str): The key of the agent to delete

    Returns:
        str: A message indicating whether the agent was deleted or not
    """
    result = AGENT_MANAGER.delete_agent(key)
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
autogpt/cli.py
@@ -1,6 +1,4 @@
"""Main script for the autogpt package."""
from typing import Optional

import click


@@ -17,11 +15,6 @@ import click
    "-C",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "--prompt-settings",
    "-P",
    help="Specifies which prompt_settings.yaml file to use.",
)
@click.option(
    "-l",
    "--continuous-limit",
@@ -67,29 +60,12 @@ import click
    is_flag=True,
    help="Installs external dependencies for 3rd party plugins.",
)
@click.option(
    "--ai-name",
    type=str,
    help="AI name override",
)
@click.option(
    "--ai-role",
    type=str,
    help="AI role override",
)
@click.option(
    "--ai-goal",
    type=str,
    multiple=True,
    help="AI goal override; may be used multiple times to pass multiple goals",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -101,9 +77,6 @@ def main(
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
    ai_name: Optional[str],
    ai_role: Optional[str],
    ai_goal: tuple[str],
) -> None:
    """
    Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4, pushing the boundaries of AI.
@@ -118,7 +91,6 @@ def main(
        continuous,
        continuous_limit,
        ai_settings,
        prompt_settings,
        skip_reprompt,
        speak,
        debug,
@@ -130,9 +102,6 @@ def main(
        skip_news,
        workspace_directory,
        install_plugin_deps,
        ai_name,
        ai_role,
        ai_goal,
    )
autogpt/command_decorator.py
@@ -1,57 +0,0 @@
import functools
from typing import Any, Callable, Optional, TypedDict

from autogpt.config import Config
from autogpt.models.command import Command, CommandParameter

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class CommandParameterSpec(TypedDict):
    type: str
    description: str
    required: bool


def command(
    name: str,
    description: str,
    parameters: dict[str, CommandParameterSpec],
    enabled: bool | Callable[[Config], bool] = True,
    disabled_reason: Optional[str] = None,
    aliases: list[str] = [],
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        typed_parameters = [
            CommandParameter(
                name=param_name,
                description=parameter.get("description"),
                type=parameter.get("type", "string"),
                required=parameter.get("required", False),
            )
            for param_name, parameter in parameters.items()
        ]
        cmd = Command(
            name=name,
            description=description,
            method=func,
            parameters=typed_parameters,
            enabled=enabled,
            disabled_reason=disabled_reason,
            aliases=aliases,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
31 autogpt/commands/analyze_code.py Normal file
@@ -0,0 +1,31 @@
"""Code evaluation module."""
from __future__ import annotations

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "analyze_code",
    "Analyze Code",
    '"code": "<full_code_string>"',
)
def analyze_code(code: str) -> list[str]:
    """
    A function that takes in a string and returns a response from a create chat
    completion api call.

    Parameters:
        code (str): Code to be evaluated.
    Returns:
        A result string from create chat completion. A list of suggestions to
        improve the code.
    """

    function_string = "def analyze_code(code: str) -> list[str]:"
    args = [code]
    description_string = (
        "Analyzes the given code and returns a list of suggestions for improvements."
    )

    return call_ai_function(function_string, args, description_string)
61 autogpt/commands/audio_text.py Normal file
@@ -0,0 +1,61 @@
"""Commands for converting audio to text."""
import json

import requests

from autogpt.commands.command import command
from autogpt.config import Config

CFG = Config()


@command(
    "read_audio_from_file",
    "Convert Audio to text",
    '"filename": "<filename>"',
    CFG.huggingface_audio_to_text_model,
    "Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(filename: str) -> str:
    """
    Convert audio to text.

    Args:
        filename (str): The path to the audio file

    Returns:
        str: The text from the audio
    """
    with open(filename, "rb") as audio_file:
        audio = audio_file.read()
    return read_audio(audio)


def read_audio(audio: bytes) -> str:
    """
    Convert audio to text.

    Args:
        audio (bytes): The audio to convert

    Returns:
        str: The text from the audio
    """
    model = CFG.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
    api_token = CFG.huggingface_api_token
    headers = {"Authorization": f"Bearer {api_token}"}

    if api_token is None:
        raise ValueError(
            "You need to set your Hugging Face API token in the config file."
        )

    response = requests.post(
        api_url,
        headers=headers,
        data=audio,
    )

    text = json.loads(response.content.decode("utf-8"))["text"]
    return f"The audio says: {text}"
156 autogpt/commands/command.py Normal file
@@ -0,0 +1,156 @@
import functools
import importlib
import inspect
from typing import Any, Callable, Optional

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class Command:
    """A class representing a command.

    Attributes:
        name (str): The name of the command.
        description (str): A brief description of what the command does.
        signature (str): The signature of the function that the command executes. Defaults to None.
    """

    def __init__(
        self,
        name: str,
        description: str,
        method: Callable[..., Any],
        signature: str = "",
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ):
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature if signature else str(inspect.signature(self.method))
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        if not self.enabled:
            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
        return self.method(*args, **kwargs)

    def __str__(self) -> str:
        return f"{self.name}: {self.description}, args: {self.signature}"


class CommandRegistry:
    """
    The CommandRegistry class is a manager for a collection of Command objects.
    It allows the registration, modification, and retrieval of Command objects,
    as well as the scanning and loading of command plugins from a specified
    directory.
    """

    def __init__(self):
        self.commands = {}

    def _import_module(self, module_name: str) -> Any:
        return importlib.import_module(module_name)

    def _reload_module(self, module: Any) -> Any:
        return importlib.reload(module)

    def register(self, cmd: Command) -> None:
        self.commands[cmd.name] = cmd

    def unregister(self, command_name: str):
        if command_name in self.commands:
            del self.commands[command_name]
        else:
            raise KeyError(f"Command '{command_name}' not found in registry.")

    def reload_commands(self) -> None:
        """Reloads all loaded command plugins."""
        for cmd_name in self.commands:
            cmd = self.commands[cmd_name]
            module = self._import_module(cmd.__module__)
            reloaded_module = self._reload_module(module)
            if hasattr(reloaded_module, "register"):
                reloaded_module.register(self)

    def get_command(self, name: str) -> Callable[..., Any]:
        return self.commands[name]

    def call(self, command_name: str, **kwargs) -> Any:
        if command_name not in self.commands:
            raise KeyError(f"Command '{command_name}' not found in registry.")
        command = self.commands[command_name]
        return command(**kwargs)

    def command_prompt(self) -> str:
        """
        Returns a string representation of all registered `Command` objects for use in a prompt
        """
        commands_list = [
            f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
        ]
        return "\n".join(commands_list)

    def import_commands(self, module_name: str) -> None:
        """
        Imports the specified Python module containing command plugins.

        This method imports the associated module and registers any functions or
        classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
        as `Command` objects. The registered `Command` objects are then added to the
        `commands` dictionary of the `CommandRegistry` object.

        Args:
            module_name (str): The name of the module to import for command plugins.
        """

        module = importlib.import_module(module_name)

        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            # Register decorated functions
            if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
                attr, AUTO_GPT_COMMAND_IDENTIFIER
            ):
                self.register(attr.command)
            # Register command classes
            elif (
                inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
            ):
                cmd_instance = attr()
                self.register(cmd_instance)


def command(
    name: str,
    description: str,
    signature: str = "",
    enabled: bool = True,
    disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        cmd = Command(
            name=name,
            description=description,
            method=func,
            signature=signature,
            enabled=enabled,
            disabled_reason=disabled_reason,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
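
To make the registry mechanics concrete, here is a minimal sketch of defining, registering, and invoking a command with this module (the `greet` command is invented for illustration):

```python
@command("greet", "Greet someone", '"name": "<name>"')
def greet(name: str) -> str:
    return f"Hello, {name}!"


registry = CommandRegistry()
registry.register(greet.command)  # the decorator attached a Command object

print(registry.call("greet", name="world"))  # -> Hello, world!
print(registry.command_prompt())
# -> 1. greet: Greet someone, args: "name": "<name>"
```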

autogpt/commands/execute_code.py
@@ -4,79 +4,17 @@ import subprocess
from pathlib import Path

import docker
from docker.errors import DockerException, ImageNotFound
from docker.models.containers import Container as DockerContainer
from docker.errors import ImageNotFound

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

ALLOWLIST_CONTROL = "allowlist"
DENYLIST_CONTROL = "denylist"
CFG = Config()


@command(
    "execute_python_code",
    "Creates a Python file and executes it",
    {
        "code": {
            "type": "string",
            "description": "The Python code to run",
            "required": True,
        },
        "name": {
            "type": "string",
            "description": "A name to be given to the python file",
            "required": True,
        },
    },
)
def execute_python_code(code: str, name: str, agent: Agent) -> str:
    """Create and execute a Python file in a Docker container and return the STDOUT of the
    executed code. If there is any data that needs to be captured, use a print statement.

    Args:
        code (str): The Python code to run
        name (str): A name to be given to the Python file

    Returns:
        str: The STDOUT captured from the code when it ran
    """
    ai_name = agent.ai_name
    code_dir = agent.workspace.get_path(Path(ai_name, "executed_code"))
    os.makedirs(code_dir, exist_ok=True)

    if not name.endswith(".py"):
        name = name + ".py"

    # The `name` arg is not covered by Agent._resolve_pathlike_command_args(),
    # so sanitization must be done here to prevent path traversal.
    file_path = agent.workspace.get_path(code_dir / name)
    if not file_path.is_relative_to(code_dir):
        return "Error: 'name' argument resulted in path traversal, operation aborted"

    try:
        with open(file_path, "w+", encoding="utf-8") as f:
            f.write(code)

        return execute_python_file(str(file_path), agent)
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "execute_python_file",
    "Executes an existing Python file",
    {
        "filename": {
            "type": "string",
            "description": "The name of the file to execute",
            "required": True,
        },
    },
)
def execute_python_file(filename: str, agent: Agent) -> str:
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
def execute_python_file(filename: str) -> str:
    """Execute a Python file in a Docker container and return the output

    Args:
@@ -85,26 +23,17 @@ def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
Returns:
|
||||
str: The output of the file
|
||||
"""
|
||||
logger.info(
|
||||
f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
|
||||
)
|
||||
logger.info(f"Executing file '{filename}'")
|
||||
|
||||
if not filename.endswith(".py"):
|
||||
return "Error: Invalid file type. Only .py files are allowed."
|
||||
|
||||
file_path = Path(filename)
|
||||
if not file_path.is_file():
|
||||
# Mimic the response that you get from the command line so that it's easier to identify
|
||||
return (
|
||||
f"python: can't open file '{filename}': [Errno 2] No such file or directory"
|
||||
)
|
||||
if not os.path.isfile(filename):
|
||||
return f"Error: File '{filename}' does not exist."
|
||||
|
||||
if we_are_running_in_a_docker_container():
|
||||
result = subprocess.run(
|
||||
["python", str(file_path)],
|
||||
capture_output=True,
|
||||
encoding="utf8",
|
||||
cwd=agent.config.workspace_path,
|
||||
f"python {filename}", capture_output=True, encoding="utf8", shell=True
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
@@ -134,12 +63,11 @@ def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
logger.info(f"{status}: {progress}")
|
||||
elif status:
|
||||
logger.info(status)
|
||||
|
||||
container: DockerContainer = client.containers.run(
|
||||
container = client.containers.run(
|
||||
image_name,
|
||||
["python", str(file_path.relative_to(agent.workspace.root))],
|
||||
f"python {Path(filename).relative_to(CFG.workspace_path)}",
|
||||
volumes={
|
||||
agent.config.workspace_path: {
|
||||
CFG.workspace_path: {
|
||||
"bind": "/workspace",
|
||||
"mode": "ro",
|
||||
}
|
||||
@@ -148,7 +76,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
stderr=True,
|
||||
stdout=True,
|
||||
detach=True,
|
||||
) # type: ignore
|
||||
)
|
||||
|
||||
container.wait()
|
||||
logs = container.logs().decode("utf-8")
|
||||
@@ -159,7 +87,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
|
||||
return logs
|
||||
|
||||
except DockerException as e:
|
||||
except docker.errors.DockerException as e:
|
||||
logger.warn(
|
||||
"Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
|
||||
)
|
||||
@@ -169,43 +97,16 @@ def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def validate_command(command: str, config: Config) -> bool:
"""Validate a command to ensure it is allowed

Args:
command (str): The command to validate
config (Config): The config to use to validate the command

Returns:
bool: True if the command is allowed, False otherwise
"""
if not command:
return False

command_name = command.split()[0]

if config.shell_command_control == ALLOWLIST_CONTROL:
return command_name in config.shell_allowlist
else:
return command_name not in config.shell_denylist


@command(
"execute_shell",
"Executes a Shell Command, non-interactive commands only",
{
"command_line": {
"type": "string",
"description": "The command line to execute",
"required": True,
}
},
enabled=lambda config: config.execute_local_commands,
disabled_reason="You are not allowed to run local shell commands. To execute"
"Execute Shell Command, non-interactive commands only",
'"command_line": "<command_line>"',
CFG.execute_local_commands,
"You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config file: .env - do not attempt to bypass the restriction.",
"in your config. Do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str, agent: Agent) -> str:
def execute_shell(command_line: str) -> str:
"""Execute a shell command and return the output

Args:
@@ -214,14 +115,11 @@ def execute_shell(command_line: str, agent: Agent) -> str:
Returns:
str: The output of the command
"""
if not validate_command(command_line, agent.config):
logger.info(f"Command '{command_line}' not allowed")
return "Error: This Shell Command is not allowed."

current_dir = Path.cwd()
# Change dir into workspace if necessary
if not current_dir.is_relative_to(agent.config.workspace_path):
os.chdir(agent.config.workspace_path)
if not current_dir.is_relative_to(CFG.workspace_path):
os.chdir(CFG.workspace_path)

logger.info(
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
@@ -238,20 +136,14 @@ def execute_shell(command_line: str, agent: Agent) -> str:

@command(
"execute_shell_popen",
"Executes a Shell Command, non-interactive commands only",
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
lambda config: config.execute_local_commands,
"Execute Shell Command, non-interactive commands only",
'"command_line": "<command_line>"',
CFG.execute_local_commands,
"You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line, agent: Agent) -> str:
def execute_shell_popen(command_line) -> str:
"""Execute a shell command with Popen and returns an english description
of the event and the process id

@@ -261,14 +153,11 @@ def execute_shell_popen(command_line, agent: Agent) -> str:
Returns:
str: Description of the fact that the process started and its id
"""
if not validate_command(command_line, agent.config):
logger.info(f"Command '{command_line}' not allowed")
return "Error: This Shell Command is not allowed."

current_dir = os.getcwd()
# Change dir into workspace if necessary
if agent.config.workspace_path not in current_dir:
os.chdir(agent.config.workspace_path)
if CFG.workspace_path not in current_dir:
os.chdir(CFG.workspace_path)

logger.info(
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"

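The allow/deny logic used by both shell commands is easy to test in isolation. A minimal restatement of validate_command with plain arguments standing in for the Config object (field names as in the diff; the wiring is an assumption):

ALLOWLIST_CONTROL = "allowlist"


def is_command_allowed(
    command: str, control: str, allowlist: list[str], denylist: list[str]
) -> bool:
    # Only the executable name (first token) is checked, never its arguments.
    if not command:
        return False
    command_name = command.split()[0]
    if control == ALLOWLIST_CONTROL:
        return command_name in allowlist
    return command_name not in denylist


# With control="allowlist" and allowlist=["ls", "cat"]:
#   is_command_allowed("ls -la", "allowlist", ["ls", "cat"], [])   -> True
#   is_command_allowed("rm -rf /", "allowlist", ["ls", "cat"], []) -> False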
@@ -1,132 +1,83 @@
"""File operations for AutoGPT"""
from __future__ import annotations

import hashlib
import os
import os.path
from typing import Generator, Literal
from typing import Generator

from confection import Config
import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.file_operations_utils import read_textual_file
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, VectorMemory
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

Operation = Literal["write", "append", "delete"]
CFG = Config()


def text_checksum(text: str) -> str:
"""Get the hex checksum for the given text."""
return hashlib.md5(text.encode("utf-8")).hexdigest()


def operations_from_log(
log_path: str,
) -> Generator[tuple[Operation, str, str | None], None, None]:
"""Parse the file operations log and return a tuple containing the log entries"""
try:
log = open(log_path, "r", encoding="utf-8")
except FileNotFoundError:
return

for line in log:
line = line.replace("File Operation Logger", "").strip()
if not line:
continue
operation, tail = line.split(": ", maxsplit=1)
operation = operation.strip()
if operation in ("write", "append"):
try:
path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
except ValueError:
logger.warn(f"File log entry lacks checksum: '{line}'")
path, checksum = tail.strip(), None
yield (operation, path, checksum)
elif operation == "delete":
yield (operation, tail.strip(), None)

log.close()


def file_operations_state(log_path: str) -> dict[str, str]:
"""Iterates over the operations log and returns the expected state.

Parses a log file at config.file_logger_path to construct a dictionary that maps
each file path written or appended to its checksum. Deleted files are removed
from the dictionary.

Returns:
A dictionary mapping file paths to their checksums.

Raises:
FileNotFoundError: If config.file_logger_path is not found.
ValueError: If the log file content is not in the expected format.
"""
state = {}
for operation, path, checksum in operations_from_log(log_path):
if operation in ("write", "append"):
state[path] = checksum
elif operation == "delete":
del state[path]
return state


def is_duplicate_operation(
operation: Operation, filename: str, config: Config, checksum: str | None = None
) -> bool:
"""Check if the operation has already been performed
def check_duplicate_operation(operation: str, filename: str) -> bool:
"""Check if the operation has already been performed on the given file

Args:
operation: The operation to check for
filename: The name of the file to check for
config: The agent config
checksum: The checksum of the contents to be written
operation (str): The operation to check for
filename (str): The name of the file to check for

Returns:
True if the operation has already been performed on the file
bool: True if the operation has already been performed on the file
"""
state = file_operations_state(config.file_logger_path)
if operation == "delete" and filename not in state:
return True
if operation == "write" and state.get(filename) == checksum:
return True
return False
log_content = read_file(CFG.file_logger_path)
log_entry = f"{operation}: {filename}\n"
return log_entry in log_content


def log_operation(
operation: str, filename: str, agent: Agent, checksum: str | None = None
) -> None:
def log_operation(operation: str, filename: str) -> None:
"""Log the file operation to the file_logger.txt

Args:
operation: The operation to log
filename: The name of the file the operation was performed on
checksum: The checksum of the contents to be written
operation (str): The operation to log
filename (str): The name of the file the operation was performed on
"""
log_entry = f"{operation}: {filename}"
if checksum is not None:
log_entry += f" #{checksum}"
logger.debug(f"Logging file operation: {log_entry}")
append_to_file(
agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
)
log_entry = f"{operation}: {filename}\n"
append_to_file(CFG.file_logger_path, log_entry, should_log=False)

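The file-operations log that operations_from_log and file_operations_state parse is plain text, one entry per line, with an optional " #<md5>" suffix on write/append entries. A small sketch of the replay, with made-up checksums:

sample_log = (
    "write: notes.txt #9f06243aa13af7d8f8b8a3c2e1d0c4b5\n"  # checksums are made up
    "append: notes.txt #af0c2c9f4f1d1b1b1a1e1d1c1b1a1918\n"
    "write: plan.md #0123456789abcdef0123456789abcdef\n"
    "delete: plan.md\n"
)


def replay(log_text: str) -> dict:
    # Mirrors file_operations_state: write/append set the checksum, delete removes the path.
    state = {}
    for line in log_text.splitlines():
        operation, tail = line.split(": ", maxsplit=1)
        if operation in ("write", "append"):
            path, _, checksum = tail.partition(" #")
            state[path.strip()] = checksum or None
        elif operation == "delete":
            state.pop(tail.strip(), None)
    return state


assert replay(sample_log) == {"notes.txt": "af0c2c9f4f1d1b1b1a1e1d1c1b1a1918"}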
@command(
"read_file",
"Read an existing file",
{
"filename": {
"type": "string",
"description": "The path of the file to read",
"required": True,
}
},
)
def read_file(filename: str, agent: Agent) -> str:
def split_file(
content: str, max_length: int = 4000, overlap: int = 0
) -> Generator[str, None, None]:
"""
Split text into chunks of a specified maximum length with a specified overlap
between chunks.

:param content: The input text to be split into chunks
:param max_length: The maximum length of each chunk,
default is 4000 (about 1k token)
:param overlap: The number of overlapping characters between chunks,
default is no overlap
:return: A generator yielding chunks of text
"""
start = 0
content_length = len(content)

while start < content_length:
end = start + max_length
if end + overlap < content_length:
chunk = content[start : end + overlap - 1]
else:
chunk = content[start:content_length]

# Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
if len(chunk) <= overlap:
break

yield chunk
start += max_length - overlap

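The chunking arithmetic in split_file is easiest to see on a tiny input. A standalone copy of the generator followed by a worked example:

def split(content: str, max_length: int, overlap: int):
    # Same logic as split_file above: a fixed stride of (max_length - overlap),
    # with each chunk extended by overlap - 1 characters into the next window.
    start = 0
    while start < len(content):
        end = start + max_length
        if end + overlap < len(content):
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:]
        if len(chunk) <= overlap:
            break  # the tail was already covered by the previous chunk
        yield chunk
        start += max_length - overlap


print(list(split("abcdefghij", max_length=4, overlap=2)))
# -> ['abcde', 'cdefg', 'efghij', 'ghij']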
@command("read_file", "Read file", '"filename": "<filename>"')
|
||||
def read_file(filename: str) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
Args:
|
||||
@@ -136,62 +87,49 @@ def read_file(filename: str, agent: Agent) -> str:
|
||||
str: The contents of the file
|
||||
"""
|
||||
try:
|
||||
content = read_textual_file(filename, logger)
|
||||
|
||||
# TODO: invalidate/update memory when file is edited
|
||||
file_memory = MemoryItem.from_text_file(content, filename, agent.config)
|
||||
if len(file_memory.chunks) > 1:
|
||||
return file_memory.summary
|
||||
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
return content
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def ingest_file(
|
||||
filename: str,
|
||||
memory: VectorMemory,
|
||||
filename: str, memory, max_length: int = 4000, overlap: int = 200
|
||||
) -> None:
|
||||
"""
|
||||
Ingest a file by reading its content, splitting it into chunks with a specified
|
||||
maximum length and overlap, and adding the chunks to the memory storage.
|
||||
|
||||
Args:
|
||||
filename: The name of the file to ingest
|
||||
memory: An object with an add() method to store the chunks in memory
|
||||
:param filename: The name of the file to ingest
|
||||
:param memory: An object with an add() method to store the chunks in memory
|
||||
:param max_length: The maximum length of each chunk, default is 4000
|
||||
:param overlap: The number of overlapping characters between chunks, default is 200
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Ingesting file {filename}")
|
||||
logger.info(f"Working with file {filename}")
|
||||
content = read_file(filename)
|
||||
content_length = len(content)
|
||||
logger.info(f"File length: {content_length} characters")
|
||||
|
||||
# TODO: differentiate between different types of files
|
||||
file_memory = MemoryItem.from_text_file(content, filename)
|
||||
logger.debug(f"Created memory: {file_memory.dump(True)}")
|
||||
memory.add(file_memory)
|
||||
chunks = list(split_file(content, max_length=max_length, overlap=overlap))
|
||||
|
||||
logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
|
||||
except Exception as err:
|
||||
logger.warn(f"Error while ingesting file '{filename}': {err}")
|
||||
num_chunks = len(chunks)
|
||||
for i, chunk in enumerate(chunks):
|
||||
logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
|
||||
memory_to_add = (
|
||||
f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
|
||||
)
|
||||
|
||||
memory.add(memory_to_add)
|
||||
|
||||
logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
|
||||
except Exception as e:
|
||||
logger.info(f"Error while ingesting file '{filename}': {str(e)}")
|
||||
|
||||
|
||||
@command(
|
||||
"write_to_file",
|
||||
"Writes to a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to write to",
|
||||
"required": True,
|
||||
},
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "The text to write to the file",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
aliases=["write_file", "create_file"],
|
||||
)
|
||||
def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
Args:
|
||||
@@ -201,39 +139,23 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
checksum = text_checksum(text)
|
||||
if is_duplicate_operation("write", filename, agent.config, checksum):
|
||||
if check_duplicate_operation("write", filename):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename, agent, checksum)
|
||||
log_operation("write", filename)
|
||||
return "File written to successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"append_to_file",
|
||||
"Appends to a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to write to",
|
||||
"required": True,
|
||||
},
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "The text to write to the file",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
)
|
||||
def append_to_file(
|
||||
filename: str, text: str, agent: Agent, should_log: bool = True
|
||||
) -> str:
|
||||
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
Args:
|
||||
@@ -247,31 +169,19 @@ def append_to_file(
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "a", encoding="utf-8") as f:
|
||||
with open(filename, "a") as f:
|
||||
f.write(text)
|
||||
|
||||
if should_log:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("append", filename, agent, checksum=checksum)
|
||||
log_operation("append", filename)
|
||||
|
||||
return "Text appended successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"delete_file",
|
||||
"Deletes a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to delete",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
def delete_file(filename: str, agent: Agent) -> str:
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str) -> str:
|
||||
"""Delete a file
|
||||
|
||||
Args:
|
||||
@@ -280,29 +190,19 @@ def delete_file(filename: str, agent: Agent) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if is_duplicate_operation("delete", filename, agent.config):
|
||||
if check_duplicate_operation("delete", filename):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename, agent)
|
||||
log_operation("delete", filename)
|
||||
return "File deleted successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"list_files",
|
||||
"Lists Files in a Directory",
|
||||
{
|
||||
"directory": {
|
||||
"type": "string",
|
||||
"description": "The directory to list files in",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
def list_files(directory: str, agent: Agent) -> list[str]:
|
||||
"""lists files in a directory recursively
|
||||
@command("search_files", "Search Files", '"directory": "<directory>"')
|
||||
def search_files(directory: str) -> list[str]:
|
||||
"""Search for files in a directory
|
||||
|
||||
Args:
|
||||
directory (str): The directory to search in
|
||||
@@ -317,8 +217,56 @@ def list_files(directory: str, agent: Agent) -> list[str]:
|
||||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), agent.config.workspace_path
|
||||
os.path.join(root, file), CFG.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
return found_files
|
||||
|
||||
|
||||
@command(
|
||||
"download_file",
|
||||
"Download File",
|
||||
'"url": "<url>", "filename": "<filename>"',
|
||||
CFG.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
filename (str): Filename to save the file as
|
||||
"""
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
session.mount("http://", adapter)
|
||||
session.mount("https://", adapter)
|
||||
|
||||
total_size = 0
|
||||
downloaded_size = 0
|
||||
|
||||
with session.get(url, allow_redirects=True, stream=True) as r:
|
||||
r.raise_for_status()
|
||||
total_size = int(r.headers.get("Content-Length", 0))
|
||||
downloaded_size = 0
|
||||
|
||||
with open(filename, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded_size += len(chunk)
|
||||
|
||||
# Update the progress message
|
||||
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
|
||||
spinner.update_message(f"{message} {progress}")
|
||||
|
||||
return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
|
||||
except requests.HTTPError as e:
|
||||
return f"Got an HTTP Error whilst trying to download file: {e}"
|
||||
except Exception as e:
|
||||
return "Error: " + str(e)
|
||||
|
||||
@@ -1,161 +0,0 @@
import json
import os

import charset_normalizer
import docx
import markdown
import PyPDF2
import yaml
from bs4 import BeautifulSoup
from pylatexenc.latex2text import LatexNodes2Text

from autogpt import logs
from autogpt.logs import logger


class ParserStrategy:
def read(self, file_path: str) -> str:
raise NotImplementedError


# Basic text file reading
class TXTParser(ParserStrategy):
def read(self, file_path: str) -> str:
charset_match = charset_normalizer.from_path(file_path).best()
logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
return str(charset_match)


# Reading text from binary file using pdf parser
class PDFParser(ParserStrategy):
def read(self, file_path: str) -> str:
parser = PyPDF2.PdfReader(file_path)
text = ""
for page_idx in range(len(parser.pages)):
text += parser.pages[page_idx].extract_text()
return text


# Reading text from binary file using docs parser
class DOCXParser(ParserStrategy):
def read(self, file_path: str) -> str:
doc_file = docx.Document(file_path)
text = ""
for para in doc_file.paragraphs:
text += para.text
return text


# Reading as dictionary and returning string format
class JSONParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
data = json.load(f)
text = str(data)
return text


class XMLParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
soup = BeautifulSoup(f, "xml")
text = soup.get_text()
return text


# Reading as dictionary and returning string format
class YAMLParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
text = str(data)
return text


class HTMLParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
soup = BeautifulSoup(f, "html.parser")
text = soup.get_text()
return text


class MarkdownParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
html = markdown.markdown(f.read())
text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))
return text


class LaTeXParser(ParserStrategy):
def read(self, file_path: str) -> str:
with open(file_path, "r") as f:
latex = f.read()
text = LatexNodes2Text().latex_to_text(latex)
return text


class FileContext:
def __init__(self, parser: ParserStrategy, logger: logs.Logger):
self.parser = parser
self.logger = logger

def set_parser(self, parser: ParserStrategy) -> None:
self.logger.debug(f"Setting Context Parser to {parser}")
self.parser = parser

def read_file(self, file_path) -> str:
self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
return self.parser.read(file_path)


extension_to_parser = {
".txt": TXTParser(),
".csv": TXTParser(),
".pdf": PDFParser(),
".docx": DOCXParser(),
".json": JSONParser(),
".xml": XMLParser(),
".yaml": YAMLParser(),
".yml": YAMLParser(),
".html": HTMLParser(),
".htm": HTMLParser(),
".xhtml": HTMLParser(),
".md": MarkdownParser(),
".markdown": MarkdownParser(),
".tex": LaTeXParser(),
}


def is_file_binary_fn(file_path: str):
"""Given a file path load all its content and checks if the null bytes is present

Args:
file_path (_type_): _description_

Returns:
bool: is_binary
"""
with open(file_path, "rb") as f:
file_data = f.read()
if b"\x00" in file_data:
return True
return False


def read_textual_file(file_path: str, logger: logs.Logger) -> str:
if not os.path.isfile(file_path):
raise FileNotFoundError(
f"read_file {file_path} failed: no such file or directory"
)
is_binary = is_file_binary_fn(file_path)
file_extension = os.path.splitext(file_path)[1].lower()
parser = extension_to_parser.get(file_extension)
if not parser:
if is_binary:
raise ValueError(f"Unsupported binary file format: {file_extension}")
# fallback to txt file parser (to support script and code files loading)
parser = TXTParser()
file_context = FileContext(parser, logger)
return file_context.read_file(file_path)
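The module removed above is a textbook strategy-pattern dispatch: the file extension selects a parser, with a plain-text fallback for unknown but non-binary files. The decision logic in miniature (parser table elided):

import os


def pick_parser(file_path: str, extension_to_parser: dict, is_binary: bool):
    # Mirrors read_textual_file: a known extension wins; unknown text falls back to TXT.
    ext = os.path.splitext(file_path)[1].lower()
    parser = extension_to_parser.get(ext)
    if parser is None:
        if is_binary:
            raise ValueError(f"Unsupported binary file format: {ext}")
        parser = extension_to_parser[".txt"]  # covers scripts and source code
    return parser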
@@ -1,32 +1,22 @@
"""Git operations for autogpt"""

from git.repo import Repo

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.url_utils.validators import validate_url

CFG = Config()


@command(
"clone_repository",
"Clones a Repository",
{
"url": {
"type": "string",
"description": "The URL of the repository to clone",
"required": True,
},
"clone_path": {
"type": "string",
"description": "The path to clone the repository to",
"required": True,
},
},
lambda config: config.github_username and config.github_api_key,
"Clone Repository",
'"url": "<repository_url>", "clone_path": "<clone_path>"',
CFG.github_username and CFG.github_api_key,
"Configure github_username and github_api_key.",
)
@validate_url
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
def clone_repository(url: str, clone_path: str) -> str:
"""Clone a GitHub repository locally.

Args:
@@ -37,11 +27,7 @@ def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
str: The result of the clone operation.
"""
split_url = url.split("//")
auth_repo_url = (
f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
split_url
)
)
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
try:
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
return f"""Cloned {url} to {clone_path}"""

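Both versions of clone_repository splice credentials into the clone URL with str.join; a worked example with placeholder credentials:

url = "https://github.com/Significant-Gravitas/Auto-GPT.git"
split_url = url.split("//")  # ['https:', 'github.com/Significant-Gravitas/Auto-GPT.git']
auth_repo_url = "//user:token@".join(split_url)  # placeholder username and API key
print(auth_repo_url)
# -> https://user:token@github.com/Significant-Gravitas/Auto-GPT.git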
@@ -2,30 +2,17 @@
from __future__ import annotations

import json
import time
from itertools import islice

from duckduckgo_search import DDGS
from duckduckgo_search import ddg

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config

DUCKDUCKGO_MAX_ATTEMPTS = 3
CFG = Config()


@command(
"web_search",
"Searches the web",
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
aliases=["search"],
)
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a Google search

Args:
@@ -36,20 +23,15 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
str: The results of the search.
"""
search_results = []
attempts = 0
if not query:
return json.dumps(search_results)

while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
if not query:
return json.dumps(search_results)
results = ddg(query, max_results=num_results)
if not results:
return json.dumps(search_results)

results = DDGS().text(query)
search_results = list(islice(results, num_results))

if search_results:
break

time.sleep(1)
attempts += 1
for j in results:
search_results.append(j)

results = json.dumps(search_results, ensure_ascii=False, indent=4)
return safe_google_results(results)
@@ -58,19 +40,11 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
@command(
"google",
"Google Search",
{
"query": {
"type": "string",
"description": "The search query",
"required": True,
}
},
lambda config: bool(config.google_api_key)
and bool(config.google_custom_search_engine_id),
"Configure google_api_key and custom_search_engine_id.",
aliases=["search"],
'"query": "<query>"',
bool(CFG.google_api_key),
"Configure google_api_key.",
)
def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
"""Return the results of a Google search using the official Google API

Args:
@@ -86,8 +60,8 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:

try:
# Get the Google API key and Custom Search Engine ID from the config file
api_key = agent.config.google_api_key
custom_search_engine_id = agent.config.google_custom_search_engine_id
api_key = CFG.google_api_key
custom_search_engine_id = CFG.custom_search_engine_id

# Initialize the Custom Search API service
service = build("customsearch", "v1", developerKey=api_key)
@@ -126,7 +100,7 @@ def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:

def safe_google_results(results: str | list) -> str:
"""
Return the results of a Google search in a safe format.
Return the results of a google search in a safe format.

Args:
results (str | list): The search results.
@@ -136,7 +110,7 @@ def safe_google_results(results: str | list) -> str:
"""
if isinstance(results, list):
safe_message = json.dumps(
[result.encode("utf-8", "ignore").decode("utf-8") for result in results]
[result.encode("utf-8", "ignore") for result in results]
)
else:
safe_message = results.encode("utf-8", "ignore").decode("utf-8")
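The newer web_search path replaces the removed ddg() helper with the DDGS class plus itertools.islice. A minimal sketch, assuming duckduckgo_search is installed and that DDGS().text() yields result dicts as in the diff:

import json
from itertools import islice

from duckduckgo_search import DDGS


def ddg_search(query: str, num_results: int = 8) -> str:
    # DDGS().text() returns an iterator of result dicts; islice caps it at num_results.
    results = list(islice(DDGS().text(query), num_results))
    return json.dumps(results, ensure_ascii=False, indent=4)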
@@ -1,7 +1,5 @@
""" Image Generation Module for AutoGPT."""
import io
import json
import time
import uuid
from base64 import b64decode

@@ -9,25 +7,15 @@ import openai
import requests
from PIL import Image

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger

CFG = Config()

@command(
"generate_image",
"Generates an Image",
{
"prompt": {
"type": "string",
"description": "The prompt used to generate the image",
"required": True,
},
},
lambda config: config.image_provider,
"Requires a image provider to be set.",
)
def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:

@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
"""Generate an image from a prompt.

Args:
@@ -37,21 +25,21 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
Returns:
str: The filename of the image
"""
filename = f"{agent.config.workspace_path}/{str(uuid.uuid4())}.jpg"
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"

# DALL-E
if agent.config.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size, agent)
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size)
# HuggingFace
elif agent.config.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename, agent)
elif CFG.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename)
# SD WebUI
elif agent.config.image_provider == "sdwebui":
return generate_image_with_sd_webui(prompt, filename, agent, size)
elif CFG.image_provider == "sdwebui":
return generate_image_with_sd_webui(prompt, filename, size)
return "No Image Provider Set"


def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
def generate_image_with_hf(prompt: str, filename: str) -> str:
"""Generate an image with HuggingFace's API.

Args:
@@ -61,57 +49,35 @@ def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
Returns:
str: The filename of the image
"""
API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
if agent.config.huggingface_api_token is None:
API_URL = (
f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
)
if CFG.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {
"Authorization": f"Bearer {agent.config.huggingface_api_token}",
"Authorization": f"Bearer {CFG.huggingface_api_token}",
"X-Use-Cache": "false",
}

retry_count = 0
while retry_count < 10:
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)

if response.ok:
try:
image = Image.open(io.BytesIO(response.content))
logger.info(f"Image Generated for prompt:{prompt}")
image.save(filename)
return f"Saved to disk:{filename}"
except Exception as e:
logger.error(e)
break
else:
try:
error = json.loads(response.text)
if "estimated_time" in error:
delay = error["estimated_time"]
logger.debug(response.text)
logger.info("Retrying in", delay)
time.sleep(delay)
else:
break
except Exception as e:
logger.error(e)
break
image = Image.open(io.BytesIO(response.content))
logger.info(f"Image Generated for prompt:{prompt}")

retry_count += 1
image.save(filename)

return f"Error creating image."
return f"Saved to disk:{filename}"


def generate_image_with_dalle(
prompt: str, filename: str, size: int, agent: Agent
) -> str:
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
"""Generate an image with DALL-E.

Args:
@@ -136,7 +102,7 @@ def generate_image_with_dalle(
n=1,
size=f"{size}x{size}",
response_format="b64_json",
api_key=agent.config.openai_api_key,
api_key=CFG.openai_api_key,
)

logger.info(f"Image Generated for prompt:{prompt}")
@@ -152,7 +118,6 @@
def generate_image_with_sd_webui(
prompt: str,
filename: str,
agent: Agent,
size: int = 512,
negative_prompt: str = "",
extra: dict = {},
@@ -169,19 +134,19 @@ def generate_image_with_sd_webui(
"""
# Create a session and set the basic auth if needed
s = requests.Session()
if agent.config.sd_webui_auth:
username, password = agent.config.sd_webui_auth.split(":")
if CFG.sd_webui_auth:
username, password = CFG.sd_webui_auth.split(":")
s.auth = (username, password or "")

# Generate the images
response = requests.post(
f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
json={
"prompt": prompt,
"negative_prompt": negative_prompt,
"sampler_index": "DDIM",
"steps": 20,
"config_scale": 7.0,
"cfg_scale": 7.0,
"width": size,
"height": size,
"n_iter": 1,

autogpt/commands/improve_code.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from __future__ import annotations

import json

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
"improve_code",
"Get Improved Code",
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create
chat completion api call.

Parameters:
suggestions (list): A list of suggestions around what needs to be improved.
code (str): Code to be improved.
Returns:
A result string from create chat completion. Improved code in response.
"""

function_string = (
"def generate_improved_code(suggestions: list[str], code: str) -> str:"
)
args = [json.dumps(suggestions), code]
description_string = (
"Improves the provided code based on the suggestions"
" provided, making no other changes."
)

return call_ai_function(function_string, args, description_string)
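improve_code above (like write_tests later in this diff) delegates to call_ai_function: the model is shown a Python signature, the serialized arguments, and a one-line description, and is asked to behave as that function. A rough, hypothetical reconstruction of the kind of prompt that helper assembles (the real one lives in autogpt.llm and may differ):

def render_ai_function_prompt(
    function_string: str, args: list, description_string: str
) -> str:
    # Hypothetical sketch of the message call_ai_function builds; not the library's exact text.
    arg_list = ", ".join(str(a) for a in args)
    return (
        f"You are now the following python function: ```# {description_string}"
        f"\n{function_string}```\n\n"
        f"Only respond with your `return` value. Arguments: {arg_list}"
    )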
@@ -1,33 +0,0 @@
"""Task Statuses module."""
from __future__ import annotations

from typing import NoReturn

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.logs import logger


@command(
"goals_accomplished",
"Goals are accomplished and there is nothing left to do",
{
"reason": {
"type": "string",
"description": "A summary to the user of how the goals were accomplished",
"required": True,
}
},
)
def task_complete(reason: str, agent: Agent) -> NoReturn:
"""
A function that takes in a string and exits the program

Parameters:
reason (str): A summary to the user of how the goals were accomplished.
Returns:
A result string from create chat completion. A list of suggestions to
improve the code.
"""
logger.info(title="Shutting down...\n", message=reason)
quit()
autogpt/commands/twitter.py (new file, 41 lines)
@@ -0,0 +1,41 @@
"""A module that contains a command to send a tweet."""
import os

import tweepy

from autogpt.commands.command import command


@command(
"send_tweet",
"Send Tweet",
'"tweet_text": "<tweet_text>"',
)
def send_tweet(tweet_text: str) -> str:
"""
A function that takes in a string and returns a response from create chat
completion api call.

Args:
tweet_text (str): Text to be tweeted.

Returns:
A result from sending the tweet.
"""
consumer_key = os.environ.get("TW_CONSUMER_KEY")
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
access_token = os.environ.get("TW_ACCESS_TOKEN")
access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
# Authenticate to Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)

# Create API object
api = tweepy.API(auth)

# Send tweet
try:
api.update_status(tweet_text)
return "Tweet sent successfully!"
except tweepy.TweepyException as e:
return f"Error sending tweet: {e.reason}"
autogpt/commands/web_playwright.py (new file, 82 lines)
@@ -0,0 +1,82 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations

from autogpt.logs import logger

try:
from playwright.sync_api import sync_playwright
except ImportError:
logger.info(
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup

from autogpt.processing.html import extract_hyperlinks, format_hyperlinks


def scrape_text(url: str) -> str:
"""Scrape text from a webpage

Args:
url (str): The URL to scrape text from

Returns:
str: The scraped text
"""
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()

try:
page.goto(url)
html_content = page.content()
soup = BeautifulSoup(html_content, "html.parser")

for script in soup(["script", "style"]):
script.extract()

text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
text = "\n".join(chunk for chunk in chunks if chunk)

except Exception as e:
text = f"Error: {str(e)}"

finally:
browser.close()

return text


def scrape_links(url: str) -> str | list[str]:
"""Scrape links from a webpage

Args:
url (str): The URL to scrape links from

Returns:
Union[str, List[str]]: The scraped links
"""
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()

try:
page.goto(url)
html_content = page.content()
soup = BeautifulSoup(html_content, "html.parser")

for script in soup(["script", "style"]):
script.extract()

hyperlinks = extract_hyperlinks(soup, url)
formatted_links = format_hyperlinks(hyperlinks)

except Exception as e:
formatted_links = f"Error: {str(e)}"

finally:
browser.close()

return formatted_links
autogpt/commands/web_requests.py (new file, 112 lines)
@@ -0,0 +1,112 @@
"""Browse a webpage and summarize it using the LLM model"""
from __future__ import annotations

import requests
from bs4 import BeautifulSoup
from requests import Response

from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

CFG = Config()

session = requests.Session()
session.headers.update({"User-Agent": CFG.user_agent})


@validate_url
def get_response(
url: str, timeout: int = 10
) -> tuple[None, str] | tuple[Response, None]:
"""Get the response from a URL

Args:
url (str): The URL to get the response from
timeout (int): The timeout for the HTTP request

Returns:
tuple[None, str] | tuple[Response, None]: The response and error message

Raises:
ValueError: If the URL is invalid
requests.exceptions.RequestException: If the HTTP request fails
"""
try:
response = session.get(url, timeout=timeout)

# Check if the response contains an HTTP error
if response.status_code >= 400:
return None, f"Error: HTTP {str(response.status_code)} error"

return response, None
except ValueError as ve:
# Handle invalid URL format
return None, f"Error: {str(ve)}"

except requests.exceptions.RequestException as re:
# Handle exceptions related to the HTTP request
# (e.g., connection errors, timeouts, etc.)
return None, f"Error: {str(re)}"


def scrape_text(url: str) -> str:
"""Scrape text from a webpage

Args:
url (str): The URL to scrape text from

Returns:
str: The scraped text
"""
response, error_message = get_response(url)
if error_message:
return error_message
if not response:
return "Error: Could not get response"

soup = BeautifulSoup(response.text, "html.parser")

for script in soup(["script", "style"]):
script.extract()

text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
text = "\n".join(chunk for chunk in chunks if chunk)

return text


def scrape_links(url: str) -> str | list[str]:
"""Scrape links from a webpage

Args:
url (str): The URL to scrape links from

Returns:
str | list[str]: The scraped links
"""
response, error_message = get_response(url)
if error_message:
return error_message
if not response:
return "Error: Could not get response"
soup = BeautifulSoup(response.text, "html.parser")

for script in soup(["script", "style"]):
script.extract()

hyperlinks = extract_hyperlinks(soup, url)

return format_hyperlinks(hyperlinks)


def create_message(chunk, question):
"""Create a message for the user to summarize a chunk of text"""
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, answer the following'
f' question: "{question}" -- if the question cannot be answered using the'
" text, summarize the text.",
}
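create_message above fixes the per-chunk question-answering prompt. A standalone copy plus the message it produces:

def create_message(chunk: str, question: str) -> dict:
    # Same shape as the function above.
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, summarize the text.",
    }


print(create_message("Auto-GPT is an experimental agent.", "What is Auto-GPT?")["content"])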
@@ -4,55 +4,37 @@ from __future__ import annotations
import logging
from pathlib import Path
from sys import platform
from typing import Optional, Type

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service as ChromeDriverService
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.edge.service import Service as EdgeDriverService
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service as GeckoDriverService
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager

from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, get_memory
import autogpt.processing.text as summary
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions

FILE_DIR = Path(__file__).parent.parent
CFG = Config()


@command(
"browse_website",
"Browses a Website",
{
"url": {"type": "string", "description": "The URL to visit", "required": True},
"question": {
"type": "string",
"description": "What you want to find on the website",
"required": True,
},
},
"Browse Website",
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
)
@validate_url
def browse_website(url: str, question: str, agent: Agent) -> str:
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
"""Browse a website and return the answer and links to the user

Args:
@@ -63,25 +45,25 @@ def browse_website(url: str, question: str, agent: Agent) -> str:
Tuple[str, WebDriver]: The answer and links to the user and the webdriver
"""
try:
driver, text = scrape_text_with_selenium(url, agent)
driver, text = scrape_text_with_selenium(url)
except WebDriverException as e:
# These errors are often quite long and include lots of context.
# Just grab the first line.
msg = e.msg.split("\n")[0]
return f"Error: {msg}"
return f"Error: {msg}", None

add_header(driver)
summary = summarize_memorize_webpage(url, text, question, agent, driver)
summary_text = summary.summarize_text(url, text, question, driver)
links = scrape_links_with_selenium(driver, url)

# Limit links to 5
if len(links) > 5:
links = links[:5]
close_browser(driver)
return f"Answer gathered from website: {summary}\n\nLinks: {links}"
return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"""Scrape text from a website using selenium

Args:
@@ -92,49 +74,44 @@ def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
"""
logging.getLogger("selenium").setLevel(logging.CRITICAL)

options_available: dict[str, Type[BrowserOptions]] = {
options_available = {
"chrome": ChromeOptions,
"edge": EdgeOptions,
"firefox": FirefoxOptions,
"safari": SafariOptions,
"firefox": FirefoxOptions,
}

options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
options = options_available[CFG.selenium_web_browser]()
options.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)

if agent.config.selenium_web_browser == "firefox":
if agent.config.selenium_headless:
if CFG.selenium_web_browser == "firefox":
if CFG.selenium_headless:
options.headless = True
options.add_argument("--disable-gpu")
driver = FirefoxDriver(
service=GeckoDriverService(GeckoDriverManager().install()), options=options
driver = webdriver.Firefox(
executable_path=GeckoDriverManager().install(), options=options
)
elif agent.config.selenium_web_browser == "edge":
driver = EdgeDriver(
service=EdgeDriverService(EdgeDriverManager().install()), options=options
)
elif agent.config.selenium_web_browser == "safari":
elif CFG.selenium_web_browser == "safari":
# Requires a bit more setup on the users end
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
driver = SafariDriver(options=options)
driver = webdriver.Safari(options=options)
else:
if platform == "linux" or platform == "linux2":
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--remote-debugging-port=9222")

options.add_argument("--no-sandbox")
if agent.config.selenium_headless:
if CFG.selenium_headless:
options.add_argument("--headless=new")
options.add_argument("--disable-gpu")

chromium_driver_path = Path("/usr/bin/chromedriver")

driver = ChromeDriver(
service=ChromeDriverService(str(chromium_driver_path))
driver = webdriver.Chrome(
executable_path=chromium_driver_path
if chromium_driver_path.exists()
else ChromeDriverService(ChromeDriverManager().install()),
else ChromeDriverManager().install(),
options=options,
)
driver.get(url)
@@ -198,40 +175,4 @@ def add_header(driver: WebDriver) -> None:
Returns:
None
"""
try:
with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
overlay_script = overlay_file.read()
driver.execute_script(overlay_script)
except Exception as e:
print(f"Error executing overlay.js: {e}")


def summarize_memorize_webpage(
url: str,
text: str,
question: str,
agent: Agent,
driver: Optional[WebDriver] = None,
) -> str:
"""Summarize text using the OpenAI API

Args:
url (str): The url of the text
text (str): The text to summarize
question (str): The question to ask the model
driver (WebDriver): The webdriver to use to scroll the page

Returns:
str: The summary of the text
"""
if not text:
return "Error: No text to summarize"

text_length = len(text)
logger.info(f"Text length: {text_length} characters")

memory = get_memory(agent.config)

new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
memory.add(new_memory)
return new_memory.summary
driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())

autogpt/commands/write_tests.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations

import json

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
"write_tests",
"Write Tests",
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.

Parameters:
focus (list): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
in response.
"""

function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = (
"Generates test cases for the existing code, focusing on"
" specific areas if required."
)

return call_ai_function(function_string, args, description_string)
@@ -1,12 +1,11 @@
"""
This module contains the configuration classes for AutoGPT.
"""
from .ai_config import AIConfig
from .config import Config, ConfigBuilder, check_openai_api_key
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config, check_openai_api_key

__all__ = [
"check_openai_api_key",
"AIConfig",
"Config",
"ConfigBuilder",
]

@@ -7,14 +7,12 @@ from __future__ import annotations

import os
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from typing import Any, Optional, Type

import distro
import yaml

if TYPE_CHECKING:
    from autogpt.models.command_registry import CommandRegistry
    from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.generator import PromptGenerator

# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
@@ -35,7 +33,7 @@ class AIConfig:
        self,
        ai_name: str = "",
        ai_role: str = "",
        ai_goals: list[str] = [],
        ai_goals: list | None = None,
        api_budget: float = 0.0,
    ) -> None:
        """
@@ -49,30 +47,33 @@ class AIConfig:
        Returns:
            None
        """
        if ai_goals is None:
            ai_goals = []
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals
        self.api_budget = api_budget
        self.prompt_generator: PromptGenerator | None = None
        self.command_registry: CommandRegistry | None = None
        self.prompt_generator = None
        self.command_registry = None

    @staticmethod
    def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
    def load(config_file: str = SAVE_FILE) -> "AIConfig":
        """
        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
        loaded from yaml file if yaml file exists, else returns class with no parameters.
        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
        yaml file if yaml file exists,
        else returns class with no parameters.

        Parameters:
            ai_settings_file (int): The path to the config yaml file.
                DEFAULT: "../ai_settings.yaml"
            config_file (int): The path to the config yaml file.
                DEFAULT: "../ai_settings.yaml"

        Returns:
            cls (object): An instance of given cls object
        """

        try:
            with open(ai_settings_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
            with open(config_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

@@ -88,12 +89,12 @@ class AIConfig:
        # type: Type[AIConfig]
        return AIConfig(ai_name, ai_role, ai_goals, api_budget)

    def save(self, ai_settings_file: str = SAVE_FILE) -> None:
    def save(self, config_file: str = SAVE_FILE) -> None:
        """
        Saves the class parameters to the specified file yaml file path as a yaml file.

        Parameters:
            ai_settings_file(str): The path to the config yaml file.
            config_file(str): The path to the config yaml file.
                DEFAULT: "../ai_settings.yaml"

        Returns:
@@ -106,11 +107,11 @@ class AIConfig:
            "ai_goals": self.ai_goals,
            "api_budget": self.api_budget,
        }
        with open(ai_settings_file, "w", encoding="utf-8") as file:
        with open(config_file, "w", encoding="utf-8") as file:
            yaml.dump(config, file, allow_unicode=True)

    def construct_full_prompt(
        self, config, prompt_generator: Optional[PromptGenerator] = None
        self, prompt_generator: Optional[PromptGenerator] = None
    ) -> str:
        """
        Returns a prompt to the user with the class information in an organized fashion.
@@ -130,20 +131,22 @@ class AIConfig:
            ""
        )

        from autogpt.config import Config
        from autogpt.prompts.prompt import build_default_prompt_generator

        cfg = Config()
        if prompt_generator is None:
            prompt_generator = build_default_prompt_generator(config)
            prompt_generator = build_default_prompt_generator()
        prompt_generator.goals = self.ai_goals
        prompt_generator.name = self.ai_name
        prompt_generator.role = self.ai_role
        prompt_generator.command_registry = self.command_registry
        for plugin in config.plugins:
        for plugin in cfg.plugins:
            if not plugin.can_handle_post_prompt():
                continue
            prompt_generator = plugin.post_prompt(prompt_generator)

        if config.execute_local_commands:
        if cfg.execute_local_commands:
            # add OS info to prompt
            os_name = platform.system()
            os_info = (
@@ -161,5 +164,5 @@ class AIConfig:
        if self.api_budget > 0.0:
            full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
        self.prompt_generator = prompt_generator
        full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}"
        full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
        return full_prompt

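A sketch of the `AIConfig` load/save round trip shown above (not part of the diff); the file name is illustrative, and by default both methods fall back to `SAVE_FILE`.

```python
# Sketch: construct, persist, and reload an AIConfig.
config = AIConfig(
    ai_name="ResearchGPT",
    ai_role="an agent that summarizes web pages",
    ai_goals=["Summarize one page", "Save the summary to a file"],
    api_budget=1.0,
)
config.save("ai_settings.yaml")
reloaded = AIConfig.load("ai_settings.yaml")
assert reloaded.ai_name == "ResearchGPT"
```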
@@ -1,325 +1,174 @@
"""Configuration class to store the state of bools for different scripts access."""
from __future__ import annotations

import contextlib
import os
import re
from typing import Dict, Optional, Union
from typing import List

import openai
import yaml
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore

from autogpt.core.configuration.schema import Configurable, SystemSettings
from autogpt.plugins.plugins_config import PluginsConfig

AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
from autogpt.singleton import Singleton


class Config(SystemSettings):
    fast_llm: str
    smart_llm: str
    continuous_mode: bool
    skip_news: bool
    workspace_path: Optional[str] = None
    file_logger_path: Optional[str] = None
    debug_mode: bool
    plugins_dir: str
    plugins_config: PluginsConfig
    continuous_limit: int
    speak_mode: bool
    skip_reprompt: bool
    allow_downloads: bool
    exit_key: str
    plain_output: bool
    disabled_command_categories: list[str]
    shell_command_control: str
    shell_denylist: list[str]
    shell_allowlist: list[str]
    ai_settings_file: str
    prompt_settings_file: str
    embedding_model: str
    browse_spacy_language_model: str
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    temperature: float
    use_azure: bool
    azure_config_file: Optional[str] = None
    azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
    execute_local_commands: bool
    restrict_to_workspace: bool
    openai_api_type: Optional[str] = None
    openai_api_base: Optional[str] = None
    openai_api_version: Optional[str] = None
    openai_functions: bool
    elevenlabs_api_key: Optional[str] = None
    streamelements_voice: str
    text_to_speech_provider: str
    github_api_key: Optional[str] = None
    github_username: Optional[str] = None
    google_api_key: Optional[str] = None
    google_custom_search_engine_id: Optional[str] = None
    image_provider: Optional[str] = None
    image_size: int
    huggingface_api_token: Optional[str] = None
    huggingface_image_model: str
    audio_to_text_provider: str
    huggingface_audio_to_text_model: Optional[str] = None
    sd_webui_url: Optional[str] = None
    sd_webui_auth: Optional[str] = None
    selenium_web_browser: str
    selenium_headless: bool
    user_agent: str
    memory_backend: str
    memory_index: str
    redis_host: str
    redis_port: int
    redis_password: str
    wipe_redis_on_start: bool
    plugins_allowlist: list[str]
    plugins_denylist: list[str]
    plugins_openai: list[str]
    plugins_config_file: str
    chat_messages_enabled: bool
    elevenlabs_voice_id: Optional[str] = None
    plugins: list[str]
    authorise_key: str
class Config(metaclass=Singleton):
    """
    Configuration class to store the state of bools for different scripts access.
    """

    def get_openai_credentials(self, model: str) -> dict[str, str]:
        credentials = {
            "api_key": self.openai_api_key,
            "api_base": self.openai_api_base,
            "organization": self.openai_organization,
        }
        if self.use_azure:
            azure_credentials = self.get_azure_credentials(model)
            credentials.update(azure_credentials)
        return credentials

    def get_azure_credentials(self, model: str) -> dict[str, str]:
        """Get the kwargs for the Azure API."""

        # Fix --gpt3only and --gpt4only in combination with Azure
        fast_llm = (
            self.fast_llm
            if not (
                self.fast_llm == self.smart_llm
                and self.fast_llm.startswith(GPT_4_MODEL)
            )
            else f"not_{self.fast_llm}"
        )
        smart_llm = (
            self.smart_llm
            if not (
                self.smart_llm == self.fast_llm
                and self.smart_llm.startswith(GPT_3_MODEL)
            )
            else f"not_{self.smart_llm}"
        )

        deployment_id = {
            fast_llm: self.azure_model_to_deployment_id_map.get(
                "fast_llm_deployment_id",
                self.azure_model_to_deployment_id_map.get(
                    "fast_llm_model_deployment_id"  # backwards compatibility
                ),
            ),
            smart_llm: self.azure_model_to_deployment_id_map.get(
                "smart_llm_deployment_id",
                self.azure_model_to_deployment_id_map.get(
                    "smart_llm_model_deployment_id"  # backwards compatibility
                ),
            ),
            self.embedding_model: self.azure_model_to_deployment_id_map.get(
                "embedding_model_deployment_id"
            ),
        }.get(model, None)

        kwargs = {
            "api_type": self.openai_api_type,
            "api_base": self.openai_api_base,
            "api_version": self.openai_api_version,
        }
        if model == self.embedding_model:
            kwargs["engine"] = deployment_id
        else:
            kwargs["deployment_id"] = deployment_id
        return kwargs


class ConfigBuilder(Configurable[Config]):
    default_plugins_config_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
    )

    elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
    if os.getenv("USE_MAC_OS_TTS"):
        default_tts_provider = "macos"
    elif elevenlabs_api_key:
        default_tts_provider = "elevenlabs"
    elif os.getenv("USE_BRIAN_TTS"):
        default_tts_provider = "streamelements"
    else:
        default_tts_provider = "gtts"

    default_settings = Config(
        name="Default Server Config",
        description="This is a default server configuration",
        smart_llm="gpt-4",
        fast_llm="gpt-3.5-turbo",
        continuous_mode=False,
        continuous_limit=0,
        skip_news=False,
        debug_mode=False,
        plugins_dir="plugins",
        plugins_config=PluginsConfig(plugins={}),
        speak_mode=False,
        skip_reprompt=False,
        allow_downloads=False,
        exit_key="n",
        plain_output=False,
        disabled_command_categories=[],
        shell_command_control="denylist",
        shell_denylist=["sudo", "su"],
        shell_allowlist=[],
        ai_settings_file="ai_settings.yaml",
        prompt_settings_file="prompt_settings.yaml",
        embedding_model="text-embedding-ada-002",
        browse_spacy_language_model="en_core_web_sm",
        temperature=0,
        use_azure=False,
        azure_config_file=AZURE_CONFIG_FILE,
        execute_local_commands=False,
        restrict_to_workspace=True,
        openai_functions=False,
        streamelements_voice="Brian",
        text_to_speech_provider=default_tts_provider,
        image_size=256,
        huggingface_image_model="CompVis/stable-diffusion-v1-4",
        audio_to_text_provider="huggingface",
        sd_webui_url="http://localhost:7860",
        selenium_web_browser="chrome",
        selenium_headless=True,
        user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        memory_backend="json_file",
        memory_index="auto-gpt-memory",
        redis_host="localhost",
        redis_port=6379,
        wipe_redis_on_start=True,
        plugins_allowlist=[],
        plugins_denylist=[],
        plugins_openai=[],
        plugins_config_file=default_plugins_config_file,
        chat_messages_enabled=True,
        plugins=[],
        authorise_key="y",
        redis_password="",
    )

    @classmethod
    def build_config_from_env(cls) -> Config:
    def __init__(self) -> None:
        """Initialize the Config class"""
        config_dict = {
            "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
            "exit_key": os.getenv("EXIT_KEY"),
            "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
            "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
            "ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
            "prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
            "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
            "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
            "embedding_model": os.getenv("EMBEDDING_MODEL"),
            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
            "openai_api_key": os.getenv("OPENAI_API_KEY"),
            "use_azure": os.getenv("USE_AZURE") == "True",
            "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
            == "True",
            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
            == "True",
            "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
            "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
            "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
            "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
            "github_api_key": os.getenv("GITHUB_API_KEY"),
            "github_username": os.getenv("GITHUB_USERNAME"),
            "google_api_key": os.getenv("GOOGLE_API_KEY"),
            "image_provider": os.getenv("IMAGE_PROVIDER"),
            "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"),
            "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"),
            "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"),
            "huggingface_audio_to_text_model": os.getenv(
                "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
            ),
            "sd_webui_url": os.getenv("SD_WEBUI_URL"),
            "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"),
            "selenium_web_browser": os.getenv("USE_WEB_BROWSER"),
            "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True",
            "user_agent": os.getenv("USER_AGENT"),
            "memory_backend": os.getenv("MEMORY_BACKEND"),
            "memory_index": os.getenv("MEMORY_INDEX"),
            "redis_host": os.getenv("REDIS_HOST"),
            "redis_password": os.getenv("REDIS_PASSWORD"),
            "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
            "plugins_dir": os.getenv("PLUGINS_DIR"),
            "plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"),
            "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
        }
        self.workspace_path = None
        self.file_logger_path = None

        config_dict["disabled_command_categories"] = _safe_split(
            os.getenv("DISABLED_COMMAND_CATEGORIES")
        self.debug_mode = False
        self.continuous_mode = False
        self.continuous_limit = 0
        self.speak_mode = False
        self.skip_reprompt = False
        self.allow_downloads = False
        self.skip_news = False

        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
        self.exit_key = os.getenv("EXIT_KEY", "n")
        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
        self.browse_spacy_language_model = os.getenv(
            "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
        )

        config_dict["shell_denylist"] = _safe_split(
            os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "0"))
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
        )
        config_dict["shell_allowlist"] = _safe_split(
            os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
        self.restrict_to_workspace = (
            os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
        )

        config_dict["google_custom_search_engine_id"] = os.getenv(
            "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
        if self.use_azure:
            self.load_azure_config()
            openai.api_type = self.openai_api_type
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")

        self.use_mac_os_tts = False
        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"

        self.use_brian_tts = False
        self.use_brian_tts = os.getenv("USE_BRIAN_TTS")

        self.github_api_key = os.getenv("GITHUB_API_KEY")
        self.github_username = os.getenv("GITHUB_USERNAME")

        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.weaviate_host = os.getenv("WEAVIATE_HOST")
        self.weaviate_port = os.getenv("WEAVIATE_PORT")
        self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
        self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
        self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
        self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
        self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
        self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
        self.use_weaviate_embedded = (
            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
        )

        config_dict["elevenlabs_voice_id"] = os.getenv(
            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
        # milvus or zilliz cloud configuration.
        self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
        self.milvus_username = os.getenv("MILVUS_USERNAME")
        self.milvus_password = os.getenv("MILVUS_PASSWORD")
        self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
        self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.image_size = int(os.getenv("IMAGE_SIZE", 256))
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
        self.huggingface_image_model = os.getenv(
            "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
        )
        self.huggingface_audio_to_text_model = os.getenv(
            "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
        )
        self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
        self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")

        # Selenium browser settings
        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
        self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"

        # User agent header to use when making HTTP requests
        # Some websites might just completely deny request with an error code if
        # no user agent was found.
        self.user_agent = os.getenv(
            "USER_AGENT",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )

        config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
        config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
        config_dict["plugins_config"] = PluginsConfig.load_config(
            config_dict["plugins_config_file"],
            config_dict["plugins_denylist"],
            config_dict["plugins_allowlist"],
        )
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
        # Note that indexes must be created on db 0 in redis, this is not configurable.

        with contextlib.suppress(TypeError):
            config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
        with contextlib.suppress(TypeError):
            config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
        with contextlib.suppress(TypeError):
            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
        self.memory_backend = os.getenv("MEMORY_BACKEND", "local")

        if config_dict["use_azure"]:
            azure_config = cls.load_azure_config(config_dict["azure_config_file"])
            config_dict.update(azure_config)
        self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
        self.plugins: List[AutoGPTPluginTemplate] = []
        self.plugins_openai = []

        elif os.getenv("OPENAI_API_BASE_URL"):
            config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")
        plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
        if plugins_allowlist:
            self.plugins_allowlist = plugins_allowlist.split(",")
        else:
            self.plugins_allowlist = []
        self.plugins_denylist = []

        openai_organization = os.getenv("OPENAI_ORGANIZATION")
        if openai_organization is not None:
            config_dict["openai_organization"] = openai_organization
    def get_azure_deployment_id_for_model(self, model: str) -> str:
        """
        Returns the relevant deployment id for the model specified.

        config_dict_without_none_values = {
            k: v for k, v in config_dict.items() if v is not None
        }
        Parameters:
            model(str): The model to map to the deployment id.

        return cls.build_agent_configuration(config_dict_without_none_values)
        Returns:
            The matching deployment id if found, otherwise an empty string.
        """
        if model == self.fast_llm_model:
            return self.azure_model_to_deployment_id_map[
                "fast_llm_model_deployment_id"
            ]  # type: ignore
        elif model == self.smart_llm_model:
            return self.azure_model_to_deployment_id_map[
                "smart_llm_model_deployment_id"
            ]  # type: ignore
        elif model == "text-embedding-ada-002":
            return self.azure_model_to_deployment_id_map[
                "embedding_model_deployment_id"
            ]  # type: ignore
        else:
            return ""

    @classmethod
    def load_azure_config(cls, config_file: str = AZURE_CONFIG_FILE) -> Dict[str, str]:
    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")

    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
        """
        Loads the configuration parameters for Azure hosting from the specified file
        path as a yaml file.
@@ -328,55 +177,106 @@ class ConfigBuilder(Configurable[Config]):
            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"

        Returns:
            Dict
            None
        """
        with open(config_file) as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
            config_params = yaml.load(file, Loader=yaml.FullLoader)
        self.openai_api_type = config_params.get("azure_api_type") or "azure"
        self.openai_api_base = config_params.get("azure_api_base") or ""
        self.openai_api_version = (
            config_params.get("azure_api_version") or "2023-03-15-preview"
        )
        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})

        return {
            "openai_api_type": config_params.get("azure_api_type", "azure"),
            "openai_api_base": config_params.get("azure_api_base", ""),
            "openai_api_version": config_params.get(
                "azure_api_version", "2023-03-15-preview"
            ),
            "azure_model_to_deployment_id_map": config_params.get(
                "azure_model_map", {}
            ),
        }
    def set_continuous_mode(self, value: bool) -> None:
        """Set the continuous mode value."""
        self.continuous_mode = value

    def set_continuous_limit(self, value: int) -> None:
        """Set the continuous limit value."""
        self.continuous_limit = value

    def set_speak_mode(self, value: bool) -> None:
        """Set the speak mode value."""
        self.speak_mode = value

    def set_fast_llm_model(self, value: str) -> None:
        """Set the fast LLM model value."""
        self.fast_llm_model = value

    def set_smart_llm_model(self, value: str) -> None:
        """Set the smart LLM model value."""
        self.smart_llm_model = value

    def set_fast_token_limit(self, value: int) -> None:
        """Set the fast token limit value."""
        self.fast_token_limit = value

    def set_smart_token_limit(self, value: int) -> None:
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_browse_chunk_max_length(self, value: int) -> None:
        """Set the browse_website command chunk max length value."""
        self.browse_chunk_max_length = value

    def set_openai_api_key(self, value: str) -> None:
        """Set the OpenAI API key value."""
        self.openai_api_key = value

    def set_elevenlabs_api_key(self, value: str) -> None:
        """Set the ElevenLabs API key value."""
        self.elevenlabs_api_key = value

    def set_elevenlabs_voice_1_id(self, value: str) -> None:
        """Set the ElevenLabs Voice 1 ID value."""
        self.elevenlabs_voice_1_id = value

    def set_elevenlabs_voice_2_id(self, value: str) -> None:
        """Set the ElevenLabs Voice 2 ID value."""
        self.elevenlabs_voice_2_id = value

    def set_google_api_key(self, value: str) -> None:
        """Set the Google API key value."""
        self.google_api_key = value

    def set_custom_search_engine_id(self, value: str) -> None:
        """Set the custom search engine id value."""
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str) -> None:
        """Set the Pinecone API key value."""
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str) -> None:
        """Set the Pinecone region value."""
        self.pinecone_region = value

    def set_debug_mode(self, value: bool) -> None:
        """Set the debug mode value."""
        self.debug_mode = value

    def set_plugins(self, value: list) -> None:
        """Set the plugins value."""
        self.plugins = value

    def set_temperature(self, value: int) -> None:
        """Set the temperature value."""
        self.temperature = value

    def set_memory_backend(self, name: str) -> None:
        """Set the memory backend name."""
        self.memory_backend = name


def check_openai_api_key(config: Config) -> None:
def check_openai_api_key() -> None:
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not config.openai_api_key:
    cfg = Config()
    if not cfg.openai_api_key:
        print(
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
            + Fore.RESET
        )
        print("You can get your key from https://platform.openai.com/account/api-keys")
        openai_api_key = input(
            "If you do have the key, please enter your OpenAI API key now:\n"
        )
        key_pattern = r"^sk-\w{48}"
        openai_api_key = openai_api_key.strip()
        if re.search(key_pattern, openai_api_key):
            os.environ["OPENAI_API_KEY"] = openai_api_key
            config.openai_api_key = openai_api_key
            print(
                Fore.GREEN
                + "OpenAI API key successfully set!\n"
                + Fore.YELLOW
                + "NOTE: The API key you've set is only temporary.\n"
                + "For longer sessions, please set it in .env file"
                + Fore.RESET
            )
        else:
            print("Invalid OpenAI API key!")
            exit(1)


def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
    """Split a string by a separator. Return an empty list if the string is None."""
    if s is None:
        return []
    return s.split(sep)
        exit(1)

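A sketch of the new `ConfigBuilder` path on the refactored branch (not part of the diff): environment variables override the defaults baked into `default_settings`, and unset variables fall through as `None` and are dropped before the `Config` is built. Variable values here are illustrative.

```python
# Sketch under the assumptions above.
import os

os.environ["SMART_LLM"] = "gpt-4"
os.environ["EXECUTE_LOCAL_COMMANDS"] = "False"

config = ConfigBuilder.build_config_from_env()
assert config.smart_llm == "gpt-4"
assert config.restrict_to_workspace  # default True when RESTRICT_TO_WORKSPACE is unset
```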
@@ -1,47 +0,0 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.logs import logger


class PromptConfig:
    """
    A class object that contains the configuration information for the prompt, which will be used by the prompt generator

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        performance_evaluations (list): Performance evaluation list for the prompt generator.
    """

    def __init__(self, prompt_settings_file: str) -> None:
        """
        Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
        yaml file if yaml file exists,
        else raises error.

        Parameters:
            constraints (list): Constraints list for the prompt generator.
            resources (list): Resources list for the prompt generator.
            performance_evaluations (list): Performance evaluation list for the prompt generator.
        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        with open(prompt_settings_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)

        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.performance_evaluations = config_params.get("performance_evaluations", [])
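A sketch of how the removed `PromptConfig` consumed its settings file (not part of the diff); the inline YAML mirrors the three keys read in `__init__` above, and the entry strings are illustrative.

```python
# Sketch: write a minimal prompt_settings.yaml and load it.
from pathlib import Path

Path("prompt_settings.yaml").write_text(
    "constraints:\n"
    "  - '~4000 word limit for short term memory.'\n"
    "resources:\n"
    "  - 'Internet access for searches and information gathering.'\n"
    "performance_evaluations:\n"
    "  - 'Continuously review and analyze your actions.'\n",
    encoding="utf-8",
)
prompt_config = PromptConfig("prompt_settings.yaml")
print(prompt_config.constraints)
```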
@@ -1,27 +1,19 @@
"""Configurator module."""
from __future__ import annotations

from typing import TYPE_CHECKING

import click
from colorama import Back, Fore, Style

from autogpt import utils
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.utils import check_model
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends
from autogpt.memory import get_supported_memory_backends

if TYPE_CHECKING:
    from autogpt.config import Config
CFG = Config()


def create_config(
    config: Config,
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    prompt_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
@@ -38,7 +30,6 @@ def create_config(
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        prompt_settings_file (str): The path to the prompt_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
@@ -49,13 +40,13 @@ def create_config(
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skip_news (bool): Whether to suppress the output of latest news on startup
    """
    config.debug_mode = False
    config.continuous_mode = False
    config.speak_mode = False
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)

    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        config.debug_mode = True
        CFG.set_debug_mode(True)

    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
@@ -66,13 +57,13 @@ def create_config(
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        config.continuous_mode = True
        CFG.set_continuous_mode(True)

        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
            config.continuous_limit = continuous_limit
            CFG.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
@@ -80,26 +71,15 @@ def create_config(

    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        config.speak_mode = True
        CFG.set_speak_mode(True)

    # Set the default LLM models
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
        config.fast_llm = GPT_3_MODEL
        config.smart_llm = GPT_3_MODEL
    elif (
        gpt4only
        and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
        == GPT_4_MODEL
    ):
        CFG.set_smart_llm_model(CFG.fast_llm_model)

    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        # --gpt4only should always use gpt-4, despite user's SMART_LLM config
        config.fast_llm = GPT_4_MODEL
        config.smart_llm = GPT_4_MODEL
    else:
        config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
        config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)
        CFG.set_fast_llm_model(CFG.smart_llm_model)

    if memory_type:
        supported_memory = get_supported_memory_backends()
@@ -110,13 +90,13 @@ def create_config(
            Fore.RED,
            f"{supported_memory}",
        )
        logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
        logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
    else:
        config.memory_backend = chosen
        CFG.memory_backend = chosen

    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        config.skip_reprompt = True
        CFG.skip_reprompt = True

    if ai_settings_file:
        file = ai_settings_file
@@ -129,24 +109,11 @@ def create_config(
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        config.ai_settings_file = file
        config.skip_reprompt = True

    if prompt_settings_file:
        file = prompt_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
        config.prompt_settings_file = file
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True

    if browser_name:
        config.selenium_web_browser = browser_name
        CFG.selenium_web_browser = browser_name

    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@@ -161,7 +128,7 @@ def create_config(
        Fore.YELLOW,
        f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
    )
    config.allow_downloads = True
    CFG.allow_downloads = True

    if skip_news:
        config.skip_news = True
        CFG.skip_news = True

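A sketch of calling the refactored `create_config` (not part of the diff): the `Config` instance is now passed in explicitly instead of mutating the `CFG` singleton. The keyword names follow the parameters visible in the diff; their exact order in the full signature is an assumption, as are the values.

```python
# Sketch under the assumptions above.
config = ConfigBuilder.build_config_from_env()
create_config(
    config,
    continuous=False,
    continuous_limit=0,
    ai_settings_file="",
    prompt_settings_file="",
    skip_reprompt=False,
    speak=False,
    debug=True,
    gpt3only=True,
    gpt4only=False,
    memory_type="",
    browser_name="",
    allow_downloads=False,
    skip_news=True,
)
assert config.fast_llm == config.smart_llm == "gpt-3.5-turbo"
```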
@@ -1,62 +0,0 @@
# Run instructions

There are two client applications for Auto-GPT included.

## CLI Application

:star2: **This is the reference application I'm working with for now** :star2:

The first app is a straight CLI application. I have not yet done anything to port all the friendly display stuff from the `logger.typewriter_log` logic.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)

Auto-GPT must be installed in your python environment to run this application. To do so, run

```
pip install -e REPOSITORY_ROOT
```

where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine.

You'll then need a settings file. Run

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
```

This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API key to run the model.

You can then run Auto-GPT with

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
```

to launch the interaction loop.

## CLI Web App

The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point, as the folks who were driving it have had less time (and likely not enough clarity) to proceed.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py)
- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py)

To run, you still need to generate a default configuration. You can do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
```

It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key.

To run, do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
```

This will launch a webserver and then start the client cli application to communicate with it.

:warning: I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to get involved in the re-arch.
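A sketch of setting the one required key programmatically (not part of the diff): the path and the nested `openai.credentials.api_key` layout follow the instructions above; the key value is a placeholder, and this assumes the generated settings file parses as a plain YAML mapping.

```python
# Sketch under the assumptions above.
from pathlib import Path

import yaml

settings_path = Path.home() / "auto-gpt" / "default_agent_settings.yml"
settings = yaml.safe_load(settings_path.read_text())
settings["openai"]["credentials"]["api_key"] = "sk-..."  # your real key here
settings_path.write_text(yaml.safe_dump(settings))
```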
@@ -1,4 +0,0 @@
"""The command system provides a way to extend the functionality of the AI agent."""
from autogpt.core.ability.base import Ability, AbilityRegistry
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
@@ -1,92 +0,0 @@
import abc
from pprint import pformat
from typing import ClassVar

import inflection
from pydantic import Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.simple import LanguageModelConfiguration


class AbilityConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    from autogpt.core.plugin.base import PluginLocation

    location: PluginLocation
    packages_required: list[str] = Field(default_factory=list)
    language_model_required: LanguageModelConfiguration = None
    memory_provider_required: bool = False
    workspace_required: bool = False


class Ability(abc.ABC):
    """A class representing an agent ability."""

    default_configuration: ClassVar[AbilityConfiguration]

    @classmethod
    def name(cls) -> str:
        """The name of the ability."""
        return inflection.underscore(cls.__name__)

    @classmethod
    @abc.abstractmethod
    def description(cls) -> str:
        """A detailed description of what the ability does."""
        ...

    @classmethod
    @abc.abstractmethod
    def arguments(cls) -> dict:
        """A dict of arguments in standard json schema format."""
        ...

    @classmethod
    def required_arguments(cls) -> list[str]:
        """A list of required arguments."""
        return []

    @abc.abstractmethod
    async def __call__(self, *args, **kwargs) -> AbilityResult:
        ...

    def __str__(self) -> str:
        return pformat(self.dump())

    def dump(self) -> dict:
        return {
            "name": self.name(),
            "description": self.description(),
            "parameters": {
                "type": "object",
                "properties": self.arguments(),
                "required": self.required_arguments(),
            },
        }


class AbilityRegistry(abc.ABC):
    @abc.abstractmethod
    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ...

    @abc.abstractmethod
    def list_abilities(self) -> list[str]:
        ...

    @abc.abstractmethod
    def dump_abilities(self) -> list[dict]:
        ...

    @abc.abstractmethod
    def get_ability(self, ability_name: str) -> Ability:
        ...

    @abc.abstractmethod
    def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        ...
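For orientation (not part of the diff): `Ability.dump()` produces a JSON-schema-style function description that can be handed to an LLM function-calling API. A sketch of its shape for a concrete ability, assuming `registry` is an already-built `AbilityRegistry`; the values shown are illustrative.

```python
# Sketch under the assumptions above.
ability = registry.get_ability("query_language_model")
print(ability.dump())
# {
#     "name": "query_language_model",
#     "description": "Query a language model. ...",
#     "parameters": {
#         "type": "object",
#         "properties": {"query": {"type": "string", "description": "..."}},
#         "required": ["query"],
#     },
# }
```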
@@ -1,6 +0,0 @@
from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel

BUILTIN_ABILITIES = {
    QueryLanguageModel.name(): QueryLanguageModel,
}
@@ -1,102 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat


class CreateNewAbility(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.CreateNewAbility",
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
    ):
        self._logger = logger
        self._configuration = configuration

    @classmethod
    def description(cls) -> str:
        return "Create a new ability by writing python code."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "ability_name": {
                "type": "string",
                "description": "A meaningful and concise name for the new ability.",
            },
            "description": {
                "type": "string",
                "description": "A detailed description of the ability and its uses, including any limitations.",
            },
            "arguments": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "The name of the argument.",
                        },
                        "type": {
                            "type": "string",
                            "description": "The type of the argument. Must be a standard json schema type.",
                        },
                        "description": {
                            "type": "string",
                            "description": "A detailed description of the argument and its uses.",
                        },
                    },
                },
                "description": "A list of arguments that the ability will accept.",
            },
            "required_arguments": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The names of the arguments that are required.",
                },
                "description": "A list of the names of the arguments that are required.",
            },
            "package_requirements": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The name of the Python package that is required to execute the ability.",
                },
                "description": "A list of the names of the Python packages that are required to execute the ability.",
            },
            "code": {
                "type": "string",
                "description": "The Python code that will be executed when the ability is called.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return [
            "ability_name",
            "description",
            "arguments",
            "required_arguments",
            "package_requirements",
            "code",
        ]

    async def __call__(
        self,
        ability_name: str,
        description: str,
        arguments: list[dict],
        required_arguments: list[str],
        package_requirements: list[str],
        code: str,
    ) -> AbilityResult:
        raise NotImplementedError
@@ -1,167 +0,0 @@
import logging
import os

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
from autogpt.core.workspace import Workspace


class ReadFile(Ability):
    default_configuration = AbilityConfiguration(
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @property
    def description(self) -> str:
        return "Read and parse all text from a file."

    @property
    def arguments(self) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to read.",
            },
        }

    def _check_preconditions(self, filename: str) -> AbilityResult | None:
        message = ""
        try:
            import charset_normalizer  # noqa: F401
        except ImportError:
            message = "Package charset_normalizer is not installed."

        try:
            file_path = self._workspace.get_path(filename)
            if not file_path.exists():
                message = f"File {filename} does not exist."
            if not file_path.is_file():
                message = f"{filename} is not a file."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename},
                success=False,
                message=message,
                data=None,
            )

    def __call__(self, filename: str) -> AbilityResult:
        if result := self._check_preconditions(filename):
            return result

        from unstructured.partition.auto import partition

        file_path = self._workspace.get_path(filename)
        try:
            elements = partition(str(file_path))
            # TODO: Lots of other potentially useful information is available
            #   in the partitioned file. Consider returning more of it.
            new_knowledge = Knowledge(
                content="\n\n".join([element.text for element in elements]),
                content_type=ContentType.TEXT,
                content_metadata={"filename": filename},
            )
            success = True
            message = f"File {file_path} read successfully."
        except IOError as e:
            new_knowledge = None
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
            new_knowledge=new_knowledge,
        )


class WriteFile(Ability):
    default_configuration = AbilityConfiguration(
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @property
    def description(self) -> str:
        return "Write text to a file."

    @property
    def arguments(self) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to write.",
            },
            "contents": {
                "type": "string",
                "description": "The contents of the file to write.",
            },
        }

    def _check_preconditions(
        self, filename: str, contents: str
    ) -> AbilityResult | None:
        message = ""
        try:
            file_path = self._workspace.get_path(filename)
            if file_path.exists():
                message = f"File {filename} already exists."
            if not len(contents):
                message = f"File {filename} was not given any content."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename, "contents": contents},
                success=False,
                message=message,
                data=None,
            )

    def __call__(self, filename: str, contents: str) -> AbilityResult:
        if result := self._check_preconditions(filename, contents):
            return result

        file_path = self._workspace.get_path(filename)
        try:
            directory = os.path.dirname(file_path)
            os.makedirs(directory, exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(contents)
            success = True
            message = f"File {file_path} written successfully."
        except IOError as e:
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
        )
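A sketch of exercising the two abilities together (not part of the diff): `logger` and `workspace` are assumed to be a `logging.Logger` and an `autogpt.core.workspace` `Workspace` instance that have been constructed elsewhere.

```python
# Sketch under the assumptions above; file name and contents are illustrative.
writer = WriteFile(logger, workspace)
result = writer(filename="notes.txt", contents="hello")
print(result.summary())  # e.g. "write_file(filename=notes.txt): File ... written successfully."

reader = ReadFile(logger, workspace)
print(reader(filename="notes.txt").new_knowledge.content)
```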
@@ -1,78 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.planning.simple import LanguageModelConfiguration
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.resource.model_providers import (
    LanguageModelMessage,
    LanguageModelProvider,
    MessageRole,
    ModelProviderName,
    OpenAIModelName,
)


class QueryLanguageModel(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
        ),
        language_model_required=LanguageModelConfiguration(
            model_name=OpenAIModelName.GPT3,
            provider_name=ModelProviderName.OPENAI,
            temperature=0.9,
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
        language_model_provider: LanguageModelProvider,
    ):
        self._logger = logger
        self._configuration = configuration
        self._language_model_provider = language_model_provider

    @classmethod
    def description(cls) -> str:
        return "Query a language model. A query should be a question and any relevant context."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "query": {
                "type": "string",
                "description": "A query for a language model. A query should contain a question and any relevant context.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return ["query"]

    async def __call__(self, query: str) -> AbilityResult:
        messages = [
            LanguageModelMessage(
                content=query,
                role=MessageRole.USER,
            ),
        ]
        model_response = await self._language_model_provider.create_language_completion(
            model_prompt=messages,
            functions=[],
            model_name=self._configuration.language_model_required.model_name,
            completion_parser=self._parse_response,
        )
        return AbilityResult(
            ability_name=self.name(),
            ability_args={"query": query},
            success=True,
            message=model_response.content["content"],
        )

    @staticmethod
    def _parse_response(response_content: dict) -> dict:
        return {"content": response_content["content"]}
@@ -1,30 +0,0 @@
import enum
from typing import Any

from pydantic import BaseModel


class ContentType(str, enum.Enum):
    # TBD what these actually are.
    TEXT = "text"
    CODE = "code"


class Knowledge(BaseModel):
    content: str
    content_type: ContentType
    content_metadata: dict[str, Any]


class AbilityResult(BaseModel):
    """The AbilityResult is a standard response struct for an ability."""

    ability_name: str
    ability_args: dict[str, str]
    success: bool
    message: str
    new_knowledge: Knowledge = None

    def summary(self) -> str:
        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
        return f"{self.ability_name}({kwargs}): {self.message}"
@@ -1,96 +0,0 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
from autogpt.core.ability.builtins import BUILTIN_ABILITIES
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.plugin.simple import SimplePluginService
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
)
from autogpt.core.workspace.base import Workspace


class AbilityRegistryConfiguration(SystemConfiguration):
    """Configuration for the AbilityRegistry subsystem."""

    abilities: dict[str, AbilityConfiguration]


class AbilityRegistrySettings(SystemSettings):
    configuration: AbilityRegistryConfiguration


class SimpleAbilityRegistry(AbilityRegistry, Configurable):
    default_settings = AbilityRegistrySettings(
        name="simple_ability_registry",
        description="A simple ability registry.",
        configuration=AbilityRegistryConfiguration(
            abilities={
                ability_name: ability.default_configuration
                for ability_name, ability in BUILTIN_ABILITIES.items()
            },
        ),
    )

    def __init__(
        self,
        settings: AbilityRegistrySettings,
        logger: logging.Logger,
        memory: Memory,
        workspace: Workspace,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._memory = memory
        self._workspace = workspace
        self._model_providers = model_providers
        self._abilities = []
        for (
            ability_name,
            ability_configuration,
        ) in self._configuration.abilities.items():
            self.register_ability(ability_name, ability_configuration)

    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ability_class = SimplePluginService.get_plugin(ability_configuration.location)
        ability_args = {
            "logger": self._logger.getChild(ability_name),
            "configuration": ability_configuration,
        }
        if ability_configuration.packages_required:
            # TODO: Check packages are installed and maybe install them.
            pass
        if ability_configuration.memory_provider_required:
            ability_args["memory"] = self._memory
        if ability_configuration.workspace_required:
            ability_args["workspace"] = self._workspace
        if ability_configuration.language_model_required:
            ability_args["language_model_provider"] = self._model_providers[
                ability_configuration.language_model_required.provider_name
            ]
        ability = ability_class(**ability_args)
        self._abilities.append(ability)

    def list_abilities(self) -> list[str]:
        return [
            f"{ability.name()}: {ability.description()}" for ability in self._abilities
        ]

    def dump_abilities(self) -> list[dict]:
        return [ability.dump() for ability in self._abilities]

    def get_ability(self, ability_name: str) -> Ability:
        for ability in self._abilities:
            if ability.name() == ability_name:
                return ability
        raise ValueError(f"Ability '{ability_name}' not found.")

    async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        ability = self.get_ability(ability_name)
        return await ability(**kwargs)
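Note (not part of the original file): a hedged usage sketch of the registry. The constructor dependencies and the ability name are assumptions for illustration; in practice they are wired up by the agent factory shown further below.

async def demo_registry(logger, memory, workspace, model_providers) -> None:
    # Build a registry from the default (builtin) ability configurations.
    registry = SimpleAbilityRegistry(
        SimpleAbilityRegistry.default_settings,
        logger,
        memory,
        workspace,
        model_providers,
    )
    print(registry.list_abilities())
    # Ability name is illustrative; perform() dispatches by name and awaits it.
    result = await registry.perform("query_language_model", query="What is AutoGPT?")
    print(result.summary())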
@@ -1,3 +0,0 @@
"""The Agent is an autonomous entity guided by an LLM provider."""
from autogpt.core.agent.base import Agent
from autogpt.core.agent.simple import AgentSettings, SimpleAgent
@@ -1,26 +0,0 @@
import abc
import logging
from pathlib import Path


class Agent(abc.ABC):
    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        ...

    @classmethod
    @abc.abstractmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "Agent":
        ...

    @abc.abstractmethod
    async def determine_next_ability(self, *args, **kwargs):
        ...

    @abc.abstractmethod
    def __repr__(self):
        ...
@@ -1,391 +0,0 @@
import logging
from datetime import datetime
from pathlib import Path
from typing import Any

from pydantic import BaseModel

from autogpt.core.ability import (
    AbilityRegistrySettings,
    AbilityResult,
    SimpleAbilityRegistry,
)
from autogpt.core.agent.base import Agent
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory import MemorySettings, SimpleMemory
from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
from autogpt.core.plugin.simple import (
    PluginLocation,
    PluginStorageFormat,
    SimplePluginService,
)
from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings


class AgentSystems(SystemConfiguration):
    ability_registry: PluginLocation
    memory: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation


class AgentConfiguration(SystemConfiguration):
    cycle_count: int
    max_task_cycle_count: int
    creation_time: str
    name: str
    role: str
    goals: list[str]
    systems: AgentSystems


class AgentSystemSettings(SystemSettings):
    configuration: AgentConfiguration


class AgentSettings(BaseModel):
    agent: AgentSystemSettings
    ability_registry: AbilityRegistrySettings
    memory: MemorySettings
    openai_provider: OpenAISettings
    planning: PlannerSettings
    workspace: WorkspaceSettings

    def update_agent_name_and_goals(self, agent_goals: dict) -> None:
        self.agent.configuration.name = agent_goals["agent_name"]
        self.agent.configuration.role = agent_goals["agent_role"]
        self.agent.configuration.goals = agent_goals["agent_goals"]


class SimpleAgent(Agent, Configurable):
    default_settings = AgentSystemSettings(
        name="simple_agent",
        description="A simple agent.",
        configuration=AgentConfiguration(
            name="Entrepreneur-GPT",
            role=(
                "An AI designed to autonomously develop and run businesses with "
                "the sole goal of increasing your net worth."
            ),
            goals=[
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ],
            cycle_count=0,
            max_task_cycle_count=3,
            creation_time="",
            systems=AgentSystems(
                ability_registry=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.ability.SimpleAbilityRegistry",
                ),
                memory=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.memory.SimpleMemory",
                ),
                openai_provider=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.resource.model_providers.OpenAIProvider",
                ),
                planning=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.planning.SimplePlanner",
                ),
                workspace=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.workspace.SimpleWorkspace",
                ),
            ),
        ),
    )

    def __init__(
        self,
        settings: AgentSystemSettings,
        logger: logging.Logger,
        ability_registry: SimpleAbilityRegistry,
        memory: SimpleMemory,
        openai_provider: OpenAIProvider,
        planning: SimplePlanner,
        workspace: SimpleWorkspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._ability_registry = ability_registry
        self._memory = memory
        # FIXME: Need some work to make this work as a dict of providers
        # Getting the construction of the config to work is a bit tricky
        self._openai_provider = openai_provider
        self._planning = planning
        self._workspace = workspace
        self._task_queue = []
        self._completed_tasks = []
        self._current_task = None
        self._next_ability = None

    @classmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "SimpleAgent":
        agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
        agent_args = {}

        agent_args["settings"] = agent_settings.agent
        agent_args["logger"] = logger
        agent_args["workspace"] = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger,
        )
        agent_args["openai_provider"] = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger,
        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,
            logger,
            model_providers={"openai": agent_args["openai_provider"]},
        )
        agent_args["memory"] = cls._get_system_instance(
            "memory",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
        )

        agent_args["ability_registry"] = cls._get_system_instance(
            "ability_registry",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
            memory=agent_args["memory"],
            model_providers={"openai": agent_args["openai_provider"]},
        )

        return cls(**agent_args)

    async def build_initial_plan(self) -> dict:
        plan = await self._planning.make_initial_plan(
            agent_name=self._configuration.name,
            agent_role=self._configuration.role,
            agent_goals=self._configuration.goals,
            abilities=self._ability_registry.list_abilities(),
        )
        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]

        # TODO: Should probably do a step to evaluate the quality of the generated tasks,
        #  and ensure that they have actionable ready and acceptance criteria

        self._task_queue.extend(tasks)
        self._task_queue.sort(key=lambda t: t.priority, reverse=True)
        self._task_queue[-1].context.status = TaskStatus.READY
        return plan.content

    async def determine_next_ability(self, *args, **kwargs):
        if not self._task_queue:
            return {"response": "I don't have any tasks to work on right now."}

        self._configuration.cycle_count += 1
        task = self._task_queue.pop()
        self._logger.info(f"Working on task: {task}")

        task = await self._evaluate_task_and_add_context(task)
        next_ability = await self._choose_next_ability(
            task,
            self._ability_registry.dump_abilities(),
        )
        self._current_task = task
        self._next_ability = next_ability.content
        return self._current_task, self._next_ability

    async def execute_next_ability(self, user_input: str, *args, **kwargs):
        if user_input == "y":
            ability = self._ability_registry.get_ability(
                self._next_ability["next_ability"]
            )
            ability_response = await ability(**self._next_ability["ability_arguments"])
            await self._update_tasks_and_memory(ability_response)
            if self._current_task.context.status == TaskStatus.DONE:
                self._completed_tasks.append(self._current_task)
            else:
                self._task_queue.append(self._current_task)
            self._current_task = None
            self._next_ability = None

            return ability_response.dict()
        else:
            raise NotImplementedError

    async def _evaluate_task_and_add_context(self, task: Task) -> Task:
        """Evaluate the task and add context to it."""
        if task.context.status == TaskStatus.IN_PROGRESS:
            # Nothing to do here
            return task
        else:
            self._logger.debug(f"Evaluating task {task} and adding relevant context.")
            # TODO: Look up relevant memories (need working memory system)
            # TODO: Evaluate whether there is enough information to start the task (language model call).
            task.context.enough_info = True
            task.context.status = TaskStatus.IN_PROGRESS
            return task

    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
        """Choose the next ability to use for the task."""
        self._logger.debug(f"Choosing next ability for task {task}.")
        if task.context.cycle_count > self._configuration.max_task_cycle_count:
            # Don't hit the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        elif not task.context.enough_info:
            # Don't ask the LLM, just set the next action as "breakdown_task" with an appropriate reason
            raise NotImplementedError
        else:
            next_ability = await self._planning.determine_next_ability(
                task, ability_schema
            )
            return next_ability

    async def _update_tasks_and_memory(self, ability_result: AbilityResult):
        self._current_task.context.cycle_count += 1
        self._current_task.context.prior_actions.append(ability_result)
        # TODO: Summarize new knowledge
        # TODO: store knowledge and summaries in memory and in relevant tasks
        # TODO: evaluate whether the task is complete

    def __repr__(self):
        return "SimpleAgent()"

    ################################################################
    # Factory interface for agent bootstrapping and initialization #
    ################################################################

    @classmethod
    def build_user_configuration(cls) -> dict[str, Any]:
        """Build the user's configuration."""
        configuration_dict = {
            "agent": cls.get_user_config(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        for system_name, system_location in system_locations.items():
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.get_user_config()
        configuration_dict = _prune_empty_dicts(configuration_dict)
        return configuration_dict

    @classmethod
    def compile_settings(
        cls, logger: logging.Logger, user_configuration: dict
    ) -> AgentSettings:
        """Compile the user's configuration with the defaults."""
        logger.debug("Processing agent system configuration.")
        configuration_dict = {
            "agent": cls.build_agent_configuration(
                user_configuration.get("agent", {})
            ).dict(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]

        # Build up default configuration
        for system_name, system_location in system_locations.items():
            logger.debug(f"Compiling configuration for system {system_name}")
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.build_agent_configuration(
                user_configuration.get(system_name, {})
            ).dict()

        return AgentSettings.parse_obj(configuration_dict)

    @classmethod
    async def determine_agent_name_and_goals(
        cls,
        user_objective: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ) -> dict:
        logger.debug("Loading OpenAI provider.")
        provider: OpenAIProvider = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger=logger,
        )
        logger.debug("Loading agent planner.")
        agent_planner: SimplePlanner = cls._get_system_instance(
            "planning",
            agent_settings,
            logger=logger,
            model_providers={"openai": provider},
        )
        logger.debug("Determining agent name and goals.")
        model_response = await agent_planner.decide_name_and_goals(
            user_objective,
        )

        return model_response.content

    @classmethod
    def provision_agent(
        cls,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ):
        agent_settings.agent.configuration.creation_time = datetime.now().strftime(
            "%Y%m%d_%H%M%S"
        )
        workspace: SimpleWorkspace = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger=logger,
        )
        return workspace.setup_workspace(agent_settings, logger)

    @classmethod
    def _get_system_instance(
        cls,
        system_name: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
        *args,
        **kwargs,
    ):
        system_locations = agent_settings.agent.configuration.systems.dict()

        system_settings = getattr(agent_settings, system_name)
        system_class = SimplePluginService.get_plugin(system_locations[system_name])
        system_instance = system_class(
            system_settings,
            *args,
            logger=logger.getChild(system_name),
            **kwargs,
        )
        return system_instance


def _prune_empty_dicts(d: dict) -> dict:
    """
    Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves.

    Args:
        d: The dictionary to prune.

    Returns:
        The pruned dictionary.
    """
    pruned = {}
    for key, value in d.items():
        if isinstance(value, dict):
            pruned_value = _prune_empty_dicts(value)
            if pruned_value:
                # if the pruned dictionary is not empty, add it to the result
                pruned[key] = pruned_value
        else:
            pruned[key] = value
    return pruned
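Note (not part of the original file): taken together, the factory methods above suggest a bootstrapping sequence roughly like the following sketch. The driver function, the event-loop wiring, and the assumption that provision_agent returns the new workspace root are illustrative, not from the source.

import asyncio
import logging

async def bootstrap(user_objective: str) -> "SimpleAgent":
    logger = logging.getLogger("agent")
    # Collect user-overridable fields, then merge them with the defaults.
    user_config = SimpleAgent.build_user_configuration()
    agent_settings = SimpleAgent.compile_settings(logger, user_config)
    # Let the model pick a name, role, and goals for the objective.
    name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
        user_objective, agent_settings, logger
    )
    agent_settings.update_agent_name_and_goals(name_and_goals)
    # Persist the settings to a fresh workspace, then rehydrate the agent from it.
    workspace_path = SimpleAgent.provision_agent(agent_settings, logger)
    return SimpleAgent.from_workspace(workspace_path, logger)

agent = asyncio.run(bootstrap("Write a wikipedia style article about Auto-GPT"))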
@@ -1,7 +0,0 @@
"""The configuration encapsulates settings for all Agent subsystems."""
from autogpt.core.configuration.schema import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
@@ -1,107 +0,0 @@
import abc
import typing
from typing import Any, Generic, TypeVar

from pydantic import BaseModel, Field


def UserConfigurable(*args, **kwargs):
    return Field(*args, **kwargs, user_configurable=True)


class SystemConfiguration(BaseModel):
    def get_user_config(self) -> dict[str, Any]:
        return _get_user_config_fields(self)

    class Config:
        extra = "forbid"
        use_enum_values = True


class SystemSettings(BaseModel):
    """A base class for all system settings."""

    name: str
    description: str

    class Config:
        extra = "forbid"
        use_enum_values = True


S = TypeVar("S", bound=SystemSettings)


class Configurable(abc.ABC, Generic[S]):
    """A base class for all configurable objects."""

    prefix: str = ""
    default_settings: typing.ClassVar[S]

    @classmethod
    def get_user_config(cls) -> dict[str, Any]:
        return _get_user_config_fields(cls.default_settings)

    @classmethod
    def build_agent_configuration(cls, configuration: dict) -> S:
        """Process the configuration for this object."""
        defaults = cls.default_settings.dict()
        final_configuration = deep_update(defaults, configuration)

        return cls.default_settings.__class__.parse_obj(final_configuration)


def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
    """
    Get the user config fields of a Pydantic model instance.

    Args:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """
    user_config_fields = {}

    for name, value in instance.__dict__.items():
        field_info = instance.__fields__[name]
        if "user_configurable" in field_info.field_info.extra:
            user_config_fields[name] = value
        elif isinstance(value, SystemConfiguration):
            user_config_fields[name] = value.get_user_config()
        elif isinstance(value, list) and all(
            isinstance(i, SystemConfiguration) for i in value
        ):
            user_config_fields[name] = [i.get_user_config() for i in value]
        elif isinstance(value, dict) and all(
            isinstance(i, SystemConfiguration) for i in value.values()
        ):
            user_config_fields[name] = {
                k: v.get_user_config() for k, v in value.items()
            }

    return user_config_fields


def deep_update(original_dict: dict, update_dict: dict) -> dict:
    """
    Recursively update a dictionary.

    Args:
        original_dict (dict): The dictionary to be updated.
        update_dict (dict): The dictionary to update with.

    Returns:
        dict: The updated dictionary.
    """
    for key, value in update_dict.items():
        if (
            key in original_dict
            and isinstance(original_dict[key], dict)
            and isinstance(value, dict)
        ):
            original_dict[key] = deep_update(original_dict[key], value)
        else:
            original_dict[key] = value
    return original_dict
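Note (not part of the original file): a minimal sketch of `deep_update` semantics with illustrative data. Nested dictionaries are merged key by key, while any other value in the update simply overwrites the original.

defaults = {"models": {"fast": "gpt-3.5", "smart": "gpt-4"}, "retries": 3}
overrides = {"models": {"smart": "gpt-4-32k"}, "retries": 5}
merged = deep_update(defaults, overrides)
# merged == {"models": {"fast": "gpt-3.5", "smart": "gpt-4-32k"}, "retries": 5}
# Note that deep_update mutates and returns `defaults` in place.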
@@ -1,3 +0,0 @@
"""The memory subsystem manages the Agent's long-term memory."""
from autogpt.core.memory.base import Memory
from autogpt.core.memory.simple import MemorySettings, SimpleMemory
@@ -1,13 +0,0 @@
import abc


class Memory(abc.ABC):
    pass


class MemoryItem(abc.ABC):
    pass


class MessageHistory(abc.ABC):
    pass
@@ -1,47 +0,0 @@
import json
import logging

from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.workspace import Workspace


class MemoryConfiguration(SystemConfiguration):
    pass


class MemorySettings(SystemSettings):
    configuration: MemoryConfiguration


class MessageHistory:
    def __init__(self, previous_message_history: list[str]):
        self._message_history = previous_message_history


class SimpleMemory(Memory, Configurable):
    default_settings = MemorySettings(
        name="simple_memory",
        description="A simple memory.",
        configuration=MemoryConfiguration(),
    )

    def __init__(
        self,
        settings: MemorySettings,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._message_history = self._load_message_history(workspace)

    @staticmethod
    def _load_message_history(workspace: Workspace):
        message_history_path = workspace.get_path("message_history.json")
        if message_history_path.exists():
            with message_history_path.open("r") as f:
                message_history = json.load(f)
        else:
            message_history = []
        return MessageHistory(message_history)
@@ -1,10 +0,0 @@
"""The planning system organizes the Agent's activities."""
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    LanguageModelResponse,
    Task,
    TaskStatus,
    TaskType,
)
from autogpt.core.planning.simple import PlannerSettings, SimplePlanner
@@ -1,76 +0,0 @@
import abc

from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)


# class Planner(abc.ABC):
#     """Manages the agent's planning and goal-setting by constructing language model prompts."""
#
#     @staticmethod
#     @abc.abstractmethod
#     async def decide_name_and_goals(
#         user_objective: str,
#     ) -> LanguageModelResponse:
#         """Decide the name and goals of an Agent from a user-defined objective.
#
#         Args:
#             user_objective: The user-defined objective for the agent.
#
#         Returns:
#             The agent name and goals as a response from the language model.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     async def plan(self, context: PlanningContext) -> LanguageModelResponse:
#         """Plan the next ability for the Agent.
#
#         Args:
#             context: A context object containing information about the agent's
#                 progress, result, memories, and feedback.
#
#
#         Returns:
#             The next ability the agent should take along with thoughts and reasoning.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     def reflect(
#         self,
#         context: ReflectionContext,
#     ) -> LanguageModelResponse:
#         """Reflect on a planned ability and provide self-criticism.
#
#
#         Args:
#             context: A context object containing information about the agent's
#                 reasoning, plan, thoughts, and criticism.
#
#         Returns:
#             Self-criticism about the agent's plan.
#
#         """
#         ...


class PromptStrategy(abc.ABC):
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> dict:
        ...
@@ -1,76 +0,0 @@
import enum

from pydantic import BaseModel, Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.resource.model_providers.schema import (
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProviderModelResponse,
)


class LanguageModelClassification(str, enum.Enum):
    """The LanguageModelClassification is a functional description of the model.

    This is used to determine what kind of model to use for a given prompt.
    Sometimes we prefer a faster or cheaper model to accomplish a task when
    possible.

    """

    FAST_MODEL: str = "fast_model"
    SMART_MODEL: str = "smart_model"


class LanguageModelPrompt(BaseModel):
    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])


class LanguageModelResponse(LanguageModelProviderModelResponse):
    """Standard response struct for a response from a language model."""


class TaskType(str, enum.Enum):
    RESEARCH: str = "research"
    WRITE: str = "write"
    EDIT: str = "edit"
    CODE: str = "code"
    DESIGN: str = "design"
    TEST: str = "test"
    PLAN: str = "plan"


class TaskStatus(str, enum.Enum):
    BACKLOG: str = "backlog"
    READY: str = "ready"
    IN_PROGRESS: str = "in_progress"
    DONE: str = "done"


class TaskContext(BaseModel):
    cycle_count: int = 0
    status: TaskStatus = TaskStatus.BACKLOG
    parent: "Task" = None
    prior_actions: list[AbilityResult] = Field(default_factory=list)
    memories: list = Field(default_factory=list)
    user_input: list[str] = Field(default_factory=list)
    supplementary_info: list[str] = Field(default_factory=list)
    enough_info: bool = False


class Task(BaseModel):
    objective: str
    type: str  # TaskType  FIXME: gpt does not obey the enum parameter in its schema
    priority: int
    ready_criteria: list[str]
    acceptance_criteria: list[str]
    context: TaskContext = Field(default_factory=TaskContext)


# Need to resolve the circular dependency between Task and TaskContext once both models are defined.
TaskContext.update_forward_refs()
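Note (not part of the original file): a minimal sketch of a `Task` as the planner's plan-creation function schema would produce it; the field values are illustrative.

task = Task(
    objective="Draft an outline for the article",
    type=TaskType.WRITE.value,  # plain str; see the FIXME on the `type` field above
    priority=7,
    ready_criteria=["Research notes are saved to a file"],
    acceptance_criteria=["An outline with at least five sections exists"],
)
assert task.context.status == TaskStatus.BACKLOG  # context is created by default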
@@ -1,182 +0,0 @@
import logging
import platform
import time

import distro

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.planning import strategies
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelResponse,
    Task,
)
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
    OpenAIModelName,
)
from autogpt.core.workspace import Workspace


class LanguageModelConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    model_name: str = UserConfigurable()
    provider_name: ModelProviderName = UserConfigurable()
    temperature: float = UserConfigurable()


class PromptStrategiesConfiguration(SystemConfiguration):
    name_and_goals: strategies.NameAndGoalsConfiguration
    initial_plan: strategies.InitialPlanConfiguration
    next_ability: strategies.NextAbilityConfiguration


class PlannerConfiguration(SystemConfiguration):
    """Configuration for the Planner subsystem."""

    models: dict[LanguageModelClassification, LanguageModelConfiguration]
    prompt_strategies: PromptStrategiesConfiguration


class PlannerSettings(SystemSettings):
    """Settings for the Planner subsystem."""

    configuration: PlannerConfiguration


class SimplePlanner(Configurable):
    """Manages the agent's planning and goal-setting by constructing language model prompts."""

    default_settings = PlannerSettings(
        name="planner",
        description="Manages the agent's planning and goal-setting by constructing language model prompts.",
        configuration=PlannerConfiguration(
            models={
                LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT3,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
                LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT4,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
            },
            prompt_strategies=PromptStrategiesConfiguration(
                name_and_goals=strategies.NameAndGoals.default_configuration,
                initial_plan=strategies.InitialPlan.default_configuration,
                next_ability=strategies.NextAbility.default_configuration,
            ),
        ),
    )

    def __init__(
        self,
        settings: PlannerSettings,
        logger: logging.Logger,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
        workspace: Workspace = None,  # Workspace is not available during bootstrapping.
    ) -> None:
        self._configuration = settings.configuration
        self._logger = logger
        self._workspace = workspace

        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
        for model, model_config in self._configuration.models.items():
            self._providers[model] = model_providers[model_config.provider_name]

        self._prompt_strategies = {
            "name_and_goals": strategies.NameAndGoals(
                **self._configuration.prompt_strategies.name_and_goals.dict()
            ),
            "initial_plan": strategies.InitialPlan(
                **self._configuration.prompt_strategies.initial_plan.dict()
            ),
            "next_ability": strategies.NextAbility(
                **self._configuration.prompt_strategies.next_ability.dict()
            ),
        }

    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["name_and_goals"],
            user_objective=user_objective,
        )

    async def make_initial_plan(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
    ) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["initial_plan"],
            agent_name=agent_name,
            agent_role=agent_role,
            agent_goals=agent_goals,
            abilities=abilities,
        )

    async def determine_next_ability(
        self,
        task: Task,
        ability_schema: list[dict],
    ):
        return await self.chat_with_model(
            self._prompt_strategies["next_ability"],
            task=task,
            ability_schema=ability_schema,
        )

    async def chat_with_model(
        self,
        prompt_strategy: PromptStrategy,
        **kwargs,
    ) -> LanguageModelResponse:
        model_classification = prompt_strategy.model_classification
        model_configuration = self._configuration.models[model_classification].dict()
        self._logger.debug(f"Using model configuration: {model_configuration}")
        del model_configuration["provider_name"]
        provider = self._providers[model_classification]

        template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
        template_kwargs.update(kwargs)
        prompt = prompt_strategy.build_prompt(**template_kwargs)

        self._logger.debug(f"Using prompt:\n{prompt}\n\n")
        response = await provider.create_language_completion(
            model_prompt=prompt.messages,
            functions=prompt.functions,
            **model_configuration,
            completion_parser=prompt_strategy.parse_response_content,
        )
        return LanguageModelResponse.parse_obj(response.dict())

    def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
        provider = self._providers[strategy.model_classification]
        template_kwargs = {
            "os_info": get_os_info(),
            "api_budget": provider.get_remaining_budget(),
            "current_time": time.strftime("%c"),
        }
        return template_kwargs


def get_os_info() -> str:
    os_name = platform.system()
    os_info = (
        platform.platform(terse=True)
        if os_name != "Linux"
        else distro.name(pretty=True)
    )
    return os_info
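Note (not part of the original file): a hedged sketch of driving the planner directly. `openai_provider` is assumed to be an already-constructed OpenAIProvider instance; the string key "openai" mirrors how SimpleAgent.from_workspace wires providers above.

import logging

async def demo_planner(openai_provider) -> None:
    planner = SimplePlanner(
        SimplePlanner.default_settings,
        logging.getLogger("planner"),
        model_providers={"openai": openai_provider},
    )
    # Delegates to the "name_and_goals" prompt strategy via chat_with_model().
    response = await planner.decide_name_and_goals("Help me with marketing my business")
    print(response.content)  # e.g. agent_name, agent_role, agent_goals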
@@ -1,12 +0,0 @@
from autogpt.core.planning.strategies.initial_plan import (
    InitialPlan,
    InitialPlanConfiguration,
)
from autogpt.core.planning.strategies.name_and_goals import (
    NameAndGoals,
    NameAndGoalsConfiguration,
)
from autogpt.core.planning.strategies.next_ability import (
    NextAbility,
    NextAbilityConfiguration,
)
@@ -1,190 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
    TaskType,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class InitialPlanConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_plan_function: dict = UserConfigurable()


class InitialPlan(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
        "You are an expert project planner. Your responsibility is to create work plans for autonomous agents. "
        "You will be given a name, a role, and a set of goals for the agent to accomplish. Your job is to "
        "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. "
        "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined "
        "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should "
        "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. "
        "You should create as many tasks as you think are necessary to accomplish the goals.\n\n"
        "System Info:\n{system_info}"
    )

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}"
    )

    DEFAULT_CREATE_PLAN_FUNCTION = {
        "name": "create_initial_agent_plan",
        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
        "parameters": {
            "type": "object",
            "properties": {
                "task_list": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "objective": {
                                "type": "string",
                                "description": "An imperative verb phrase that succinctly describes the task.",
                            },
                            "type": {
                                "type": "string",
                                "description": "A categorization for the task.",
                                "enum": [t.value for t in TaskType],
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
                                },
                            },
                            "priority": {
                                "type": "integer",
                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
                                "minimum": 1,
                                "maximum": 10,
                            },
                            "ready_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
                                },
                            },
                        },
                        "required": [
                            "objective",
                            "type",
                            "acceptance_criteria",
                            "priority",
                            "ready_criteria",
                        ],
                    },
                },
            },
        },
    }

    default_configuration = InitialPlanConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        create_plan_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._create_plan_function = create_plan_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "agent_name": agent_name,
            "agent_role": agent_role,
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }
        template_kwargs["agent_goals"] = to_numbered_list(
            agent_goals, **template_kwargs
        )
        template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info, **template_kwargs
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        create_plan_function = LanguageModelFunction(
            json_schema=self._create_plan_function,
        )

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=[create_plan_function],
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        parsed_response["task_list"] = [
            Task.parse_obj(task) for task in parsed_response["task_list"]
        ]
        return parsed_response
@@ -1,139 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)
from autogpt.core.planning.strategies.utils import json_loads
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NameAndGoalsConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt: str = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_agent_function: dict = UserConfigurable()


class NameAndGoals(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT = (
        "Your job is to respond to a user-defined task by invoking the `create_agent` function "
        "to generate an autonomous agent to complete the task. You should supply a role-based "
        "name for the agent, an informative description for what the agent does, and 1 to 5 "
        "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
        "Example Input:\n"
        "Help me with marketing my business\n\n"
        "Example Function Call:\n"
        "create_agent(name='CMOGPT', "
        "description='A professional digital marketer AI that assists Solopreneurs in "
        "growing their businesses by providing world-class expertise in solving "
        "marketing problems for SaaS, content products, agencies, and more.', "
        "goals=['Engage in effective problem-solving, prioritization, planning, and "
        "supporting execution to address your marketing needs as your virtual Chief "
        "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
        "help you make informed decisions without the use of platitudes or overly "
        "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
        "campaigns that maximize results with minimal time and budget investment.', "
        "'Proactively take the lead in guiding you and offering suggestions when faced "
        "with unclear information or uncertainty to ensure your marketing strategy "
        "remains on track.'])"
    )

    DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"

    DEFAULT_CREATE_AGENT_FUNCTION = {
        "name": "create_agent",
        "description": "Create a new autonomous AI agent to complete a given task.",
        "parameters": {
            "type": "object",
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "A short role-based name for an autonomous agent.",
                },
                "agent_role": {
                    "type": "string",
                    "description": "An informative one sentence description of what the AI agent does.",
                },
                "agent_goals": {
                    "type": "array",
                    "minItems": 1,
                    "maxItems": 5,
                    "items": {
                        "type": "string",
                    },
                    "description": (
                        "One to five highly effective goals that are optimally aligned with the completion of a "
                        "specific task. The number and complexity of the goals should correspond to the "
                        "complexity of the agent's primary objective."
                    ),
                },
            },
            "required": ["agent_name", "agent_role", "agent_goals"],
        },
    }

    default_configuration = NameAndGoalsConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = create_agent_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
        system_message = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_message,
        )
        user_message = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(
                user_objective=user_objective,
            ),
        )
        create_agent_function = LanguageModelFunction(
            json_schema=self._create_agent_function,
        )
        prompt = LanguageModelPrompt(
            messages=[system_message, user_message],
            functions=[create_agent_function],
            # TODO
            tokens_used=0,
        )
        return prompt

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        return parsed_response
@@ -1,183 +0,0 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NextAbilityConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    additional_ability_arguments: dict = UserConfigurable()


class NextAbility(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "Your current task is {task_objective}.\n"
        "You have taken {cycle_count} actions on this task already. "
        "Here are the actions you have taken and their results:\n"
        "{action_history}\n\n"
        "Here is additional information that may be useful to you:\n"
        "{additional_info}\n\n"
        "Additionally, you should consider the following:\n"
        "{user_input}\n\n"
        "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n"
        "{acceptance_criteria}\n\n"
        "Please choose one of the provided functions to accomplish this task. "
        "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that "
        "you think is most appropriate for the current situation given your progress so far."
    )

    DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
        "motivation": {
            "type": "string",
            "description": "Your justification for choosing this function instead of a different one.",
        },
        "self_criticism": {
            "type": "string",
            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
        },
        "reasoning": {
            "type": "string",
            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
        },
    }

    default_configuration = NextAbilityConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        additional_ability_arguments: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._additional_ability_arguments = additional_ability_arguments

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        task: Task,
        ability_schema: list[dict],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }

        for ability in ability_schema:
            ability["parameters"]["properties"].update(
                self._additional_ability_arguments
            )
            ability["parameters"]["required"] += list(
                self._additional_ability_arguments.keys()
            )

        template_kwargs["task_objective"] = task.objective
        template_kwargs["cycle_count"] = task.context.cycle_count
        template_kwargs["action_history"] = to_numbered_list(
            [action.summary() for action in task.context.prior_actions],
            no_items_response="You have not taken any actions yet.",
            **template_kwargs,
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
            + [info for info in task.context.supplementary_info],
            no_items_response="There is no additional information available at this time.",
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )
        template_kwargs["acceptance_criteria"] = to_numbered_list(
            [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
            **template_kwargs,
        )

        template_kwargs["system_info"] = to_numbered_list(
            self._system_info,
            **template_kwargs,
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        functions = [
            LanguageModelFunction(json_schema=ability) for ability in ability_schema
        ]

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=functions,
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        function_name = response_content["function_call"]["name"]
        function_arguments = json_loads(response_content["function_call"]["arguments"])
        parsed_response = {
            "motivation": function_arguments.pop("motivation"),
            "self_criticism": function_arguments.pop("self_criticism"),
            "reasoning": function_arguments.pop("reasoning"),
            "next_ability": function_name,
            "ability_arguments": function_arguments,
        }
        return parsed_response
@@ -1,27 +0,0 @@
import ast
import json


def to_numbered_list(
    items: list[str], no_items_response: str = "", **template_args
) -> str:
    if items:
        return "\n".join(
            f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
        )
    else:
        return no_items_response


def json_loads(json_str: str):
    # TODO: this is a hack function for now. Trying to see what errors show up in testing.
    # Can hopefully just replace with a call to ast.literal_eval (the function api still
    # sometimes returns json strings with minor issues like trailing commas).
    try:
        return json.loads(json_str)
    except json.decoder.JSONDecodeError as e:
        try:
            print(f"json decode error {e}. trying literal eval")
            return ast.literal_eval(json_str)
        except Exception:
            breakpoint()
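Note (not part of the original file): a minimal sketch of `to_numbered_list` behavior with illustrative inputs. Each item is itself treated as a format template, which is why the prompt strategies above thread their template kwargs through every call.

print(to_numbered_list(["The OS you are running on is: {os_info}"], os_info="Linux"))
# -> 1. The OS you are running on is: Linux
print(to_numbered_list([], no_items_response="You have not taken any actions yet."))
# -> You have not taken any actions yet.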
@@ -1,102 +0,0 @@
# Rules of thumb:
# - Templates don't add new lines at the end of the string. This is the
#   responsibility of the consuming template.

####################
# Planner defaults #
####################


USER_OBJECTIVE = (
    "Write a wikipedia style article about the project: "
    "https://github.com/significant-gravitas/Auto-GPT"
)


ABILITIES = (
    'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
    'execute_python_file: Execute Python File, args: "filename": "<filename>"',
    'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
    'delete_file: Delete file, args: "filename": "<filename>"',
    'list_files: List Files in Directory, args: "directory": "<directory>"',
    'read_file: Read a file, args: "filename": "<filename>"',
    'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
    'google: Google Search, args: "query": "<query>"',
    'improve_code: Get Improved Code, args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
    'browse_website: Browse Website, args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"',
    'write_tests: Write Tests, args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
    'get_hyperlinks: Get hyperlinks, args: "url": "<url>"',
    'get_text_summary: Get text summary, args: "url": "<url>", "question": "<question>"',
    'task_complete: Task Complete (Shutdown), args: "reason": "<reason>"',
)


# Plan Prompt
# -----------


PLAN_PROMPT_CONSTRAINTS = (
    "~4000 word limit for short term memory. Your short term memory is short, so "
    "immediately save important information to files.",
    "If you are unsure how you previously did something or want to recall past "
    "events, thinking about similar events will help you remember.",
    "No user assistance",
    "Exclusively use the commands listed below e.g. command_name",
)

PLAN_PROMPT_RESOURCES = (
    "Internet access for searches and information gathering.",
    "Long-term memory management.",
    "File output.",
)

PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
    "Continuously review and analyze your actions to ensure you are performing to"
    " the best of your abilities.",
    "Constructively self-criticize your big-picture behavior constantly.",
    "Reflect on past decisions and strategies to refine your approach.",
    "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
    " the least number of steps.",
    "Write all code to a file",
)


PLAN_PROMPT_RESPONSE_DICT = {
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user",
    },
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

PLAN_PROMPT_RESPONSE_FORMAT = (
    "You should only respond in JSON format as described below\n"
    "Response Format:\n"
    "{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)

PLAN_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)

PLAN_PROMPT_MAIN = (
    "{header}\n\n"
    "GOALS:\n\n{goals}\n\n"
    "Info:\n{info}\n\n"
    "Constraints:\n{constraints}\n\n"
    "Commands:\n{commands}\n\n"
    "Resources:\n{resources}\n\n"
    "Performance Evaluations:\n{performance_evaluations}\n\n"
    "You should only respond in JSON format as described below\n"
    "Response Format:\n{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)


###########################
# Parameterized templates #
###########################
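A hedged sketch of how these pieces could compose into a full prompt. The header, goals, and info values are placeholders, and the use of `to_numbered_list` mirrors the helper shown earlier in this diff rather than the planner's actual wiring:

```python
import json

# Assumes the template constants above and to_numbered_list are in scope.
prompt = PLAN_PROMPT_MAIN.format(
    header="You are Auto-GPT.",                     # placeholder header
    goals=to_numbered_list(["Write the article"]),  # placeholder goal
    info="Current time: <timestamp>",               # placeholder info
    constraints=to_numbered_list(list(PLAN_PROMPT_CONSTRAINTS)),
    commands=to_numbered_list(list(ABILITIES)),
    resources=to_numbered_list(list(PLAN_PROMPT_RESOURCES)),
    performance_evaluations=to_numbered_list(list(PLAN_PROMPT_PERFORMANCE_EVALUATIONS)),
    response_json_structure=json.dumps(PLAN_PROMPT_RESPONSE_DICT, indent=4),
)
```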
@@ -1,2 +0,0 @@
"""The plugin system allows the Agent to be extended with new functionality."""
from autogpt.core.plugin.base import PluginService
@@ -1,155 +0,0 @@
import abc
import enum
from typing import TYPE_CHECKING, Type

from pydantic import BaseModel

from autogpt.core.configuration import SystemConfiguration, UserConfigurable

if TYPE_CHECKING:
    from autogpt.core.ability import Ability, AbilityRegistry
    from autogpt.core.memory import Memory
    from autogpt.core.resource.model_providers import (
        EmbeddingModelProvider,
        LanguageModelProvider,
    )

# Expand to other types as needed
PluginType = (
    Type[Ability]  # Swappable now
    | Type[AbilityRegistry]  # Swappable maybe never
    | Type[LanguageModelProvider]  # Swappable soon
    | Type[EmbeddingModelProvider]  # Swappable soon
    | Type[Memory]  # Swappable now
    # | Type[Planner]  # Swappable soon
)


class PluginStorageFormat(str, enum.Enum):
    """Supported plugin storage formats.

    Plugins can be stored at one of these supported locations.

    """

    INSTALLED_PACKAGE = "installed_package"  # Required now, loads system defaults
    WORKSPACE = "workspace"  # Required now
    # OPENAPI_URL = "open_api_url"  # Soon (requires some tooling we don't have yet).
    # OTHER_FILE_PATH = "other_file_path"  # Maybe later (maybe now)
    # GIT = "git"  # Maybe later (or soon)
    # PYPI = "pypi"  # Maybe later
    # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service"  # Long term solution, requires design
    # AUTO = "auto"  # Feature for later maybe, automatically find plugin.


# Installed package example
# PluginLocation(
#     storage_format='installed_package',
#     storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# Workspace example
# PluginLocation(
#     storage_format='workspace',
#     storage_route='relative/path/to/plugin.pkl'
#     OR
#     storage_route='relative/path/to/plugin.py'
# )
# Git
# PluginLocation(
#     storage_format='git',
#     Exact format TBD.
#     storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
# )
# PyPI
# PluginLocation(
#     storage_format='pypi',
#     storage_route='package_name'
# )


# PluginLocation(
#     storage_format='installed_package',
#     storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )


# A plugin storage route.
#
# This is a string that specifies where to load a plugin from
# (e.g. an import path or file path).
PluginStorageRoute = str


class PluginLocation(SystemConfiguration):
    """A plugin location.

    This is a combination of a plugin storage format and a plugin storage route.
    It is used by the PluginService to load plugins.

    """

    storage_format: PluginStorageFormat = UserConfigurable()
    storage_route: PluginStorageRoute = UserConfigurable()


class PluginMetadata(BaseModel):
    """Metadata about a plugin."""

    name: str
    description: str
    location: PluginLocation


class PluginService(abc.ABC):
    """Base class for plugin service.

    The plugin service should be stateless. This defines the interface for
    loading plugins from various storage formats.

    """

    @staticmethod
    @abc.abstractmethod
    def get_plugin(plugin_location: PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        ...

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        ...

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    @abc.abstractmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        ...
@@ -1,74 +0,0 @@
from importlib import import_module
from typing import TYPE_CHECKING

from autogpt.core.plugin.base import (
    PluginLocation,
    PluginService,
    PluginStorageFormat,
    PluginStorageRoute,
)

if TYPE_CHECKING:
    from autogpt.core.plugin.base import PluginType


class SimplePluginService(PluginService):
    @staticmethod
    def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        if isinstance(plugin_location, dict):
            plugin_location = PluginLocation.parse_obj(plugin_location)
        if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
            return SimplePluginService.load_from_workspace(
                plugin_location.storage_route
            )
        elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
            return SimplePluginService.load_from_installed_package(
                plugin_location.storage_route
            )
        else:
            raise NotImplementedError(
                f"Plugin storage format {plugin_location.storage_format} is not implemented."
            )

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        # TODO: Define an on disk storage format and implement this.
        #   Can pull from existing zip file loading implementation
        raise NotImplementedError("Loading from file path is not implemented.")

    @staticmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        module_path, _, class_name = plugin_route.rpartition(".")
        return getattr(import_module(module_path), class_name)

    @staticmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        # TODO: Implement a discovery system for finding plugins by name from known
        #   storage locations. E.g. if we know that path_type is a file path, we can
        #   search the workspace for it. If it's an import path, we can check the core
        #   system and the auto_gpt_plugins package.
        raise NotImplementedError("Resolving plugin name to path is not implemented.")

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        plugin = SimplePluginService.load_from_file_path(plugin_route)
        return plugin

    @staticmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        plugin = SimplePluginService.load_from_import_path(plugin_route)
        return plugin
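As a usage sketch, loading a plugin class by import path; the Twitter plugin route is taken from the example comments in plugin/base.py above and may not exist in a given install:

```python
from autogpt.core.plugin.base import PluginLocation, PluginStorageFormat
from autogpt.core.plugin.simple import SimplePluginService

location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt_plugins.twitter.SendTwitterMessage",  # example route
)
# get_plugin dispatches on storage_format; for INSTALLED_PACKAGE it imports
# "autogpt_plugins.twitter" and returns its SendTwitterMessage attribute.
plugin_class = SimplePluginService.get_plugin(location)
```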
@@ -1,7 +0,0 @@
from autogpt.core.resource.schema import (
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)
@@ -1,44 +0,0 @@
from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_MODELS,
    OpenAIModelName,
    OpenAIProvider,
    OpenAISettings,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    MessageRole,
    ModelProvider,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderModelInfo,
    ModelProviderModelResponse,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

__all__ = [
    "ModelProvider",
    "ModelProviderName",
    "ModelProviderSettings",
    "EmbeddingModelProvider",
    "EmbeddingModelProviderModelResponse",
    "LanguageModelProvider",
    "LanguageModelProviderModelResponse",
    "LanguageModelFunction",
    "LanguageModelMessage",
    "MessageRole",
    "OpenAIModelName",
    "OPEN_AI_MODELS",
    "OpenAIProvider",
    "OpenAISettings",
]
@@ -1,373 +0,0 @@
import enum
import functools
import logging
import math
import time
from typing import Callable, ParamSpec, TypeVar

import openai
from openai.error import APIError, RateLimitError

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    UserConfigurable,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
OpenAIChatParser = Callable[[str], dict]


class OpenAIModelName(str, enum.Enum):
    ADA = "text-embedding-ada-002"
    GPT3 = "gpt-3.5-turbo-0613"
    GPT3_16K = "gpt-3.5-turbo-16k-0613"
    GPT4 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k-0613"


OPEN_AI_EMBEDDING_MODELS = {
    OpenAIModelName.ADA: EmbeddingModelProviderModelInfo(
        name=OpenAIModelName.ADA,
        service=ModelProviderService.EMBEDDING,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0004,
        completion_token_cost=0.0,
        max_tokens=8191,
        embedding_dimensions=1536,
    ),
}
OPEN_AI_LANGUAGE_MODELS = {
    OpenAIModelName.GPT3: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0015,
        completion_token_cost=0.002,
        max_tokens=4096,
    ),
    OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3_16K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.003,
        completion_token_cost=0.002,
        max_tokens=16384,
    ),
    OpenAIModelName.GPT4: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.03,
        completion_token_cost=0.06,
        max_tokens=8192,
    ),
    OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4_32K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.06,
        completion_token_cost=0.12,
        max_tokens=32768,
    ),
}


OPEN_AI_MODELS = {
    **OPEN_AI_LANGUAGE_MODELS,
    **OPEN_AI_EMBEDDING_MODELS,
}
class OpenAIConfiguration(SystemConfiguration):
    retries_per_request: int = UserConfigurable()


class OpenAIModelProviderBudget(ModelProviderBudget):
    graceful_shutdown_threshold: float = UserConfigurable()
    warning_threshold: float = UserConfigurable()


class OpenAISettings(ModelProviderSettings):
    configuration: OpenAIConfiguration
    credentials: ModelProviderCredentials
    budget: OpenAIModelProviderBudget
class OpenAIProvider(
    Configurable,
    LanguageModelProvider,
    EmbeddingModelProvider,
):
    default_settings = OpenAISettings(
        name="openai_provider",
        description="Provides access to OpenAI's API.",
        configuration=OpenAIConfiguration(
            retries_per_request=10,
        ),
        credentials=ModelProviderCredentials(),
        budget=OpenAIModelProviderBudget(
            total_budget=math.inf,
            total_cost=0.0,
            remaining_budget=math.inf,
            usage=ModelProviderUsage(
                prompt_tokens=0,
                completion_tokens=0,
                total_tokens=0,
            ),
            graceful_shutdown_threshold=0.005,
            warning_threshold=0.01,
        ),
    )

    def __init__(
        self,
        settings: OpenAISettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._credentials = settings.credentials
        self._budget = settings.budget

        self._logger = logger

        retry_handler = _OpenAIRetryHandler(
            logger=self._logger,
            num_retries=self._configuration.retries_per_request,
        )

        self._create_completion = retry_handler(_create_completion)
        self._create_embedding = retry_handler(_create_embedding)

    def get_token_limit(self, model_name: str) -> int:
        """Get the token limit for a given model."""
        return OPEN_AI_MODELS[model_name].max_tokens

    def get_remaining_budget(self) -> float:
        """Get the remaining budget."""
        return self._budget.remaining_budget

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: OpenAIModelName,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        """Create a completion using the OpenAI API."""
        completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
        response = await self._create_completion(
            messages=model_prompt,
            **completion_kwargs,
        )
        response_args = {
            "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }

        parsed_response = completion_parser(
            response.choices[0].message.to_dict_recursive()
        )
        response = LanguageModelProviderModelResponse(
            content=parsed_response, **response_args
        )
        self._budget.update_usage_and_cost(response)
        return response

    async def create_embedding(
        self,
        text: str,
        model_name: OpenAIModelName,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        """Create an embedding using the OpenAI API."""
        embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs)
        response = await self._create_embedding(text=text, **embedding_kwargs)

        response_args = {
            "model_info": OPEN_AI_EMBEDDING_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }
        response = EmbeddingModelProviderModelResponse(
            **response_args,
            embedding=embedding_parser(response.embeddings[0]),
        )
        self._budget.update_usage_and_cost(response)
        return response

    def _get_completion_kwargs(
        self,
        model_name: OpenAIModelName,
        functions: list[LanguageModelFunction],
        **kwargs,
    ) -> dict:
        """Get kwargs for completion API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the chat API call.

        """
        completion_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }
        if functions:
            completion_kwargs["functions"] = functions

        return completion_kwargs

    def _get_embedding_kwargs(
        self,
        model_name: OpenAIModelName,
        **kwargs,
    ) -> dict:
        """Get kwargs for embedding API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the embedding API call.

        """
        embedding_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }

        return embedding_kwargs

    def __repr__(self):
        return "OpenAIProvider()"


async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
    """Embed text using the OpenAI API.

    Args:
        text str: The text to embed.
        model_name str: The name of the model to use.

    Returns:
        str: The embedding.
    """
    return await openai.Embedding.acreate(
        input=[text],
        **kwargs,
    )


async def _create_completion(
    messages: list[LanguageModelMessage], *_, **kwargs
) -> openai.Completion:
    """Create a chat completion using the OpenAI API.

    Args:
        messages: The prompt to use.

    Returns:
        The completion.

    """
    messages = [message.dict() for message in messages]
    if "functions" in kwargs:
        kwargs["functions"] = [function.json_schema for function in kwargs["functions"]]
    return await openai.ChatCompletion.acreate(
        messages=messages,
        **kwargs,
    )


_T = TypeVar("_T")
_P = ParamSpec("_P")


class _OpenAIRetryHandler:
    """Retry Handler for OpenAI API call.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """

    _retry_limit_msg = "Error: Reached rate limit, passing..."
    _api_key_error_msg = (
        "Please double check that you have setup a PAID OpenAI API Account. You can "
        "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
    )
    _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."

    def __init__(
        self,
        logger: logging.Logger,
        num_retries: int = 10,
        backoff_base: float = 2.0,
        warn_user: bool = True,
    ):
        self._logger = logger
        self._num_retries = num_retries
        self._backoff_base = backoff_base
        self._warn_user = warn_user

    def _log_rate_limit_error(self) -> None:
        self._logger.debug(self._retry_limit_msg)
        if self._warn_user:
            self._logger.warning(self._api_key_error_msg)
            self._warn_user = False

    def _backoff(self, attempt: int) -> None:
        backoff = self._backoff_base ** (attempt + 2)
        self._logger.debug(self._backoff_msg.format(backoff=backoff))
        time.sleep(backoff)

    def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
        @functools.wraps(func)
        async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            num_attempts = self._num_retries + 1  # +1 for the first attempt
            for attempt in range(1, num_attempts + 1):
                try:
                    return await func(*args, **kwargs)

                except RateLimitError:
                    if attempt == num_attempts:
                        raise
                    self._log_rate_limit_error()

                except APIError as e:
                    if (e.http_status != 502) or (attempt == num_attempts):
                        raise

                self._backoff(attempt)

        return _wrapped
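For intuition about the retry schedule: `_backoff` waits `backoff_base ** (attempt + 2)` seconds, so with the default base of 2.0 the waits after successive failed attempts are 8, 16, 32, ... seconds. The same arithmetic in isolation:

```python
backoff_base = 2.0  # default from _OpenAIRetryHandler
for attempt in range(1, 4):
    print(f"attempt {attempt}: wait {backoff_base ** (attempt + 2):.0f}s")
# attempt 1: wait 8s
# attempt 2: wait 16s
# attempt 3: wait 32s
```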
@@ -1,219 +0,0 @@
import abc
import enum
from typing import Callable, ClassVar

from pydantic import BaseModel, Field, SecretStr, validator

from autogpt.core.configuration import UserConfigurable
from autogpt.core.resource.schema import (
    Embedding,
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)


class ModelProviderService(str, enum.Enum):
    """A ModelService describes what kind of service the model provides."""

    EMBEDDING: str = "embedding"
    LANGUAGE: str = "language"
    TEXT: str = "text"


class ModelProviderName(str, enum.Enum):
    OPENAI: str = "openai"


class MessageRole(str, enum.Enum):
    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"


class LanguageModelMessage(BaseModel):
    role: MessageRole
    content: str


class LanguageModelFunction(BaseModel):
    json_schema: dict


class ModelProviderModelInfo(BaseModel):
    """Struct for model information.

    Would be lovely to eventually get this directly from APIs, but needs to be
    scraped from websites for now.

    """

    name: str
    service: ModelProviderService
    provider_name: ModelProviderName
    prompt_token_cost: float = 0.0
    completion_token_cost: float = 0.0


class ModelProviderModelResponse(BaseModel):
    """Standard response struct for a response from a model."""

    prompt_tokens_used: int
    completion_tokens_used: int
    model_info: ModelProviderModelInfo


class ModelProviderCredentials(ProviderCredentials):
    """Credentials for a model provider."""

    api_key: SecretStr | None = UserConfigurable(default=None)
    api_type: SecretStr | None = UserConfigurable(default=None)
    api_base: SecretStr | None = UserConfigurable(default=None)
    api_version: SecretStr | None = UserConfigurable(default=None)
    deployment_id: SecretStr | None = UserConfigurable(default=None)

    def unmasked(self) -> dict:
        return unmask(self)

    class Config:
        extra = "ignore"


def unmask(model: BaseModel):
    unmasked_fields = {}
    for field_name, field in model.__fields__.items():
        value = getattr(model, field_name)
        if isinstance(value, SecretStr):
            unmasked_fields[field_name] = value.get_secret_value()
        else:
            unmasked_fields[field_name] = value
    return unmasked_fields


class ModelProviderUsage(ProviderUsage):
    """Usage for a particular model from a model provider."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens_used
        self.prompt_tokens += model_response.prompt_tokens_used
        self.total_tokens += (
            model_response.completion_tokens_used + model_response.prompt_tokens_used
        )


class ModelProviderBudget(ProviderBudget):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        model_info = model_response.model_info
        self.usage.update_usage(model_response)
        incremental_cost = (
            model_response.completion_tokens_used * model_info.completion_token_cost
            + model_response.prompt_tokens_used * model_info.prompt_token_cost
        ) / 1000.0
        self.total_cost += incremental_cost
        self.remaining_budget -= incremental_cost


class ModelProviderSettings(ProviderSettings):
    resource_type = ResourceType.MODEL
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget


class ModelProvider(abc.ABC):
    """A ModelProvider abstracts the details of a particular provider of models."""

    defaults: ClassVar[ModelProviderSettings]

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int:
        ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float:
        ...


####################
# Embedding Models #
####################


class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for embedding model information."""

    model_service = ModelProviderService.EMBEDDING
    embedding_dimensions: int


class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: Embedding = Field(default_factory=list)
    @validator("completion_tokens_used")
    @classmethod
    def _verify_no_completion_tokens_used(cls, v):
        if v > 0:
            raise ValueError("Embeddings should not have completion tokens used.")
        return v


class EmbeddingModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_embedding(
        self,
        text: str,
        model_name: str,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        ...


###################
# Language Models #
###################


class LanguageModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for language model information."""

    model_service = ModelProviderService.LANGUAGE
    max_tokens: int


class LanguageModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from a language model."""

    content: dict = None


class LanguageModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        ...
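To make the budget arithmetic concrete: `update_usage_and_cost` charges per 1,000 tokens, so with the GPT-4 table entry above (prompt 0.03, completion 0.06) a hypothetical call that used 1,000 prompt tokens and 500 completion tokens costs $0.06:

```python
prompt_tokens_used, completion_tokens_used = 1000, 500   # hypothetical call
prompt_token_cost, completion_token_cost = 0.03, 0.06    # GPT-4 entry above

# Same formula as ModelProviderBudget.update_usage_and_cost.
incremental_cost = (
    completion_tokens_used * completion_token_cost
    + prompt_tokens_used * prompt_token_cost
) / 1000.0
print(incremental_cost)  # 0.06
```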
@@ -1,57 +0,0 @@
import abc
import enum

from pydantic import SecretBytes, SecretField, SecretStr

from autogpt.core.configuration import (
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)


class ResourceType(str, enum.Enum):
    """An enumeration of resource types."""

    MODEL = "model"
    MEMORY = "memory"


class ProviderUsage(SystemConfiguration, abc.ABC):
    @abc.abstractmethod
    def update_usage(self, *args, **kwargs) -> None:
        """Update the usage of the resource."""
        ...


class ProviderBudget(SystemConfiguration):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ProviderUsage

    @abc.abstractmethod
    def update_usage_and_cost(self, *args, **kwargs) -> None:
        """Update the usage and cost of the resource."""
        ...


class ProviderCredentials(SystemConfiguration):
    """Struct for credentials."""

    class Config:
        json_encoders = {
            SecretStr: lambda v: v.get_secret_value() if v else None,
            SecretBytes: lambda v: v.get_secret_value() if v else None,
            SecretField: lambda v: v.get_secret_value() if v else None,
        }


class ProviderSettings(SystemSettings):
    resource_type: ResourceType
    credentials: ProviderCredentials | None = None
    budget: ProviderBudget | None = None


# Used both by model providers and memory providers
Embedding = list[float]
@@ -1,3 +0,0 @@
"""
This module contains the runner for the v2 agent server and client.
"""
@@ -1,47 +0,0 @@
from pathlib import Path

import click
import yaml

from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@click.option(
    "--pdb",
    is_flag=True,
    help="Drop into a debugger if an error is raised.",
)
@coroutine
async def run(settings_file: str, pdb: bool) -> None:
    """Run the Auto-GPT agent."""
    click.echo("Running Auto-GPT agent...")
    settings_file = Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
    await main(settings)


if __name__ == "__main__":
    autogpt()
@@ -1,110 +0,0 @@
import click

from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import get_client_logger


async def run_auto_gpt(user_configuration: dict):
    """Run the Auto-GPT CLI client."""

    client_logger = get_client_logger()
    client_logger.debug("Getting agent settings")

    agent_workspace = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    if not agent_workspace:  # We don't have an agent yet.
        #################
        # Bootstrapping #
        #################
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            client_logger,
            user_configuration,
        )

        # Step 2. Get a name and goals for the agent.
        # First we need to figure out what the user wants to do with the agent.
        # We'll do this by asking the user for a prompt.
        user_objective = click.prompt("What do you want Auto-GPT to do?")
        # Ask a language model to determine a name and goals for a suitable agent.
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            client_logger,
        )
        print(parse_agent_name_and_goals(name_and_goals))
        # Finally, update the agent settings with the name and goals.
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
        print("agent is provisioned")

    # launch agent interaction loop
    agent = SimpleAgent.from_workspace(
        agent_workspace,
        client_logger,
    )
    print("agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))


def parse_agent_name_and_goals(name_and_goals: dict) -> str:
    parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n"
    parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n"
    parsed_response += "Agent Goals:\n"
    for i, goal in enumerate(name_and_goals["agent_goals"]):
        parsed_response += f"{i+1}. {goal}\n"
    return parsed_response


def parse_agent_plan(plan: dict) -> str:
    parsed_response = "Agent Plan:\n"
    for i, task in enumerate(plan["task_list"]):
        parsed_response += f"{i+1}. {task['objective']}\n"
        parsed_response += f"Task type: {task['type']} "
        parsed_response += f"Priority: {task['priority']}\n"
        parsed_response += "Ready Criteria:\n"
        for j, criteria in enumerate(task["ready_criteria"]):
            parsed_response += f"  {j+1}. {criteria}\n"
        parsed_response += "Acceptance Criteria:\n"
        for j, criteria in enumerate(task["acceptance_criteria"]):
            parsed_response += f"  {j+1}. {criteria}\n"
        parsed_response += "\n"

    return parsed_response


def parse_next_ability(current_task, next_ability: dict) -> str:
    parsed_response = f"Current Task: {current_task.objective}\n"
    ability_args = ", ".join(
        f"{k}={v}" for k, v in next_ability["ability_arguments"].items()
    )
    parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n"
    parsed_response += f"Motivation: {next_ability['motivation']}\n"
    parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n"
    parsed_response += f"Reasoning: {next_ability['reasoning']}\n"
    return parsed_response


def parse_ability_result(ability_result) -> str:
    parsed_response = f"Ability: {ability_result['ability_name']}\n"
    parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
    parsed_response += f"Ability Result: {ability_result['success']}\n"
    parsed_response += f"Message: {ability_result['message']}\n"
    parsed_response += f"Data: {ability_result['new_knowledge']}\n"
    return parsed_response
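For example, a hypothetical name_and_goals payload renders through `parse_agent_name_and_goals` as:

```python
print(parse_agent_name_and_goals({
    "agent_name": "ArticleGPT",                       # hypothetical values
    "agent_role": "an AI that writes wiki articles",
    "agent_goals": ["Research the topic", "Write the article"],
}))
# Agent Name: ArticleGPT
# Agent Role: an AI that writes wiki articles
# Agent Goals:
# 1. Research the topic
# 2. Write the article
```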
@@ -1,101 +0,0 @@
import contextlib
import pathlib
import shlex
import subprocess
import sys
import time

import click
import requests
import uvicorn
import yaml

from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
    status,
)
from autogpt.core.runner.client_lib.utils import coroutine


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)
autogpt.add_command(status)


@autogpt.command()
@click.option(
    "host",
    "--host",
    default="localhost",
    help="The host for the webserver.",
    type=click.STRING,
)
@click.option(
    "port",
    "--port",
    default=8080,
    help="The port of the webserver.",
    type=click.INT,
)
def server(host: str, port: int) -> None:
    """Run the Auto-GPT runner httpserver."""
    click.echo("Running Auto-GPT runner httpserver...")
    uvicorn.run(
        "autogpt.core.runner.cli_web_app.server.api:app",
        workers=1,
        host=host,
        port=port,
        reload=True,
    )


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@coroutine
async def client(settings_file) -> None:
    """Run the Auto-GPT runner client."""
    settings_file = pathlib.Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())

    from autogpt.core.runner.cli_web_app.client.client import run

    with autogpt_server():
        run()


@contextlib.contextmanager
def autogpt_server():
    host = "localhost"
    port = 8080
    cmd = shlex.split(
        f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}"
    )
    server_process = subprocess.Popen(
        args=cmd,
    )
    started = False

    while not started:
        try:
            requests.get(f"http://{host}:{port}")
            started = True
        except requests.exceptions.ConnectionError:
            time.sleep(0.2)
    yield server_process
    server_process.terminate()


if __name__ == "__main__":
    autogpt()
@@ -1,16 +0,0 @@
import json

import requests


def run():
    body = json.dumps(
        {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]}
    )

    header = {"Content-Type": "application/json", "openai_api_key": "asdf"}
    print("Sending: ", header, body)
    response = requests.post(
        "http://localhost:8080/api/v1/agents", data=body, headers=header
    )
    print(response.content.decode("utf-8"))
@@ -1,48 +0,0 @@
import uuid

from fastapi import APIRouter, FastAPI, Request

from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody

router = APIRouter()


@router.post("/agents")
async def create_agent(request: Request):
    """Create a new agent."""
    agent_id = uuid.uuid4().hex
    return {"agent_id": agent_id}


@router.post("/agents/{agent_id}")
async def interact(request: Request, agent_id: str, body: InteractRequestBody):
    """Interact with an agent."""

    # check headers

    # check if agent_id exists

    # get agent object from somewhere, e.g. a database/disk/global dict

    # continue agent interaction with user input

    return {
        "thoughts": {
            "thoughts": {
                "text": "text",
                "reasoning": "reasoning",
                "plan": "plan",
                "criticism": "criticism",
                "speak": "speak",
            },
            "commands": {
                "name": "name",
                "args": {"arg_1": "value_1", "arg_2": "value_2"},
            },
        },
        "messages": ["message1", agent_id],
    }


app = FastAPI()
app.include_router(router, prefix="/api/v1")
@@ -1,36 +0,0 @@
from uuid import UUID

from pydantic import BaseModel, validator


class AgentInfo(BaseModel):
    id: UUID = None
    objective: str = ""
    name: str = ""
    role: str = ""
    goals: list[str] = []


class AgentConfiguration(BaseModel):
    """Configuration for creation of a new agent."""

    # We'll want to get this schema from the configuration, so it needs to be dynamic.
    user_configuration: dict
    agent_goals: AgentInfo

    @validator("agent_goals")
    def only_objective_or_name_role_goals(cls, agent_goals):
        goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals]
        if agent_goals.objective and any(goals_specification):
            raise ValueError("Cannot specify both objective and name, role, or goals")
        if not agent_goals.objective and not all(goals_specification):
            raise ValueError("Must specify either objective or name, role, and goals")
        return agent_goals


class InteractRequestBody(BaseModel):
    user_input: str = ""


class InteractResponseBody(BaseModel):
    thoughts: dict[str, str]  # TBD
    messages: list[str]  # for example
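The validator enforces an either/or: an objective alone, or the full name/role/goals specification. A hedged sketch of both sides (field values are made up):

```python
from autogpt.core.runner.cli_web_app.server.schema import AgentConfiguration, AgentInfo

# Passes: an objective alone.
AgentConfiguration(
    user_configuration={},
    agent_goals=AgentInfo(objective="Write an article"),
)

# Raises ValueError: objective combined with a name.
AgentConfiguration(
    user_configuration={},
    agent_goals=AgentInfo(objective="Write an article", name="ArticleBot"),
)
```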
@@ -1,20 +0,0 @@
import uuid

from fastapi import Request


class UserService:
    def __init__(self):
        self.users = {}

    def get_user_id(self, request: Request) -> uuid.UUID:
        # TODO: something real. I don't know how this works.
        hostname = request.client.host
        port = request.client.port
        user = f"{hostname}:{port}"
        if user not in self.users:
            self.users[user] = uuid.uuid4()
        return self.users[user]


USER_SERVICE = UserService()
@@ -1,20 +0,0 @@
import logging


def get_client_logger():
    # Configure logging before we do anything else.
    # Application logs need a place to live.
    client_logger = logging.getLogger("autogpt_client_application")
    client_logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )

    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    client_logger.addHandler(ch)

    return client_logger
@@ -1,14 +0,0 @@
from pathlib import Path

import yaml

from autogpt.core.agent import SimpleAgent


def make_user_configuration(settings_file_path: Path):
    user_configuration = SimpleAgent.build_user_configuration()

    settings_file_path.parent.mkdir(parents=True, exist_ok=True)
    print("Writing settings to", settings_file_path)
    with settings_file_path.open("w") as f:
        yaml.safe_dump(user_configuration, f)
@@ -1,19 +0,0 @@
import pathlib

import click

DEFAULT_SETTINGS_FILE = str(
    pathlib.Path("~/auto-gpt/default_agent_settings.yml").expanduser()
)


@click.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
def make_settings(settings_file: str) -> None:
    from autogpt.core.runner.client_lib.settings import make_user_configuration

    make_user_configuration(pathlib.Path(settings_file))
@@ -1,61 +0,0 @@
import asyncio
import functools
from bdb import BdbQuit
from typing import Callable, ParamSpec, TypeVar

import click

P = ParamSpec("P")
T = TypeVar("T")


def handle_exceptions(
    application_main: Callable[P, T],
    with_debugger: bool,
) -> Callable[P, T]:
    """Wrap a function so that it drops the user into a debugger if it raises an error.

    This is intended to wrap the main function of a CLI application. It catches all
    errors: a KeyboardInterrupt (or similar user-initiated abort) is always re-raised,
    while any other exception drops the user into a post-mortem debugger when
    with_debugger is True, and is re-raised when with_debugger is False.

    Parameters
    ----------
    application_main
        The function to wrap.
    with_debugger
        Whether to drop a user into a debugger if an error is raised.

    Returns
    -------
    Callable
        The wrapped function.

    """

    @functools.wraps(application_main)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
        try:
            return await application_main(*args, **kwargs)
        except (BdbQuit, KeyboardInterrupt, click.Abort):
            raise
        except Exception as e:
            if with_debugger:
                print(f"Uncaught exception {e}")
                import pdb

                pdb.post_mortem()
            else:
                raise

    return wrapped


def coroutine(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))

    return wrapper
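Both helpers are used by the CLI entry points above; a condensed sketch of the wiring (the command body is illustrative, mirroring cli.py):

```python
import click

from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions


@click.command()
@coroutine
async def demo() -> None:
    # coroutine() lets click invoke an async command via asyncio.run();
    # handle_exceptions() re-raises user aborts and optionally drops into pdb.
    main = handle_exceptions(run_auto_gpt, with_debugger=False)
    await main({})
```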
@@ -1,3 +0,0 @@
"""The workspace is the central hub for the Agent's on disk resources."""
from autogpt.core.workspace.base import Workspace
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
@@ -1,70 +0,0 @@
from __future__ import annotations

import abc
import logging
import typing
from pathlib import Path

if typing.TYPE_CHECKING:
    from autogpt.core.configuration import AgentConfiguration


class Workspace(abc.ABC):
    """The workspace is the root directory for all generated files.

    The workspace is responsible for creating the root directory and
    providing a method for getting the full path to an item in the
    workspace.

    """

    @property
    @abc.abstractmethod
    def root(self) -> Path:
        """The root directory of the workspace."""
        ...

    @property
    @abc.abstractmethod
    def restrict_to_workspace(self) -> bool:
        """Whether to restrict generated paths to the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def setup_workspace(
        configuration: AgentConfiguration, logger: logging.Logger
    ) -> Path:
        """Create the workspace root directory and set up all initial content.

        Parameters
        ----------
        configuration
            The Agent's configuration.
        logger
            The Agent's logger.

        Returns
        -------
        Path
            The path to the workspace root directory.

        """
        ...

    @abc.abstractmethod
    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters
        ----------
        relative_path
            The path to the item relative to the workspace root.

        Returns
        -------
        Path
            The full path to the item.

        """
        ...
@@ -1,193 +0,0 @@
import json
import logging
import typing
from pathlib import Path

from pydantic import SecretField

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.workspace.base import Workspace

if typing.TYPE_CHECKING:
    # Cyclic import
    from autogpt.core.agent.simple import AgentSettings


class WorkspaceConfiguration(SystemConfiguration):
    root: str
    parent: str = UserConfigurable()
    restrict_to_workspace: bool = UserConfigurable()


class WorkspaceSettings(SystemSettings):
    configuration: WorkspaceConfiguration


class SimpleWorkspace(Configurable, Workspace):
    default_settings = WorkspaceSettings(
        name="workspace",
        description="The workspace is the root directory for all agent activity.",
        configuration=WorkspaceConfiguration(
            root="",
            parent="~/auto-gpt/agents",
            restrict_to_workspace=True,
        ),
    )

    NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"]

    def __init__(
        self,
        settings: WorkspaceSettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._logger = logger.getChild("workspace")

    @property
    def root(self) -> Path:
        return Path(self._configuration.root)

    @property
    def debug_log_path(self) -> Path:
        return self.root / "logs" / "debug.log"

    @property
    def cycle_log_path(self) -> Path:
        return self.root / "logs" / "cycle.log"

    @property
    def configuration_path(self) -> Path:
        return self.root / "configuration.yml"

    @property
    def restrict_to_workspace(self) -> bool:
        return self._configuration.restrict_to_workspace

    def get_path(self, relative_path: str | Path) -> Path:
        """Get the full path for an item in the workspace.

        Parameters
        ----------
        relative_path
            The relative path to resolve in the workspace.

        Returns
        -------
        Path
            The resolved path relative to the workspace.

        """
        return self._sanitize_path(
            relative_path,
            root=self.root,
            restrict_to_root=self.restrict_to_workspace,
        )

    def _sanitize_path(
        self,
        relative_path: str | Path,
        root: str | Path = None,
        restrict_to_root: bool = True,
    ) -> Path:
        """Resolve the relative path within the given root if possible.

        Parameters
        ----------
        relative_path
            The relative path to resolve.
        root
            The root path to resolve the relative path within.
        restrict_to_root
            Whether to restrict the path to the root.

        Returns
        -------
        Path
            The resolved path.

        Raises
        ------
        ValueError
            If the path is absolute and a root is provided.
        ValueError
            If the path is outside the root and the root is restricted.

        """

        # Posix systems disallow null bytes in paths. Windows is agnostic about it.
        # Do an explicit check here for all sorts of null byte representations.

        for null_byte in self.NULL_BYTES:
            if null_byte in str(relative_path) or null_byte in str(root):
                raise ValueError("embedded null byte")

        if root is None:
            return Path(relative_path).resolve()

        self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
        root, relative_path = Path(root).resolve(), Path(relative_path)
        self._logger.debug(f"Resolved root as '{root}'")

        if relative_path.is_absolute():
            raise ValueError(
                f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
            )
        full_path = root.joinpath(relative_path).resolve()

        self._logger.debug(f"Joined paths as '{full_path}'")

        if restrict_to_root and not full_path.is_relative_to(root):
            raise ValueError(
                f"Attempted to access path '{full_path}' outside of workspace '{root}'."
            )

        return full_path

    ###################################
    # Factory methods for agent setup #
    ###################################

    @staticmethod
    def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path:
        workspace_parent = settings.workspace.configuration.parent
        workspace_parent = Path(workspace_parent).expanduser().resolve()
        workspace_parent.mkdir(parents=True, exist_ok=True)

        agent_name = settings.agent.name

        workspace_root = workspace_parent / agent_name
        workspace_root.mkdir(parents=True, exist_ok=True)

        settings.workspace.configuration.root = str(workspace_root)

        with (workspace_root / "agent_settings.json").open("w") as f:
            settings_json = settings.json(
                encoder=lambda x: x.get_secret_value()
                if isinstance(x, SecretField)
                else x,
            )
            f.write(settings_json)

        # TODO: What are all the kinds of logs we want here?
        log_path = workspace_root / "logs"
        log_path.mkdir(parents=True, exist_ok=True)
        (log_path / "debug.log").touch()
        (log_path / "cycle.log").touch()

        return workspace_root

    @staticmethod
    def load_agent_settings(workspace_root: Path) -> "AgentSettings":
        # Cyclic import
        from autogpt.core.agent.simple import AgentSettings

        with (workspace_root / "agent_settings.json").open("r") as f:
            agent_settings = json.load(f)

        return AgentSettings.parse_obj(agent_settings)
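A short sketch of how get_path confines file access to the workspace; the paths are hypothetical, and a configured `settings` object is assumed:

```python
import logging

from autogpt.core.workspace.simple import SimpleWorkspace

workspace = SimpleWorkspace(settings, logging.getLogger(__name__))  # `settings` assumed

workspace.get_path("notes/draft.txt")  # -> <workspace root>/notes/draft.txt
workspace.get_path("../escape.txt")    # raises ValueError: resolves outside the root
workspace.get_path("/etc/passwd")      # raises ValueError: absolute paths are rejected
```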
Some files were not shown because too many files have changed in this diff.