Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)
Compare commits: summary_me...v0.4.6 (424 commits)
.devcontainer/devcontainer.json

@@ -7,11 +7,12 @@
         "ghcr.io/devcontainers/features/common-utils:2": {
             "installZsh": "true",
             "username": "vscode",
-            "userUid": "6942",
-            "userGid": "6942",
+            "userUid": "1000",
+            "userGid": "1000",
             "upgradePackages": "true"
         },
+        "ghcr.io/devcontainers/features/desktop-lite:1": {},
         "ghcr.io/devcontainers/features/github-cli:1": {},
         "ghcr.io/devcontainers/features/python:1": "none",
         "ghcr.io/devcontainers/features/node:1": "none",
         "ghcr.io/devcontainers/features/git:1": {
@@ -25,8 +26,20 @@
         "vscode": {
             // Set *default* container specific settings.json values on container create.
             "settings": {
-                "python.defaultInterpreterPath": "/usr/local/bin/python"
-            }
+                "python.defaultInterpreterPath": "/usr/local/bin/python",
+                "python.testing.pytestEnabled": true,
+                "python.testing.unittestEnabled": false
+            },
+            "extensions": [
+                "ms-python.python",
+                "VisualStudioExptTeam.vscodeintellicode",
+                "ms-python.vscode-pylance",
+                "ms-python.black-formatter",
+                "ms-python.isort",
+                "GitHub.vscode-pull-request-github",
+                "GitHub.copilot",
+                "github.vscode-github-actions"
+            ]
         }
     },
     // Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -36,5 +49,8 @@
     // "postCreateCommand": "pip3 install --user -r requirements.txt",
 
     // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
-    "remoteUser": "vscode"
-}
+    "remoteUser": "vscode",
+
+    // Add the freshly containerized repo to the list of safe repositories
+    "postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
+}
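The added `postCreateCommand` does two things: it registers the bind-mounted repository as a safe git directory (git refuses to operate on directories with mismatched file ownership, which is common with container bind mounts) and installs the Python dependencies. A minimal sketch of the equivalent manual steps inside the container, assuming the workspace is mounted at /workspace/Auto-GPT as configured above:

```sh
# Mark the bind-mounted repo as safe so git commands work despite mismatched ownership
git config --global --add safe.directory /workspace/Auto-GPT

# Install the project's Python dependencies for the container user
pip3 install --user -r requirements.txt
```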
.devcontainer/docker-compose.yml

@@ -4,16 +4,9 @@ version: '3.9'
 
 services:
   auto-gpt:
-    depends_on:
-      - redis
     build:
       dockerfile: .devcontainer/Dockerfile
       context: ../
     tty: true
-    environment:
-      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
-      REDIS_HOST: ${REDIS_HOST:-redis}
     volumes:
       - ../:/workspace/Auto-GPT
-  redis:
-    image: 'redis/redis-stack-server:latest'
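With the Redis service and its environment variables removed, the dev container is a single service that just mounts the repository. If you want to exercise the compose file outside of the VS Code Dev Containers workflow, a sketch of a manual invocation (docker compose v2 syntax assumed; paths are resolved relative to the compose file):

```sh
# Build the dev container image defined in .devcontainer/Dockerfile
docker compose -f .devcontainer/docker-compose.yml build auto-gpt

# Start it in the background; tty: true keeps the container alive
docker compose -f .devcontainer/docker-compose.yml up -d auto-gpt
```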
.dockerignore

@@ -2,6 +2,7 @@
 *.template
 *.yaml
 *.yml
+!prompt_settings.yaml
 
 *.md
 *.png
256 .env.template

@@ -1,214 +1,212 @@
+# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co
+
 ################################################################################
 ### AUTO-GPT - GENERAL SETTINGS
 ################################################################################
 
+## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
+OPENAI_API_KEY=your-openai-api-key
+
 ## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
-## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
 # EXECUTE_LOCAL_COMMANDS=False
+
+## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
 # RESTRICT_TO_WORKSPACE=True
 
 ## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
 # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
 
-## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
+## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
 # AI_SETTINGS_FILE=ai_settings.yaml
 
+## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
+# PLUGINS_CONFIG_FILE=plugins_config.yaml
+
 ## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
 # PROMPT_SETTINGS_FILE=prompt_settings.yaml
 
 ## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
 # the following is an example:
 # OPENAI_API_BASE_URL=http://localhost:443/v1
 
+## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
+## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
+# OPENAI_FUNCTIONS=False
+
 ## AUTHORISE COMMAND KEY - Key to authorise commands
 # AUTHORISE_COMMAND_KEY=y
 
 ## EXIT_KEY - Key to exit AUTO-GPT
 # EXIT_KEY=n
 
 ## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
 # PLAIN_OUTPUT=False
 
 ## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
 # DISABLED_COMMAND_CATEGORIES=
 
 ################################################################################
 ### LLM PROVIDER
 ################################################################################
 
-### OPENAI
-## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
 ## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
-## USE_AZURE - Use Azure OpenAI or not (Default: False)
-OPENAI_API_KEY=your-openai-api-key
 # TEMPERATURE=0
 
+## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
+# OPENAI_ORGANIZATION=
+
+## USE_AZURE - Use Azure OpenAI or not (Default: False)
 # USE_AZURE=False
 
-### AZURE
-# moved to `azure.yaml.template`
+## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
+# AZURE_CONFIG_FILE=azure.yaml
 
 
 ################################################################################
 ### LLM MODELS
 ################################################################################
 
-## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
-## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
-# SMART_LLM_MODEL=gpt-4
-# FAST_LLM_MODEL=gpt-3.5-turbo
+## SMART_LLM - Smart language model (Default: gpt-4)
+# SMART_LLM=gpt-4
 
-### LLM MODEL SETTINGS
-## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
-## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
-## When using --gpt3only this needs to be set to 4000.
-# FAST_TOKEN_LIMIT=4000
-# SMART_TOKEN_LIMIT=8000
+## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
+# FAST_LLM=gpt-3.5-turbo
 
 ## EMBEDDING_MODEL - Model to use for creating embeddings
 # EMBEDDING_MODEL=text-embedding-ada-002
 
 ################################################################################
 ### SHELL EXECUTION
 ################################################################################
 
 ## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
 # SHELL_COMMAND_CONTROL=denylist
 
 ## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
 ## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
 # SHELL_DENYLIST=sudo,su
 
 ## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
 ## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
 # SHELL_ALLOWLIST=
 
 ################################################################################
 ### MEMORY
 ################################################################################
 
-### MEMORY_BACKEND - Memory backend type
-## local - Default
-## pinecone - Pinecone (if configured)
-## redis - Redis (if configured)
-## milvus - Milvus (if configured - also works with Zilliz)
-## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
-# MEMORY_BACKEND=local
+### General
+
+## MEMORY_BACKEND - Memory backend type
+# MEMORY_BACKEND=json_file
+
+## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
 # MEMORY_INDEX=auto-gpt
 
-### PINECONE
-## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
-## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
-# PINECONE_API_KEY=your-pinecone-api-key
-# PINECONE_ENV=your-pinecone-region
+### Redis
 
-### REDIS
 ## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
-## REDIS_PORT - Redis port (Default: 6379)
-## REDIS_PASSWORD - Redis password (Default: "")
-## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
 # REDIS_HOST=localhost
+
+## REDIS_PORT - Redis port (Default: 6379)
 # REDIS_PORT=6379
+
+## REDIS_PASSWORD - Redis password (Default: "")
 # REDIS_PASSWORD=
+
+## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
 # WIPE_REDIS_ON_START=True
 
-### WEAVIATE
-## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
-## WEAVIATE_HOST - Weaviate host IP
-## WEAVIATE_PORT - Weaviate host port
-## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
-## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
-## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
-## WEAVIATE_USERNAME - Weaviate username
-## WEAVIATE_PASSWORD - Weaviate password
-## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
-# WEAVIATE_HOST="127.0.0.1"
-# WEAVIATE_PORT=8080
-# WEAVIATE_PROTOCOL="http"
-# USE_WEAVIATE_EMBEDDED=False
-# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
-# WEAVIATE_USERNAME=
-# WEAVIATE_PASSWORD=
-# WEAVIATE_API_KEY=
-
-### MILVUS
-## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
-## MILVUS_USERNAME - username for your Milvus database
-## MILVUS_PASSWORD - password for your Milvus database
-## MILVUS_SECURE - True to enable TLS. (Default: False)
-## Setting MILVUS_ADDR to a `https://` URL will override this setting.
-## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
-# MILVUS_ADDR=localhost:19530
-# MILVUS_USERNAME=
-# MILVUS_PASSWORD=
-# MILVUS_SECURE=
-# MILVUS_COLLECTION=autogpt
-
 ################################################################################
 ### IMAGE GENERATION PROVIDER
 ################################################################################
 
-### OPEN AI
-## IMAGE_PROVIDER - Image provider (Example: dalle)
-## IMAGE_SIZE - Image size (Example: 256)
-## DALLE: 256, 512, 1024
+### Common
+
+## IMAGE_PROVIDER - Image provider (Default: dalle)
 # IMAGE_PROVIDER=dalle
+
+## IMAGE_SIZE - Image size (Default: 256)
 # IMAGE_SIZE=256
 
-### HUGGINGFACE
-## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
-## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
-# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
-# HUGGINGFACE_API_TOKEN=your-huggingface-api-token
+### Huggingface (IMAGE_PROVIDER=huggingface)
 
-### STABLE DIFFUSION WEBUI
-## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
-## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
+## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
+# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
+
+## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
+# HUGGINGFACE_API_TOKEN=
+
+### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
+
+## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
 # SD_WEBUI_AUTH=
-# SD_WEBUI_URL=http://127.0.0.1:7860
+
+## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
+# SD_WEBUI_URL=http://localhost:7860
 
 ################################################################################
 ### AUDIO TO TEXT PROVIDER
 ################################################################################
 
-### HUGGINGFACE
-# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
+## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
+# AUDIO_TO_TEXT_PROVIDER=huggingface
+
+## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
+# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
 
 ################################################################################
 ### GIT Provider for repository actions
 ################################################################################
 
-### GITHUB
-## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
-## GITHUB_USERNAME - Github username
-# GITHUB_API_KEY=github_pat_123
-# GITHUB_USERNAME=your-github-username
-################################################################################
 
+## GITHUB_API_KEY - Github API key / PAT (Default: None)
+# GITHUB_API_KEY=
+
+## GITHUB_USERNAME - Github username (Default: None)
+# GITHUB_USERNAME=
+
 ################################################################################
 ### WEB BROWSING
 ################################################################################
 
-### BROWSER
 ## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
-## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
-## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
 # HEADLESS_BROWSER=True
+
+## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
 # USE_WEB_BROWSER=chrome
-## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
+
+## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
 # BROWSE_CHUNK_MAX_LENGTH=3000
-## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
+
+## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
 # BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm
 
-### GOOGLE
-## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
-## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
-# GOOGLE_API_KEY=your-google-api-key
-# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
+## GOOGLE_API_KEY - Google API key (Default: None)
+# GOOGLE_API_KEY=
+
+## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
+# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
 
 ################################################################################
-### TTS PROVIDER
+### TEXT TO SPEECH PROVIDER
 ################################################################################
 
-### MAC OS
-## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
-# USE_MAC_OS_TTS=False
+## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
+# TEXT_TO_SPEECH_PROVIDER=gtts
 
-### STREAMELEMENTS
-## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
-# USE_BRIAN_TTS=False
+### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
+## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
+# STREAMELEMENTS_VOICE=Brian
 
-### ELEVENLABS
-## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
-## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
-## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
-# ELEVENLABS_API_KEY=your-elevenlabs-api-key
-# ELEVENLABS_VOICE_1_ID=your-voice-id-1
-# ELEVENLABS_VOICE_2_ID=your-voice-id-2
+### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
+## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
+# ELEVENLABS_API_KEY=
+
+## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
+# ELEVENLABS_VOICE_ID=
 
 ################################################################################
-### TWITTER API
+### CHAT MESSAGES
 ################################################################################
 
-# TW_CONSUMER_KEY=
-# TW_CONSUMER_SECRET=
-# TW_ACCESS_TOKEN=
-# TW_ACCESS_TOKEN_SECRET=
-
-################################################################################
-### ALLOWLISTED PLUGINS
-################################################################################
-
-#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
-ALLOWLISTED_PLUGINS=
-
-################################################################################
-### CHAT PLUGIN SETTINGS
-################################################################################
-# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
+## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
 # CHAT_MESSAGES_ENABLED=False
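The most disruptive changes here for an existing installation are the renames: SMART_LLM_MODEL becomes SMART_LLM, FAST_LLM_MODEL becomes FAST_LLM, the FAST_TOKEN_LIMIT/SMART_TOKEN_LIMIT knobs disappear, and the default memory backend is json_file rather than local. A minimal sketch of a .env written against the new template (values are illustrative placeholders, not recommendations):

```sh
# .env - minimal sketch using the renamed settings
OPENAI_API_KEY=your-openai-api-key
# SMART_LLM=gpt-4            # was SMART_LLM_MODEL
# FAST_LLM=gpt-3.5-turbo     # was FAST_LLM_MODEL
# MEMORY_BACKEND=json_file   # was MEMORY_BACKEND=local
# RESTRICT_TO_WORKSPACE=True
```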
2 .gitattributes vendored

@@ -1,5 +1,5 @@
 # Exclude VCR cassettes from stats
-tests/**/cassettes/**.y*ml linguist-generated
+tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated
 
 # Mark documentation as such
 docs/**.md linguist-documentation
2 .github/CODEOWNERS vendored Normal file

@@ -0,0 +1,2 @@
+.github/workflows/ @Significant-Gravitas/maintainers
+autogpt/core @collijk
149 .github/ISSUE_TEMPLATE/1.bug.yml vendored

@@ -8,14 +8,16 @@ body:
       ### ⚠️ Before you continue
       * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
       * If you need help, you can ask in the [discussions] section or in [#tech-support]
-      * **Throughly search the [existing issues] before creating a new one**
+      * **Thoroughly search the [existing issues] before creating a new one**
+      * Read our [wiki page on Contributing]
       [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
       [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
       [discord]: https://discord.gg/autogpt
       [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
       [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
       [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
+      [wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
 
   - type: checkboxes
     attributes:
       label: ⚠️ Search for existing issues first ⚠️
@@ -25,23 +27,29 @@ body:
     options:
       - label: I have searched the existing issues, and there is no existing issue for my problem
         required: true
 
   - type: markdown
     attributes:
       value: |
         Please provide a searchable summary of the issue in the title above ⬆️.
 
-        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
-        The less time we spend here, the more time we spend building AutoGPT.
+        Please confirm that the issue you have is described well and precise in the title above ⬆️.
+        A good rule of thumb: What would you type if you were searching for the issue?
 
-        Please help us help you:
-        - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
-        - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
-        - Search for existing issues, "add comment" is tidier than "new issue"
-        - Ask on our Discord (https://discord.gg/autogpt)
+        For example:
+        BAD - my auto-gpt keeps looping
+        GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.
+
+        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
+        The less time we spend here, the more time we can spend building AutoGPT.
+
+        Please help us help you by following these steps:
+        - Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
+          newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
+        - Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
         - Provide relevant info:
-          - Provide commit-hash (`git rev-parse HEAD` gets it)
-          - If it's a pip/packages issue, provide pip version, python version
-          - If it's a crash, provide traceback.
+          - Provide commit-hash (`git rev-parse HEAD` gets it) if possible
+          - If it's a pip/packages issue, mention this in the title and provide pip version, python version
+          - If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
 
   - type: dropdown
     attributes:
       label: Which Operating System are you using?
@@ -54,9 +62,15 @@ body:
       - Docker
       - Devcontainer / Codespace
       - Windows Subsystem for Linux (WSL)
-      - Other (Please specify in your problem)
+      - Other
     validations:
      required: true
+    nested_fields:
+      - type: text
+        attributes:
+          label: Specify the system
+          description: Please specify the system you are working on.
+
   - type: dropdown
     attributes:
       label: Which version of Auto-GPT are you using?
@@ -71,61 +85,80 @@ body:
       - Master (branch)
     validations:
       required: true
 
   - type: dropdown
     attributes:
-      label: GPT-3 or GPT-4?
+      label: Do you use OpenAI GPT-3 or GPT-4?
       description: >
         If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
         the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
     options:
       - GPT-3.5
       - GPT-4
+      - GPT-4(32k)
     validations:
       required: true
-  - type: textarea
+
+  - type: dropdown
     attributes:
-      label: Steps to reproduce 🕹
-      description: |
-        **⚠️ Issues that we can't reproduce will be closed.**
-  - type: textarea
-    attributes:
-      label: Current behavior 😯
-      description: Describe what happens instead of the expected behavior.
-  - type: textarea
-    attributes:
-      label: Expected behavior 🤔
-      description: Describe what should happen.
-  - type: textarea
-    attributes:
-      label: Your prompt 📝
+      label: Which area covers your issue best?
       description: >
-        If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
-      value: |
-        ```yaml
-        # Paste your prompt here
-        ```
+        Select the area related to the issue you are reporting.
+      options:
+        - Installation and setup
+        - Memory
+        - Performance
+        - Prompt
+        - Commands
+        - Plugins
+        - AI Model Limitations
+        - Challenges
+        - Documentation
+        - Logging
+        - Agents
+        - Other
+    validations:
+      required: true
+    autolabels: true
+    nested_fields:
+      - type: text
+        attributes:
+          label: Specify the area
+          description: Please specify the area you think is best related to the issue.
+
   - type: textarea
     attributes:
-      label: Your Logs 📒
-      description: |
-        Please include the log showing your error and the command that caused it, if applicable.
-        You can copy it from your terminal or from `logs/activity.log`.
-        This will help us understand your issue better!
-
-        <details>
-        <summary><i>Example</i></summary>
-        ```log
-        INFO  NEXT ACTION:  COMMAND = execute_shell  ARGUMENTS = {'command_line': 'some_command'}
-        INFO  -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
-        Traceback (most recent call last):
-          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
-            self._interpret_response_line(
-          File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
-            raise self.handle_error_response(
-        openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
-        ```
-        </details>
+      label: Describe your issue.
+      description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
     validations:
       required: true
 
+  #Following are optional file content uploads
   - type: markdown
     attributes:
       value: |
-        ```log
-        <insert your logs here>
-        ```
+        ⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
+
+        "The log files are located in the folder 'logs' inside the main auto-gpt folder."
+
+  - type: textarea
+    attributes:
+      label: Upload Activity Log Content
+      description: |
+        Upload the activity log content, this can help us understand the issue better.
+        To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
+        ⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
+        any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Upload Error Log Content
+      description: |
+        Upload the error log content, this will help us understand the issue better.
+        To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
+        ⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
+        any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+    validations:
+      required: false
7 .github/ISSUE_TEMPLATE/2.feature.yml vendored

@@ -1,13 +1,12 @@
 name: Feature request 🚀
-description: Suggest a new idea for Auto-GPT.
+description: Suggest a new idea for Auto-GPT!
 labels: ['status: needs triage']
 body:
   - type: markdown
     attributes:
       value: |
+        First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
         Please provide a searchable summary of the issue in the title above ⬆️.
-
-        Thanks for contributing by creating an issue! ❤️
   - type: checkboxes
     attributes:
       label: Duplicates
@@ -26,4 +25,4 @@ body:
   - type: textarea
     attributes:
       label: Motivation 🔦
-      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
+      description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
11 .github/PULL_REQUEST_TEMPLATE.md vendored

@@ -14,6 +14,8 @@ Provide clear documentation and explanations of the changes made.
 Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
 For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
 
+Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
+
 By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
 
 ### Background
@@ -33,7 +35,14 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
 - [ ] I have thoroughly tested my changes with multiple different prompts.
 - [ ] I have considered potential risks and mitigations for my changes.
 - [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
+- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
+- [ ] I have run the following commands against my code to ensure it passes our linters:
+  ```shell
+  black .
+  isort .
+  mypy
+  autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
+  ```
 
 <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
82 .github/workflows/benchmarks.yml vendored

@@ -1,31 +1,73 @@
-name: Run Benchmarks
+name: Benchmarks
 
 on:
   schedule:
     - cron: '0 8 * * *'
   workflow_dispatch:
 
 jobs:
-  build:
+  Benchmark:
+    name: ${{ matrix.config.task-name }}
     runs-on: ubuntu-latest
-
-    env:
-      python-version: '3.10'
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - python-version: "3.10"
+            task: "tests/challenges"
+            task-name: "Mandatory Tasks"
+          - python-version: "3.10"
+            task: "--beat-challenges -ra tests/challenges"
+            task-name: "Challenging Tasks"
 
     steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          ref: master
 
-    - name: Set up Python ${{ env.python-version }}
-      uses: actions/setup-python@v4
-      with:
-        python-version: ${{ env.python-version }}
+      - name: Set up Python ${{ matrix.config.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.config.python-version }}
 
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install -r requirements.txt
+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
 
-    - name: benchmark
-      env:
-        OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-      run: |
-        python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py
+      - name: Set up Python dependency cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
+
+      - name: Install Python dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run pytest with coverage
+        run: |
+          rm -rf tests/Auto-GPT-test-cassettes
+          pytest -n auto --record-mode=all ${{ matrix.config.task }}
+        env:
+          CI: true
+          PROXY: ${{ secrets.PROXY }}
+          AGENT_MODE: ${{ secrets.AGENT_MODE }}
+          AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
+          PLAIN_OUTPUT: True
+
+      - name: Upload logs as artifact
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: test-logs-${{ matrix.config.task-name }}
+          path: logs/
+
+      - name: Upload cassettes as artifact
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: cassettes-${{ matrix.config.task-name }}
+          path: tests/Auto-GPT-test-cassettes/
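Both matrix entries run the same pytest entry point and differ only in the argument string passed in as `task`. A sketch of reproducing the two benchmark runs locally, assuming dependencies are installed and an OpenAI key is configured (note the workflow also deletes tests/Auto-GPT-test-cassettes first to force full re-recording):

```sh
# "Mandatory Tasks" matrix entry
pytest -n auto --record-mode=all tests/challenges

# "Challenging Tasks" matrix entry
pytest -n auto --record-mode=all --beat-challenges -ra tests/challenges
```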
206 .github/workflows/ci.yml vendored

@@ -2,16 +2,24 @@ name: Python CI
 
 on:
   push:
-    branches: [ master ]
+    branches: [ master, ci-test* ]
+    paths-ignore:
+      - 'tests/Auto-GPT-test-cassettes'
+      - 'tests/challenges/current_score.json'
   pull_request:
-    branches: [ master ]
+    branches: [ stable, master, release-* ]
+  pull_request_target:
+    branches: [ master, release-*, ci-test* ]
 
 concurrency:
-  group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
-  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+  group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
 
 jobs:
   lint:
+    # eliminate duplicate runs
+    if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
+
     runs-on: ubuntu-latest
     env:
       min-python-version: "3.10"
@@ -19,12 +27,26 @@ jobs:
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          ref: ${{ github.event.pull_request.head.ref }}
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
 
       - name: Set up Python ${{ env.min-python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.min-python-version }}
 
+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+      - name: Set up Python dependency cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
+
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
@@ -41,7 +63,19 @@ jobs:
         run: isort . --check
         if: success() || failure()
 
+      - name: Check mypy formatting
+        run: mypy
+        if: success() || failure()
+
+      - name: Check for unused imports and pass statements
+        run: |
+          cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
+          $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
+
   test:
+    # eliminate duplicate runs
+    if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
+
     permissions:
       # Gives the action the necessary permissions for publishing new
       # comments in pull requests.
@@ -51,27 +85,177 @@ jobs:
       # comments (to avoid publishing multiple comments in the same PR)
       contents: write
     runs-on: ubuntu-latest
+    timeout-minutes: 30
     strategy:
       matrix:
-        python-version: ["3.10", "3.11"]
+        python-version: ["3.10"]
 
     steps:
-      - name: Check out repository
+      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          submodules: true
+
+      - name: Configure git user Auto-GPT-Bot
+        run: |
+          git config --global user.name "Auto-GPT-Bot"
+          git config --global user.email "github-bot@agpt.co"
+
+      - name: Checkout cassettes
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        run: |
+          cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
+          cassette_base_branch="${{ github.event.pull_request.base.ref }}"
+          cd tests/Auto-GPT-test-cassettes
+
+          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
+            cassette_base_branch="master"
+          fi
+
+          if git ls-remote --exit-code --heads origin $cassette_branch ; then
+            git fetch origin $cassette_branch
+            git fetch origin $cassette_base_branch
+
+            git checkout $cassette_branch
+
+            # Pick non-conflicting cassette updates from the base branch
+            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
+            echo "Using cassettes from mirror branch '$cassette_branch'," \
+              "synced to upstream branch '$cassette_base_branch'."
+          else
+            git checkout -b $cassette_branch
+            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
+              "Using cassettes from '$cassette_base_branch'."
+          fi
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
 
-      - name: Install dependencies
+      - id: get_date
+        name: Get date
+        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+      - name: Set up Python dependency cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
+
+      - name: Install Python dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements.txt
 
-      - name: Run unittest tests with coverage
+      - name: Run pytest with coverage
         run: |
-          pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
+          pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
+            --numprocesses=logical --durations=10 \
+            tests/unit tests/integration tests/challenges
+          python tests/challenges/utils/build_current_score.py
+        env:
+          CI: true
+          PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
+          AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }}
+          AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }}
+          OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }}
+          PLAIN_OUTPUT: True
 
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3
+
+      - id: setup_git_auth
+        name: Set up git token authentication
+        # Cassettes may be pushed even when tests fail
+        if: success() || failure()
+        run: |
+          config_key="http.${{ github.server_url }}/.extraheader"
+          base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
+
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          cd tests/Auto-GPT-test-cassettes
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          echo "config_key=$config_key" >> $GITHUB_OUTPUT
+
+      - name: Push updated challenge scores
+        if: github.event_name == 'push'
+        run: |
+          score_file="tests/challenges/current_score.json"
+
+          if ! git diff --quiet $score_file; then
+            git add $score_file
+            git commit -m "Update challenge scores"
+            git push origin HEAD:${{ github.ref_name }}
+          else
+            echo "The challenge scores didn't change."
+          fi
+
+      - id: push_cassettes
+        name: Push updated cassettes
+        # For pull requests, push updated cassettes even when tests fail
+        if: github.event_name == 'push' || success() || failure()
+        run: |
+          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
+            is_pull_request=true
+            cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
+          else
+            cassette_branch="${{ github.ref_name }}"
+          fi
+
+          cd tests/Auto-GPT-test-cassettes
+          # Commit & push changes to cassettes if any
+          if ! git diff --quiet; then
+            git add .
+            git commit -m "Auto-update cassettes"
+            git push origin HEAD:$cassette_branch
+            if [ ! $is_pull_request ]; then
+              cd ../..
+              git add tests/Auto-GPT-test-cassettes
+              git commit -m "Update cassette submodule"
+              git push origin HEAD:$cassette_branch
+            fi
+            echo "updated=true" >> $GITHUB_OUTPUT
+          else
+            echo "updated=false" >> $GITHUB_OUTPUT
+            echo "No cassette changes to commit"
+          fi
+
+      - name: Post Set up git token auth
+        if: steps.setup_git_auth.outcome == 'success'
+        run: |
+          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+
+      - name: Apply "behaviour change" label and comment on PR
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        run: |
+          PR_NUMBER=${{ github.event.pull_request.number }}
+          TOKEN=${{ secrets.PAT_REVIEW }}
+          REPO=${{ github.repository }}
+
+          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
+            echo "Adding label and comment..."
+            curl -X POST \
+              -H "Authorization: Bearer $TOKEN" \
+              -H "Accept: application/vnd.github.v3+json" \
+              https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
+              -d '{"labels":["behaviour change"]}'
+
+            echo $TOKEN | gh auth login --with-token
+            gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
+          fi
+
+      - name: Upload logs to artifact
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: test-logs
+          path: logs/
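The test job's pytest invocation is reproducible outside CI. A sketch, with the secrets-driven PROXY/AGENT_MODE/AGENT_TYPE variables simply left unset and a placeholder API key:

```sh
export CI=true PLAIN_OUTPUT=True
export OPENAI_API_KEY=your-openai-api-key   # placeholder

pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
  --numprocesses=logical --durations=10 \
  tests/unit tests/integration tests/challenges

# Regenerate the challenge score file the same way the workflow does
python tests/challenges/utils/build_current_score.py
```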
81 .github/workflows/docker-ci.yml vendored

@@ -3,8 +3,11 @@ name: Docker CI
 on:
   push:
     branches: [ master ]
+    paths-ignore:
+      - 'tests/Auto-GPT-test-cassettes'
+      - 'tests/challenges/current_score.json'
   pull_request:
-    branches: [ master ]
+    branches: [ master, release-*, stable ]
 
 concurrency:
   group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
@@ -70,46 +73,52 @@ jobs:
       run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
       continue-on-error: true
 
-  # Docker setup needs fixing before this is going to work: #1843
   test:
     runs-on: ubuntu-latest
     needs: build
+    timeout-minutes: 10
     steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
+      - name: Check out repository
+        uses: actions/checkout@v3
+        with:
+          submodules: true
 
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
 
-    - id: build
-      name: Build image
-      uses: docker/build-push-action@v3
-      with:
-        build-args: BUILD_TYPE=dev # include pytest
-        tags: ${{ env.IMAGE_NAME }}
-        load: true # save to docker images
-        # cache layers in GitHub Actions cache to speed up builds
-        cache-from: type=gha,scope=docker-dev
-        cache-to: type=gha,scope=docker-dev,mode=max
+      - id: build
+        name: Build image
+        uses: docker/build-push-action@v3
+        with:
+          build-args: BUILD_TYPE=dev # include pytest
+          tags: ${{ env.IMAGE_NAME }}
+          load: true # save to docker images
+          # cache layers in GitHub Actions cache to speed up builds
+          cache-from: type=gha,scope=docker-dev
+          cache-to: type=gha,scope=docker-dev,mode=max
 
-    - id: test
-      name: Run tests
-      env:
-        CI: true
-        OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-      run: |
-        set +e
-        test_output=$(
-          docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
-          pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
-        )
-        test_failure=$?
+      - id: test
+        name: Run tests
+        env:
+          CI: true
+          PLAIN_OUTPUT: True
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+        run: |
+          set +e
+          test_output=$(
+            docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
+            pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
+            --numprocesses=4 --durations=10 \
+            tests/unit tests/integration 2>&1
+          )
+          test_failure=$?
 
-        echo "$test_output"
+          echo "$test_output"
 
-        cat << $EOF >> $GITHUB_STEP_SUMMARY
-        # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
-        \`\`\`
-        $test_output
-        \`\`\`
-        $EOF
+          cat << $EOF >> $GITHUB_STEP_SUMMARY
+          # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
+          \`\`\`
+          $test_output
+          \`\`\`
+          $EOF
 
           exit $test_failure
12 .github/workflows/pr-label.yml (vendored)
@@ -3,7 +3,10 @@ name: "Pull Request auto-label"
on:
  # So that PRs touching the same files as the push are updated
  push:
    branches: [ master ]
    branches: [ master, release-* ]
    paths-ignore:
      - 'tests/Auto-GPT-test-cassettes'
      - 'tests/challenges/current_score.json'
  # So that the `dirtyLabel` is removed if conflicts are resolved
  # We recommend `pull_request_target` so that github secrets are available.
  # In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -45,11 +48,10 @@ jobs:
          s_label: 'size/s'
          s_max_size: 10
          m_label: 'size/m'
          m_max_size: 50
          m_max_size: 100
          l_label: 'size/l'
          l_max_size: 200
          l_max_size: 500
          xl_label: 'size/xl'
          message_if_xl: >
            This PR exceeds the recommended size of 200 lines.
            This PR exceeds the recommended size of 500 lines.
            Please make sure you are NOT addressing multiple issues with one PR.
            Note this PR might be rejected due to its size.

28 .github/workflows/sponsors_readme.yml (vendored)
@@ -1,28 +0,0 @@
name: Generate Sponsors README

on:
  workflow_dispatch:
  schedule:
    - cron: '0 */12 * * *'

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3

      - name: Generate Sponsors 💖
        uses: JamesIves/github-sponsors-readme-action@v1
        with:
          token: ${{ secrets.README_UPDATER_PAT }}
          file: 'README.md'
          minimum: 2500
          maximum: 99999

      - name: Deploy to GitHub Pages 🚀
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          branch: master
          folder: '.'
          token: ${{ secrets.README_UPDATER_PAT }}

15 .gitignore (vendored)
@@ -1,11 +1,6 @@
## Original ignores
autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
@@ -17,10 +12,11 @@ last_run_ai_settings.yaml
auto-gpt.json
log.txt
log-ingestion.txt
logs
/logs
*.log
*.mp3
mem.sqlite3
venvAutoGPT

# Byte-compiled / optimized / DLL files
__pycache__/
@@ -35,7 +31,8 @@ __pycache__/
build/
develop-eggs/
dist/
plugins/
/plugins/
plugins_config.yaml
downloads/
eggs/
.eggs/
@@ -162,4 +159,4 @@ vicuna-*
openai/

# news
CURRENT_BULLETIN.md

4 .gitmodules (vendored, new file)
@@ -0,0 +1,4 @@
[submodule "tests/Auto-GPT-test-cassettes"]
	path = tests/Auto-GPT-test-cassettes
	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
	branch = master
@@ -22,11 +22,21 @@ repos:
      - id: black
        language_version: python3.10

  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: 'v1.3.0'
    hooks:
      - id: mypy

  - repo: local
    hooks:
      - id: autoflake
        name: autoflake
        entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
        language: python
        types: [ python ]
      - id: pytest-check
        name: pytest-check
        entry: pytest --cov=autogpt --without-integration --without-slow-integration
        entry: pytest --cov=autogpt tests/unit
        language: system
        pass_filenames: false
        always_run: true

30 BULLETIN.md
@@ -1,9 +1,27 @@
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag
# QUICK LINKS 🔗
# --------------
🌎 *Official Website*: https://agpt.co.
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.

# INCLUDED COMMAND 'send_tweet' IS DEPRECATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins
# v0.4.6 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release includes under-the-hood improvements and bug fixes, including better UTF-8
special character support, workspace write access for sandboxed Python execution,
more robust path resolution for config files and the workspace, and a full restructure
of the Agent class, the "brain" of Auto-GPT, to make it more extensible.

## Changes to Docker configuration
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.
We have also released some documentation updates, including:

- *How to share your system logs*
  Visit [docs/share-your-logs.md] to learn how to share logs with us
  via a log analyzer graciously contributed by https://www.e2b.dev/

- *Auto-GPT re-architecture documentation*
  You can learn more about the inner workings of the Auto-GPT re-architecture
  released last cycle, via these links:
  * [autogpt/core/README.md]
  * [autogpt/core/ARCHITECTURE_NOTES.md]

Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.

150 CONTRIBUTING.md
@@ -1,148 +1,14 @@
# Contributing to Auto-GPT
We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)

First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?

This document provides guidelines and best practices to help you contribute effectively.
However, the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.

## Code of Conduct
If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.

By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project.
If you wish to get involved with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.

[Code of Conduct]: https://significant-gravitas.github.io/Auto-GPT/code-of-conduct.md
In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).

## 📢 A Quick Word
Right now we will not be accepting any contributions that add non-essential commands to Auto-GPT.

However, you absolutely can still add these commands to Auto-GPT in the form of plugins.
Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).

## Getting Started

1. Fork the repository and clone your fork.
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
3. Make your changes in the new branch.
4. Test your changes thoroughly.
5. Commit and push your changes to your fork.
6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.

## How to Contribute

### Reporting Bugs

If you find a bug in the project, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A description of the problem, including steps to reproduce the issue.
- Any relevant logs, screenshots, or other supporting information.

### Suggesting Enhancements

If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
- Any relevant examples, mockups, or supporting information.

### Submitting Pull Requests

When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status).
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
- You should not include any unrelated or "extra" small tweaks or changes.

## Style Guidelines

### Code Formatting

We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request.

To format your code, run the following commands in the project's root directory:

```bash
python -m black .
python -m isort .
```

Or if you have these tools installed globally:
```bash
black .
isort .
```

### Pre-Commit Hooks

We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:
```bash
pip install pre-commit
```

Run the following command in the project's root directory to install the pre-commit hooks:
```bash
pre-commit install
```

Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.

If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.

Happy coding, and once again, thank you for your contributions!

Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts

## Testing your changes

If you add or change code, make sure the updated code is covered by tests.
To increase coverage if necessary, [write tests using pytest].

For more info on running tests, please refer to ["Running tests"](https://significant-gravitas.github.io/Auto-GPT/testing/).

[write tests using pytest]: https://realpython.com/pytest-python-testing/
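
To make that concrete, here is a minimal, self-contained pytest sketch. The `normalize_goal` helper is hypothetical and exists only to illustrate the arrange-act-assert shape of a small, focused unit test; it is not part of the Auto-GPT codebase.

```python
# Minimal pytest sketch. `normalize_goal` is a hypothetical helper, used only
# to illustrate what a small, focused unit test looks like.
import pytest


def normalize_goal(goal: str) -> str:
    """Hypothetical helper: trim whitespace and collapse internal runs of it."""
    return " ".join(goal.split())


@pytest.mark.parametrize(
    "raw, expected",
    [
        ("  write a report ", "write a report"),
        ("one\t\ttask", "one task"),
    ],
)
def test_normalize_goal(raw: str, expected: str):
    assert normalize_goal(raw) == expected
```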

### API-dependent tests

To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known
requests and matching responses in so-called *cassettes*, allowing us to run the tests
in CI without needing actual API access.

When changes cause a test prompt to be generated differently, it will likely miss the
cache and make a request to the API, updating the cassette with the new request+response.
*Be sure to include the updated cassette in your PR!*

When you run pytest locally:

- If the prompt hasn't changed, you will not consume API tokens because no new OpenAI calls are required.
- If the prompt changes in a way that makes the cassettes unusable:
  - If no API key is present, the test fails: it requires a new cassette, so add an API key to `.env`.
  - If an API key is present, the tests will make a real call to OpenAI.
    - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR.
    - If the test is unsuccessful:
      - Either your change made Auto-GPT less capable; in that case, you have to change your code.
      - Or the test might be poorly written; in that case, you can make suggestions to change the test.

In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
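
For intuition, here is a minimal, self-contained sketch of the record/replay mechanism using the vcrpy API directly. The cassette directory and the test body are illustrative assumptions for this sketch, not Auto-GPT's actual test setup.

```python
# Minimal vcrpy sketch (illustrative only; not Auto-GPT's actual test setup).
import requests
import vcr

my_vcr = vcr.VCR(
    cassette_library_dir="tests/cassettes",  # hypothetical path for this sketch
    record_mode="once",  # record on the first run, replay on later runs
)


@my_vcr.use_cassette("example.yaml")
def test_http_call_is_replayed_offline():
    # The first run performs a real HTTP request and records it to the
    # cassette; subsequent runs replay the recorded response, so the test
    # needs no network access (and, for OpenAI calls, no API key).
    response = requests.get("https://api.github.com")
    assert response.status_code == 200
```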


### Community Challenges
Challenges are goals we need Auto-GPT to achieve.
To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on. A challenge's status is derived from its level_currently_beaten value (see the sketch after this list):
- a challenge is new if level_currently_beaten is None
- a challenge is in progress if level_currently_beaten is greater than or equal to 1
- a challenge is beaten if level_currently_beaten = max_level
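
As promised above, here is the same convention restated as a small, hypothetical helper; the function is only an illustration of the rule and is not part of the actual test suite.

```python
# Hypothetical restatement of the challenge-status convention above;
# not part of the actual challenge code.
from typing import Optional


def challenge_status(level_currently_beaten: Optional[int], max_level: int) -> str:
    if level_currently_beaten is None:
        return "new"
    if level_currently_beaten >= max_level:
        return "beaten"
    return "in progress"
```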

Here is an example of how to run memory challenge A and attempt to beat level 3:

    pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3

To beat a challenge, you're not allowed to change anything in the tests folder; you have to add code in the autogpt folder.

Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
Don't hesitate to delete the cassettes associated with the challenge you're working on if you need to. Otherwise it will keep replaying the last run.

Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.

❤️ & 🔆
The team @ Auto-GPT

14 Dockerfile
@@ -6,11 +6,13 @@ FROM python:3.10-slim AS autogpt-base

# Install browsers
RUN apt-get update && apt-get install -y \
    chromium-driver firefox-esr \
    ca-certificates
    chromium-driver firefox-esr ca-certificates \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install utilities
RUN apt-get install -y curl jq wget git
RUN apt-get update && apt-get install -y \
    curl jq wget git \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
@@ -22,7 +24,7 @@ ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt"]
ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
@@ -36,5 +38,9 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d
    pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
ONBUILD RUN mkdir ./data

FROM autogpt-${BUILD_TYPE} AS auto-gpt

@@ -1,5 +1,5 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli
import autogpt.app.cli

if __name__ == "__main__":
    autogpt.cli.main()
    autogpt.app.cli.main()

@@ -1,4 +0,0 @@
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager

__all__ = ["Agent", "AgentManager"]
@@ -1,290 +0,0 @@
from colorama import Fore, Style

from autogpt.app import execute_command, get_command
from autogpt.config import Config
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
from autogpt.workspace import Workspace


class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines everything
          the AI needs to know to achieve its task successfully.
          Currently, the dynamic and customizable information in the system prompt are
          ai_name, description and goals.

        triggering_prompt: The last sentence the AI will see before answering.
            For Auto-GPT, this prompt is:
            Determine which next command to use, and respond using the format specified
            above:
            The triggering prompt is not part of the system prompt because between the
            system prompt and the triggering
            prompt we have contextual information that can distract the AI and make it
            forget that its goal is to find the next task to achieve.
            SYSTEM PROMPT
            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
            TRIGGERING PROMPT

        The triggering prompt reminds the AI about its short term meta task
        (defining the next task)
    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        self.summary_memory = (
            "I was created."  # Initial memory necessary to avoid hallucination
        )
        self.last_memory_index = 0
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        user_input = ""

        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break
            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"I want to execute {command_name}")

                    arguments = self._resolve_pathlike_command_args(arguments)

                except Exception as e:
                    logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

                logger.info(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands"
                    "'n' to exit program, or enter feedback for "
                    f"{self.ai_name}..."
                )
                while True:
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("Waiting for your response...")
                    else:
                        console_input = clean_input(
                            Fore.MAGENTA + "Input:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"SELF FEEDBACK: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "GENERATE NEXT COMMAND JSON"
                        else:
                            user_input = self_feedback_resp
                        break
                    elif console_input.lower().strip() == "":
                        logger.warn("Invalid input format.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            logger.warn(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        user_input = console_input
                        command_name = "human_feedback"
                        break

                if user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif user_input == "EXIT":
                    logger.info("Exiting...")
                    break
            else:
                # Print command
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
                    result = plugin.post_command(command_name, result)
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )

    def _resolve_pathlike_command_args(self, command_args):
        if "directory" in command_args and command_args["directory"] in {"", "/"}:
            command_args["directory"] = str(self.workspace.root)
        else:
            for pathlike in ["filename", "directory", "clone_path"]:
                if pathlike in command_args:
                    command_args[pathlike] = str(
                        self.workspace.get_path(command_args[pathlike])
                    )
        return command_args

    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as 'reasoning',
        'plan', 'thoughts', and 'criticism'. It combines these elements into a single
        feedback message and uses the create_chat_completion() function to generate a
        response based on the input message.

        Args:
            thoughts (dict): A dictionary containing thought elements like reasoning,
            plan, thoughts, and criticism.

        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role

        feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
        reasoning = thoughts.get("reasoning", "")
        plan = thoughts.get("plan", "")
        thought = thoughts.get("thoughts", "")
        criticism = thoughts.get("criticism", "")
        feedback_thoughts = thought + reasoning + plan + criticism
        return create_chat_completion(
            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
            llm_model,
        )
@@ -1,145 +0,0 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations

from typing import List

from autogpt.config.config import Config
from autogpt.llm import Message, create_chat_completion
from autogpt.singleton import Singleton


class AgentManager(metaclass=Singleton):
    """Agent manager for managing GPT agents"""

    def __init__(self):
        self.next_key = 0
        self.agents = {}  # key, (task, full_message_history, model)
        self.cfg = Config()

    # Create new GPT agent
    # TODO: Centralise use of create_chat_completion() to globally enforce token limit

    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
        """Create a new agent and return its key

        Args:
            task: The task to perform
            prompt: The prompt to use
            model: The model to use

        Returns:
            The key of the new agent
        """
        messages: List[Message] = [
            {"role": "user", "content": prompt},
        ]
        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                messages.extend(iter(plugin_messages))
        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = ""
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"

        if plugins_reply and plugins_reply != "":
            messages.append({"role": "assistant", "content": plugins_reply})
        key = self.next_key
        # This is done instead of len(agents) to make keys unique even if agents
        # are deleted
        self.next_key += 1

        self.agents[key] = (task, messages, model)

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return key, agent_reply

    def message_agent(self, key: str | int, message: str) -> str:
        """Send a message to an agent and return its response

        Args:
            key: The key of the agent to message
            message: The message to send to the agent

        Returns:
            The agent's response
        """
        task, messages, model = self.agents[int(key)]

        # Add user message to message history before sending to agent
        messages.append({"role": "user", "content": message})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                for plugin_message in plugin_messages:
                    messages.append(plugin_message)

        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        plugins_reply = agent_reply
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        # Update full message history
        if plugins_reply and plugins_reply != "":
            messages.append({"role": "assistant", "content": plugins_reply})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return agent_reply

    def list_agents(self) -> list[tuple[str | int, str]]:
        """Return a list of all agents

        Returns:
            A list of tuples of the form (key, task)
        """

        # Return a list of agent keys and their tasks
        return [(key, task) for key, (task, _, _) in self.agents.items()]

    def delete_agent(self, key: str | int) -> bool:
        """Delete an agent from the agent manager

        Args:
            key: The key of the agent to delete

        Returns:
            True if successful, False otherwise
        """

        try:
            del self.agents[int(key)]
            return True
        except KeyError:
            return False
4 autogpt/agents/__init__.py (new file)
@@ -0,0 +1,4 @@
from .agent import Agent
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName

__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]
298 autogpt/agents/agent.py (new file)
@@ -0,0 +1,298 @@
from __future__ import annotations

import json
import time
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config
    from autogpt.llm.base import ChatModelResponse, ChatSequence
    from autogpt.memory.vector import VectorMemory
    from autogpt.models.command_registry import CommandRegistry

from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
    LogCycleHandler,
)
from autogpt.workspace import Workspace

from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName


class Agent(BaseAgent):
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        memory: VectorMemory,
        triggering_prompt: str,
        config: Config,
        cycle_budget: Optional[int] = None,
    ):
        super().__init__(
            ai_config=ai_config,
            command_registry=command_registry,
            config=config,
            default_cycle_instruction=triggering_prompt,
            cycle_budget=cycle_budget,
        )

        self.memory = memory
        """VectorMemoryProvider used to manage the agent's context (TODO)"""

        self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
        """Workspace that the agent has access to, e.g. for reading/writing files."""

        self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
        """Timestamp the agent was created; only used for structured debug logging."""

        self.log_cycle_handler = LogCycleHandler()
        """LogCycleHandler for structured debug logging."""

    def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
        if kwargs.get("prepend_messages") is None:
            kwargs["prepend_messages"] = []

        # Clock
        kwargs["prepend_messages"].append(
            Message("system", f"The current time and date is {time.strftime('%c')}"),
        )

        # Add budget information (if any) to prompt
        api_manager = ApiManager()
        if api_manager.get_total_budget() > 0.0:
            remaining_budget = (
                api_manager.get_total_budget() - api_manager.get_total_cost()
            )
            if remaining_budget < 0:
                remaining_budget = 0

            budget_msg = Message(
                "system",
                f"Your remaining API budget is ${remaining_budget:.3f}"
                + (
                    " BUDGET EXCEEDED! SHUT DOWN!\n\n"
                    if remaining_budget == 0
                    else " Budget very nearly exceeded! Shut down gracefully!\n\n"
                    if remaining_budget < 0.005
                    else " Budget nearly exceeded. Finish up.\n\n"
                    if remaining_budget < 0.01
                    else ""
                ),
            )
            logger.debug(budget_msg)

            if kwargs.get("append_messages") is None:
                kwargs["append_messages"] = []
            kwargs["append_messages"].append(budget_msg)

        return super().construct_base_prompt(*args, **kwargs)

    def on_before_think(self, *args, **kwargs) -> ChatSequence:
        prompt = super().on_before_think(*args, **kwargs)

        self.log_cycle_handler.log_count_within_cycle = 0
        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            self.history.raw(),
            FULL_MESSAGE_HISTORY_FILE_NAME,
        )
        return prompt

    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        # Execute command
        if command_name is not None and command_name.lower().startswith("error"):
            result = f"Could not execute command: {command_name}{command_args}"
        elif command_name == "human_feedback":
            result = f"Human feedback: {user_input}"
            self.log_cycle_handler.log_cycle(
                self.ai_config.ai_name,
                self.created_at,
                self.cycle_count,
                user_input,
                USER_INPUT_FILE_NAME,
            )

        else:
            for plugin in self.config.plugins:
                if not plugin.can_handle_pre_command():
                    continue
                command_name, arguments = plugin.pre_command(command_name, command_args)
            command_result = execute_command(
                command_name=command_name,
                arguments=command_args,
                agent=self,
            )
            result = f"Command {command_name} returned: " f"{command_result}"

            result_tlength = count_string_tokens(str(command_result), self.llm.name)
            memory_tlength = count_string_tokens(
                str(self.history.summary_message()), self.llm.name
            )
            if result_tlength + memory_tlength > self.send_token_limit:
                result = f"Failure: command {command_name} returned too much output. \
                    Do not execute this command again with the same arguments."

            for plugin in self.config.plugins:
                if not plugin.can_handle_post_command():
                    continue
                result = plugin.post_command(command_name, result)
        # Check if there's a result from the command append it to the message
        if result is None:
            self.history.add("system", "Unable to execute command", "action_result")
        else:
            self.history.add("system", result, "action_result")

        return result

    def parse_and_process_response(
        self, llm_response: ChatModelResponse, *args, **kwargs
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        if not llm_response.content:
            raise SyntaxError("Assistant response has no text content")

        assistant_reply_dict = extract_dict_from_response(llm_response.content)

        valid, errors = validate_dict(assistant_reply_dict, self.config)
        if not valid:
            raise SyntaxError(
                "Validation of response failed:\n  "
                + ";\n  ".join([str(e) for e in errors])
            )

        for plugin in self.config.plugins:
            if not plugin.can_handle_post_planning():
                continue
            assistant_reply_dict = plugin.post_planning(assistant_reply_dict)

        response = None, None, assistant_reply_dict

        # Print Assistant thoughts
        if assistant_reply_dict != {}:
            # Get command name and arguments
            try:
                command_name, arguments = extract_command(
                    assistant_reply_dict, llm_response, self.config
                )
                response = command_name, arguments, assistant_reply_dict
            except Exception as e:
                logger.error("Error: \n", str(e))

        self.log_cycle_handler.log_cycle(
            self.ai_config.ai_name,
            self.created_at,
            self.cycle_count,
            assistant_reply_dict,
            NEXT_ACTION_FILE_NAME,
        )
        return response


def extract_command(
    assistant_reply_json: dict, assistant_reply: ChatModelResponse, config: Config
) -> tuple[str, dict[str, str]]:
    """Parse the response and return the command name and arguments

    Args:
        assistant_reply_json (dict): The response object from the AI
        assistant_reply (ChatModelResponse): The model response from the AI
        config (Config): The config object

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    if config.openai_functions:
        if assistant_reply.function_call is None:
            return "Error:", {"message": "No 'function_call' in assistant reply"}
        assistant_reply_json["command"] = {
            "name": assistant_reply.function_call.name,
            "args": json.loads(assistant_reply.function_call.arguments),
        }
    try:
        if "command" not in assistant_reply_json:
            return "Error:", {"message": "Missing 'command' object in JSON"}

        if not isinstance(assistant_reply_json, dict):
            return (
                "Error:",
                {
                    "message": f"The previous message sent was not a dictionary {assistant_reply_json}"
                },
            )

        command = assistant_reply_json["command"]
        if not isinstance(command, dict):
            return "Error:", {"message": "'command' object is not a dictionary"}

        if "name" not in command:
            return "Error:", {"message": "Missing 'name' field in 'command' object"}

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", {"message": "Invalid JSON"}
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", {"message": str(e)}


def execute_command(
    command_name: str,
    arguments: dict[str, str],
    agent: Agent,
) -> Any:
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        agent (Agent): The agent that is executing the command

    Returns:
        str: The result of the command
    """
    try:
        # Execute a native command with the same name or alias, if it exists
        if command := agent.command_registry.get_command(command_name):
            return command(**arguments, agent=agent)

        # Handle non-native commands (e.g. from plugins)
        for command in agent.ai_config.prompt_generator.commands:
            if (
                command_name == command["label"].lower()
                or command_name == command["name"].lower()
            ):
                return command["function"](**arguments)

        raise RuntimeError(
            f"Cannot execute '{command_name}': unknown command."
            " Do not try to use this command again."
        )
    except Exception as e:
        return f"Error: {str(e)}"
318 autogpt/agents/base.py (new file)
@@ -0,0 +1,318 @@
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config

    from autogpt.models.command_registry import CommandRegistry

from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.memory.message_history import MessageHistory
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

CommandName = str
CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]


class BaseAgent(metaclass=ABCMeta):
    """Base class for all Auto-GPT agents."""

    def __init__(
        self,
        ai_config: AIConfig,
        command_registry: CommandRegistry,
        config: Config,
        big_brain: bool = True,
        default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT,
        cycle_budget: Optional[int] = 1,
        send_token_limit: Optional[int] = None,
        summary_max_tlength: Optional[int] = None,
    ):
        self.ai_config = ai_config
        """The AIConfig or "personality" object associated with this agent."""

        self.command_registry = command_registry
        """The registry containing all commands available to the agent."""

        self.config = config
        """The applicable application configuration."""

        self.big_brain = big_brain
        """
        Whether this agent uses the configured smart LLM (default) to think,
        as opposed to the configured fast LLM.
        """

        self.default_cycle_instruction = default_cycle_instruction
        """The default instruction passed to the AI for a thinking cycle."""

        self.cycle_budget = cycle_budget
        """
        The number of cycles that the agent is allowed to run unsupervised.

        `None` for unlimited continuous execution,
        `1` to require user approval for every step,
        `0` to stop the agent.
        """

        self.cycles_remaining = cycle_budget
        """The number of cycles remaining within the `cycle_budget`."""

        self.cycle_count = 0
        """The number of cycles that the agent has run since its initialization."""

        self.system_prompt = ai_config.construct_full_prompt(config)
        """
        The system prompt sets up the AI's personality and explains its goals,
        available resources, and restrictions.
        """

        llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
        self.llm = OPEN_AI_CHAT_MODELS[llm_name]
        """The LLM that the agent uses to think."""

        self.send_token_limit = send_token_limit or self.llm.max_tokens * 3 // 4
        """
        The token limit for prompt construction. Should leave room for the completion;
        defaults to 75% of `llm.max_tokens`.
        """

        self.history = MessageHistory(
            self.llm,
            max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
        )

    def think(
        self,
        instruction: Optional[str] = None,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Runs the agent for one cycle.

        Params:
            instruction: The instruction to put at the end of the prompt.

        Returns:
            The command name and arguments, if any, and the agent's thoughts.
        """

        instruction = instruction or self.default_cycle_instruction

        prompt: ChatSequence = self.construct_prompt(instruction)
        prompt = self.on_before_think(prompt, instruction)

        raw_response = create_chat_completion(
            prompt,
            self.config,
            functions=get_openai_command_specs(self.command_registry)
            if self.config.openai_functions
            else None,
        )
        self.cycle_count += 1

        return self.on_response(raw_response, prompt, instruction)

    @abstractmethod
    def execute(
        self,
        command_name: str | None,
        command_args: dict[str, str] | None,
        user_input: str | None,
    ) -> str:
        """Executes the given command, if any, and returns the agent's response.

        Params:
            command_name: The name of the command to execute, if any.
            command_args: The arguments to pass to the command, if any.
            user_input: The user's input, if any.

        Returns:
            The results of the command.
        """
        ...

    def construct_base_prompt(
        self,
        prepend_messages: list[Message] = [],
        append_messages: list[Message] = [],
        reserve_tokens: int = 0,
    ) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. `prepend_messages`
        3. Message history of the agent, truncated & prepended with running summary as needed
        4. `append_messages`

        Params:
            prepend_messages: Messages to insert between the system prompt and message history
            append_messages: Messages to insert after the message history
            reserve_tokens: Number of tokens to reserve for content that is added later
        """

        prompt = ChatSequence.for_model(
            self.llm.name,
            [Message("system", self.system_prompt)] + prepend_messages,
        )

        # Reserve tokens for messages to be appended later, if any
        reserve_tokens += self.history.max_summary_tlength
        if append_messages:
            reserve_tokens += count_message_tokens(append_messages, self.llm.name)

        # Fill message history, up to a margin of reserved_tokens.
        # Trim remaining historical messages and add them to the running summary.
        history_start_index = len(prompt)
        trimmed_history = add_history_upto_token_limit(
            prompt, self.history, self.send_token_limit - reserve_tokens
        )
        if trimmed_history:
            new_summary_msg, _ = self.history.trim_messages(list(prompt), self.config)
            prompt.insert(history_start_index, new_summary_msg)

        if append_messages:
            prompt.extend(append_messages)

        return prompt

    def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history of the agent, truncated & prepended with running summary as needed
        3. `cycle_instruction`

        Params:
            cycle_instruction: The final instruction for a thinking cycle
        """

        if not cycle_instruction:
            raise ValueError("No instruction given")

        cycle_instruction_msg = Message("user", cycle_instruction)
        cycle_instruction_tlength = count_message_tokens(
            cycle_instruction_msg, self.llm.name
        )
        prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)

        # ADD user input message ("triggering prompt")
        prompt.append(cycle_instruction_msg)

        return prompt

    def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
        """Called after constructing the prompt but before executing it.

        Calls the `on_planning` hook of any enabled and capable plugins, adding their
        output to the prompt.

        Params:
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The prompt to execute
        """
        current_tokens_used = prompt.token_length
        plugin_count = len(self.config.plugins)
        for i, plugin in enumerate(self.config.plugins):
            if not plugin.can_handle_on_planning():
                continue
            plugin_response = plugin.on_planning(
                self.ai_config.prompt_generator, prompt.raw()
            )
            if not plugin_response or plugin_response == "":
                continue
            message_to_add = Message("system", plugin_response)
            tokens_to_add = count_message_tokens(message_to_add, self.llm.name)
            if current_tokens_used + tokens_to_add > self.send_token_limit:
                logger.debug(f"Plugin response too long, skipping: {plugin_response}")
                logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
                break
            prompt.insert(
                -1, message_to_add
            )  # HACK: assumes cycle instruction to be at the end
            current_tokens_used += tokens_to_add
        return prompt

    def on_response(
        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Called upon receiving a response from the chat model.

        Adds the last/newest message in the prompt and the response to `history`,
        and calls `self.parse_and_process_response()` to do the rest.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """

        # Save assistant reply to message history
        self.history.append(prompt[-1])
        self.history.add(
            "assistant", llm_response.content, "ai_response"
        )  # FIXME: support function calls

        try:
            return self.parse_and_process_response(llm_response, prompt, instruction)
        except SyntaxError as e:
            logger.error(f"Response could not be parsed: {e}")
            # TODO: tune this message
            self.history.add(
                "system",
                f"Your response could not be parsed: {e}"
                "\n\nRemember to only respond using the specified format above!",
            )
            return None, None, {}

        # TODO: update memory/context

    @abstractmethod
    def parse_and_process_response(
        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Validate, parse & process the LLM's response.

        Must be implemented by derivative classes: no base implementation is provided,
        since the implementation depends on the role of the derivative Agent.

        Params:
            llm_response: The raw response from the chat model
            prompt: The prompt that was executed
            instruction: The instruction for the current cycle, also used in constructing the prompt

        Returns:
            The parsed command name and command args, if any, and the agent thoughts.
        """
        pass


def add_history_upto_token_limit(
    prompt: ChatSequence, history: MessageHistory, t_limit: int
) -> list[Message]:
    current_prompt_length = prompt.token_length
    insertion_index = len(prompt)
    limit_reached = False
    trimmed_messages: list[Message] = []
    for cycle in reversed(list(history.per_cycle())):
        messages_to_add = [msg for msg in cycle if msg is not None]
        tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
        if current_prompt_length + tokens_to_add > t_limit:
            limit_reached = True

        if not limit_reached:
            # Add the most recent message to the start of the chain,
            # after the system prompts.
            prompt.insert(insertion_index, *messages_to_add)
            current_prompt_length += tokens_to_add
        else:
            trimmed_messages = messages_to_add + trimmed_messages

    return trimmed_messages
255 autogpt/app.py
@@ -1,255 +0,0 @@
|
||||
""" Command and Control """
|
||||
import json
|
||||
from typing import Dict, List, NoReturn, Union
|
||||
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory import get_memory
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
AGENT_MANAGER = AgentManager()
|
||||
|
||||
|
||||
def is_valid_int(value: str) -> bool:
|
||||
"""Check if the value is a valid integer
|
||||
|
||||
Args:
|
||||
value (str): The value to check
|
||||
|
||||
Returns:
|
||||
bool: True if the value is a valid integer, False otherwise
|
||||
"""
|
||||
try:
|
||||
int(value)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def get_command(response_json: Dict):
|
||||
"""Parse the response and return the command name and arguments
|
||||
|
||||
Args:
|
||||
response_json (json): The response from the AI
|
||||
|
||||
Returns:
|
||||
tuple: The command name and arguments
|
||||
|
||||
Raises:
|
||||
json.decoder.JSONDecodeError: If the response is not valid JSON
|
||||
|
||||
Exception: If any other error occurs
|
||||
"""
|
||||
try:
|
||||
if "command" not in response_json:
|
||||
return "Error:", "Missing 'command' object in JSON"
|
||||
|
||||
if not isinstance(response_json, dict):
|
||||
return "Error:", f"'response_json' object is not dictionary {response_json}"
|
||||
|
||||
command = response_json["command"]
|
||||
if not isinstance(command, dict):
|
||||
return "Error:", "'command' object is not a dictionary"
|
||||
|
||||
if "name" not in command:
|
||||
return "Error:", "Missing 'name' field in 'command' object"
|
||||
|
||||
command_name = command["name"]
|
||||
|
||||
# Use an empty dictionary if 'args' field is not present in 'command' object
|
||||
arguments = command.get("args", {})
|
||||
|
||||
return command_name, arguments
|
||||
except json.decoder.JSONDecodeError:
|
||||
return "Error:", "Invalid JSON"
|
||||
# All other errors, return "Error: + error message"
|
||||
except Exception as e:
|
||||
return "Error:", str(e)
|
||||
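For context, a minimal illustration of the parsing contract implemented above (the reply shape follows the checks in get_command; the command name itself is hypothetical):

response_json = {
    "command": {"name": "write_to_file", "args": {"filename": "notes.txt"}}
}
name, args = get_command(response_json)
assert name == "write_to_file" and args == {"filename": "notes.txt"}

# A missing "name" field yields an error tuple instead of raising:
assert get_command({"command": {}}) == (
    "Error:",
    "Missing 'name' field in 'command' object",
)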

def map_command_synonyms(command_name: str):
    """Takes the original command name given by the AI, and checks if the
    string matches a list of common/known hallucinations
    """
    synonyms = [
        ("write_file", "write_to_file"),
        ("create_file", "write_to_file"),
        ("search", "google"),
    ]
    for seen_command, actual_command_name in synonyms:
        if command_name == seen_command:
            return actual_command_name
    return command_name
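The synonym table above quietly repairs commonly hallucinated command names; for example:

assert map_command_synonyms("create_file") == "write_to_file"
assert map_command_synonyms("search") == "google"
# Unknown names pass through unchanged:
assert map_command_synonyms("browse_website") == "browse_website"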

def execute_command(
    command_registry: CommandRegistry,
    command_name: str,
    arguments,
    prompt: PromptGenerator,
):
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command

    Returns:
        str: The result of the command
    """
    try:
        cmd = command_registry.commands.get(command_name)

        # If the command is found, call it with the provided arguments
        if cmd:
            return cmd(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])

        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "task_complete":
            shutdown()
        else:
            for command in prompt.commands:
                if (
                    command_name == command["label"].lower()
                    or command_name == command["name"].lower()
                ):
                    return command["function"](**arguments)
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    except Exception as e:
        return f"Error: {str(e)}"


@command(
    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str) -> str:
    """Summarize the text scraped from the given URL, guided by a question

    Args:
        url (str): The url to scrape
        question (str): The question to summarize the text for

    Returns:
        str: The summary of the text
    """
    text = scrape_text(url)
    summary = summarize_text(url, text, question)
    return f""" "Result" : {summary}"""


@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Return the hyperlinks found on the given page

    Args:
        url (str): The url to scrape

    Returns:
        str or list: The hyperlinks on the page
    """
    return scrape_links(url)


def shutdown() -> NoReturn:
    """Shut down the program"""
    logger.info("Shutting down...")
    quit()


@command(
    "start_agent",
    "Start GPT Agent",
    '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
    """Start an agent with a given name, task, and prompt

    Args:
        name (str): The name of the agent
        task (str): The task of the agent
        prompt (str): The prompt for the agent
        model (str): The model to use for the agent

    Returns:
        str: The response of the agent
    """
    # Remove underscores from name
    voice_name = name.replace("_", " ")

    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if CFG.speak_mode:
        say_text(agent_intro, 1)
    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)

    if CFG.speak_mode:
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = AGENT_MANAGER.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str) -> str:
    """Message an agent with a given key and message"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = AGENT_MANAGER.message_agent(int(key), message)
    else:
        return "Invalid key, must be an integer."

    # Speak response
    if CFG.speak_mode:
        say_text(agent_response, 1)
    return agent_response


@command("list_agents", "List GPT Agents", "")
def list_agents() -> str:
    """List all agents

    Returns:
        str: A list of all agents
    """
    return "List of agents:\n" + "\n".join(
        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
    )


@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str) -> str:
    """Delete an agent with a given key

    Args:
        key (str): The key of the agent to delete

    Returns:
        str: A message indicating whether the agent was deleted or not
    """
    result = AGENT_MANAGER.delete_agent(key)
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
@@ -1,4 +1,7 @@
 """Main script for the autogpt package."""
+from pathlib import Path
+from typing import Optional
+
 import click
@@ -13,7 +16,15 @@ import click
 @click.option(
     "--ai-settings",
     "-C",
-    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
+    help=(
+        "Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
+        " root directory. Will also automatically skip the re-prompt."
+    ),
+)
+@click.option(
+    "--prompt-settings",
+    "-P",
+    help="Specifies which prompt_settings.yaml file to use.",
 )
 @click.option(
     "-l",
@@ -60,12 +71,29 @@ import click
     is_flag=True,
     help="Installs external dependencies for 3rd party plugins.",
 )
+@click.option(
+    "--ai-name",
+    type=str,
+    help="AI name override",
+)
+@click.option(
+    "--ai-role",
+    type=str,
+    help="AI role override",
+)
+@click.option(
+    "--ai-goal",
+    type=str,
+    multiple=True,
+    help="AI goal override; may be used multiple times to pass multiple goals",
+)
 @click.pass_context
 def main(
     ctx: click.Context,
     continuous: bool,
     continuous_limit: int,
     ai_settings: str,
+    prompt_settings: str,
     skip_reprompt: bool,
     speak: bool,
     debug: bool,
@@ -77,6 +105,9 @@ def main(
     skip_news: bool,
     workspace_directory: str,
     install_plugin_deps: bool,
+    ai_name: Optional[str],
+    ai_role: Optional[str],
+    ai_goal: tuple[str],
 ) -> None:
     """
     Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4 and pushing the boundaries of AI.
@@ -84,24 +115,31 @@ def main(

     Start an Auto-GPT assistant.
     """
     # Put imports inside function to avoid importing everything when starting the CLI
-    from autogpt.main import run_auto_gpt
+    from autogpt.app.main import run_auto_gpt

     if ctx.invoked_subcommand is None:
         run_auto_gpt(
-            continuous,
-            continuous_limit,
-            ai_settings,
-            skip_reprompt,
-            speak,
-            debug,
-            gpt3only,
-            gpt4only,
-            memory_type,
-            browser_name,
-            allow_downloads,
-            skip_news,
-            workspace_directory,
-            install_plugin_deps,
+            continuous=continuous,
+            continuous_limit=continuous_limit,
+            ai_settings=ai_settings,
+            prompt_settings=prompt_settings,
+            skip_reprompt=skip_reprompt,
+            speak=speak,
+            debug=debug,
+            gpt3only=gpt3only,
+            gpt4only=gpt4only,
+            memory_type=memory_type,
+            browser_name=browser_name,
+            allow_downloads=allow_downloads,
+            skip_news=skip_news,
+            working_directory=Path(
+                __file__
+            ).parent.parent.parent,  # TODO: make this an option
+            workspace_directory=workspace_directory,
+            install_plugin_deps=install_plugin_deps,
+            ai_name=ai_name,
+            ai_role=ai_role,
+            ai_goals=ai_goal,
         )
@@ -1,19 +1,25 @@
 """Configurator module."""
 from __future__ import annotations

+from typing import Literal
+
 import click
 from colorama import Back, Fore, Style

 from autogpt import utils
 from autogpt.config import Config
+from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
+from autogpt.llm.api_manager import ApiManager
 from autogpt.logs import logger
-from autogpt.memory import get_supported_memory_backends
-
-CFG = Config()
+from autogpt.memory.vector import get_supported_memory_backends


 def create_config(
+    config: Config,
     continuous: bool,
     continuous_limit: int,
     ai_settings_file: str,
+    prompt_settings_file: str,
     skip_reprompt: bool,
     speak: bool,
     debug: bool,
@@ -30,6 +36,7 @@ def create_config(
         continuous (bool): Whether to run in continuous mode
         continuous_limit (int): The number of times to run in continuous mode
         ai_settings_file (str): The path to the ai_settings.yaml file
+        prompt_settings_file (str): The path to the prompt_settings.yaml file
         skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
         speak (bool): Whether to enable speak mode
         debug (bool): Whether to enable debug mode
@@ -40,13 +47,13 @@ def create_config(
         allow_downloads (bool): Whether to allow Auto-GPT to download files natively
         skip_news (bool): Whether to suppress the output of latest news on startup
     """
-    CFG.set_debug_mode(False)
-    CFG.set_continuous_mode(False)
-    CFG.set_speak_mode(False)
+    config.debug_mode = False
+    config.continuous_mode = False
+    config.speak_mode = False

     if debug:
         logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_debug_mode(True)
+        config.debug_mode = True

     if continuous:
         logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
@@ -57,13 +64,13 @@ def create_config(
             " cause your AI to run forever or carry out actions you would not usually"
             " authorise. Use at your own risk.",
         )
-        CFG.set_continuous_mode(True)
+        config.continuous_mode = True

         if continuous_limit:
             logger.typewriter_log(
                 "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
             )
-            CFG.set_continuous_limit(continuous_limit)
+            config.continuous_limit = continuous_limit

     # Check if continuous limit is used without continuous mode
     if continuous_limit and not continuous:
@@ -71,15 +78,26 @@ def create_config(

     if speak:
         logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_speak_mode(True)
+        config.speak_mode = True

+    # Set the default LLM models
     if gpt3only:
         logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_smart_llm_model(CFG.fast_llm_model)
-
-    if gpt4only:
+        # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
+        config.fast_llm = GPT_3_MODEL
+        config.smart_llm = GPT_3_MODEL
+    elif (
+        gpt4only
+        and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
+        == GPT_4_MODEL
+    ):
         logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_fast_llm_model(CFG.smart_llm_model)
+        # --gpt4only should always use gpt-4, despite user's SMART_LLM config
+        config.fast_llm = GPT_4_MODEL
+        config.smart_llm = GPT_4_MODEL
+    else:
+        config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
+        config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)

     if memory_type:
         supported_memory = get_supported_memory_backends()
@@ -90,13 +108,13 @@ def create_config(
                 Fore.RED,
                 f"{supported_memory}",
             )
-            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
+            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
         else:
-            CFG.memory_backend = chosen
+            config.memory_backend = chosen

     if skip_reprompt:
         logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
-        CFG.skip_reprompt = True
+        config.skip_reprompt = True

     if ai_settings_file:
         file = ai_settings_file
@@ -109,11 +127,24 @@ def create_config(
             exit(1)

         logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
-        CFG.ai_settings_file = file
-        CFG.skip_reprompt = True
+        config.ai_settings_file = file
+        config.skip_reprompt = True
+
+    if prompt_settings_file:
+        file = prompt_settings_file
+
+        # Validate file
+        (validated, message) = utils.validate_yaml_file(file)
+        if not validated:
+            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
+            logger.double_check()
+            exit(1)
+
+        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
+        config.prompt_settings_file = file

     if browser_name:
-        CFG.selenium_web_browser = browser_name
+        config.selenium_web_browser = browser_name

     if allow_downloads:
         logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@@ -128,7 +159,29 @@ def create_config(
             Fore.YELLOW,
             f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
         )
-        CFG.allow_downloads = True
+        config.allow_downloads = True

     if skip_news:
-        CFG.skip_news = True
+        config.skip_news = True
+
+
+def check_model(
+    model_name: str,
+    model_type: Literal["smart_llm", "fast_llm"],
+    config: Config,
+) -> str:
+    """Check if model is available for use. If not, return gpt-3.5-turbo."""
+    openai_credentials = config.get_openai_credentials(model_name)
+    api_manager = ApiManager()
+    models = api_manager.get_models(**openai_credentials)
+
+    if any(model_name in m["id"] for m in models):
+        return model_name
+
+    logger.typewriter_log(
+        "WARNING: ",
+        Fore.YELLOW,
+        f"You do not have access to {model_name}. Setting {model_type} to "
+        f"gpt-3.5-turbo.",
+    )
+    return "gpt-3.5-turbo"
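In effect, the model-selection logic above gives --gpt3only precedence over --gpt4only, and check_model() downgrades any model the account cannot access. A simplified emulation of that precedence (availability checking elided — an assumption made for illustration):

def pick_models(gpt3only: bool, gpt4only: bool, fast: str, smart: str) -> tuple[str, str]:
    # Mirrors create_config(): the gpt3only branch is tested first, so it wins.
    if gpt3only:
        return "gpt-3.5-turbo", "gpt-3.5-turbo"
    if gpt4only:
        return "gpt-4", "gpt-4"
    return fast, smart

assert pick_models(True, True, "gpt-3.5-turbo", "gpt-4") == (
    "gpt-3.5-turbo",
    "gpt-3.5-turbo",
)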
autogpt/app/main.py (new file, 597 lines)
@@ -0,0 +1,597 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import enum
import logging
import math
import signal
import sys
from pathlib import Path
from types import FrameType
from typing import Optional

from colorama import Fore, Style

from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import (
    clean_input,
    get_current_git_branch,
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies


def run_auto_gpt(
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    prompt_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    working_directory: Path,
    workspace_directory: str | Path,
    install_plugin_deps: bool,
    ai_name: Optional[str] = None,
    ai_role: Optional[str] = None,
    ai_goals: tuple[str] = tuple(),
):
    # Configure logging before we do anything else.
    logger.set_level(logging.DEBUG if debug else logging.INFO)

    config = ConfigBuilder.build_config_from_env(workdir=working_directory)

    # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
    # or import it directly.
    logger.config = config

    # TODO: fill in llm values here
    check_openai_api_key(config)

    create_config(
        config,
        continuous,
        continuous_limit,
        ai_settings,
        prompt_settings,
        skip_reprompt,
        speak,
        debug,
        gpt3only,
        gpt4only,
        memory_type,
        browser_name,
        allow_downloads,
        skip_news,
    )

    if config.continuous_mode:
        for line in get_legal_warning().split("\n"):
            logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)

    if not config.skip_news:
        motd, is_new_motd = get_latest_bulletin()
        if motd:
            motd = markdown_to_ansi_style(motd)
            for motd_line in motd.split("\n"):
                logger.info(motd_line, "NEWS:", Fore.GREEN)
            if is_new_motd and not config.chat_messages_enabled:
                input(
                    Fore.MAGENTA
                    + Style.BRIGHT
                    + "NEWS: Bulletin was updated! Press Enter to continue..."
                    + Style.RESET_ALL
                )

    git_branch = get_current_git_branch()
    if git_branch and git_branch != "stable":
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            f"You are running on `{git_branch}` branch "
            "- this is not a supported branch.",
        )
    if sys.version_info < (3, 10):
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            "You are running on an older version of Python. "
            "Some people have observed problems with certain "
            "parts of Auto-GPT with this version. "
            "Please consider upgrading to Python 3.10 or higher.",
        )

    if install_plugin_deps:
        install_plugin_dependencies()

    # TODO: have this directory live outside the repository (e.g. in a user's
    # home directory) and have it come in as a command line argument or part of
    # the env file.
    Workspace.set_workspace_directory(config, workspace_directory)

    # HACK: doing this here to collect some globals that depend on the workspace.
    Workspace.set_file_logger_path(config, config.workspace_path)

    config.plugins = scan_plugins(config, config.debug_mode)
    # Create a CommandRegistry instance and scan default folder
    command_registry = CommandRegistry()

    logger.debug(
        f"The following command categories are disabled: {config.disabled_command_categories}"
    )
    enabled_command_categories = [
        x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
    ]

    logger.debug(
        f"The following command categories are enabled: {enabled_command_categories}"
    )

    for command_category in enabled_command_categories:
        command_registry.import_commands(command_category)

    # Unregister commands that are incompatible with the current config
    incompatible_commands = []
    for command in command_registry.commands.values():
        if callable(command.enabled) and not command.enabled(config):
            command.enabled = False
            incompatible_commands.append(command)

    for command in incompatible_commands:
        command_registry.unregister(command)
        logger.debug(
            f"Unregistering incompatible command: {command.name}, "
            f"reason - {command.disabled_reason or 'Disabled by current config.'}"
        )

    ai_config = construct_main_ai_config(
        config,
        name=ai_name,
        role=ai_role,
        goals=ai_goals,
    )
    ai_config.command_registry = command_registry
    # print(prompt)

    # add chat plugins capable of report to logger
    if config.chat_messages_enabled:
        for plugin in config.plugins:
            if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
                logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
                logger.chat_plugins.append(plugin)

    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(config)
    memory.clear()
    logger.typewriter_log(
        "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)

    agent = Agent(
        memory=memory,
        command_registry=command_registry,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
        ai_config=ai_config,
        config=config,
    )

    run_interaction_loop(agent)


def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | None:
    # Translate from the continuous_mode/continuous_limit config
    # to a cycle_budget (maximum number of cycles to run without checking in with the
    # user) and a count of cycles_remaining before we check in.
    if continuous_mode:
        cycle_budget = continuous_limit if continuous_limit else math.inf
    else:
        cycle_budget = 1

    return cycle_budget
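A quick check of the budget translation above:

import math

assert _get_cycle_budget(continuous_mode=False, continuous_limit=0) == 1  # check in every cycle
assert _get_cycle_budget(continuous_mode=True, continuous_limit=5) == 5  # bounded continuous run
assert _get_cycle_budget(continuous_mode=True, continuous_limit=0) == math.inf  # unbounded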
class UserFeedback(str, enum.Enum):
    """Enum for user feedback."""

    AUTHORIZE = "GENERATE NEXT COMMAND JSON"
    EXIT = "EXIT"
    TEXT = "TEXT"


def run_interaction_loop(
    agent: Agent,
) -> None:
    """Run the main interaction loop for the agent.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    # These contain both application config and agent config, so grab them here.
    config = agent.config
    ai_config = agent.ai_config
    logger.debug(f"{ai_config.ai_name} System Prompt: {agent.system_prompt}")

    cycle_budget = cycles_remaining = _get_cycle_budget(
        config.continuous_mode, config.continuous_limit
    )
    spinner = Spinner("Thinking...", plain_output=config.plain_output)

    def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
        nonlocal cycle_budget, cycles_remaining, spinner
        if cycles_remaining in [0, 1, math.inf]:
            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution "
                "immediately.",
                Fore.RED,
            )
            sys.exit()
        else:
            restart_spinner = spinner.running
            if spinner.running:
                spinner.stop()

            logger.typewriter_log(
                "Interrupt signal received. Stopping continuous command execution.",
                Fore.RED,
            )
            cycles_remaining = 1
            if restart_spinner:
                spinner.start()

    # Set up an interrupt signal for the agent.
    signal.signal(signal.SIGINT, graceful_agent_interrupt)

    #########################
    # Application Main Loop #
    #########################

    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        # Have the agent determine the next action to take.
        with spinner:
            command_name, command_args, assistant_reply_dict = agent.think()

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(config, ai_config, command_name, command_args, assistant_reply_dict)

        ##################
        # Get user input #
        ##################
        if cycles_remaining == 1:  # Last cycle
            user_feedback, user_input, new_cycles_remaining = get_user_feedback(
                config,
                ai_config,
            )

            if user_feedback == UserFeedback.AUTHORIZE:
                if new_cycles_remaining is not None:
                    # Case 1: User is altering the cycle budget.
                    if cycle_budget > 1:
                        cycle_budget = new_cycles_remaining + 1
                    # Case 2: User is running iteratively and
                    # has initiated a one-time continuous cycle
                    cycles_remaining = new_cycles_remaining + 1
                else:
                    # Case 1: Continuous iteration was interrupted -> resume
                    if cycle_budget > 1:
                        logger.typewriter_log(
                            "RESUMING CONTINUOUS EXECUTION: ",
                            Fore.MAGENTA,
                            f"The cycle budget is {cycle_budget}.",
                        )
                    # Case 2: The agent used up its cycle budget -> reset
                    cycles_remaining = cycle_budget + 1
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "",
                )
            elif user_feedback == UserFeedback.EXIT:
                logger.typewriter_log("Exiting...", Fore.YELLOW)
                exit()
            else:  # user_feedback == UserFeedback.TEXT
                command_name = "human_feedback"
        else:
            user_input = None
            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            if cycles_remaining != math.inf:
                # Print authorized commands left value
                logger.typewriter_log(
                    "AUTHORISED COMMANDS LEFT: ", Fore.CYAN, f"{cycles_remaining}"
                )

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        if command_name != "human_feedback":
            cycles_remaining -= 1
        result = agent.execute(command_name, command_args, user_input)

        if result is not None:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
        else:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")


def update_user(
    config: Config,
    ai_config: AIConfig,
    command_name: CommandName | None,
    command_args: CommandArgs | None,
    assistant_reply_dict: AgentThoughts,
) -> None:
    """Prints the assistant's thoughts and the next command to the user.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.
        command_name: The name of the command to execute.
        command_args: The arguments for the command.
        assistant_reply_dict: The assistant's reply.
    """

    print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)

    # Note: error strings can be passed through command_name, so the error check
    # must happen inside the non-None branch; otherwise it can never match.
    if command_name is not None:
        if command_name.lower().startswith("error"):
            logger.typewriter_log(
                "ERROR: ",
                Fore.RED,
                f"The Agent failed to select an action. Error message: {command_name}",
            )
        else:
            if config.speak_mode:
                say_text(f"I want to execute {command_name}", config)

            # First log new-line so user can differentiate sections better in console
            logger.typewriter_log("\n")
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
                f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
            )
    else:
        logger.typewriter_log(
            "NO ACTION SELECTED: ",
            Fore.RED,
            "The Agent failed to select an action.",
        )


def get_user_feedback(
    config: Config,
    ai_config: AIConfig,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

    Args:
        config: The program's configuration.
        ai_config: The AI's configuration.

    Returns:
        A tuple of the user's feedback, the user's input, and the number of
        cycles remaining if the user has initiated a continuous cycle.
    """
    # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
    # Get key press: Prompt the user to press enter to continue or escape
    # to exit
    logger.info(
        f"Enter '{config.authorise_key}' to authorise command, "
        f"'{config.authorise_key} -N' to run N continuous commands, "
        f"'{config.exit_key}' to exit program, or enter feedback for "
        f"{ai_config.ai_name}..."
    )

    user_feedback = None
    user_input = ""
    new_cycles_remaining = None

    while user_feedback is None:
        # Get input from user
        if config.chat_messages_enabled:
            console_input = clean_input(config, "Waiting for your response...")
        else:
            console_input = clean_input(
                config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
            )

        # Parse user input
        if console_input.lower().strip() == config.authorise_key:
            user_feedback = UserFeedback.AUTHORIZE
        elif console_input.lower().strip() == "":
            logger.warn("Invalid input format.")
        elif console_input.lower().startswith(f"{config.authorise_key} -"):
            try:
                user_feedback = UserFeedback.AUTHORIZE
                new_cycles_remaining = abs(int(console_input.split(" ")[1]))
            except ValueError:
                logger.warn(
                    "Invalid input format. "
                    f"Please enter '{config.authorise_key} -N'"
                    " where N is the number of continuous tasks."
                )
        elif console_input.lower() in [config.exit_key, "exit"]:
            user_feedback = UserFeedback.EXIT
        else:
            user_feedback = UserFeedback.TEXT
            user_input = console_input

    return user_feedback, user_input, new_cycles_remaining
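To make the parsing above concrete, the loop maps console input to return values roughly as follows (assuming the default authorise_key "y" and exit_key "n"; both are configurable, so this mapping is illustrative only):

#   "y"           -> (UserFeedback.AUTHORIZE, "", None)
#   "y -5"        -> (UserFeedback.AUTHORIZE, "", 5)
#   "n" / "exit"  -> (UserFeedback.EXIT, "", None)
#   "do X next"   -> (UserFeedback.TEXT, "do X next", None)
#   ""            -> warns and re-prompts until one of the above is given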
def construct_main_ai_config(
    config: Config,
    name: Optional[str] = None,
    role: Optional[str] = None,
    goals: tuple[str] = tuple(),
) -> AIConfig:
    """Construct the AIConfig for this session, applying any overrides

    Returns:
        AIConfig: the resulting AI configuration
    """
    ai_config = AIConfig.load(config.workdir / config.ai_settings_file)

    # Apply overrides
    if name:
        ai_config.ai_name = name
    if role:
        ai_config.ai_role = role
    if goals:
        ai_config.ai_goals = list(goals)

    if all([name, role, goals]) or (
        config.skip_reprompt
        and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
    ):
        logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
        logger.typewriter_log(
            "API Budget:",
            Fore.GREEN,
            "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
        )
    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {ai_config.ai_name}?",
            speak_text=True,
        )
        should_continue = clean_input(
            config,
            f"""Continue with the last settings?
Name:  {ai_config.ai_name}
Role:  {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
        )
        if should_continue.lower() == config.exit_key:
            ai_config = AIConfig()

    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
        ai_config = prompt_user(config)
        ai_config.save(config.workdir / config.ai_settings_file)

    if config.restrict_to_workspace:
        logger.typewriter_log(
            "NOTE: All files/directories created by this agent can be found inside its workspace at:",
            Fore.YELLOW,
            f"{config.workspace_path}",
        )
    # set the total api budget
    api_manager = ApiManager()
    api_manager.set_total_budget(ai_config.api_budget)

    # Agent Created, print message
    logger.typewriter_log(
        ai_config.ai_name,
        Fore.LIGHTBLUE_EX,
        "has been created with the following details:",
        speak_text=True,
    )

    # Print the ai_config details
    # Name
    logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
    # Role
    logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
    # Goals
    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
    for goal in ai_config.ai_goals:
        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return ai_config


def print_assistant_thoughts(
    ai_name: str,
    assistant_reply_json_valid: dict,
    config: Config,
) -> None:
    from autogpt.speech import say_text

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
    assistant_thoughts_speak = None
    assistant_thoughts_criticism = None

    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
    assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
    if assistant_thoughts:
        assistant_thoughts_reasoning = remove_ansi_escape(
            assistant_thoughts.get("reasoning", "")
        )
        assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
        assistant_thoughts_criticism = remove_ansi_escape(
            assistant_thoughts.get("criticism", "")
        )
        assistant_thoughts_speak = remove_ansi_escape(
            assistant_thoughts.get("speak", "")
        )
    logger.typewriter_log(
        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
    )
    logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
    if assistant_thoughts_plan:
        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
        # If it's a list, join it into a string
        if isinstance(assistant_thoughts_plan, list):
            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
        elif isinstance(assistant_thoughts_plan, dict):
            assistant_thoughts_plan = str(assistant_thoughts_plan)

        # Split the input_string using the newline character and dashes
        lines = assistant_thoughts_plan.split("\n")
        for line in lines:
            line = line.lstrip("- ")
            logger.typewriter_log("- ", Fore.GREEN, line.strip())
    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
    # Speak the assistant's thoughts
    if assistant_thoughts_speak:
        if config.speak_mode:
            say_text(assistant_thoughts_speak, config)
        else:
            logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")


def remove_ansi_escape(s: str) -> str:
    return s.replace("\x1B", "")
autogpt/app/setup.py (new file, 238 lines)
@@ -0,0 +1,238 @@
"""Set up the AI and its goals"""
import re
from typing import Optional

from colorama import Fore, Style
from jinja2 import Template

from autogpt import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.utils import create_chat_completion
from autogpt.logs import logger
from autogpt.prompts.default_prompts import (
    DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
    DEFAULT_USER_DESIRE_PROMPT,
)


def prompt_user(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """Prompt the user for input

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    ai_config_template_provided = ai_config_template is not None and any(
        [
            ai_config_template.ai_goals,
            ai_config_template.ai_name,
            ai_config_template.ai_role,
        ]
    )

    user_desire = ""
    if not ai_config_template_provided:
        # Get user desire if command line overrides have not been passed in
        logger.typewriter_log(
            "Create an AI-Assistant:",
            Fore.GREEN,
            "input '--manual' to enter manual mode.",
            speak_text=True,
        )

        user_desire = utils.clean_input(
            config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
        )

    if user_desire.strip() == "":
        user_desire = DEFAULT_USER_DESIRE_PROMPT  # Default prompt

    # If user desire contains "--manual" or we have overridden any of the AI configuration
    if "--manual" in user_desire or ai_config_template_provided:
        logger.typewriter_log(
            "Manual Mode Selected",
            Fore.GREEN,
            speak_text=True,
        )
        return generate_aiconfig_manual(config, ai_config_template)

    else:
        try:
            return generate_aiconfig_automatic(user_desire, config)
        except Exception as e:
            logger.typewriter_log(
                "Unable to automatically generate AI Config based on user desire.",
                Fore.RED,
                "Falling back to manual mode.",
                speak_text=True,
            )

            return generate_aiconfig_manual(config)


def generate_aiconfig_manual(
    config: Config, ai_config_template: Optional[AIConfig] = None
) -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

    This function guides the user through a series of prompts to collect the necessary information to create
    an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
    goals. If the user does not provide a value for any of the fields, default values will be used.

    Params:
        config (Config): The Config object
        ai_config_template (AIConfig): The AIConfig object to use as a template

    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """

    # Manual Setup Intro
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

    if ai_config_template and ai_config_template.ai_name:
        ai_name = ai_config_template.ai_name
    else:
        ai_name = ""
        # Get AI Name from User
        logger.typewriter_log(
            "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
        )
        ai_name = utils.clean_input(config, "AI Name: ")
        if ai_name == "":
            ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    if ai_config_template and ai_config_template.ai_role:
        ai_role = ai_config_template.ai_role
    else:
        # Get AI Role from User
        logger.typewriter_log(
            "Describe your AI's role: ",
            Fore.GREEN,
            "For example, 'an AI designed to autonomously develop and run businesses with"
            " the sole goal of increasing your net worth.'",
        )
        ai_role = utils.clean_input(config, f"{ai_name} is: ")
        if ai_role == "":
            ai_role = (
                "an AI designed to autonomously develop and run businesses with the"
                " sole goal of increasing your net worth."
            )

    if ai_config_template and ai_config_template.ai_goals:
        ai_goals = ai_config_template.ai_goals
    else:
        # Enter up to 5 goals for the AI
        logger.typewriter_log(
            "Enter up to 5 goals for your AI: ",
            Fore.GREEN,
            "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
            " multiple businesses autonomously",
        )
        logger.info("Enter nothing to load defaults, enter nothing when finished.")
        ai_goals = []
        for i in range(5):
            ai_goal = utils.clean_input(
                config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
            )
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
        if not ai_goals:
            ai_goals = [
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ]

    # Get API Budget from User
    logger.typewriter_log(
        "Enter your budget for API calls: ",
        Fore.GREEN,
        "For example: $1.50",
    )
    logger.info("Enter nothing to let the AI run without monetary limit")
    api_budget_input = utils.clean_input(
        config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
    )
    if api_budget_input == "":
        api_budget = 0.0
    else:
        try:
            api_budget = float(api_budget_input.replace("$", ""))
        except ValueError:
            logger.typewriter_log(
                "Invalid budget input. Setting budget to unlimited.", Fore.RED
            )
            api_budget = 0.0

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)


def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
    prompt_ai_config_automatic = Template(
        DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
    ).render(user_prompt=user_prompt)
    # Call LLM with the string as user input
    output = create_chat_completion(
        ChatSequence.for_model(
            config.fast_llm,
            [
                Message("system", system_prompt),
                Message("user", prompt_ai_config_automatic),
            ],
        ),
        config,
    ).content

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    api_budget = 0.0  # TODO: parse api budget using a regular expression

    return AIConfig(ai_name, ai_role, ai_goals, api_budget)
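A quick sanity check of the extraction regexes above against a typical model reply (the output text is made up for illustration):

import re

output = (
    "Name: CodeGPT\n"
    "Description: an AI that reviews pull requests\n"
    "Goals:\n"
    "- Fetch open PRs\n"
    "- Leave review comments"
)

assert re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1) == "CodeGPT"
assert (
    re.search(
        r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
        output,
        re.IGNORECASE | re.DOTALL,
    )
    .group(1)
    .strip()
    == "an AI that reviews pull requests"
)
assert re.findall(r"(?<=\n)-\s*(.*)", output) == ["Fetch open PRs", "Leave review comments"]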
autogpt/command_decorator.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import functools
from typing import Any, Callable, Optional, TypedDict

from autogpt.config import Config
from autogpt.models.command import Command, CommandParameter

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"


class CommandParameterSpec(TypedDict):
    type: str
    description: str
    required: bool


def command(
    name: str,
    description: str,
    parameters: dict[str, CommandParameterSpec],
    enabled: bool | Callable[[Config], bool] = True,
    disabled_reason: Optional[str] = None,
    aliases: list[str] = [],
) -> Callable[..., Any]:
    """The command decorator is used to create Command objects from ordinary functions."""

    def decorator(func: Callable[..., Any]) -> Command:
        typed_parameters = [
            CommandParameter(
                name=param_name,
                description=parameter.get("description"),
                type=parameter.get("type", "string"),
                required=parameter.get("required", False),
            )
            for param_name, parameter in parameters.items()
        ]
        cmd = Command(
            name=name,
            description=description,
            method=func,
            parameters=typed_parameters,
            enabled=enabled,
            disabled_reason=disabled_reason,
            aliases=aliases,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        wrapper.command = cmd

        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)

        return wrapper

    return decorator
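For reference, registering a command with the new decorator looks like this (the greeting command is a made-up example; the decorator signature and parameter-spec shape are exactly those defined above):

@command(
    "say_hello",
    "Greets the given person",
    {
        "name": {
            "type": "string",
            "description": "Who to greet",
            "required": True,
        },
    },
)
def say_hello(name: str) -> str:
    return f"Hello, {name}!"

# The wrapped function still behaves like a normal function,
# and carries its Command metadata for the registry to pick up:
assert say_hello("world") == "Hello, world!"
assert say_hello.command.name == "say_hello"
assert getattr(say_hello, AUTO_GPT_COMMAND_IDENTIFIER) is True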
@@ -0,0 +1,7 @@
COMMAND_CATEGORIES = [
    "autogpt.commands.execute_code",
    "autogpt.commands.file_operations",
    "autogpt.commands.web_search",
    "autogpt.commands.web_selenium",
    "autogpt.commands.task_statuses",
]
@@ -1,31 +0,0 @@
"""Code evaluation module."""
from __future__ import annotations

from autogpt.commands.command import command
from autogpt.llm import call_ai_function


@command(
    "analyze_code",
    "Analyze Code",
    '"code": "<full_code_string>"',
)
def analyze_code(code: str) -> list[str]:
    """
    A function that takes in a string and returns a response from create chat
    completion api call.

    Parameters:
        code (str): Code to be evaluated.
    Returns:
        A result string from create chat completion. A list of suggestions to
        improve the code.
    """

    function_string = "def analyze_code(code: str) -> list[str]:"
    args = [code]
    description_string = (
        "Analyzes the given code and returns a list of suggestions for improvements."
    )

    return call_ai_function(function_string, args, description_string)
@@ -1,61 +0,0 @@
"""Commands for converting audio to text."""
import json

import requests

from autogpt.commands.command import command
from autogpt.config import Config

CFG = Config()


@command(
    "read_audio_from_file",
    "Convert Audio to text",
    '"filename": "<filename>"',
    CFG.huggingface_audio_to_text_model,
    "Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(filename: str) -> str:
    """
    Convert audio to text.

    Args:
        filename (str): The path to the audio file

    Returns:
        str: The text from the audio
    """
    with open(filename, "rb") as audio_file:
        audio = audio_file.read()
    return read_audio(audio)


def read_audio(audio: bytes) -> str:
    """
    Convert audio to text.

    Args:
        audio (bytes): The audio to convert

    Returns:
        str: The text from the audio
    """
    model = CFG.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
    api_token = CFG.huggingface_api_token
    headers = {"Authorization": f"Bearer {api_token}"}

    if api_token is None:
        raise ValueError(
            "You need to set your Hugging Face API token in the config file."
        )

    response = requests.post(
        api_url,
        headers=headers,
        data=audio,
    )

    text = json.loads(response.content.decode("utf-8"))["text"]
    return f"The audio says: {text}"
autogpt/commands/decorators.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import functools
from pathlib import Path
from typing import Callable

from autogpt.agents.agent import Agent
from autogpt.logs import logger


def sanitize_path_arg(arg_name: str):
    def decorator(func: Callable):
        # Get position of path parameter, in case it is passed as a positional argument
        try:
            arg_index = list(func.__annotations__.keys()).index(arg_name)
        except ValueError:
            raise TypeError(
                f"Sanitized parameter '{arg_name}' absent or not annotated on function '{func.__name__}'"
            )

        # Get position of agent parameter, in case it is passed as a positional argument
        try:
            agent_arg_index = list(func.__annotations__.keys()).index("agent")
        except ValueError:
            raise TypeError(
                f"Parameter 'agent' absent or not annotated on function '{func.__name__}'"
            )

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug(f"Sanitizing arg '{arg_name}' on function '{func.__name__}'")
            logger.debug(f"Function annotations: {func.__annotations__}")

            # Get Agent from the called function's arguments
            agent = kwargs.get(
                "agent", len(args) > agent_arg_index and args[agent_arg_index]
            )
            logger.debug(f"Args: {args}")
            logger.debug(f"KWArgs: {kwargs}")
            logger.debug(f"Agent argument lifted from function call: {agent}")
            if not isinstance(agent, Agent):
                raise RuntimeError("Could not get Agent from decorated command's args")

            # Sanitize the specified path argument, if one is given
            given_path: str | Path | None = kwargs.get(
                arg_name, len(args) > arg_index and args[arg_index] or None
            )
            if given_path:
                if given_path in {"", "/"}:
                    sanitized_path = str(agent.workspace.root)
                else:
                    sanitized_path = str(agent.workspace.get_path(given_path))

                if arg_name in kwargs:
                    kwargs[arg_name] = sanitized_path
                else:
                    # args is an immutable tuple; must be converted to a list to update
                    arg_list = list(args)
                    arg_list[arg_index] = sanitized_path
                    args = tuple(arg_list)

            return func(*args, **kwargs)

        return wrapper

    return decorator
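The decorator resolves a path argument against the agent's workspace before the command body runs; a rough sketch of the intended usage (read_file here is a hypothetical command):

@sanitize_path_arg("filename")
def read_file(filename: str, agent: Agent) -> str:
    # By the time the body runs, `filename` has been rewritten to an absolute
    # path inside agent.workspace, so "../" style escapes are neutralized.
    with open(filename) as f:
        return f.read()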
@@ -4,17 +4,82 @@ import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
import docker
|
||||
from docker.errors import ImageNotFound
|
||||
from docker.errors import DockerException, ImageNotFound
|
||||
from docker.models.containers import Container as DockerContainer
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
from .decorators import sanitize_path_arg
|
||||
|
||||
ALLOWLIST_CONTROL = "allowlist"
|
||||
DENYLIST_CONTROL = "denylist"
|
||||
|
||||
|
||||
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
|
||||
def execute_python_file(filename: str) -> str:
|
||||
@command(
|
||||
"execute_python_code",
|
||||
"Creates a Python file and executes it",
|
||||
{
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "The Python code to run",
|
||||
"required": True,
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "A name to be given to the python file",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
)
|
||||
def execute_python_code(code: str, name: str, agent: Agent) -> str:
|
||||
"""Create and execute a Python file in a Docker container and return the STDOUT of the
|
||||
executed code. If there is any data that needs to be captured use a print statement
|
||||
|
||||
Args:
|
||||
code (str): The Python code to run
|
||||
name (str): A name to be given to the Python file
|
||||
|
||||
Returns:
|
||||
str: The STDOUT captured from the code when it ran
|
||||
"""
|
||||
ai_name = agent.ai_config.ai_name
|
||||
code_dir = agent.workspace.get_path(Path(ai_name, "executed_code"))
|
||||
os.makedirs(code_dir, exist_ok=True)
|
||||
|
||||
if not name.endswith(".py"):
|
||||
name = name + ".py"
|
||||
|
||||
# The `name` arg is not covered by @sanitize_path_arg,
|
||||
# so sanitization must be done here to prevent path traversal.
|
||||
file_path = agent.workspace.get_path(code_dir / name)
|
||||
if not file_path.is_relative_to(code_dir):
|
||||
return "Error: 'name' argument resulted in path traversal, operation aborted"
|
||||
|
||||
try:
|
||||
with open(file_path, "w+", encoding="utf-8") as f:
|
||||
f.write(code)
|
||||
|
||||
return execute_python_file(str(file_path), agent)
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command(
|
||||
"execute_python_file",
|
||||
"Executes an existing Python file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of te file to execute",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
)
|
||||
@sanitize_path_arg("filename")
|
||||
def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
"""Execute a Python file in a Docker container and return the output
|
||||
|
||||
Args:
|
||||
@@ -23,23 +88,36 @@ def execute_python_file(filename: str) -> str:
|
||||
Returns:
|
||||
str: The output of the file
|
||||
"""
|
||||
logger.info(f"Executing file '{filename}'")
|
||||
logger.info(
|
||||
f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
|
||||
)
|
||||
|
||||
if not filename.endswith(".py"):
|
||||
return "Error: Invalid file type. Only .py files are allowed."
|
||||
|
||||
if not os.path.isfile(filename):
|
||||
return f"Error: File '{filename}' does not exist."
|
||||
file_path = Path(filename)
|
||||
if not file_path.is_file():
|
||||
# Mimic the response that you get from the command line so that it's easier to identify
|
||||
return (
|
||||
f"python: can't open file '{filename}': [Errno 2] No such file or directory"
|
||||
)
|
||||
|
||||
if we_are_running_in_a_docker_container():
|
||||
logger.debug(
|
||||
f"Auto-GPT is running in a Docker container; executing {file_path} directly..."
|
||||
)
|
||||
result = subprocess.run(
|
||||
f"python {filename}", capture_output=True, encoding="utf8", shell=True
|
||||
["python", str(file_path)],
|
||||
capture_output=True,
|
||||
encoding="utf8",
|
||||
cwd=agent.config.workspace_path,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
else:
|
||||
return f"Error: {result.stderr}"
|
||||
|
||||
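The branch above depends on we_are_running_in_a_docker_container(). Its body is not shown in this diff; the usual heuristic, assumed here, is to check for the marker file Docker creates in every container:

import os

def we_are_running_in_a_docker_container() -> bool:
    # /.dockerenv is created by the Docker runtime inside each container.
    return os.path.exists("/.dockerenv")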
logger.debug("Auto-GPT is not running in a Docker container")
|
||||
try:
|
||||
client = docker.from_env()
|
||||
# You can replace this with the desired Python image/version
|
||||
@@ -48,10 +126,10 @@ def execute_python_file(filename: str) -> str:
|
||||
image_name = "python:3-alpine"
|
||||
try:
|
||||
client.images.get(image_name)
|
||||
logger.warn(f"Image '{image_name}' found locally")
|
||||
logger.debug(f"Image '{image_name}' found locally")
|
||||
except ImageNotFound:
|
||||
logger.info(
|
||||
f"Image '{image_name}' not found locally, pulling from Docker Hub"
|
||||
f"Image '{image_name}' not found locally, pulling from Docker Hub..."
|
||||
)
|
||||
# Use the low-level API to stream the pull response
|
||||
low_level_client = docker.APIClient()
|
||||
@@ -63,20 +141,25 @@ def execute_python_file(filename: str) -> str:
|
||||
logger.info(f"{status}: {progress}")
|
||||
elif status:
|
||||
logger.info(status)
|
||||
container = client.containers.run(
|
||||
|
||||
logger.debug(f"Running {file_path} in a {image_name} container...")
|
||||
container: DockerContainer = client.containers.run(
|
||||
image_name,
|
||||
f"python {Path(filename).relative_to(CFG.workspace_path)}",
|
||||
[
|
||||
"python",
|
||||
file_path.relative_to(agent.workspace.root).as_posix(),
|
||||
],
|
||||
volumes={
|
||||
CFG.workspace_path: {
|
||||
str(agent.config.workspace_path): {
|
||||
"bind": "/workspace",
|
||||
"mode": "ro",
|
||||
"mode": "rw",
|
||||
}
|
||||
},
|
||||
working_dir="/workspace",
|
||||
stderr=True,
|
||||
stdout=True,
|
||||
detach=True,
|
||||
)
|
||||
) # type: ignore
|
||||
|
||||
container.wait()
|
||||
logs = container.logs().decode("utf-8")
|
||||
@@ -87,7 +170,7 @@ def execute_python_file(filename: str) -> str:
|
||||
|
||||
return logs
|
||||
|
||||
except docker.errors.DockerException as e:
|
||||
except DockerException as e:
|
||||
logger.warn(
|
||||
"Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
|
||||
)
|
||||
@@ -97,16 +180,43 @@ def execute_python_file(filename: str) -> str:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def validate_command(command: str, config: Config) -> bool:
|
||||
"""Validate a command to ensure it is allowed
|
||||
|
||||
Args:
|
||||
command (str): The command to validate
|
||||
config (Config): The config to use to validate the command
|
||||
|
||||
Returns:
|
||||
bool: True if the command is allowed, False otherwise
|
||||
"""
|
||||
if not command:
|
||||
return False
|
||||
|
||||
command_name = command.split()[0]
|
||||
|
||||
if config.shell_command_control == ALLOWLIST_CONTROL:
|
||||
return command_name in config.shell_allowlist
|
||||
else:
|
||||
return command_name not in config.shell_denylist
|
||||
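A quick usage sketch of validate_command with a stand-in config object (StubConfig below is illustrative, not the real Config class). Note that only the first token of the command line is checked, so compound commands can slip past a denylist:

class StubConfig:
    shell_command_control = "denylist"
    shell_allowlist: list[str] = []
    shell_denylist = ["rm", "sudo"]

print(validate_command("ls -la", StubConfig()))             # True
print(validate_command("sudo reboot", StubConfig()))        # False
print(validate_command("ls && sudo reboot", StubConfig()))  # True: only 'ls' is checked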
|
||||
|
||||
@command(
|
||||
"execute_shell",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
"Executes a Shell Command, non-interactive commands only",
|
||||
{
|
||||
"command_line": {
|
||||
"type": "string",
|
||||
"description": "The command line to execute",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
enabled=lambda config: config.execute_local_commands,
|
||||
disabled_reason="You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
"in your config file: .env - do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell(command_line: str) -> str:
|
||||
def execute_shell(command_line: str, agent: Agent) -> str:
|
||||
"""Execute a shell command and return the output
|
||||
|
||||
Args:
|
||||
@@ -115,11 +225,14 @@ def execute_shell(command_line: str) -> str:
|
||||
Returns:
|
||||
str: The output of the command
|
||||
"""
|
||||
if not validate_command(command_line, agent.config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = Path.cwd()
|
||||
# Change dir into workspace if necessary
|
||||
if not current_dir.is_relative_to(CFG.workspace_path):
|
||||
os.chdir(CFG.workspace_path)
|
||||
if not current_dir.is_relative_to(agent.config.workspace_path):
|
||||
os.chdir(agent.config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
@@ -136,14 +249,20 @@ def execute_shell(command_line: str) -> str:
|
||||
|
||||
@command(
|
||||
"execute_shell_popen",
|
||||
"Execute Shell Command, non-interactive commands only",
|
||||
'"command_line": "<command_line>"',
|
||||
CFG.execute_local_commands,
|
||||
"Executes a Shell Command, non-interactive commands only",
|
||||
{
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
lambda config: config.execute_local_commands,
|
||||
"You are not allowed to run local shell commands. To execute"
|
||||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell_popen(command_line) -> str:
|
||||
def execute_shell_popen(command_line, agent: Agent) -> str:
|
||||
"""Execute a shell command with Popen and returns an english description
|
||||
of the event and the process id
|
||||
|
||||
@@ -153,11 +272,14 @@ def execute_shell_popen(command_line) -> str:
|
||||
Returns:
|
||||
str: Description of the fact that the process started and its id
|
||||
"""
|
||||
if not validate_command(command_line, agent.config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = os.getcwd()
|
||||
# Change dir into workspace if necessary
|
||||
if CFG.workspace_path not in current_dir:
|
||||
os.chdir(CFG.workspace_path)
|
||||
if agent.config.workspace_path not in current_dir:
|
||||
os.chdir(agent.config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
|
||||
@@ -1,83 +1,144 @@
|
||||
"""File operations for AutoGPT"""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import hashlib
|
||||
import os
|
||||
import os.path
|
||||
from typing import Generator
|
||||
from pathlib import Path
|
||||
from typing import Generator, Literal
|
||||
|
||||
import requests
|
||||
from colorama import Back, Fore
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.logs import logger
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import readable_file_size
|
||||
from autogpt.memory.vector import MemoryItem, VectorMemory
|
||||
|
||||
CFG = Config()
|
||||
from .decorators import sanitize_path_arg
|
||||
from .file_operations_utils import read_textual_file
|
||||
|
||||
Operation = Literal["write", "append", "delete"]
|
||||
|
||||
|
||||
def check_duplicate_operation(operation: str, filename: str) -> bool:
|
||||
"""Check if the operation has already been performed on the given file
|
||||
def text_checksum(text: str) -> str:
|
||||
"""Get the hex checksum for the given text."""
|
||||
return hashlib.md5(text.encode("utf-8")).hexdigest()
|
||||
|
||||
Args:
|
||||
operation (str): The operation to check for
|
||||
filename (str): The name of the file to check for
|
||||
|
||||
def operations_from_log(
|
||||
log_path: str,
|
||||
) -> Generator[tuple[Operation, str, str | None], None, None]:
|
||||
"""Parse the file operations log and return a tuple containing the log entries"""
|
||||
try:
|
||||
log = open(log_path, "r", encoding="utf-8")
|
||||
except FileNotFoundError:
|
||||
return
|
||||
|
||||
for line in log:
|
||||
line = line.replace("File Operation Logger", "").strip()
|
||||
if not line:
|
||||
continue
|
||||
operation, tail = line.split(": ", maxsplit=1)
|
||||
operation = operation.strip()
|
||||
if operation in ("write", "append"):
|
||||
try:
|
||||
path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
|
||||
except ValueError:
|
||||
logger.warn(f"File log entry lacks checksum: '{line}'")
|
||||
path, checksum = tail.strip(), None
|
||||
yield (operation, path, checksum)
|
||||
elif operation == "delete":
|
||||
yield (operation, tail.strip(), None)
|
||||
|
||||
log.close()
|
||||
|
||||
|
||||
def file_operations_state(log_path: str) -> dict[str, str]:
|
||||
"""Iterates over the operations log and returns the expected state.
|
||||
|
||||
Parses a log file at config.file_logger_path to construct a dictionary that maps
|
||||
each file path written or appended to its checksum. Deleted files are removed
|
||||
from the dictionary.
|
||||
|
||||
Returns:
|
||||
bool: True if the operation has already been performed on the file
|
||||
A dictionary mapping file paths to their checksums.
|
||||
|
||||
Raises:
|
||||
FileNotFoundError: If config.file_logger_path is not found.
|
||||
ValueError: If the log file content is not in the expected format.
|
||||
"""
|
||||
log_content = read_file(CFG.file_logger_path)
|
||||
log_entry = f"{operation}: {filename}\n"
|
||||
return log_entry in log_content
|
||||
state = {}
|
||||
for operation, path, checksum in operations_from_log(log_path):
|
||||
if operation in ("write", "append"):
|
||||
state[path] = checksum
|
||||
elif operation == "delete":
|
||||
del state[path]
|
||||
return state
|
||||
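A worked example of the log format these two functions consume. Each entry is '<operation>: <path>[ #<checksum>]', and reducing the log keeps only files that still exist (paths and checksums below are made up):

log_lines = [
    "write: notes.txt #aaa111",
    "append: notes.txt #bbb222",
    "delete: notes.txt",
    "write: plan.md #ccc333",
]
state = {}
for line in log_lines:
    operation, tail = line.split(": ", maxsplit=1)
    if operation in ("write", "append"):
        path, _, checksum = tail.partition(" #")
        state[path] = checksum or None
    elif operation == "delete":
        state.pop(tail, None)  # the real code uses del and assumes the key exists
print(state)  # {'plan.md': 'ccc333'}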
|
||||
|
||||
def log_operation(operation: str, filename: str) -> None:
|
||||
@sanitize_path_arg("filename")
|
||||
def is_duplicate_operation(
|
||||
operation: Operation, filename: str, agent: Agent, checksum: str | None = None
|
||||
) -> bool:
|
||||
"""Check if the operation has already been performed
|
||||
|
||||
Args:
|
||||
operation: The operation to check for
|
||||
filename: The name of the file to check for
|
||||
agent: The agent
|
||||
checksum: The checksum of the contents to be written
|
||||
|
||||
Returns:
|
||||
True if the operation has already been performed on the file
|
||||
"""
|
||||
# Make the filename into a relative path if possible
|
||||
with contextlib.suppress(ValueError):
|
||||
filename = str(Path(filename).relative_to(agent.workspace.root))
|
||||
|
||||
state = file_operations_state(agent.config.file_logger_path)
|
||||
if operation == "delete" and filename not in state:
|
||||
return True
|
||||
if operation == "write" and state.get(filename) == checksum:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
@sanitize_path_arg("filename")
|
||||
def log_operation(
|
||||
operation: Operation, filename: str, agent: Agent, checksum: str | None = None
|
||||
) -> None:
|
||||
"""Log the file operation to the file_logger.txt
|
||||
|
||||
Args:
|
||||
operation (str): The operation to log
|
||||
filename (str): The name of the file the operation was performed on
|
||||
operation: The operation to log
|
||||
filename: The name of the file the operation was performed on
|
||||
checksum: The checksum of the contents to be written
|
||||
"""
|
||||
log_entry = f"{operation}: {filename}\n"
|
||||
append_to_file(CFG.file_logger_path, log_entry, should_log=False)
|
||||
# Make the filename into a relative path if possible
|
||||
with contextlib.suppress(ValueError):
|
||||
filename = str(Path(filename).relative_to(agent.workspace.root))
|
||||
|
||||
log_entry = f"{operation}: {filename}"
|
||||
if checksum is not None:
|
||||
log_entry += f" #{checksum}"
|
||||
logger.debug(f"Logging file operation: {log_entry}")
|
||||
append_to_file(
|
||||
agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
|
||||
)
|
||||
|
||||
|
||||
def split_file(
|
||||
content: str, max_length: int = 4000, overlap: int = 0
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Split text into chunks of a specified maximum length with a specified overlap
|
||||
between chunks.
|
||||
|
||||
:param content: The input text to be split into chunks
|
||||
:param max_length: The maximum length of each chunk,
|
||||
default is 4000 (about 1k tokens)
|
||||
:param overlap: The number of overlapping characters between chunks,
|
||||
default is no overlap
|
||||
:return: A generator yielding chunks of text
|
||||
"""
|
||||
start = 0
|
||||
content_length = len(content)
|
||||
|
||||
while start < content_length:
|
||||
end = start + max_length
|
||||
if end + overlap < content_length:
|
||||
chunk = content[start : end + overlap - 1]
|
||||
else:
|
||||
chunk = content[start:content_length]
|
||||
|
||||
# Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
|
||||
if len(chunk) <= overlap:
|
||||
break
|
||||
|
||||
yield chunk
|
||||
start += max_length - overlap
|
||||
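A tiny worked example of the chunking above, with the sizes shrunk so the overlapping windows are visible:

content = "abcdefghijkl"
print(list(split_file(content, max_length=5, overlap=2)))
# ['abcdef', 'defghi', 'ghijkl', 'jkl']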
|
||||
|
||||
@command("read_file", "Read file", '"filename": "<filename>"')
|
||||
def read_file(filename: str) -> str:
|
||||
@command(
|
||||
"read_file",
|
||||
"Read an existing file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The path of the file to read",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
@sanitize_path_arg("filename")
|
||||
def read_file(filename: str, agent: Agent) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
Args:
|
||||
@@ -87,49 +148,63 @@ def read_file(filename: str) -> str:
|
||||
str: The contents of the file
|
||||
"""
|
||||
try:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
content = read_textual_file(filename, logger)
|
||||
|
||||
# TODO: invalidate/update memory when file is edited
|
||||
file_memory = MemoryItem.from_text_file(content, filename, agent.config)
|
||||
if len(file_memory.chunks) > 1:
|
||||
return file_memory.summary
|
||||
|
||||
return content
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
def ingest_file(
|
||||
filename: str, memory, max_length: int = 4000, overlap: int = 200
|
||||
filename: str,
|
||||
memory: VectorMemory,
|
||||
) -> None:
|
||||
"""
|
||||
Ingest a file by reading its content, splitting it into chunks with a specified
|
||||
maximum length and overlap, and adding the chunks to the memory storage.
|
||||
|
||||
:param filename: The name of the file to ingest
|
||||
:param memory: An object with an add() method to store the chunks in memory
|
||||
:param max_length: The maximum length of each chunk, default is 4000
|
||||
:param overlap: The number of overlapping characters between chunks, default is 200
|
||||
Args:
|
||||
filename: The name of the file to ingest
|
||||
memory: An object with an add() method to store the chunks in memory
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Working with file {filename}")
|
||||
logger.info(f"Ingesting file {filename}")
|
||||
content = read_file(filename)
|
||||
content_length = len(content)
|
||||
logger.info(f"File length: {content_length} characters")
|
||||
|
||||
chunks = list(split_file(content, max_length=max_length, overlap=overlap))
|
||||
# TODO: differentiate between different types of files
|
||||
file_memory = MemoryItem.from_text_file(content, filename)
|
||||
logger.debug(f"Created memory: {file_memory.dump(True)}")
|
||||
memory.add(file_memory)
|
||||
|
||||
num_chunks = len(chunks)
|
||||
for i, chunk in enumerate(chunks):
|
||||
logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
|
||||
memory_to_add = (
|
||||
f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
|
||||
)
|
||||
|
||||
memory.add(memory_to_add)
|
||||
|
||||
logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
|
||||
except Exception as e:
|
||||
logger.info(f"Error while ingesting file '{filename}': {str(e)}")
|
||||
logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
|
||||
except Exception as err:
|
||||
logger.warn(f"Error while ingesting file '{filename}': {err}")
|
||||
|
||||
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str) -> str:
|
||||
@command(
|
||||
"write_to_file",
|
||||
"Writes to a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to write to",
|
||||
"required": True,
|
||||
},
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "The text to write to the file",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
aliases=["write_file", "create_file"],
|
||||
)
|
||||
@sanitize_path_arg("filename")
|
||||
def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
Args:
|
||||
@@ -139,23 +214,40 @@ def write_to_file(filename: str, text: str) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if check_duplicate_operation("write", filename):
|
||||
checksum = text_checksum(text)
|
||||
if is_duplicate_operation("write", filename, agent, checksum):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename)
|
||||
log_operation("write", filename, agent, checksum)
|
||||
return "File written to successfully."
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command(
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
"append_to_file",
|
||||
"Appends to a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to write to",
|
||||
"required": True,
|
||||
},
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "The text to write to the file",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
)
|
||||
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
@sanitize_path_arg("filename")
|
||||
def append_to_file(
|
||||
filename: str, text: str, agent: Agent, should_log: bool = True
|
||||
) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
Args:
|
||||
@@ -169,19 +261,32 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "a") as f:
|
||||
with open(filename, "a", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
|
||||
if should_log:
|
||||
log_operation("append", filename)
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("append", filename, agent, checksum=checksum)
|
||||
|
||||
return "Text appended successfully."
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str) -> str:
|
||||
@command(
|
||||
"delete_file",
|
||||
"Deletes a file",
|
||||
{
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to delete",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
@sanitize_path_arg("filename")
|
||||
def delete_file(filename: str, agent: Agent) -> str:
|
||||
"""Delete a file
|
||||
|
||||
Args:
|
||||
@@ -190,19 +295,30 @@ def delete_file(filename: str) -> str:
|
||||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if check_duplicate_operation("delete", filename):
|
||||
if is_duplicate_operation("delete", filename, agent):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename)
|
||||
log_operation("delete", filename, agent)
|
||||
return "File deleted successfully."
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command("search_files", "Search Files", '"directory": "<directory>"')
|
||||
def search_files(directory: str) -> list[str]:
|
||||
"""Search for files in a directory
|
||||
@command(
|
||||
"list_files",
|
||||
"Lists Files in a Directory",
|
||||
{
|
||||
"directory": {
|
||||
"type": "string",
|
||||
"description": "The directory to list files in",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
@sanitize_path_arg("directory")
|
||||
def list_files(directory: str, agent: Agent) -> list[str]:
|
||||
"""lists files in a directory recursively
|
||||
|
||||
Args:
|
||||
directory (str): The directory to search in
|
||||
@@ -217,56 +333,8 @@ def search_files(directory: str) -> list[str]:
|
||||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), CFG.workspace_path
|
||||
os.path.join(root, file), agent.config.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
return found_files
|
||||
|
||||
|
||||
@command(
|
||||
"download_file",
|
||||
"Download File",
|
||||
'"url": "<url>", "filename": "<filename>"',
|
||||
CFG.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
filename (str): Filename to save the file as
|
||||
"""
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
session.mount("http://", adapter)
|
||||
session.mount("https://", adapter)
|
||||
|
||||
total_size = 0
|
||||
downloaded_size = 0
|
||||
|
||||
with session.get(url, allow_redirects=True, stream=True) as r:
|
||||
r.raise_for_status()
|
||||
total_size = int(r.headers.get("Content-Length", 0))
|
||||
downloaded_size = 0
|
||||
|
||||
with open(filename, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded_size += len(chunk)
|
||||
|
||||
# Update the progress message
|
||||
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
|
||||
spinner.update_message(f"{message} {progress}")
|
||||
|
||||
return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
|
||||
except requests.HTTPError as e:
|
||||
return f"Got an HTTP Error whilst trying to download file: {e}"
|
||||
except Exception as e:
|
||||
return "Error: " + str(e)
|
||||
|
||||
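The retry wiring used above, shown in isolation: mounting an HTTPAdapter with a urllib3 Retry gives the session automatic retries with increasing backoff on transient gateway errors. The URL below is a placeholder:

import requests
from requests.adapters import HTTPAdapter, Retry

session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)

with session.get("https://example.com/file.bin", stream=True) as r:
    r.raise_for_status()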
autogpt/commands/file_operations_utils.py (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
import charset_normalizer
|
||||
import docx
|
||||
import markdown
|
||||
import PyPDF2
|
||||
import yaml
|
||||
from bs4 import BeautifulSoup
|
||||
from pylatexenc.latex2text import LatexNodes2Text
|
||||
|
||||
from autogpt import logs
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
class ParserStrategy:
|
||||
def read(self, file_path: str) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
# Basic text file reading
|
||||
class TXTParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
charset_match = charset_normalizer.from_path(file_path).best()
|
||||
logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
|
||||
return str(charset_match)
|
||||
|
||||
|
||||
# Reading text from binary file using pdf parser
|
||||
class PDFParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
parser = PyPDF2.PdfReader(file_path)
|
||||
text = ""
|
||||
for page_idx in range(len(parser.pages)):
|
||||
text += parser.pages[page_idx].extract_text()
|
||||
return text
|
||||
|
||||
|
||||
# Reading text from binary file using docs parser
|
||||
class DOCXParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
doc_file = docx.Document(file_path)
|
||||
text = ""
|
||||
for para in doc_file.paragraphs:
|
||||
text += para.text
|
||||
return text
|
||||
|
||||
|
||||
# Reading as dictionary and returning string format
|
||||
class JSONParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
data = json.load(f)
|
||||
text = str(data)
|
||||
return text
|
||||
|
||||
|
||||
class XMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
soup = BeautifulSoup(f, "xml")
|
||||
text = soup.get_text()
|
||||
return text
|
||||
|
||||
|
||||
# Reading as dictionary and returning string format
|
||||
class YAMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
text = str(data)
|
||||
return text
|
||||
|
||||
|
||||
class HTMLParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
soup = BeautifulSoup(f, "html.parser")
|
||||
text = soup.get_text()
|
||||
return text
|
||||
|
||||
|
||||
class MarkdownParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
html = markdown.markdown(f.read())
|
||||
text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))
|
||||
return text
|
||||
|
||||
|
||||
class LaTeXParser(ParserStrategy):
|
||||
def read(self, file_path: str) -> str:
|
||||
with open(file_path, "r") as f:
|
||||
latex = f.read()
|
||||
text = LatexNodes2Text().latex_to_text(latex)
|
||||
return text
|
||||
|
||||
|
||||
class FileContext:
|
||||
def __init__(self, parser: ParserStrategy, logger: logs.Logger):
|
||||
self.parser = parser
|
||||
self.logger = logger
|
||||
|
||||
def set_parser(self, parser: ParserStrategy) -> None:
|
||||
self.logger.debug(f"Setting Context Parser to {parser}")
|
||||
self.parser = parser
|
||||
|
||||
def read_file(self, file_path) -> str:
|
||||
self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
|
||||
return self.parser.read(file_path)
|
||||
|
||||
|
||||
extension_to_parser = {
|
||||
".txt": TXTParser(),
|
||||
".csv": TXTParser(),
|
||||
".pdf": PDFParser(),
|
||||
".docx": DOCXParser(),
|
||||
".json": JSONParser(),
|
||||
".xml": XMLParser(),
|
||||
".yaml": YAMLParser(),
|
||||
".yml": YAMLParser(),
|
||||
".html": HTMLParser(),
|
||||
".htm": HTMLParser(),
|
||||
".xhtml": HTMLParser(),
|
||||
".md": MarkdownParser(),
|
||||
".markdown": MarkdownParser(),
|
||||
".tex": LaTeXParser(),
|
||||
}
|
||||
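Because dispatch is a plain dict of ParserStrategy instances, supporting a new format is one class and one mapping entry. A hypothetical addition (an .rst parser is not part of this diff):

class RSTParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        # Naive fallback: treat reStructuredText as plain text.
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            return f.read()

extension_to_parser[".rst"] = RSTParser()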
|
||||
|
||||
def is_file_binary_fn(file_path: str):
|
||||
"""Given a file path load all its content and checks if the null bytes is present
|
||||
|
||||
Args:
|
||||
file_path (_type_): _description_
|
||||
|
||||
Returns:
|
||||
bool: is_binary
|
||||
"""
|
||||
with open(file_path, "rb") as f:
|
||||
file_data = f.read()
|
||||
if b"\x00" in file_data:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def read_textual_file(file_path: str, logger: logs.Logger) -> str:
|
||||
if not os.path.isfile(file_path):
|
||||
raise FileNotFoundError(
|
||||
f"read_file {file_path} failed: no such file or directory"
|
||||
)
|
||||
is_binary = is_file_binary_fn(file_path)
|
||||
file_extension = os.path.splitext(file_path)[1].lower()
|
||||
parser = extension_to_parser.get(file_extension)
|
||||
if not parser:
|
||||
if is_binary:
|
||||
raise ValueError(f"Unsupported binary file format: {file_extension}")
|
||||
# fallback to txt file parser (to support script and code files loading)
|
||||
parser = TXTParser()
|
||||
file_context = FileContext(parser, logger)
|
||||
return file_context.read_file(file_path)
|
||||
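Usage sketch for the dispatch-with-fallback logic above, assuming the files exist in the working directory; any logger with a debug() method will do:

import logging

logger = logging.getLogger(__name__)
print(read_textual_file("notes.md", logger))   # dispatched to MarkdownParser
print(read_textual_file("script.py", logger))  # no mapping, falls back to TXTParser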
@@ -1,22 +1,35 @@
|
||||
"""Git operations for autogpt"""
|
||||
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
from .decorators import sanitize_path_arg
|
||||
|
||||
|
||||
@command(
|
||||
"clone_repository",
|
||||
"Clone Repository",
|
||||
'"url": "<repository_url>", "clone_path": "<clone_path>"',
|
||||
CFG.github_username and CFG.github_api_key,
|
||||
"Clones a Repository",
|
||||
{
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The URL of the repository to clone",
|
||||
"required": True,
|
||||
},
|
||||
"clone_path": {
|
||||
"type": "string",
|
||||
"description": "The path to clone the repository to",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
lambda config: bool(config.github_username and config.github_api_key),
|
||||
"Configure github_username and github_api_key.",
|
||||
)
|
||||
@sanitize_path_arg("clone_path")
|
||||
@validate_url
|
||||
def clone_repository(url: str, clone_path: str) -> str:
|
||||
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
|
||||
"""Clone a GitHub repository locally.
|
||||
|
||||
Args:
|
||||
@@ -27,7 +40,11 @@ def clone_repository(url: str, clone_path: str) -> str:
|
||||
str: The result of the clone operation.
|
||||
"""
|
||||
split_url = url.split("//")
|
||||
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
|
||||
auth_repo_url = (
|
||||
f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
|
||||
split_url
|
||||
)
|
||||
)
|
||||
try:
|
||||
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
|
||||
return f"""Cloned {url} to {clone_path}"""
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
""" Image Generation Module for AutoGPT."""
|
||||
import io
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from base64 import b64decode
|
||||
|
||||
@@ -7,15 +9,25 @@ import openai
|
||||
import requests
|
||||
from PIL import Image
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
|
||||
def generate_image(prompt: str, size: int = 256) -> str:
|
||||
@command(
|
||||
"generate_image",
|
||||
"Generates an Image",
|
||||
{
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "The prompt used to generate the image",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
lambda config: bool(config.image_provider),
|
||||
"Requires a image provider to be set.",
|
||||
)
|
||||
def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
|
||||
"""Generate an image from a prompt.
|
||||
|
||||
Args:
|
||||
@@ -25,21 +37,21 @@ def generate_image(prompt: str, size: int = 256) -> str:
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
filename = agent.config.workspace_path / f"{str(uuid.uuid4())}.jpg"
|
||||
|
||||
# DALL-E
|
||||
if CFG.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size)
|
||||
if agent.config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, agent)
|
||||
# HuggingFace
|
||||
elif CFG.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename)
|
||||
elif agent.config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, agent)
|
||||
# SD WebUI
|
||||
elif CFG.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, size)
|
||||
elif agent.config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, agent, size)
|
||||
return "No Image Provider Set"
|
||||
|
||||
|
||||
def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
|
||||
"""Generate an image with HuggingFace's API.
|
||||
|
||||
Args:
|
||||
@@ -49,35 +61,57 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
|
||||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
API_URL = (
|
||||
f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
|
||||
)
|
||||
if CFG.huggingface_api_token is None:
|
||||
API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
|
||||
if agent.config.huggingface_api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {CFG.huggingface_api_token}",
|
||||
"Authorization": f"Bearer {agent.config.huggingface_api_token}",
|
||||
"X-Use-Cache": "false",
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
API_URL,
|
||||
headers=headers,
|
||||
json={
|
||||
"inputs": prompt,
|
||||
},
|
||||
)
|
||||
retry_count = 0
|
||||
while retry_count < 10:
|
||||
response = requests.post(
|
||||
API_URL,
|
||||
headers=headers,
|
||||
json={
|
||||
"inputs": prompt,
|
||||
},
|
||||
)
|
||||
|
||||
image = Image.open(io.BytesIO(response.content))
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
if response.ok:
|
||||
try:
|
||||
image = Image.open(io.BytesIO(response.content))
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
image.save(filename)
|
||||
return f"Saved to disk:{filename}"
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
break
|
||||
else:
|
||||
try:
|
||||
error = json.loads(response.text)
|
||||
if "estimated_time" in error:
|
||||
delay = error["estimated_time"]
|
||||
logger.debug(response.text)
|
||||
logger.info("Retrying in", delay)
|
||||
time.sleep(delay)
|
||||
else:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
break
|
||||
|
||||
image.save(filename)
|
||||
retry_count += 1
|
||||
|
||||
return f"Saved to disk:{filename}"
|
||||
return f"Error creating image."
|
||||
|
||||
|
||||
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
def generate_image_with_dalle(
|
||||
prompt: str, filename: str, size: int, agent: Agent
|
||||
) -> str:
|
||||
"""Generate an image with DALL-E.
|
||||
|
||||
Args:
|
||||
@@ -102,7 +136,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
n=1,
|
||||
size=f"{size}x{size}",
|
||||
response_format="b64_json",
|
||||
api_key=CFG.openai_api_key,
|
||||
api_key=agent.config.openai_api_key,
|
||||
)
|
||||
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
@@ -118,6 +152,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
|
||||
def generate_image_with_sd_webui(
|
||||
prompt: str,
|
||||
filename: str,
|
||||
agent: Agent,
|
||||
size: int = 512,
|
||||
negative_prompt: str = "",
|
||||
extra: dict = {},
|
||||
@@ -134,19 +169,19 @@ def generate_image_with_sd_webui(
|
||||
"""
|
||||
# Create a session and set the basic auth if needed
|
||||
s = requests.Session()
|
||||
if CFG.sd_webui_auth:
|
||||
username, password = CFG.sd_webui_auth.split(":")
|
||||
if agent.config.sd_webui_auth:
|
||||
username, password = agent.config.sd_webui_auth.split(":")
|
||||
s.auth = (username, password or "")
|
||||
|
||||
# Generate the images
|
||||
response = requests.post(
|
||||
f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
|
||||
f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
"sampler_index": "DDIM",
|
||||
"steps": 20,
|
||||
"cfg_scale": 7.0,
|
||||
"config_scale": 7.0,
|
||||
"width": size,
|
||||
"height": size,
|
||||
"n_iter": 1,
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm import call_ai_function
|
||||
|
||||
|
||||
@command(
|
||||
"improve_code",
|
||||
"Get Improved Code",
|
||||
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
|
||||
)
|
||||
def improve_code(suggestions: list[str], code: str) -> str:
|
||||
"""
|
||||
A function that takes in code and suggestions and returns a response from create
|
||||
chat completion api call.
|
||||
|
||||
Parameters:
|
||||
suggestions (list): A list of suggestions around what needs to be improved.
|
||||
code (str): Code to be improved.
|
||||
Returns:
|
||||
A result string from create chat completion. Improved code in response.
|
||||
"""
|
||||
|
||||
function_string = (
|
||||
"def generate_improved_code(suggestions: list[str], code: str) -> str:"
|
||||
)
|
||||
args = [json.dumps(suggestions), code]
|
||||
description_string = (
|
||||
"Improves the provided code based on the suggestions"
|
||||
" provided, making no other changes."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string)
|
||||
autogpt/commands/task_statuses.py (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
"""Task Statuses module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import NoReturn
|
||||
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
||||
@command(
|
||||
"goals_accomplished",
|
||||
"Goals are accomplished and there is nothing left to do",
|
||||
{
|
||||
"reason": {
|
||||
"type": "string",
|
||||
"description": "A summary to the user of how the goals were accomplished",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
)
|
||||
def task_complete(reason: str, agent: Agent) -> NoReturn:
|
||||
"""
|
||||
A function that takes in a string and exits the program
|
||||
|
||||
Parameters:
|
||||
reason (str): A summary to the user of how the goals were accomplished.
|
||||
Returns:
|
||||
NoReturn: the process exits after logging the reason.
|
||||
"""
|
||||
logger.info(title="Shutting down...\n", message=reason)
|
||||
quit()
|
||||
@@ -1,41 +0,0 @@
|
||||
"""A module that contains a command to send a tweet."""
|
||||
import os
|
||||
|
||||
import tweepy
|
||||
|
||||
from autogpt.commands.command import command
|
||||
|
||||
|
||||
@command(
|
||||
"send_tweet",
|
||||
"Send Tweet",
|
||||
'"tweet_text": "<tweet_text>"',
|
||||
)
|
||||
def send_tweet(tweet_text: str) -> str:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
|
||||
Args:
|
||||
tweet_text (str): Text to be tweeted.
|
||||
|
||||
Returns:
|
||||
A result from sending the tweet.
|
||||
"""
|
||||
consumer_key = os.environ.get("TW_CONSUMER_KEY")
|
||||
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
|
||||
access_token = os.environ.get("TW_ACCESS_TOKEN")
|
||||
access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
|
||||
# Authenticate to Twitter
|
||||
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
|
||||
auth.set_access_token(access_token, access_token_secret)
|
||||
|
||||
# Create API object
|
||||
api = tweepy.API(auth)
|
||||
|
||||
# Send tweet
|
||||
try:
|
||||
api.update_status(tweet_text)
|
||||
return "Tweet sent successfully!"
|
||||
except tweepy.TweepyException as e:
|
||||
return f"Error sending tweet: {e.reason}"
|
||||
@@ -1,82 +0,0 @@
|
||||
"""Web scraping commands using Playwright"""
|
||||
from __future__ import annotations
|
||||
|
||||
from autogpt.logs import logger
|
||||
|
||||
try:
|
||||
from playwright.sync_api import sync_playwright
|
||||
except ImportError:
|
||||
logger.info(
|
||||
"Playwright not installed. Please install it with 'pip install playwright' to use."
|
||||
)
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
|
||||
|
||||
def scrape_text(url: str) -> str:
|
||||
"""Scrape text from a webpage
|
||||
|
||||
Args:
|
||||
url (str): The URL to scrape text from
|
||||
|
||||
Returns:
|
||||
str: The scraped text
|
||||
"""
|
||||
with sync_playwright() as p:
|
||||
browser = p.chromium.launch()
|
||||
page = browser.new_page()
|
||||
|
||||
try:
|
||||
page.goto(url)
|
||||
html_content = page.content()
|
||||
soup = BeautifulSoup(html_content, "html.parser")
|
||||
|
||||
for script in soup(["script", "style"]):
|
||||
script.extract()
|
||||
|
||||
text = soup.get_text()
|
||||
lines = (line.strip() for line in text.splitlines())
|
||||
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
|
||||
text = "\n".join(chunk for chunk in chunks if chunk)
|
||||
|
||||
except Exception as e:
|
||||
text = f"Error: {str(e)}"
|
||||
|
||||
finally:
|
||||
browser.close()
|
||||
|
||||
return text
|
||||
|
||||
|
||||
def scrape_links(url: str) -> str | list[str]:
|
||||
"""Scrape links from a webpage
|
||||
|
||||
Args:
|
||||
url (str): The URL to scrape links from
|
||||
|
||||
Returns:
|
||||
Union[str, List[str]]: The scraped links
|
||||
"""
|
||||
with sync_playwright() as p:
|
||||
browser = p.chromium.launch()
|
||||
page = browser.new_page()
|
||||
|
||||
try:
|
||||
page.goto(url)
|
||||
html_content = page.content()
|
||||
soup = BeautifulSoup(html_content, "html.parser")
|
||||
|
||||
for script in soup(["script", "style"]):
|
||||
script.extract()
|
||||
|
||||
hyperlinks = extract_hyperlinks(soup, url)
|
||||
formatted_links = format_hyperlinks(hyperlinks)
|
||||
|
||||
except Exception as e:
|
||||
formatted_links = f"Error: {str(e)}"
|
||||
|
||||
finally:
|
||||
browser.close()
|
||||
|
||||
return formatted_links
|
||||
@@ -1,112 +0,0 @@
|
||||
"""Browse a webpage and summarize it using the LLM model"""
|
||||
from __future__ import annotations
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from requests import Response
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
CFG = Config()
|
||||
|
||||
session = requests.Session()
|
||||
session.headers.update({"User-Agent": CFG.user_agent})
|
||||
|
||||
|
||||
@validate_url
|
||||
def get_response(
|
||||
url: str, timeout: int = 10
|
||||
) -> tuple[None, str] | tuple[Response, None]:
|
||||
"""Get the response from a URL
|
||||
|
||||
Args:
|
||||
url (str): The URL to get the response from
|
||||
timeout (int): The timeout for the HTTP request
|
||||
|
||||
Returns:
|
||||
tuple[None, str] | tuple[Response, None]: The response and error message
|
||||
|
||||
Raises:
|
||||
ValueError: If the URL is invalid
|
||||
requests.exceptions.RequestException: If the HTTP request fails
|
||||
"""
|
||||
try:
|
||||
response = session.get(url, timeout=timeout)
|
||||
|
||||
# Check if the response contains an HTTP error
|
||||
if response.status_code >= 400:
|
||||
return None, f"Error: HTTP {str(response.status_code)} error"
|
||||
|
||||
return response, None
|
||||
except ValueError as ve:
|
||||
# Handle invalid URL format
|
||||
return None, f"Error: {str(ve)}"
|
||||
|
||||
except requests.exceptions.RequestException as re:
|
||||
# Handle exceptions related to the HTTP request
|
||||
# (e.g., connection errors, timeouts, etc.)
|
||||
return None, f"Error: {str(re)}"
|
||||
|
||||
|
||||
def scrape_text(url: str) -> str:
|
||||
"""Scrape text from a webpage
|
||||
|
||||
Args:
|
||||
url (str): The URL to scrape text from
|
||||
|
||||
Returns:
|
||||
str: The scraped text
|
||||
"""
|
||||
response, error_message = get_response(url)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
return "Error: Could not get response"
|
||||
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
|
||||
for script in soup(["script", "style"]):
|
||||
script.extract()
|
||||
|
||||
text = soup.get_text()
|
||||
lines = (line.strip() for line in text.splitlines())
|
||||
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
|
||||
text = "\n".join(chunk for chunk in chunks if chunk)
|
||||
|
||||
return text
|
||||
|
||||
|
||||
def scrape_links(url: str) -> str | list[str]:
|
||||
"""Scrape links from a webpage
|
||||
|
||||
Args:
|
||||
url (str): The URL to scrape links from
|
||||
|
||||
Returns:
|
||||
str | list[str]: The scraped links
|
||||
"""
|
||||
response, error_message = get_response(url)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
return "Error: Could not get response"
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
|
||||
for script in soup(["script", "style"]):
|
||||
script.extract()
|
||||
|
||||
hyperlinks = extract_hyperlinks(soup, url)
|
||||
|
||||
return format_hyperlinks(hyperlinks)
|
||||
|
||||
|
||||
def create_message(chunk, question):
|
||||
"""Create a message for the user to summarize a chunk of text"""
|
||||
return {
|
||||
"role": "user",
|
||||
"content": f'"""{chunk}""" Using the above text, answer the following'
|
||||
f' question: "{question}" -- if the question cannot be answered using the'
|
||||
" text, summarize the text.",
|
||||
}
|
||||
@@ -2,17 +2,30 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from itertools import islice
|
||||
|
||||
from duckduckgo_search import ddg
|
||||
from duckduckgo_search import DDGS
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
|
||||
CFG = Config()
|
||||
DUCKDUCKGO_MAX_ATTEMPTS = 3
|
||||
|
||||
|
||||
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
|
||||
def google_search(query: str, num_results: int = 8) -> str:
|
||||
@command(
|
||||
"web_search",
|
||||
"Searches the web",
|
||||
{
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
aliases=["search"],
|
||||
)
|
||||
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
@@ -23,15 +36,20 @@ def google_search(query: str, num_results: int = 8) -> str:
|
||||
str: The results of the search.
|
||||
"""
|
||||
search_results = []
|
||||
if not query:
|
||||
return json.dumps(search_results)
|
||||
attempts = 0
|
||||
|
||||
results = ddg(query, max_results=num_results)
|
||||
if not results:
|
||||
return json.dumps(search_results)
|
||||
while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
|
||||
if not query:
|
||||
return json.dumps(search_results)
|
||||
|
||||
for j in results:
|
||||
search_results.append(j)
|
||||
results = DDGS().text(query)
|
||||
search_results = list(islice(results, num_results))
|
||||
|
||||
if search_results:
|
||||
break
|
||||
|
||||
time.sleep(1)
|
||||
attempts += 1
|
||||
|
||||
results = json.dumps(search_results, ensure_ascii=False, indent=4)
|
||||
return safe_google_results(results)
|
||||
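The duckduckgo_search flow above, in isolation. Network access is required, and the exact result-dict keys depend on the installed duckduckgo_search version; 'title', 'href' and 'body' are the keys current at the time of this change:

from itertools import islice

from duckduckgo_search import DDGS

results = list(islice(DDGS().text("AutoGPT"), 3))
for result in results:
    print(result.get("title"), result.get("href"))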
@@ -40,11 +58,19 @@ def google_search(query: str, num_results: int = 8) -> str:
|
||||
@command(
|
||||
"google",
|
||||
"Google Search",
|
||||
'"query": "<query>"',
|
||||
bool(CFG.google_api_key),
|
||||
"Configure google_api_key.",
|
||||
{
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query",
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
lambda config: bool(config.google_api_key)
|
||||
and bool(config.google_custom_search_engine_id),
|
||||
"Configure google_api_key and custom_search_engine_id.",
|
||||
aliases=["search"],
|
||||
)
|
||||
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
|
||||
def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
|
||||
"""Return the results of a Google search using the official Google API
|
||||
|
||||
Args:
|
||||
@@ -60,8 +86,8 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
|
||||
|
||||
try:
|
||||
# Get the Google API key and Custom Search Engine ID from the config file
|
||||
api_key = CFG.google_api_key
|
||||
custom_search_engine_id = CFG.custom_search_engine_id
|
||||
api_key = agent.config.google_api_key
|
||||
custom_search_engine_id = agent.config.google_custom_search_engine_id
|
||||
|
||||
# Initialize the Custom Search API service
|
||||
service = build("customsearch", "v1", developerKey=api_key)
|
||||
@@ -100,7 +126,7 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
|
||||
|
||||
def safe_google_results(results: str | list) -> str:
|
||||
"""
|
||||
Return the results of a google search in a safe format.
|
||||
Return the results of a Google search in a safe format.
|
||||
|
||||
Args:
|
||||
results (str | list): The search results.
|
||||
@@ -110,7 +136,7 @@ def safe_google_results(results: str | list) -> str:
|
||||
"""
|
||||
if isinstance(results, list):
|
||||
safe_message = json.dumps(
|
||||
[result.encode("utf-8", "ignore") for result in results]
|
||||
[result.encode("utf-8", "ignore").decode("utf-8") for result in results]
|
||||
)
|
||||
else:
|
||||
safe_message = results.encode("utf-8", "ignore").decode("utf-8")
|
||||
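Why the added .decode("utf-8") matters: encode(..., "ignore") yields bytes, which json.dumps cannot serialize, and the round-trip also drops anything that cannot be encoded. For example:

s = "caf\u00e9 \udcff"  # ends with an unpaired surrogate
clean = s.encode("utf-8", "ignore").decode("utf-8")
print(repr(clean))  # 'café ' -- the surrogate is silently dropped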
@@ -4,37 +4,55 @@ from __future__ import annotations
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
from typing import Optional, Type
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium import webdriver
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
from selenium.webdriver.chrome.options import Options as ChromeOptions
|
||||
from selenium.webdriver.chrome.service import Service as ChromeDriverService
|
||||
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.edge.options import Options as EdgeOptions
|
||||
from selenium.webdriver.edge.service import Service as EdgeDriverService
|
||||
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
|
||||
from selenium.webdriver.firefox.options import Options as FirefoxOptions
|
||||
from selenium.webdriver.firefox.service import Service as GeckoDriverService
|
||||
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
|
||||
from selenium.webdriver.remote.webdriver import WebDriver
|
||||
from selenium.webdriver.safari.options import Options as SafariOptions
|
||||
from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
|
||||
|
||||
import autogpt.processing.text as summary
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.command_decorator import command
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, get_memory
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
CFG = Config()
|
||||
|
||||
|
||||
@command(
|
||||
"browse_website",
|
||||
"Browse Website",
|
||||
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
|
||||
"Browses a Website",
|
||||
{
|
||||
"url": {"type": "string", "description": "The URL to visit", "required": True},
|
||||
"question": {
|
||||
"type": "string",
|
||||
"description": "What you want to find on the website",
|
||||
"required": True,
|
||||
},
|
||||
},
|
||||
)
|
||||
@validate_url
|
||||
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
|
||||
def browse_website(url: str, question: str, agent: Agent) -> str:
|
||||
"""Browse a website and return the answer and links to the user
|
||||
|
||||
Args:
|
||||
@@ -45,25 +63,25 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
|
||||
str: The answer and links to the user
|
||||
"""
|
||||
try:
|
||||
driver, text = scrape_text_with_selenium(url)
|
||||
driver, text = scrape_text_with_selenium(url, agent)
|
||||
except WebDriverException as e:
|
||||
# These errors are often quite long and include lots of context.
|
||||
# Just grab the first line.
|
||||
msg = e.msg.split("\n")[0]
|
||||
return f"Error: {msg}", None
|
||||
return f"Error: {msg}"
|
||||
|
||||
add_header(driver)
|
||||
summary_text = summary.summarize_text(url, text, question, driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, agent, driver)
|
||||
links = scrape_links_with_selenium(driver, url)
|
||||
|
||||
# Limit links to 5
|
||||
if len(links) > 5:
|
||||
links = links[:5]
|
||||
close_browser(driver)
|
||||
return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
|
||||
return f"Answer gathered from website: {summary}\n\nLinks: {links}"
|
||||
|
||||
|
||||
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
|
||||
"""Scrape text from a website using selenium
|
||||
|
||||
Args:
|
||||
@@ -74,44 +92,49 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
|
||||
"""
|
||||
logging.getLogger("selenium").setLevel(logging.CRITICAL)
|
||||
|
||||
options_available = {
|
||||
options_available: dict[str, Type[BrowserOptions]] = {
|
||||
"chrome": ChromeOptions,
|
||||
"safari": SafariOptions,
|
||||
"edge": EdgeOptions,
|
||||
"firefox": FirefoxOptions,
|
||||
"safari": SafariOptions,
|
||||
}
|
||||
|
||||
options = options_available[CFG.selenium_web_browser]()
|
||||
options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
|
||||
options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
|
||||
)
|
||||
|
||||
if CFG.selenium_web_browser == "firefox":
|
||||
if CFG.selenium_headless:
|
||||
if agent.config.selenium_web_browser == "firefox":
|
||||
if agent.config.selenium_headless:
|
||||
options.headless = True
|
||||
options.add_argument("--disable-gpu")
|
||||
driver = webdriver.Firefox(
|
||||
executable_path=GeckoDriverManager().install(), options=options
|
||||
driver = FirefoxDriver(
|
||||
service=GeckoDriverService(GeckoDriverManager().install()), options=options
|
||||
)
|
||||
elif CFG.selenium_web_browser == "safari":
|
||||
elif agent.config.selenium_web_browser == "edge":
|
||||
driver = EdgeDriver(
|
||||
service=EdgeDriverService(EdgeDriverManager().install()), options=options
|
||||
)
|
||||
elif agent.config.selenium_web_browser == "safari":
|
||||
# Requires a bit more setup on the users end
|
||||
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
|
||||
driver = webdriver.Safari(options=options)
|
||||
driver = SafariDriver(options=options)
|
||||
else:
|
||||
if platform == "linux" or platform == "linux2":
|
||||
options.add_argument("--disable-dev-shm-usage")
|
||||
options.add_argument("--remote-debugging-port=9222")
|
||||
|
||||
options.add_argument("--no-sandbox")
|
||||
if CFG.selenium_headless:
|
||||
if agent.config.selenium_headless:
|
||||
options.add_argument("--headless=new")
|
||||
options.add_argument("--disable-gpu")
|
||||
|
||||
chromium_driver_path = Path("/usr/bin/chromedriver")
|
||||
|
||||
driver = webdriver.Chrome(
|
||||
executable_path=chromium_driver_path
|
||||
driver = ChromeDriver(
|
||||
service=ChromeDriverService(str(chromium_driver_path))
|
||||
if chromium_driver_path.exists()
|
||||
else ChromeDriverManager().install(),
|
||||
else ChromeDriverService(ChromeDriverManager().install()),
|
||||
options=options,
|
||||
)
|
||||
driver.get(url)
|
||||
@@ -175,4 +198,40 @@ def add_header(driver: WebDriver) -> None:
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())
|
||||
try:
|
||||
with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
|
||||
overlay_script = overlay_file.read()
|
||||
driver.execute_script(overlay_script)
|
||||
except Exception as e:
|
||||
print(f"Error executing overlay.js: {e}")
|
||||
|
||||
|
||||
def summarize_memorize_webpage(
|
||||
url: str,
|
||||
text: str,
|
||||
question: str,
|
||||
agent: Agent,
|
||||
driver: Optional[WebDriver] = None,
|
||||
) -> str:
|
||||
"""Summarize text using the OpenAI API
|
||||
|
||||
Args:
|
||||
url (str): The url of the text
|
||||
text (str): The text to summarize
|
||||
question (str): The question to ask the model
|
||||
driver (WebDriver): The webdriver to use to scroll the page
|
||||
|
||||
Returns:
|
||||
str: The summary of the text
|
||||
"""
|
||||
if not text:
|
||||
return "Error: No text to summarize"
|
||||
|
||||
text_length = len(text)
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
memory = get_memory(agent.config)
|
||||
|
||||
new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
|
||||
memory.add(new_memory)
|
||||
return new_memory.summary
|
||||
|
||||
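To make the shape of this refactor concrete: the browser choice now comes from `agent.config.selenium_web_browser` instead of the module-level `CFG` singleton, so two agents in one process can browse with different settings. Below is a minimal, self-contained sketch of the same dispatch-table pattern, deliberately independent of AutoGPT's `Config`/`Agent` objects; the function name and the reduced two-browser mapping are illustrative only.

```
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions


def build_browser_options(browser: str, headless: bool):
    # Same dispatch-table pattern as options_available above, reduced to two browsers.
    available = {"chrome": ChromeOptions, "firefox": FirefoxOptions}
    options = available[browser]()
    if headless:
        # Chromium's new headless mode, as in the chrome branch of the hunk above.
        options.add_argument("--headless=new" if browser == "chrome" else "--headless")
    return options


opts = build_browser_options("chrome", headless=True)
```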
@@ -1,37 +0,0 @@
-"""A module that contains a function to generate test cases for the submitted code."""
-from __future__ import annotations
-
-import json
-
-from autogpt.commands.command import command
-from autogpt.llm import call_ai_function
-
-
-@command(
-    "write_tests",
-    "Write Tests",
-    '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
-)
-def write_tests(code: str, focus: list[str]) -> str:
-    """
-    A function that takes in code and focus topics and returns a response from create
-    chat completion api call.
-
-    Parameters:
-        focus (list): A list of suggestions around what needs to be improved.
-        code (str): Code for test cases to be generated against.
-    Returns:
-        A result string from create chat completion. Test cases for the submitted code
-        in response.
-    """
-
-    function_string = (
-        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
-    )
-    args = [code, json.dumps(focus)]
-    description_string = (
-        "Generates test cases for the existing code, focusing on"
-        " specific areas if required."
-    )
-
-    return call_ai_function(function_string, args, description_string)
@@ -1,11 +1,12 @@
 """
 This module contains the configuration classes for AutoGPT.
 """
-from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import Config, check_openai_api_key
+from .ai_config import AIConfig
+from .config import Config, ConfigBuilder, check_openai_api_key

 __all__ = [
     "check_openai_api_key",
     "AIConfig",
     "Config",
+    "ConfigBuilder",
 ]
@@ -4,18 +4,16 @@ A module that contains the AIConfig class object that contains the configuration
 """
 from __future__ import annotations

-import os
 import platform
 from pathlib import Path
-from typing import Any, Optional, Type
+from typing import TYPE_CHECKING, Optional

 import distro
 import yaml

-from autogpt.prompts.generator import PromptGenerator
-
-# Soon this will go in a folder where it remembers more stuff about the run(s)
-SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
+if TYPE_CHECKING:
+    from autogpt.models.command_registry import CommandRegistry
+    from autogpt.prompts.generator import PromptGenerator


 class AIConfig:
@@ -33,7 +31,7 @@ class AIConfig:
         self,
         ai_name: str = "",
         ai_role: str = "",
-        ai_goals: list | None = None,
+        ai_goals: list[str] = [],
         api_budget: float = 0.0,
     ) -> None:
         """
@@ -47,33 +45,29 @@ class AIConfig:
         Returns:
             None
         """
-        if ai_goals is None:
-            ai_goals = []
         self.ai_name = ai_name
         self.ai_role = ai_role
         self.ai_goals = ai_goals
         self.api_budget = api_budget
-        self.prompt_generator = None
-        self.command_registry = None
+        self.prompt_generator: PromptGenerator | None = None
+        self.command_registry: CommandRegistry | None = None

     @staticmethod
-    def load(config_file: str = SAVE_FILE) -> "AIConfig":
+    def load(ai_settings_file: str | Path) -> "AIConfig":
         """
-        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
-        yaml file if yaml file exists,
-        else returns class with no parameters.
+        Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
+        loaded from yaml file if yaml file exists, else returns class with no parameters.

         Parameters:
-            config_file (int): The path to the config yaml file.
-                DEFAULT: "../ai_settings.yaml"
+            ai_settings_file (Path): The path to the config yaml file.

         Returns:
             cls (object): An instance of given cls object
         """

         try:
-            with open(config_file, encoding="utf-8") as file:
-                config_params = yaml.load(file, Loader=yaml.FullLoader)
+            with open(ai_settings_file, encoding="utf-8") as file:
+                config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
         except FileNotFoundError:
             config_params = {}

@@ -86,16 +80,15 @@ class AIConfig:
             for goal in config_params.get("ai_goals", [])
         ]
         api_budget = config_params.get("api_budget", 0.0)
-        # type: Type[AIConfig]

         return AIConfig(ai_name, ai_role, ai_goals, api_budget)

-    def save(self, config_file: str = SAVE_FILE) -> None:
+    def save(self, ai_settings_file: str | Path) -> None:
         """
         Saves the class parameters to the specified file yaml file path as a yaml file.

         Parameters:
-            config_file(str): The path to the config yaml file.
-                DEFAULT: "../ai_settings.yaml"
+            ai_settings_file (Path): The path to the config yaml file.

         Returns:
             None
@@ -107,11 +100,11 @@ class AIConfig:
             "ai_goals": self.ai_goals,
             "api_budget": self.api_budget,
         }
-        with open(config_file, "w", encoding="utf-8") as file:
+        with open(ai_settings_file, "w", encoding="utf-8") as file:
             yaml.dump(config, file, allow_unicode=True)

     def construct_full_prompt(
-        self, prompt_generator: Optional[PromptGenerator] = None
+        self, config, prompt_generator: Optional[PromptGenerator] = None
     ) -> str:
         """
         Returns a prompt to the user with the class information in an organized fashion.
@@ -131,22 +124,20 @@ class AIConfig:
             ""
         )

-        from autogpt.config import Config
         from autogpt.prompts.prompt import build_default_prompt_generator

-        cfg = Config()
         if prompt_generator is None:
-            prompt_generator = build_default_prompt_generator()
+            prompt_generator = build_default_prompt_generator(config)
         prompt_generator.goals = self.ai_goals
         prompt_generator.name = self.ai_name
         prompt_generator.role = self.ai_role
         prompt_generator.command_registry = self.command_registry
-        for plugin in cfg.plugins:
+        for plugin in config.plugins:
             if not plugin.can_handle_post_prompt():
                 continue
             prompt_generator = plugin.post_prompt(prompt_generator)

-        if cfg.execute_local_commands:
+        if config.execute_local_commands:
             # add OS info to prompt
             os_name = platform.system()
             os_info = (
@@ -164,5 +155,5 @@ class AIConfig:
         if self.api_budget > 0.0:
             full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
         self.prompt_generator = prompt_generator
-        full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
+        full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}"
         return full_prompt
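A quick round-trip sketch of the updated `AIConfig` API (the path and goal strings are examples; note that `load` and `save` now require an explicit path since the `SAVE_FILE` default was removed):

```
from autogpt.config.ai_config import AIConfig

ai_config = AIConfig(
    ai_name="ResearchGPT",
    ai_role="an agent that gathers and summarizes sources",
    ai_goals=["find three sources", "write a summary"],
    api_budget=1.0,
)
ai_config.save("ai_settings.yaml")          # path is now a required argument
loaded = AIConfig.load("ai_settings.yaml")  # returns an empty AIConfig if the file is missing
```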
@@ -1,282 +1,399 @@
 """Configuration class to store the state of bools for different scripts access."""
-import os
-from typing import List
+from __future__ import annotations
+
+import contextlib
+import os
+import re
+from pathlib import Path
+from typing import Any, Dict, Optional, Union

+import openai
 import yaml
 from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from colorama import Fore
+from pydantic import Field, validator

-from autogpt.singleton import Singleton
+from autogpt.core.configuration.schema import Configurable, SystemSettings
+from autogpt.plugins.plugins_config import PluginsConfig

+AI_SETTINGS_FILE = "ai_settings.yaml"
+AZURE_CONFIG_FILE = "azure.yaml"
+PLUGINS_CONFIG_FILE = "plugins_config.yaml"
+PROMPT_SETTINGS_FILE = "prompt_settings.yaml"
+
+GPT_4_MODEL = "gpt-4"
+GPT_3_MODEL = "gpt-3.5-turbo"


-class Config(metaclass=Singleton):
-    """
-    Configuration class to store the state of bools for different scripts access.
-    """
+class Config(SystemSettings, arbitrary_types_allowed=True):
+    name: str = "Auto-GPT configuration"
+    description: str = "Default configuration for the Auto-GPT application."
+    ########################
+    # Application Settings #
+    ########################
+    skip_news: bool = False
+    skip_reprompt: bool = False
+    authorise_key: str = "y"
+    exit_key: str = "n"
+    debug_mode: bool = False
+    plain_output: bool = False
+    chat_messages_enabled: bool = True
+    # TTS configuration
+    speak_mode: bool = False
+    text_to_speech_provider: str = "gtts"
+    streamelements_voice: str = "Brian"
+    elevenlabs_voice_id: Optional[str] = None

-    def __init__(self) -> None:
-        """Initialize the Config class"""
-        self.workspace_path = None
-        self.file_logger_path = None
+    ##########################
+    # Agent Control Settings #
+    ##########################
+    # Paths
+    ai_settings_file: str = AI_SETTINGS_FILE
+    prompt_settings_file: str = PROMPT_SETTINGS_FILE
+    workdir: Path = None
+    workspace_path: Optional[Path] = None
+    file_logger_path: Optional[str] = None
+    # Model configuration
+    fast_llm: str = "gpt-3.5-turbo"
+    smart_llm: str = "gpt-4"
+    temperature: float = 0
+    openai_functions: bool = False
+    embedding_model: str = "text-embedding-ada-002"
+    browse_spacy_language_model: str = "en_core_web_sm"
+    # Run loop configuration
+    continuous_mode: bool = False
+    continuous_limit: int = 0

-        self.debug_mode = False
-        self.continuous_mode = False
-        self.continuous_limit = 0
-        self.speak_mode = False
-        self.skip_reprompt = False
-        self.allow_downloads = False
-        self.skip_news = False
+    ##########
+    # Memory #
+    ##########
+    memory_backend: str = "json_file"
+    memory_index: str = "auto-gpt-memory"
+    redis_host: str = "localhost"
+    redis_port: int = 6379
+    redis_password: str = ""
+    wipe_redis_on_start: bool = True

-        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
-        self.exit_key = os.getenv("EXIT_KEY", "n")
-        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
-        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
-        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
-        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
-        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
-        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
-        self.browse_spacy_language_model = os.getenv(
-            "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
-        )
+    ############
+    # Commands #
+    ############
+    # General
+    disabled_command_categories: list[str] = Field(default_factory=list)
+    # File ops
+    restrict_to_workspace: bool = True
+    allow_downloads: bool = False
+    # Shell commands
+    shell_command_control: str = "denylist"
+    execute_local_commands: bool = False
+    shell_denylist: list[str] = Field(default_factory=lambda: ["sudo", "su"])
+    shell_allowlist: list[str] = Field(default_factory=list)
+    # Text to image
+    image_provider: Optional[str] = None
+    huggingface_image_model: str = "CompVis/stable-diffusion-v1-4"
+    sd_webui_url: Optional[str] = "http://localhost:7860"
+    image_size: int = 256
+    # Audio to text
+    audio_to_text_provider: str = "huggingface"
+    huggingface_audio_to_text_model: Optional[str] = None
+    # Web browsing
+    selenium_web_browser: str = "chrome"
+    selenium_headless: bool = True
+    user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

-        self.openai_api_key = os.getenv("OPENAI_API_KEY")
-        self.temperature = float(os.getenv("TEMPERATURE", "0"))
-        self.use_azure = os.getenv("USE_AZURE") == "True"
-        self.execute_local_commands = (
-            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
-        )
-        self.restrict_to_workspace = (
-            os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
-        )
+    ###################
+    # Plugin Settings #
+    ###################
+    plugins_dir: str = "plugins"
+    plugins_config_file: str = PLUGINS_CONFIG_FILE
+    plugins_config: PluginsConfig = Field(
+        default_factory=lambda: PluginsConfig(plugins={})
+    )
+    plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
+    plugins_allowlist: list[str] = Field(default_factory=list)
+    plugins_denylist: list[str] = Field(default_factory=list)
+    plugins_openai: list[str] = Field(default_factory=list)
+
+    ###############
+    # Credentials #
+    ###############
+    # OpenAI
+    openai_api_key: Optional[str] = None
+    openai_api_type: Optional[str] = None
+    openai_api_base: Optional[str] = None
+    openai_api_version: Optional[str] = None
+    openai_organization: Optional[str] = None
+    use_azure: bool = False
+    azure_config_file: Optional[str] = AZURE_CONFIG_FILE
+    azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
+    # Elevenlabs
+    elevenlabs_api_key: Optional[str] = None
+    # Github
+    github_api_key: Optional[str] = None
+    github_username: Optional[str] = None
+    # Google
+    google_api_key: Optional[str] = None
+    google_custom_search_engine_id: Optional[str] = None
+    # Huggingface
+    huggingface_api_token: Optional[str] = None
+    # Stable Diffusion
+    sd_webui_auth: Optional[str] = None
+
+    @validator("plugins", each_item=True)
+    def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
+        assert issubclass(
+            p.__class__, AutoGPTPluginTemplate
+        ), f"{p} does not subclass AutoGPTPluginTemplate"
+        assert (
+            p.__class__.__name__ != "AutoGPTPluginTemplate"
+        ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
+        return p
+
+    def get_openai_credentials(self, model: str) -> dict[str, str]:
+        credentials = {
+            "api_key": self.openai_api_key,
+            "api_base": self.openai_api_base,
+            "organization": self.openai_organization,
+        }
+        if self.use_azure:
+            self.load_azure_config()
+            openai.api_type = self.openai_api_type
+            openai.api_base = self.openai_api_base
+            openai.api_version = self.openai_api_version
+            azure_credentials = self.get_azure_credentials(model)
+            credentials.update(azure_credentials)
+        return credentials

-        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
-        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
-        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
+    def get_azure_credentials(self, model: str) -> dict[str, str]:
+        """Get the kwargs for the Azure API."""

-        self.use_mac_os_tts = False
-        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
-
-        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
-
-        self.use_brian_tts = False
-        self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
-
-        self.github_api_key = os.getenv("GITHUB_API_KEY")
-        self.github_username = os.getenv("GITHUB_USERNAME")
-
-        self.google_api_key = os.getenv("GOOGLE_API_KEY")
-        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
-
-        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
-        self.pinecone_region = os.getenv("PINECONE_ENV")
-
-        self.weaviate_host = os.getenv("WEAVIATE_HOST")
-        self.weaviate_port = os.getenv("WEAVIATE_PORT")
-        self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
-        self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
-        self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
-        self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
-        self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
-        self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
-        self.use_weaviate_embedded = (
-            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
-        )
+        # Fix --gpt3only and --gpt4only in combination with Azure
+        fast_llm = (
+            self.fast_llm
+            if not (
+                self.fast_llm == self.smart_llm
+                and self.fast_llm.startswith(GPT_4_MODEL)
+            )
+            else f"not_{self.fast_llm}"
+        )
+        smart_llm = (
+            self.smart_llm
+            if not (
+                self.smart_llm == self.fast_llm
+                and self.smart_llm.startswith(GPT_3_MODEL)
+            )
+            else f"not_{self.smart_llm}"
+        )

-        # milvus or zilliz cloud configuration.
-        self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
-        self.milvus_username = os.getenv("MILVUS_USERNAME")
-        self.milvus_password = os.getenv("MILVUS_PASSWORD")
-        self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
-        self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"
-
-        self.image_provider = os.getenv("IMAGE_PROVIDER")
-        self.image_size = int(os.getenv("IMAGE_SIZE", 256))
-        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
-        self.huggingface_image_model = os.getenv(
-            "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
-        )
-        self.huggingface_audio_to_text_model = os.getenv(
-            "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
-        )
-        self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
-        self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
-
-        # Selenium browser settings
-        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
-        self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
-
-        # User agent header to use when making HTTP requests
-        # Some websites might just completely deny request with an error code if
-        # no user agent was found.
-        self.user_agent = os.getenv(
-            "USER_AGENT",
-            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
-            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
-        )
-
-        self.redis_host = os.getenv("REDIS_HOST", "localhost")
-        self.redis_port = os.getenv("REDIS_PORT", "6379")
-        self.redis_password = os.getenv("REDIS_PASSWORD", "")
-        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
-        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
-        # Note that indexes must be created on db 0 in redis, this is not configurable.
-
-        self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
-
-        self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
-        self.plugins: List[AutoGPTPluginTemplate] = []
-        self.plugins_openai = []
-
-        plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
-        if plugins_allowlist:
-            self.plugins_allowlist = plugins_allowlist.split(",")
-        else:
-            self.plugins_allowlist = []
-        self.plugins_denylist = []
-
-    def get_azure_deployment_id_for_model(self, model: str) -> str:
-        """
-        Returns the relevant deployment id for the model specified.
-
-        Parameters:
-            model(str): The model to map to the deployment id.
-
-        Returns:
-            The matching deployment id if found, otherwise an empty string.
-        """
-        if model == self.fast_llm_model:
-            return self.azure_model_to_deployment_id_map[
-                "fast_llm_model_deployment_id"
-            ]  # type: ignore
-        elif model == self.smart_llm_model:
-            return self.azure_model_to_deployment_id_map[
-                "smart_llm_model_deployment_id"
-            ]  # type: ignore
-        elif model == "text-embedding-ada-002":
-            return self.azure_model_to_deployment_id_map[
-                "embedding_model_deployment_id"
-            ]  # type: ignore
+        deployment_id = {
+            fast_llm: self.azure_model_to_deployment_id_map.get(
+                "fast_llm_deployment_id",
+                self.azure_model_to_deployment_id_map.get(
+                    "fast_llm_model_deployment_id"  # backwards compatibility
+                ),
+            ),
+            smart_llm: self.azure_model_to_deployment_id_map.get(
+                "smart_llm_deployment_id",
+                self.azure_model_to_deployment_id_map.get(
+                    "smart_llm_model_deployment_id"  # backwards compatibility
+                ),
+            ),
+            self.embedding_model: self.azure_model_to_deployment_id_map.get(
+                "embedding_model_deployment_id"
+            ),
+        }.get(model, None)
+
+        kwargs = {
+            "api_type": self.openai_api_type,
+            "api_base": self.openai_api_base,
+            "api_version": self.openai_api_version,
+        }
+        if model == self.embedding_model:
+            kwargs["engine"] = deployment_id
         else:
-            return ""
+            kwargs["deployment_id"] = deployment_id
+        return kwargs
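For reference, the mapping above implies an `azure.yaml` roughly shaped like the following sketch. Only the keys named in this hunk are assumed; the deployment names are placeholders, and the `.get(...)` fallbacks mean older files using the `*_model_deployment_id` keys keep working.

```
azure_api_type: azure
azure_api_base: https://your-resource-name.openai.azure.com
azure_api_version: 2023-03-15-preview
azure_model_map:
  fast_llm_deployment_id: my-gpt35-deployment
  smart_llm_deployment_id: my-gpt4-deployment
  embedding_model_deployment_id: my-embedding-deployment
```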
-    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
-
-    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
+
+class ConfigBuilder(Configurable[Config]):
+    default_settings = Config()
+
+    @classmethod
+    def build_config_from_env(cls, workdir: Path) -> Config:
+        """Initialize the Config class"""
+        config_dict = {
+            "workdir": workdir,
+            "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
+            "exit_key": os.getenv("EXIT_KEY"),
+            "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
+            "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
+            "ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE),
+            "prompt_settings_file": os.getenv(
+                "PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE
+            ),
+            "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
+            "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
+            "embedding_model": os.getenv("EMBEDDING_MODEL"),
+            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
+            "openai_api_key": os.getenv("OPENAI_API_KEY"),
+            "use_azure": os.getenv("USE_AZURE") == "True",
+            "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
+            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
+            == "True",
+            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
+            == "True",
+            "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
+            "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
+            "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
+            "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
+            "github_api_key": os.getenv("GITHUB_API_KEY"),
+            "github_username": os.getenv("GITHUB_USERNAME"),
+            "google_api_key": os.getenv("GOOGLE_API_KEY"),
+            "image_provider": os.getenv("IMAGE_PROVIDER"),
+            "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"),
+            "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"),
+            "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"),
+            "huggingface_audio_to_text_model": os.getenv(
+                "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
+            ),
+            "sd_webui_url": os.getenv("SD_WEBUI_URL"),
+            "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"),
+            "selenium_web_browser": os.getenv("USE_WEB_BROWSER"),
+            "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True",
+            "user_agent": os.getenv("USER_AGENT"),
+            "memory_backend": os.getenv("MEMORY_BACKEND"),
+            "memory_index": os.getenv("MEMORY_INDEX"),
+            "redis_host": os.getenv("REDIS_HOST"),
+            "redis_password": os.getenv("REDIS_PASSWORD"),
+            "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
+            "plugins_dir": os.getenv("PLUGINS_DIR"),
+            "plugins_config_file": os.getenv(
+                "PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE
+            ),
+            "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
+        }
+
+        config_dict["disabled_command_categories"] = _safe_split(
+            os.getenv("DISABLED_COMMAND_CATEGORIES")
+        )
+
+        config_dict["shell_denylist"] = _safe_split(
+            os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
+        )
+        config_dict["shell_allowlist"] = _safe_split(
+            os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
+        )
+
+        config_dict["google_custom_search_engine_id"] = os.getenv(
+            "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
+        )
+
+        config_dict["elevenlabs_voice_id"] = os.getenv(
+            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
+        )
+        if not config_dict["text_to_speech_provider"]:
+            if os.getenv("USE_MAC_OS_TTS"):
+                default_tts_provider = "macos"
+            elif config_dict["elevenlabs_api_key"]:
+                default_tts_provider = "elevenlabs"
+            elif os.getenv("USE_BRIAN_TTS"):
+                default_tts_provider = "streamelements"
+            else:
+                default_tts_provider = "gtts"
+            config_dict["text_to_speech_provider"] = default_tts_provider
+
+        config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
+        config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
+
+        with contextlib.suppress(TypeError):
+            config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
+        with contextlib.suppress(TypeError):
+            config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
+        with contextlib.suppress(TypeError):
+            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
+
+        if config_dict["use_azure"]:
+            azure_config = cls.load_azure_config(
+                workdir / config_dict["azure_config_file"]
+            )
+            config_dict.update(azure_config)
+
+        elif os.getenv("OPENAI_API_BASE_URL"):
+            config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")
+
+        openai_organization = os.getenv("OPENAI_ORGANIZATION")
+        if openai_organization is not None:
+            config_dict["openai_organization"] = openai_organization
+
+        config_dict_without_none_values = {
+            k: v for k, v in config_dict.items() if v is not None
+        }
+
+        config = cls.build_agent_configuration(config_dict_without_none_values)
+
+        # Set secondary config variables (that depend on other config variables)
+
+        config.plugins_config = PluginsConfig.load_config(
+            config.workdir / config.plugins_config_file,
+            config.plugins_denylist,
+            config.plugins_allowlist,
+        )
+
+        return config
+
+    @classmethod
+    def load_azure_config(cls, config_file: Path) -> Dict[str, str]:
         """
         Loads the configuration parameters for Azure hosting from the specified file
         path as a yaml file.

         Parameters:
-            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
+            config_file (Path): The path to the config yaml file.

         Returns:
-            None
+            Dict
         """
         with open(config_file) as file:
-            config_params = yaml.load(file, Loader=yaml.FullLoader)
-        self.openai_api_type = config_params.get("azure_api_type") or "azure"
-        self.openai_api_base = config_params.get("azure_api_base") or ""
-        self.openai_api_version = (
-            config_params.get("azure_api_version") or "2023-03-15-preview"
-        )
-        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
-
-    def set_continuous_mode(self, value: bool) -> None:
-        """Set the continuous mode value."""
-        self.continuous_mode = value
-
-    def set_continuous_limit(self, value: int) -> None:
-        """Set the continuous limit value."""
-        self.continuous_limit = value
-
-    def set_speak_mode(self, value: bool) -> None:
-        """Set the speak mode value."""
-        self.speak_mode = value
-
-    def set_fast_llm_model(self, value: str) -> None:
-        """Set the fast LLM model value."""
-        self.fast_llm_model = value
-
-    def set_smart_llm_model(self, value: str) -> None:
-        """Set the smart LLM model value."""
-        self.smart_llm_model = value
-
-    def set_fast_token_limit(self, value: int) -> None:
-        """Set the fast token limit value."""
-        self.fast_token_limit = value
-
-    def set_smart_token_limit(self, value: int) -> None:
-        """Set the smart token limit value."""
-        self.smart_token_limit = value
-
-    def set_browse_chunk_max_length(self, value: int) -> None:
-        """Set the browse_website command chunk max length value."""
-        self.browse_chunk_max_length = value
-
-    def set_openai_api_key(self, value: str) -> None:
-        """Set the OpenAI API key value."""
-        self.openai_api_key = value
-
-    def set_elevenlabs_api_key(self, value: str) -> None:
-        """Set the ElevenLabs API key value."""
-        self.elevenlabs_api_key = value
-
-    def set_elevenlabs_voice_1_id(self, value: str) -> None:
-        """Set the ElevenLabs Voice 1 ID value."""
-        self.elevenlabs_voice_1_id = value
-
-    def set_elevenlabs_voice_2_id(self, value: str) -> None:
-        """Set the ElevenLabs Voice 2 ID value."""
-        self.elevenlabs_voice_2_id = value
-
-    def set_google_api_key(self, value: str) -> None:
-        """Set the Google API key value."""
-        self.google_api_key = value
-
-    def set_custom_search_engine_id(self, value: str) -> None:
-        """Set the custom search engine id value."""
-        self.custom_search_engine_id = value
-
-    def set_pinecone_api_key(self, value: str) -> None:
-        """Set the Pinecone API key value."""
-        self.pinecone_api_key = value
-
-    def set_pinecone_region(self, value: str) -> None:
-        """Set the Pinecone region value."""
-        self.pinecone_region = value
-
-    def set_debug_mode(self, value: bool) -> None:
-        """Set the debug mode value."""
-        self.debug_mode = value
-
-    def set_plugins(self, value: list) -> None:
-        """Set the plugins value."""
-        self.plugins = value
-
-    def set_temperature(self, value: int) -> None:
-        """Set the temperature value."""
-        self.temperature = value
-
-    def set_memory_backend(self, name: str) -> None:
-        """Set the memory backend name."""
-        self.memory_backend = name
+            config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
+
+        return {
+            "openai_api_type": config_params.get("azure_api_type", "azure"),
+            "openai_api_base": config_params.get("azure_api_base", ""),
+            "openai_api_version": config_params.get(
+                "azure_api_version", "2023-03-15-preview"
+            ),
+            "azure_model_to_deployment_id_map": config_params.get(
+                "azure_model_map", {}
+            ),
+        }


-def check_openai_api_key() -> None:
+def check_openai_api_key(config: Config) -> None:
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
-    cfg = Config()
-    if not cfg.openai_api_key:
+    if not config.openai_api_key:
         print(
             Fore.RED
             + "Please set your OpenAI API key in .env or as an environment variable."
             + Fore.RESET
         )
         print("You can get your key from https://platform.openai.com/account/api-keys")
-        exit(1)
+        openai_api_key = input(
+            "If you do have the key, please enter your OpenAI API key now:\n"
+        )
+        key_pattern = r"^sk-\w{48}"
+        openai_api_key = openai_api_key.strip()
+        if re.search(key_pattern, openai_api_key):
+            os.environ["OPENAI_API_KEY"] = openai_api_key
+            config.openai_api_key = openai_api_key
+            print(
+                Fore.GREEN
+                + "OpenAI API key successfully set!\n"
+                + Fore.YELLOW
+                + "NOTE: The API key you've set is only temporary.\n"
+                + "For longer sessions, please set it in .env file"
+                + Fore.RESET
+            )
+        else:
+            print("Invalid OpenAI API key!")
+            exit(1)
+
+
+def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
+    """Split a string by a separator. Return an empty list if the string is None."""
+    if s is None:
+        return []
+    return s.split(sep)
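Putting the new pieces together, a minimal sketch of how an application entry point would now obtain a config (this is not the repository's actual startup code, just the call pattern the diff establishes):

```
from pathlib import Path

from autogpt.config import ConfigBuilder, check_openai_api_key

# Build a validated Config from environment variables instead of a singleton.
config = ConfigBuilder.build_config_from_env(workdir=Path.cwd())
check_openai_api_key(config)  # now takes the config explicitly
print(config.smart_llm, config.memory_backend)
```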
autogpt/config/prompt_config.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.logs import logger


class PromptConfig:
    """
    A class object that contains the configuration information for the prompt, which will be used by the prompt generator

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        performance_evaluations (list): Performance evaluation list for the prompt generator.
    """

    def __init__(self, prompt_settings_file: str) -> None:
        """
        Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
        yaml file if yaml file exists,
        else raises error.

        Parameters:
            constraints (list): Constraints list for the prompt generator.
            resources (list): Resources list for the prompt generator.
            performance_evaluations (list): Performance evaluation list for the prompt generator.
        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        with open(prompt_settings_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)

        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.performance_evaluations = config_params.get("performance_evaluations", [])
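The class reads a YAML file of the following shape; the entries here are illustrative examples, and `PromptConfig("prompt_settings.yaml").constraints` would then return the first list.

```
constraints:
  - "Exclusively use the commands listed to you."
resources:
  - "Internet access for searches and information gathering."
performance_evaluations:
  - "Continuously review and analyze your actions."
```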
autogpt/core/ARCHITECTURE_NOTES.md (new file, 272 lines)
@@ -0,0 +1,272 @@
# Re-architecture Notes

## Key Documents

- [Planned Agent Workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
- [Original Architecture Diagram](https://www.figma.com/file/fwdj44tPR7ArYtnGGUKknw/Modular-Architecture?type=whiteboard&node-id=0-1) - This is sadly well out of date at this point.
- [Kanban](https://github.com/orgs/Significant-Gravitas/projects/1/views/1?filterQuery=label%3Are-arch)

## The Motivation

The `master` branch of Auto-GPT is an organically grown amalgamation of many thoughts
and ideas about agent-driven autonomous systems. It lacks clear abstraction boundaries,
has issues of global state and poorly encapsulated state, and is generally just hard to
make effective changes to. Meanwhile, research in the field is moving fast, so we want
to be able to try new ideas quickly.

## Initial Planning

A large group of maintainers and contributors met to discuss the architectural
challenges associated with the existing codebase. Many much-desired features (building
new user interfaces, enabling project-specific agents, enabling multi-agent systems)
are bottlenecked by the global state in the system. We discussed the tradeoffs between
an incremental system transition and a big breaking version change and decided to go
for the breaking version change. We justified this by saying:

- We can maintain, in essence, the same user experience as now even with a radical
  restructuring of the codebase
- Our developer audience is struggling to use the existing codebase to build
  applications and libraries of their own, so this breaking change will largely be
  welcome.

## Primary Goals

- Separate the AutoGPT application code from the library code.
- Remove global state from the system
- Allow for multiple agents per user (with facilities for running simultaneously)
- Create a serializable representation of an Agent
- Encapsulate the core systems in abstractions with clear boundaries.

## Secondary goals

- Use existing tools to ditch any unnecessary cruft in the codebase (document loading,
  json parsing, anything easier to replace than to port).
- Bring in the [core agent loop updates](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
  being developed simultaneously by @Pwuts

# The Agent Subsystems

## Configuration

We want a lot of things from a configuration system. We lean heavily on it in the
`master` branch to allow several parts of the system to communicate with each other.
[Recent work](https://github.com/Significant-Gravitas/Auto-GPT/pull/4737) has made it
so that the config is no longer a singleton object that is materialized from the import
state, but it's still treated as a
[god object](https://en.wikipedia.org/wiki/God_object) containing all information about
the system and _critically_ allowing any system to reference configuration information
about other parts of the system.

### What we want

- It should still be reasonable to collate the entire system configuration in a
  sensible way.
- The configuration should be validatable and validated.
- The system configuration should be a _serializable_ representation of an `Agent`.
- The configuration system should provide a clear (albeit very low-level) contract
  about user-configurable aspects of the system.
- The configuration should reasonably manage default values and user-provided overrides.
- The configuration system needs to handle credentials in a reasonable way.
- The configuration should be the representation of some amount of system state, like
  api budgets and resource usage. These aspects are recorded in the configuration and
  updated by the system itself.
- Agent systems should have encapsulated views of the configuration. E.g. the memory
  system should know about memory configuration but nothing about command configuration.
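As a rough illustration of that direction (class and field names here are hypothetical, not the re-arch's actual types), a pydantic model gives validation, defaults and overrides, serializability, and encapsulated sub-views in a few lines:

```
from pydantic import BaseModel, Field


class MemoryConfiguration(BaseModel):
    # The memory system sees only this sub-model, not the whole config.
    backend: str = "json_file"
    index: str = "auto-gpt-memory"


class AgentConfiguration(BaseModel):
    name: str
    api_budget: float = Field(default=0.0, ge=0.0)  # validated user override
    memory: MemoryConfiguration = MemoryConfiguration()


config = AgentConfiguration(name="agent-1", api_budget=1.5)
serialized = config.json()                        # serializable agent representation
restored = AgentConfiguration.parse_raw(serialized)
```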
## Workspace

There are two ways to think about the workspace:

- The workspace is a scratch space for an agent where it can store files, write code,
  and do pretty much whatever else it likes.
- The workspace is, at any given point in time, the single source of truth for what an
  agent is. It contains the serializable state (the configuration) as well as all
  other working state (stored files, databases, memories, custom code).

In the existing system there is **one** workspace. And because the workspace holds so
much agent state, that means a user can only work with one agent at a time.

## Memory

The memory system has been under extremely active development.
See [#3536](https://github.com/Significant-Gravitas/Auto-GPT/issues/3536) and
[#4208](https://github.com/Significant-Gravitas/Auto-GPT/pull/4208) for discussion and
work in the `master` branch. The TL;DR is
that we noticed a couple of months ago that the `Agent` performed **worse** with
permanent memory than without it. Since then the knowledge storage and retrieval
system has been [redesigned](https://whimsical.com/memory-system-8Ae6x6QkjDwQAUe9eVJ6w1)
and partially implemented in the `master` branch.

## Planning/Prompt-Engineering

The planning system is the system that translates user desires/agent intentions into
language model prompts. In the course of development, it has become pretty clear
that `Planning` is the wrong name for this system.

### What we want

- It should be incredibly obvious what's being passed to a language model, when it's
  being passed, and what the language model response is. The landscape of language
  model research is developing very rapidly, so building complex abstractions between
  users/contributors and the language model interactions is going to make it very
  difficult for us to nimbly respond to new research developments.
- Prompt-engineering should ideally be exposed in a parameterizable way to users.
- We should, where possible, leverage OpenAI's new
  [function calling api](https://openai.com/blog/function-calling-and-other-api-updates)
  to get outputs in a standard machine-readable format and avoid the deep pit of
  parsing json (and fixing unparsable json).

### Planning Strategies

The [new agent workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
has many, many interaction points for language models. We really would like to not
distribute prompt templates and raw strings all through the system. The re-arch solution
is to encapsulate language model interactions into planning strategies, sketched below.
These strategies are defined by

- The `LanguageModelClassification` they use (`FAST` or `SMART`)
- A function `build_prompt` that takes strategy-specific arguments and constructs a
  `LanguageModelPrompt` (a simple container for lists of messages and functions to
  pass to the language model)
- A function `parse_content` that parses the response content (a dict) into a better
  formatted dict. Contracts here are intentionally loose and will tighten once we have
  at least one other language model provider.
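A hedged sketch of what such a strategy could look like; these are illustrative stand-ins, not the actual `autogpt.core.planning` classes:

```
from dataclasses import dataclass, field
from enum import Enum


class LanguageModelClassification(Enum):
    FAST = "fast"
    SMART = "smart"


@dataclass
class LanguageModelPrompt:
    # Simple container for messages and functions, per the description above.
    messages: list = field(default_factory=list)
    functions: list = field(default_factory=list)


class NameAgentStrategy:
    model_classification = LanguageModelClassification.FAST

    def build_prompt(self, user_objective: str) -> LanguageModelPrompt:
        return LanguageModelPrompt(
            messages=[{"role": "user", "content": f"Name an agent for: {user_objective}"}]
        )

    def parse_content(self, response_content: dict) -> dict:
        # Contracts are intentionally loose at this stage.
        return {"agent_name": response_content.get("content", "").strip()}
```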
## Resources

Resources are kinds of services we consume from external APIs. They may have associated
credentials and costs we need to manage. Management of those credentials is implemented
as manipulation of the resource configuration. We currently have two categories of
resources:

- AI/ML model providers (including language model providers and embedding model providers, i.e. OpenAI)
- Memory providers (e.g. Pinecone, Weaviate, ChromaDB, etc.)

### What we want

- Resource abstractions should provide a common interface to different service providers
  for a particular kind of service.
- Resource abstractions should manipulate the configuration to manage their credentials
  and budget/accounting.
- Resource abstractions should be composable over an API (e.g. I should be able to make
  an OpenAI provider that is both a LanguageModelProvider and an EmbeddingModelProvider
  and use it wherever I need those services).
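The composition point is easiest to see as code; a sketch with illustrative interface names, not the re-arch's actual classes:

```
import abc


class LanguageModelProvider(abc.ABC):
    @abc.abstractmethod
    def create_completion(self, prompt: str) -> str:
        ...


class EmbeddingModelProvider(abc.ABC):
    @abc.abstractmethod
    def create_embedding(self, text: str) -> list:
        ...


class OpenAIProvider(LanguageModelProvider, EmbeddingModelProvider):
    """One concrete provider satisfying both resource abstractions."""

    def create_completion(self, prompt: str) -> str:
        raise NotImplementedError  # would call the completions API

    def create_embedding(self, text: str) -> list:
        raise NotImplementedError  # would call the embeddings API
```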
## Abilities

Along with planning and memory usage, abilities are one of the major augmentations of
augmented language models. They allow us to expand the scope of what language models
can do by hooking them up to code they can execute to obtain new knowledge or influence
the world.

### What we want

- Abilities should have an extremely clear interface that users can write to.
- Abilities should have an extremely clear interface that a language model can
  understand
- Abilities should be declarative about their dependencies so the system can inject them
- Abilities should be executable (where sensible) in an async run loop.
- Abilities should not have side effects unless those side effects are clear in
  their representation to an agent (e.g. the BrowseWeb ability shouldn't write a file,
  but the WriteFile ability can).

## Plugins

Users want to add lots of features that we don't want to support as first-party.
Our solution to this is a plugin system to allow users to plug in their functionality or
to construct their agent from a public plugin marketplace. Our primary concern in the
re-arch is to build a stateless plugin service interface and a simple implementation
that can load plugins from installed packages or from zip files. Future efforts will
expand this system to allow plugins to load from a marketplace or some other kind
of service.

### What is a Plugin

Plugins are a kind of garbage term. They refer to a number of things.

- New commands for the agent to execute. This is the most common usage.
- Replacements for entire subsystems like memory or language model providers
- Application plugins that do things like send emails or communicate via whatsapp
- The repositories contributors create that may themselves have multiple plugins in them.

### Usage in the existing system

The current plugin system is _hook-based_. This means plugins don't correspond to
kinds of objects in the system, but rather to times in the system at which we defer
execution to them. The main advantage of this setup is that user code can hijack
pretty much any behavior of the agent by injecting code that supersedes the normal
agent execution. The disadvantages to this approach are numerous:

- We have absolutely no mechanisms to enforce any security measures because the threat
  surface is everything.
- We cannot reason about agent behavior in a cohesive way because control flow can be
  ceded to user code at pretty much any point and arbitrarily change or break the
  agent behavior
- The interface for designing a plugin is kind of terrible and difficult to standardize
- The hook-based implementation means we couple ourselves to a particular flow of
  control (or otherwise risk breaking plugin behavior). E.g. many of the hook targets
  in the [old workflow](https://whimsical.com/agent-workflow-VAzeKcup3SR7awpNZJKTyK)
  are not present or mean something entirely different in the
  [new workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ).
- Etc.

### What we want

- A concrete definition of a plugin that is narrow enough in scope that we can define
  it well and reason about how it will work in the system.
- A set of abstractions that let us define a plugin by its storage format and location
- A service interface that knows how to parse the plugin abstractions and turn them
  into concrete classes and objects.


## Some Notes on how and why we'll use OO in this project

First and foremost, Python itself is an object-oriented language. Its
underlying [data model](https://docs.python.org/3/reference/datamodel.html) is built
with object-oriented programming in mind. It offers useful tools like abstract base
classes to communicate interfaces to developers who want to, e.g., write plugins, or
help work on implementations. If we were working in a different language that offered
different tools, we'd use a different paradigm.

While many things are classes in the re-arch, they are not classes in the same way.
There are three kinds of things (roughly) that are written as classes in the re-arch:

1. **Configuration**: Auto-GPT has *a lot* of configuration. This configuration
   is *data* and we use **[Pydantic](https://docs.pydantic.dev/latest/)** to manage it, as
   pydantic is basically the industry standard for this stuff. It provides runtime validation
   for all the configuration and allows us to easily serialize configuration to both basic
   python types (dicts, lists, and primitives) as well as serialize to json, which is
   important for us being able to put representations of agents
   [on the wire](https://en.wikipedia.org/wiki/Wire_protocol) for web applications and
   agent-to-agent communication. *These are essentially
   [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) rather than
   traditional classes.*
2. **Internal Data**: Very similar to configuration, Auto-GPT passes around boatloads
   of internal data. We are interacting with language models and language model APIs
   which means we are handling lots of *structured* but *raw* text. Here we also
   leverage **pydantic** to both *parse* and *validate* the internal data and also to
   give us concrete types which we can use static type checkers to validate against
   and discover problems before they show up as bugs at runtime. *These are
   essentially [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language))
   rather than traditional classes.*
3. **System Interfaces**: This is our primary traditional use of classes in the
   re-arch. We have a bunch of systems. We want many of those systems to have
   alternative implementations (e.g. via plugins). We use abstract base classes to
   define interfaces to communicate with people who might want to provide those
   plugins. We provide a single concrete implementation of most of those systems as a
   subclass of the interface. This should not be controversial.

The approach is consistent with
[prior](https://github.com/Significant-Gravitas/Auto-GPT/issues/2458)
[work](https://github.com/Significant-Gravitas/Auto-GPT/pull/2442) done by other
maintainers in this direction.

From an organization standpoint, OO programming is by far the most popular programming
paradigm (especially for Python). It's the one most often taught in programming classes
and the one with the most available online training for people interested in
contributing.

Finally, and importantly, we scoped the plan and initial design of the re-arch as a
large group of maintainers and collaborators early on. This is consistent with the
design we chose and no-one offered alternatives.
94
autogpt/core/README.md
Normal file
94
autogpt/core/README.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Auto-GPT Core

This subpackage contains the ongoing work for the
[Auto-GPT Re-arch](https://github.com/Significant-Gravitas/Auto-GPT/issues/4770). It is
a work in progress and is not yet feature complete. In particular, it does not yet
have many of the Auto-GPT commands implemented and is pending ongoing work to
[re-incorporate vector-based memory and knowledge retrieval](https://github.com/Significant-Gravitas/Auto-GPT/issues/3536).

## [Overview](ARCHITECTURE_NOTES.md)

The Auto-GPT Re-arch is a re-implementation of the Auto-GPT agent that is designed to be
more modular, more extensible, and more maintainable than the original agent, as well as
more accessible to new developers and easier to contribute to. It is a work in progress:
it is not yet feature complete, and it is not yet ready for production use.

## Running the Re-arch Code

Two client applications for Auto-GPT are included.

Unlike the main version of Auto-GPT, the re-arch requires you to actually install Auto-GPT
in your Python environment to run this application. To do so, run

```
pip install -e REPOSITORY_ROOT
```

where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine: the main,
top-level directory of the repository when you clone it, the one that contains the
`setup.py` file.

## CLI Application

:star2: **This is the reference application I'm working with for now** :star2:

The first app is a straight CLI application. I have not done anything yet to port all the
friendly display stuff from the `logger.typewriter_log` logic.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)

You'll then need a settings file. Run

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
```

This will write a file called `default_agent_settings.yml` with all the user-modifiable
configuration keys to `~/auto-gpt/default_agent_settings.yml` (creating the `auto-gpt`
directory in your user directory if it doesn't exist). Your user directory is located in
different places depending on your operating system:

- On Linux, it's `/home/USERNAME`
- On Windows, it's `C:\Users\USERNAME`
- On Mac, it's `/Users/USERNAME`

At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API key
to run the model.

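For reference, that key lives in a nested YAML structure. A minimal sketch, assuming the
file's nesting mirrors the dotted key path above (the placeholder value is yours to fill in):

```
openai:
  credentials:
    api_key: "sk-..."
```
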
You can then run Auto-GPT with

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
```

to launch the interaction loop.

### CLI Web App

:warning: I am not actively developing this application. I am primarily working with the
traditional CLI app described above. This is a very good place to get involved if you have
web application design experience and are looking to contribute to the re-arch.

The second app is still a CLI, but it sets up a local webserver that the client application
talks to rather than invoking calls to the Agent library code directly. This application is
essentially a sketch at this point, as the folks who were driving it have had less time
(and likely not enough clarity) to proceed.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py)
- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py)

To run, you still need to generate a default configuration. You can do so with

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
```

This invokes the same command as the bare CLI app, so follow the instructions above about
setting your API key.

To run, do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
```

This will launch a webserver and then start the client CLI application to communicate
with it.

4 autogpt/core/ability/__init__.py Normal file
@@ -0,0 +1,4 @@
"""The command system provides a way to extend the functionality of the AI agent."""
|
||||
from autogpt.core.ability.base import Ability, AbilityRegistry
|
||||
from autogpt.core.ability.schema import AbilityResult
|
||||
from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
|
||||
92 autogpt/core/ability/base.py Normal file
@@ -0,0 +1,92 @@
import abc
from pprint import pformat
from typing import ClassVar

import inflection
from pydantic import Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.simple import LanguageModelConfiguration


class AbilityConfiguration(SystemConfiguration):
    """Struct for ability configuration."""

    # Imported here to avoid a circular import at module load time.
    from autogpt.core.plugin.base import PluginLocation

    location: PluginLocation
    packages_required: list[str] = Field(default_factory=list)
    language_model_required: LanguageModelConfiguration = None
    memory_provider_required: bool = False
    workspace_required: bool = False


class Ability(abc.ABC):
    """A class representing an agent ability."""

    default_configuration: ClassVar[AbilityConfiguration]

    @classmethod
    def name(cls) -> str:
        """The name of the ability."""
        return inflection.underscore(cls.__name__)

    @classmethod
    @abc.abstractmethod
    def description(cls) -> str:
        """A detailed description of what the ability does."""
        ...

    @classmethod
    @abc.abstractmethod
    def arguments(cls) -> dict:
        """A dict of arguments in standard json schema format."""
        ...

    @classmethod
    def required_arguments(cls) -> list[str]:
        """A list of required arguments."""
        return []

    @abc.abstractmethod
    async def __call__(self, *args, **kwargs) -> AbilityResult:
        ...

    def __str__(self) -> str:
        return pformat(self.dump())

    def dump(self) -> dict:
        return {
            "name": self.name(),
            "description": self.description(),
            "parameters": {
                "type": "object",
                "properties": self.arguments(),
                "required": self.required_arguments(),
            },
        }


class AbilityRegistry(abc.ABC):
    @abc.abstractmethod
    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ...

    @abc.abstractmethod
    def list_abilities(self) -> list[str]:
        ...

    @abc.abstractmethod
    def dump_abilities(self) -> list[dict]:
        ...

    @abc.abstractmethod
    def get_ability(self, ability_name: str) -> Ability:
        ...

    @abc.abstractmethod
    def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        ...
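To see what the `Ability` contract above asks of a concrete implementation, here is a minimal illustrative subclass. The `Echo` class and its behavior are invented for this sketch and are not part of the package:

```
import asyncio

from autogpt.core.ability.base import Ability
from autogpt.core.ability.schema import AbilityResult


class Echo(Ability):
    """A hypothetical ability that returns its input unchanged."""

    @classmethod
    def description(cls) -> str:
        return "Echo a message back to the agent."

    @classmethod
    def arguments(cls) -> dict:
        return {"message": {"type": "string", "description": "The message to echo."}}

    @classmethod
    def required_arguments(cls) -> list[str]:
        return ["message"]

    async def __call__(self, message: str) -> AbilityResult:
        return AbilityResult(
            ability_name=self.name(),  # "echo", derived via inflection.underscore
            ability_args={"message": message},
            success=True,
            message=message,
        )


# asyncio.run(Echo()("hello")).summary() -> "echo(message=hello): hello"
```
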
6 autogpt/core/ability/builtins/__init__.py Normal file
@@ -0,0 +1,6 @@
from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel

# NOTE: CreateNewAbility is still a stub (its __call__ raises NotImplementedError)
# and is not yet included in the builtin registry below.
BUILTIN_ABILITIES = {
    QueryLanguageModel.name(): QueryLanguageModel,
}
102 autogpt/core/ability/builtins/create_new_ability.py Normal file
@@ -0,0 +1,102 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat


class CreateNewAbility(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.CreateNewAbility",
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
    ):
        self._logger = logger
        self._configuration = configuration

    @classmethod
    def description(cls) -> str:
        return "Create a new ability by writing Python code."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "ability_name": {
                "type": "string",
                "description": "A meaningful and concise name for the new ability.",
            },
            "description": {
                "type": "string",
                "description": "A detailed description of the ability and its uses, including any limitations.",
            },
            "arguments": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "The name of the argument.",
                        },
                        "type": {
                            "type": "string",
                            "description": "The type of the argument. Must be a standard json schema type.",
                        },
                        "description": {
                            "type": "string",
                            "description": "A detailed description of the argument and its uses.",
                        },
                    },
                },
                "description": "A list of arguments that the ability will accept.",
            },
            "required_arguments": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The names of the arguments that are required.",
                },
                "description": "A list of the names of the arguments that are required.",
            },
            "package_requirements": {
                "type": "array",
                "items": {
                    "type": "string",
                    "description": "The name of a Python package that is required to execute the ability.",
                },
                "description": "A list of the names of the Python packages that are required to execute the ability.",
            },
            "code": {
                "type": "string",
                "description": "The Python code that will be executed when the ability is called.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return [
            "ability_name",
            "description",
            "arguments",
            "required_arguments",
            "package_requirements",
            "code",
        ]

    async def __call__(
        self,
        ability_name: str,
        description: str,
        arguments: list[dict],
        required_arguments: list[str],
        package_requirements: list[str],
        code: str,
    ) -> AbilityResult:
        raise NotImplementedError
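Although `CreateNewAbility.__call__` is still unimplemented, its metadata is already usable: `dump()` (inherited from `Ability`) assembles the classmethods above into an OpenAI-style function schema. A small sketch, assuming the package is installed as described in the README:

```
import logging

from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility

ability = CreateNewAbility(
    logger=logging.getLogger("demo"),
    configuration=CreateNewAbility.default_configuration,
)
# {"name": "create_new_ability", "description": ..., "parameters": {...}}
print(ability.dump())
```
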
167 autogpt/core/ability/builtins/file_operations.py Normal file
@@ -0,0 +1,167 @@
import logging
import os

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.workspace import Workspace


class ReadFile(Ability):
    default_configuration = AbilityConfiguration(
        # NOTE: AbilityConfiguration requires a plugin location, which the original
        # diff omitted; the route below is an assumption following the pattern of
        # the other builtin abilities.
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.ReadFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @classmethod
    def description(cls) -> str:
        return "Read and parse all text from a file."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to read.",
            },
        }

    def _check_preconditions(self, filename: str) -> AbilityResult | None:
        message = ""
        try:
            # Probe for the optional dependency used when parsing file encodings.
            import charset_normalizer  # noqa: F401
        except ImportError:
            message = "Package charset_normalizer is not installed."

        try:
            file_path = self._workspace.get_path(filename)
            if not file_path.exists():
                message = f"File {filename} does not exist."
            if not file_path.is_file():
                message = f"{filename} is not a file."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename},
                success=False,
                message=message,
            )

    async def __call__(self, filename: str) -> AbilityResult:
        if result := self._check_preconditions(filename):
            return result

        from unstructured.partition.auto import partition

        file_path = self._workspace.get_path(filename)
        try:
            elements = partition(str(file_path))
            # TODO: Lots of other potentially useful information is available
            #   in the partitioned file. Consider returning more of it.
            new_knowledge = Knowledge(
                content="\n\n".join([element.text for element in elements]),
                content_type=ContentType.TEXT,
                content_metadata={"filename": filename},
            )
            success = True
            message = f"File {file_path} read successfully."
        except IOError as e:
            new_knowledge = None
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
            new_knowledge=new_knowledge,
        )


class WriteFile(Ability):
    default_configuration = AbilityConfiguration(
        # NOTE: as above, the plugin location was omitted in the original diff and
        # the route here is an assumption following the builtin naming pattern.
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.WriteFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    @classmethod
    def description(cls) -> str:
        return "Write text to a file."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "filename": {
                "type": "string",
                "description": "The name of the file to write.",
            },
            "contents": {
                "type": "string",
                "description": "The contents of the file to write.",
            },
        }

    def _check_preconditions(
        self, filename: str, contents: str
    ) -> AbilityResult | None:
        message = ""
        try:
            file_path = self._workspace.get_path(filename)
            if file_path.exists():
                message = f"File {filename} already exists."
            if not contents:
                message = f"File {filename} was not given any content."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename, "contents": contents},
                success=False,
                message=message,
            )

    async def __call__(self, filename: str, contents: str) -> AbilityResult:
        if result := self._check_preconditions(filename, contents):
            return result

        file_path = self._workspace.get_path(filename)
        try:
            directory = os.path.dirname(file_path)
            os.makedirs(directory, exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(contents)
            success = True
            message = f"File {file_path} written successfully."
        except IOError as e:
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
        )
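Both abilities resolve paths through the agent workspace rather than the raw filesystem, so any usage sketch needs a workspace instance. In the sketch below, `workspace` stands in for a `Workspace` built by the agent bootstrap (an assumption of this example), and the filename is invented:

```
import logging

from autogpt.core.ability.builtins.file_operations import ReadFile, WriteFile


async def roundtrip(workspace) -> None:
    logger = logging.getLogger("demo")
    write_result = await WriteFile(logger=logger, workspace=workspace)(
        "notes.txt", "hello"
    )
    if write_result.success:
        read_result = await ReadFile(logger=logger, workspace=workspace)("notes.txt")
        print(read_result.summary())
```
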
78 autogpt/core/ability/builtins/query_language_model.py Normal file
@@ -0,0 +1,78 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.planning.simple import LanguageModelConfiguration
from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
from autogpt.core.resource.model_providers import (
    LanguageModelMessage,
    LanguageModelProvider,
    MessageRole,
    ModelProviderName,
    OpenAIModelName,
)


class QueryLanguageModel(Ability):
    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
        ),
        language_model_required=LanguageModelConfiguration(
            model_name=OpenAIModelName.GPT3,
            provider_name=ModelProviderName.OPENAI,
            temperature=0.9,
        ),
    )

    def __init__(
        self,
        logger: logging.Logger,
        configuration: AbilityConfiguration,
        language_model_provider: LanguageModelProvider,
    ):
        self._logger = logger
        self._configuration = configuration
        self._language_model_provider = language_model_provider

    @classmethod
    def description(cls) -> str:
        return "Query a language model. A query should be a question and any relevant context."

    @classmethod
    def arguments(cls) -> dict:
        return {
            "query": {
                "type": "string",
                "description": "A query for a language model. A query should contain a question and any relevant context.",
            },
        }

    @classmethod
    def required_arguments(cls) -> list[str]:
        return ["query"]

    async def __call__(self, query: str) -> AbilityResult:
        messages = [
            LanguageModelMessage(
                content=query,
                role=MessageRole.USER,
            ),
        ]
        model_response = await self._language_model_provider.create_language_completion(
            model_prompt=messages,
            functions=[],
            model_name=self._configuration.language_model_required.model_name,
            completion_parser=self._parse_response,
        )
        return AbilityResult(
            ability_name=self.name(),
            ability_args={"query": query},
            success=True,
            message=model_response.content["content"],
        )

    @staticmethod
    def _parse_response(response_content: dict) -> dict:
        return {"content": response_content["content"]}
30 autogpt/core/ability/schema.py Normal file
@@ -0,0 +1,30 @@
import enum
from typing import Any

from pydantic import BaseModel


class ContentType(str, enum.Enum):
    # TBD what these actually are.
    TEXT = "text"
    CODE = "code"


class Knowledge(BaseModel):
    content: str
    content_type: ContentType
    content_metadata: dict[str, Any]


class AbilityResult(BaseModel):
    """The AbilityResult is a standard response struct for an ability."""

    ability_name: str
    ability_args: dict[str, str]
    success: bool
    message: str
    new_knowledge: Knowledge = None

    def summary(self) -> str:
        kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items())
        return f"{self.ability_name}({kwargs}): {self.message}"
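The `summary()` format is easiest to see on a concrete instance (all values below are invented for the example):

```
from autogpt.core.ability.schema import AbilityResult

result = AbilityResult(
    ability_name="read_file",
    ability_args={"filename": "notes.txt"},
    success=True,
    message="File notes.txt read successfully.",
)
# Prints: read_file(filename=notes.txt): File notes.txt read successfully.
print(result.summary())
```
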
96 autogpt/core/ability/simple.py Normal file
@@ -0,0 +1,96 @@
import logging

from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
from autogpt.core.ability.builtins import BUILTIN_ABILITIES
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.plugin.simple import SimplePluginService
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
)
from autogpt.core.workspace.base import Workspace


class AbilityRegistryConfiguration(SystemConfiguration):
    """Configuration for the AbilityRegistry subsystem."""

    abilities: dict[str, AbilityConfiguration]


class AbilityRegistrySettings(SystemSettings):
    configuration: AbilityRegistryConfiguration


class SimpleAbilityRegistry(AbilityRegistry, Configurable):
    default_settings = AbilityRegistrySettings(
        name="simple_ability_registry",
        description="A simple ability registry.",
        configuration=AbilityRegistryConfiguration(
            abilities={
                ability_name: ability.default_configuration
                for ability_name, ability in BUILTIN_ABILITIES.items()
            },
        ),
    )

    def __init__(
        self,
        settings: AbilityRegistrySettings,
        logger: logging.Logger,
        memory: Memory,
        workspace: Workspace,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._memory = memory
        self._workspace = workspace
        self._model_providers = model_providers
        self._abilities: list[Ability] = []
        for (
            ability_name,
            ability_configuration,
        ) in self._configuration.abilities.items():
            self.register_ability(ability_name, ability_configuration)

    def register_ability(
        self, ability_name: str, ability_configuration: AbilityConfiguration
    ) -> None:
        ability_class = SimplePluginService.get_plugin(ability_configuration.location)
        ability_args = {
            "logger": self._logger.getChild(ability_name),
            "configuration": ability_configuration,
        }
        if ability_configuration.packages_required:
            # TODO: Check packages are installed and maybe install them.
            pass
        if ability_configuration.memory_provider_required:
            ability_args["memory"] = self._memory
        if ability_configuration.workspace_required:
            ability_args["workspace"] = self._workspace
        if ability_configuration.language_model_required:
            ability_args["language_model_provider"] = self._model_providers[
                ability_configuration.language_model_required.provider_name
            ]
        ability = ability_class(**ability_args)
        self._abilities.append(ability)

    def list_abilities(self) -> list[str]:
        return [
            f"{ability.name()}: {ability.description()}" for ability in self._abilities
        ]

    def dump_abilities(self) -> list[dict]:
        return [ability.dump() for ability in self._abilities]

    def get_ability(self, ability_name: str) -> Ability:
        for ability in self._abilities:
            if ability.name() == ability_name:
                return ability
        raise ValueError(f"Ability '{ability_name}' not found.")

    async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
        ability = self.get_ability(ability_name)
        return await ability(**kwargs)
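Wiring the registry by hand looks roughly like the sketch below; `memory`, `workspace`, and `provider` stand in for instances built by the agent bootstrap and are assumptions of this example:

```
import logging

from autogpt.core.ability.simple import SimpleAbilityRegistry
from autogpt.core.resource.model_providers import ModelProviderName


def build_registry(memory, workspace, provider) -> SimpleAbilityRegistry:
    # Registers every ability in BUILTIN_ABILITIES via the default settings.
    return SimpleAbilityRegistry(
        settings=SimpleAbilityRegistry.default_settings,
        logger=logging.getLogger("demo"),
        memory=memory,
        workspace=workspace,
        model_providers={ModelProviderName.OPENAI: provider},
    )


# build_registry(...).list_abilities()
# -> ["query_language_model: Query a language model. ..."]
```
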
3 autogpt/core/agent/__init__.py Normal file
@@ -0,0 +1,3 @@
"""The Agent is an autonomouos entity guided by a LLM provider."""
|
||||
from autogpt.core.agent.base import Agent
|
||||
from autogpt.core.agent.simple import AgentSettings, SimpleAgent
|
||||
26 autogpt/core/agent/base.py Normal file
@@ -0,0 +1,26 @@
import abc
import logging
from pathlib import Path


class Agent(abc.ABC):
    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        ...

    @classmethod
    @abc.abstractmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "Agent":
        ...

    @abc.abstractmethod
    async def determine_next_ability(self, *args, **kwargs):
        ...

    @abc.abstractmethod
    def __repr__(self):
        ...
391 autogpt/core/agent/simple.py Normal file
@@ -0,0 +1,391 @@
import logging
from datetime import datetime
from pathlib import Path
from typing import Any

from pydantic import BaseModel

from autogpt.core.ability import (
    AbilityRegistrySettings,
    AbilityResult,
    SimpleAbilityRegistry,
)
from autogpt.core.agent.base import Agent
from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory import MemorySettings, SimpleMemory
from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
from autogpt.core.plugin.simple import (
    PluginLocation,
    PluginStorageFormat,
    SimplePluginService,
)
from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings


class AgentSystems(SystemConfiguration):
    ability_registry: PluginLocation
    memory: PluginLocation
    openai_provider: PluginLocation
    planning: PluginLocation
    workspace: PluginLocation


class AgentConfiguration(SystemConfiguration):
    cycle_count: int
    max_task_cycle_count: int
    creation_time: str
    name: str
    role: str
    goals: list[str]
    systems: AgentSystems


class AgentSystemSettings(SystemSettings):
    configuration: AgentConfiguration


class AgentSettings(BaseModel):
    agent: AgentSystemSettings
    ability_registry: AbilityRegistrySettings
    memory: MemorySettings
    openai_provider: OpenAISettings
    planning: PlannerSettings
    workspace: WorkspaceSettings

    def update_agent_name_and_goals(self, agent_goals: dict) -> None:
        self.agent.configuration.name = agent_goals["agent_name"]
        self.agent.configuration.role = agent_goals["agent_role"]
        self.agent.configuration.goals = agent_goals["agent_goals"]


class SimpleAgent(Agent, Configurable):
    default_settings = AgentSystemSettings(
        name="simple_agent",
        description="A simple agent.",
        configuration=AgentConfiguration(
            name="Entrepreneur-GPT",
            role=(
                "An AI designed to autonomously develop and run businesses with "
                "the sole goal of increasing your net worth."
            ),
            goals=[
                "Increase net worth",
                "Grow Twitter Account",
                "Develop and manage multiple businesses autonomously",
            ],
            cycle_count=0,
            max_task_cycle_count=3,
            creation_time="",
            systems=AgentSystems(
                ability_registry=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.ability.SimpleAbilityRegistry",
                ),
                memory=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.memory.SimpleMemory",
                ),
                openai_provider=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.resource.model_providers.OpenAIProvider",
                ),
                planning=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.planning.SimplePlanner",
                ),
                workspace=PluginLocation(
                    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
                    storage_route="autogpt.core.workspace.SimpleWorkspace",
                ),
            ),
        ),
    )

    def __init__(
        self,
        settings: AgentSystemSettings,
        logger: logging.Logger,
        ability_registry: SimpleAbilityRegistry,
        memory: SimpleMemory,
        openai_provider: OpenAIProvider,
        planning: SimplePlanner,
        workspace: SimpleWorkspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._ability_registry = ability_registry
        self._memory = memory
        # FIXME: Need some work to make this work as a dict of providers.
        #   Getting the construction of the config to work is a bit tricky.
        self._openai_provider = openai_provider
        self._planning = planning
        self._workspace = workspace
        self._task_queue = []
        self._completed_tasks = []
        self._current_task = None
        self._next_ability = None

    @classmethod
    def from_workspace(
        cls,
        workspace_path: Path,
        logger: logging.Logger,
    ) -> "SimpleAgent":
        agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
        agent_args = {}

        agent_args["settings"] = agent_settings.agent
        agent_args["logger"] = logger
        agent_args["workspace"] = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger,
        )
        agent_args["openai_provider"] = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger,
        )
        agent_args["planning"] = cls._get_system_instance(
            "planning",
            agent_settings,
            logger,
            model_providers={"openai": agent_args["openai_provider"]},
        )
        agent_args["memory"] = cls._get_system_instance(
            "memory",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
        )

        agent_args["ability_registry"] = cls._get_system_instance(
            "ability_registry",
            agent_settings,
            logger,
            workspace=agent_args["workspace"],
            memory=agent_args["memory"],
            model_providers={"openai": agent_args["openai_provider"]},
        )

        return cls(**agent_args)

    async def build_initial_plan(self) -> dict:
        plan = await self._planning.make_initial_plan(
            agent_name=self._configuration.name,
            agent_role=self._configuration.role,
            agent_goals=self._configuration.goals,
            abilities=self._ability_registry.list_abilities(),
        )
        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]

        # TODO: Should probably do a step to evaluate the quality of the generated tasks
        #   and ensure that they have actionable ready and acceptance criteria.

        self._task_queue.extend(tasks)
        self._task_queue.sort(key=lambda t: t.priority, reverse=True)
        # Tasks are consumed from the end of the queue via pop().
        self._task_queue[-1].context.status = TaskStatus.READY
        return plan.content

    async def determine_next_ability(self, *args, **kwargs):
        if not self._task_queue:
            return {"response": "I don't have any tasks to work on right now."}

        self._configuration.cycle_count += 1
        task = self._task_queue.pop()
        self._logger.info(f"Working on task: {task}")

        task = await self._evaluate_task_and_add_context(task)
        next_ability = await self._choose_next_ability(
            task,
            self._ability_registry.dump_abilities(),
        )
        self._current_task = task
        self._next_ability = next_ability.content
        return self._current_task, self._next_ability

    async def execute_next_ability(self, user_input: str, *args, **kwargs):
        if user_input == "y":
            ability = self._ability_registry.get_ability(
                self._next_ability["next_ability"]
            )
            ability_response = await ability(**self._next_ability["ability_arguments"])
            await self._update_tasks_and_memory(ability_response)
            if self._current_task.context.status == TaskStatus.DONE:
                self._completed_tasks.append(self._current_task)
            else:
                self._task_queue.append(self._current_task)
            self._current_task = None
            self._next_ability = None

            return ability_response.dict()
        else:
            raise NotImplementedError

    async def _evaluate_task_and_add_context(self, task: Task) -> Task:
        """Evaluate the task and add context to it."""
        if task.context.status == TaskStatus.IN_PROGRESS:
            # Nothing to do here
            return task
        else:
            self._logger.debug(f"Evaluating task {task} and adding relevant context.")
            # TODO: Look up relevant memories (need working memory system).
            # TODO: Evaluate whether there is enough information to start the task
            #   (language model call).
            task.context.enough_info = True
            task.context.status = TaskStatus.IN_PROGRESS
            return task

    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
        """Choose the next ability to use for the task."""
        self._logger.debug(f"Choosing next ability for task {task}.")
        if task.context.cycle_count > self._configuration.max_task_cycle_count:
            # Don't hit the LLM, just set the next action as "breakdown_task"
            # with an appropriate reason.
            raise NotImplementedError
        elif not task.context.enough_info:
            # Don't ask the LLM, just set the next action as "breakdown_task"
            # with an appropriate reason.
            raise NotImplementedError
        else:
            next_ability = await self._planning.determine_next_ability(
                task, ability_schema
            )
            return next_ability

    async def _update_tasks_and_memory(self, ability_result: AbilityResult):
        self._current_task.context.cycle_count += 1
        self._current_task.context.prior_actions.append(ability_result)
        # TODO: Summarize new knowledge.
        # TODO: Store knowledge and summaries in memory and in relevant tasks.
        # TODO: Evaluate whether the task is complete.

    def __repr__(self):
        return "SimpleAgent()"

    ################################################################
    # Factory interface for agent bootstrapping and initialization #
    ################################################################

    @classmethod
    def build_user_configuration(cls) -> dict[str, Any]:
        """Build the user's configuration."""
        configuration_dict = {
            "agent": cls.get_user_config(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]
        for system_name, system_location in system_locations.items():
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.get_user_config()
        configuration_dict = _prune_empty_dicts(configuration_dict)
        return configuration_dict

    @classmethod
    def compile_settings(
        cls, logger: logging.Logger, user_configuration: dict
    ) -> AgentSettings:
        """Compile the user's configuration with the defaults."""
        logger.debug("Processing agent system configuration.")
        configuration_dict = {
            "agent": cls.build_agent_configuration(
                user_configuration.get("agent", {})
            ).dict(),
        }

        system_locations = configuration_dict["agent"]["configuration"]["systems"]

        # Build up default configuration
        for system_name, system_location in system_locations.items():
            logger.debug(f"Compiling configuration for system {system_name}")
            system_class = SimplePluginService.get_plugin(system_location)
            configuration_dict[system_name] = system_class.build_agent_configuration(
                user_configuration.get(system_name, {})
            ).dict()

        return AgentSettings.parse_obj(configuration_dict)

    @classmethod
    async def determine_agent_name_and_goals(
        cls,
        user_objective: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ) -> dict:
        logger.debug("Loading OpenAI provider.")
        provider: OpenAIProvider = cls._get_system_instance(
            "openai_provider",
            agent_settings,
            logger=logger,
        )
        logger.debug("Loading agent planner.")
        agent_planner: SimplePlanner = cls._get_system_instance(
            "planning",
            agent_settings,
            logger=logger,
            model_providers={"openai": provider},
        )
        logger.debug("Determining agent name and goals.")
        model_response = await agent_planner.decide_name_and_goals(
            user_objective,
        )

        return model_response.content

    @classmethod
    def provision_agent(
        cls,
        agent_settings: AgentSettings,
        logger: logging.Logger,
    ):
        agent_settings.agent.configuration.creation_time = datetime.now().strftime(
            "%Y%m%d_%H%M%S"
        )
        workspace: SimpleWorkspace = cls._get_system_instance(
            "workspace",
            agent_settings,
            logger=logger,
        )
        return workspace.setup_workspace(agent_settings, logger)

    @classmethod
    def _get_system_instance(
        cls,
        system_name: str,
        agent_settings: AgentSettings,
        logger: logging.Logger,
        *args,
        **kwargs,
    ):
        system_locations = agent_settings.agent.configuration.systems.dict()

        system_settings = getattr(agent_settings, system_name)
        system_class = SimplePluginService.get_plugin(system_locations[system_name])
        system_instance = system_class(
            system_settings,
            *args,
            logger=logger.getChild(system_name),
            **kwargs,
        )
        return system_instance


def _prune_empty_dicts(d: dict) -> dict:
    """
    Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves.

    Args:
        d: The dictionary to prune.

    Returns:
        The pruned dictionary.
    """
    pruned = {}
    for key, value in d.items():
        if isinstance(value, dict):
            pruned_value = _prune_empty_dicts(value)
            if pruned_value:
                # Keep the branch only if something survives pruning.
                pruned[key] = pruned_value
        else:
            pruned[key] = value
    return pruned
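`_prune_empty_dicts` is easiest to understand on a small input (the dictionary below is invented for the example):

```
from autogpt.core.agent.simple import _prune_empty_dicts

nested = {"agent": {"configuration": {}}, "memory": {"name": "simple_memory"}}
# The "agent" branch bottoms out in an empty dict, so the whole branch is dropped.
assert _prune_empty_dicts(nested) == {"memory": {"name": "simple_memory"}}
```
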
7 autogpt/core/configuration/__init__.py Normal file
@@ -0,0 +1,7 @@
"""The configuration encapsulates settings for all Agent subsystems."""
|
||||
from autogpt.core.configuration.schema import (
|
||||
Configurable,
|
||||
SystemConfiguration,
|
||||
SystemSettings,
|
||||
UserConfigurable,
|
||||
)
|
||||
107 autogpt/core/configuration/schema.py Normal file
@@ -0,0 +1,107 @@
import abc
import typing
from typing import Any, Generic, TypeVar

from pydantic import BaseModel, Field


def UserConfigurable(*args, **kwargs):
    return Field(*args, **kwargs, user_configurable=True)


class SystemConfiguration(BaseModel):
    def get_user_config(self) -> dict[str, Any]:
        return _get_user_config_fields(self)

    class Config:
        extra = "forbid"
        use_enum_values = True


class SystemSettings(BaseModel):
    """A base class for all system settings."""

    name: str
    description: str

    class Config:
        extra = "forbid"
        use_enum_values = True


S = TypeVar("S", bound=SystemSettings)


class Configurable(abc.ABC, Generic[S]):
    """A base class for all configurable objects."""

    prefix: str = ""
    default_settings: typing.ClassVar[S]

    @classmethod
    def get_user_config(cls) -> dict[str, Any]:
        return _get_user_config_fields(cls.default_settings)

    @classmethod
    def build_agent_configuration(cls, configuration: dict) -> S:
        """Process the configuration for this object."""

        defaults = cls.default_settings.dict()
        final_configuration = deep_update(defaults, configuration)

        return cls.default_settings.__class__.parse_obj(final_configuration)


def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
    """
    Get the user config fields of a Pydantic model instance.

    Args:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """
    user_config_fields = {}

    for name, value in instance.__dict__.items():
        field_info = instance.__fields__[name]
        if "user_configurable" in field_info.field_info.extra:
            user_config_fields[name] = value
        elif isinstance(value, SystemConfiguration):
            user_config_fields[name] = value.get_user_config()
        elif isinstance(value, list) and all(
            isinstance(i, SystemConfiguration) for i in value
        ):
            user_config_fields[name] = [i.get_user_config() for i in value]
        elif isinstance(value, dict) and all(
            isinstance(i, SystemConfiguration) for i in value.values()
        ):
            user_config_fields[name] = {
                k: v.get_user_config() for k, v in value.items()
            }

    return user_config_fields


def deep_update(original_dict: dict, update_dict: dict) -> dict:
    """
    Recursively update a dictionary.

    Args:
        original_dict (dict): The dictionary to be updated.
        update_dict (dict): The dictionary to update with.

    Returns:
        dict: The updated dictionary.
    """
    for key, value in update_dict.items():
        if (
            key in original_dict
            and isinstance(original_dict[key], dict)
            and isinstance(value, dict)
        ):
            original_dict[key] = deep_update(original_dict[key], value)
        else:
            original_dict[key] = value
    return original_dict
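`deep_update` merges override values into the defaults without clobbering sibling keys, which is what lets `build_agent_configuration` accept a partial user configuration. A worked example (values invented):

```
from autogpt.core.configuration.schema import deep_update

defaults = {"fast_model": {"model_name": "gpt-3.5-turbo", "temperature": 0.9}}
overrides = {"fast_model": {"temperature": 0.2}}
merged = deep_update(defaults, overrides)
# Only the overlapping leaf changes; the sibling key survives.
assert merged == {"fast_model": {"model_name": "gpt-3.5-turbo", "temperature": 0.2}}
```
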
3 autogpt/core/memory/__init__.py Normal file
@@ -0,0 +1,3 @@
"""The memory subsystem manages the Agent's long-term memory."""
|
||||
from autogpt.core.memory.base import Memory
|
||||
from autogpt.core.memory.simple import MemorySettings, SimpleMemory
|
||||
13 autogpt/core/memory/base.py Normal file
@@ -0,0 +1,13 @@
import abc


class Memory(abc.ABC):
    pass


class MemoryItem(abc.ABC):
    pass


class MessageHistory(abc.ABC):
    pass
47 autogpt/core/memory/simple.py Normal file
@@ -0,0 +1,47 @@
import json
import logging

from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
from autogpt.core.memory.base import Memory
from autogpt.core.workspace import Workspace


class MemoryConfiguration(SystemConfiguration):
    pass


class MemorySettings(SystemSettings):
    configuration: MemoryConfiguration


class MessageHistory:
    def __init__(self, previous_message_history: list[str]):
        self._message_history = previous_message_history


class SimpleMemory(Memory, Configurable):
    default_settings = MemorySettings(
        name="simple_memory",
        description="A simple memory.",
        configuration=MemoryConfiguration(),
    )

    def __init__(
        self,
        settings: MemorySettings,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._configuration = settings.configuration
        self._logger = logger
        self._message_history = self._load_message_history(workspace)

    @staticmethod
    def _load_message_history(workspace: Workspace):
        message_history_path = workspace.get_path("message_history.json")
        if message_history_path.exists():
            with message_history_path.open("r") as f:
                message_history = json.load(f)
        else:
            message_history = []
        return MessageHistory(message_history)
10 autogpt/core/planning/__init__.py Normal file
@@ -0,0 +1,10 @@
"""The planning system organizes the Agent's activities."""
|
||||
from autogpt.core.planning.schema import (
|
||||
LanguageModelClassification,
|
||||
LanguageModelPrompt,
|
||||
LanguageModelResponse,
|
||||
Task,
|
||||
TaskStatus,
|
||||
TaskType,
|
||||
)
|
||||
from autogpt.core.planning.simple import PlannerSettings, SimplePlanner
|
||||
76 autogpt/core/planning/base.py Normal file
@@ -0,0 +1,76 @@
import abc

from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)

# class Planner(abc.ABC):
#     """Manages the agent's planning and goal-setting by constructing language model prompts."""
#
#     @staticmethod
#     @abc.abstractmethod
#     async def decide_name_and_goals(
#         user_objective: str,
#     ) -> LanguageModelResponse:
#         """Decide the name and goals of an Agent from a user-defined objective.
#
#         Args:
#             user_objective: The user-defined objective for the agent.
#
#         Returns:
#             The agent name and goals as a response from the language model.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     async def plan(self, context: PlanningContext) -> LanguageModelResponse:
#         """Plan the next ability for the Agent.
#
#         Args:
#             context: A context object containing information about the agent's
#                 progress, result, memories, and feedback.
#
#         Returns:
#             The next ability the agent should take along with thoughts and reasoning.
#
#         """
#         ...
#
#     @abc.abstractmethod
#     def reflect(
#         self,
#         context: ReflectionContext,
#     ) -> LanguageModelResponse:
#         """Reflect on a planned ability and provide self-criticism.
#
#         Args:
#             context: A context object containing information about the agent's
#                 reasoning, plan, thoughts, and criticism.
#
#         Returns:
#             Self-criticism about the agent's plan.
#
#         """
#         ...


class PromptStrategy(abc.ABC):
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> dict:
        ...
76 autogpt/core/planning/schema.py Normal file
@@ -0,0 +1,76 @@
import enum

from pydantic import BaseModel, Field

from autogpt.core.ability.schema import AbilityResult
from autogpt.core.resource.model_providers.schema import (
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProviderModelResponse,
)


class LanguageModelClassification(str, enum.Enum):
    """The LanguageModelClassification is a functional description of the model.

    This is used to determine what kind of model to use for a given prompt.
    Sometimes we prefer a faster or cheaper model to accomplish a task when
    possible.

    """

    FAST_MODEL: str = "fast_model"
    SMART_MODEL: str = "smart_model"


class LanguageModelPrompt(BaseModel):
    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])


class LanguageModelResponse(LanguageModelProviderModelResponse):
    """Standard response struct for a response from a language model."""


class TaskType(str, enum.Enum):
    RESEARCH: str = "research"
    WRITE: str = "write"
    EDIT: str = "edit"
    CODE: str = "code"
    DESIGN: str = "design"
    TEST: str = "test"
    PLAN: str = "plan"


class TaskStatus(str, enum.Enum):
    BACKLOG: str = "backlog"
    READY: str = "ready"
    IN_PROGRESS: str = "in_progress"
    DONE: str = "done"


class TaskContext(BaseModel):
    cycle_count: int = 0
    status: TaskStatus = TaskStatus.BACKLOG
    parent: "Task" = None
    prior_actions: list[AbilityResult] = Field(default_factory=list)
    memories: list = Field(default_factory=list)
    user_input: list[str] = Field(default_factory=list)
    supplementary_info: list[str] = Field(default_factory=list)
    enough_info: bool = False


class Task(BaseModel):
    objective: str
    type: str  # TaskType  FIXME: gpt does not obey the enum parameter in its schema
    priority: int
    ready_criteria: list[str]
    acceptance_criteria: list[str]
    context: TaskContext = Field(default_factory=TaskContext)


# Need to resolve the circular dependency between Task and TaskContext once both models are defined.
TaskContext.update_forward_refs()
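A `Task` as the planner produces it might look like the following; the field values are invented, and `context` is filled in with a fresh `TaskContext` by default:

```
from autogpt.core.planning.schema import Task, TaskStatus

task = Task(
    objective="Write the project README",
    type="write",
    priority=5,
    ready_criteria=["Project structure is known"],
    acceptance_criteria=["README renders without errors"],
)
assert task.context.status == TaskStatus.BACKLOG
```
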
182 autogpt/core/planning/simple.py Normal file
@@ -0,0 +1,182 @@
import logging
import platform
import time

import distro

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.planning import strategies
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelResponse,
    Task,
)
from autogpt.core.resource.model_providers import (
    LanguageModelProvider,
    ModelProviderName,
    OpenAIModelName,
)
from autogpt.core.workspace import Workspace


class LanguageModelConfiguration(SystemConfiguration):
    """Struct for model configuration."""

    model_name: str = UserConfigurable()
    provider_name: ModelProviderName = UserConfigurable()
    temperature: float = UserConfigurable()


class PromptStrategiesConfiguration(SystemConfiguration):
    name_and_goals: strategies.NameAndGoalsConfiguration
    initial_plan: strategies.InitialPlanConfiguration
    next_ability: strategies.NextAbilityConfiguration


class PlannerConfiguration(SystemConfiguration):
    """Configuration for the Planner subsystem."""

    models: dict[LanguageModelClassification, LanguageModelConfiguration]
    prompt_strategies: PromptStrategiesConfiguration


class PlannerSettings(SystemSettings):
    """Settings for the Planner subsystem."""

    configuration: PlannerConfiguration


class SimplePlanner(Configurable):
    """Manages the agent's planning and goal-setting by constructing language model prompts."""

    default_settings = PlannerSettings(
        name="planner",
        description="Manages the agent's planning and goal-setting by constructing language model prompts.",
        configuration=PlannerConfiguration(
            models={
                LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT3,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
                LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
                    model_name=OpenAIModelName.GPT4,
                    provider_name=ModelProviderName.OPENAI,
                    temperature=0.9,
                ),
            },
            prompt_strategies=PromptStrategiesConfiguration(
                name_and_goals=strategies.NameAndGoals.default_configuration,
                initial_plan=strategies.InitialPlan.default_configuration,
                next_ability=strategies.NextAbility.default_configuration,
            ),
        ),
    )

    def __init__(
        self,
        settings: PlannerSettings,
        logger: logging.Logger,
        model_providers: dict[ModelProviderName, LanguageModelProvider],
        workspace: Workspace = None,  # Workspace is not available during bootstrapping.
    ) -> None:
        self._configuration = settings.configuration
        self._logger = logger
        self._workspace = workspace

        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
        for model, model_config in self._configuration.models.items():
            self._providers[model] = model_providers[model_config.provider_name]

        self._prompt_strategies = {
            "name_and_goals": strategies.NameAndGoals(
                **self._configuration.prompt_strategies.name_and_goals.dict()
            ),
            "initial_plan": strategies.InitialPlan(
                **self._configuration.prompt_strategies.initial_plan.dict()
            ),
            "next_ability": strategies.NextAbility(
                **self._configuration.prompt_strategies.next_ability.dict()
            ),
        }

    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["name_and_goals"],
            user_objective=user_objective,
        )

    async def make_initial_plan(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
    ) -> LanguageModelResponse:
        return await self.chat_with_model(
            self._prompt_strategies["initial_plan"],
            agent_name=agent_name,
            agent_role=agent_role,
            agent_goals=agent_goals,
            abilities=abilities,
        )

    async def determine_next_ability(
        self,
        task: Task,
        ability_schema: list[dict],
    ):
        return await self.chat_with_model(
            self._prompt_strategies["next_ability"],
            task=task,
            ability_schema=ability_schema,
        )

    async def chat_with_model(
        self,
        prompt_strategy: PromptStrategy,
        **kwargs,
    ) -> LanguageModelResponse:
        model_classification = prompt_strategy.model_classification
        model_configuration = self._configuration.models[model_classification].dict()
        self._logger.debug(f"Using model configuration: {model_configuration}")
        # The provider was already selected from provider_name in __init__;
        # the remaining keys are passed straight through to the provider call.
        del model_configuration["provider_name"]
        provider = self._providers[model_classification]

        template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
        template_kwargs.update(kwargs)
        prompt = prompt_strategy.build_prompt(**template_kwargs)

        self._logger.debug(f"Using prompt:\n{prompt}\n\n")
        response = await provider.create_language_completion(
            model_prompt=prompt.messages,
            functions=prompt.functions,
            **model_configuration,
            completion_parser=prompt_strategy.parse_response_content,
        )
        return LanguageModelResponse.parse_obj(response.dict())

    def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
        provider = self._providers[strategy.model_classification]
        template_kwargs = {
            "os_info": get_os_info(),
            "api_budget": provider.get_remaining_budget(),
            "current_time": time.strftime("%c"),
        }
        return template_kwargs


def get_os_info() -> str:
    os_name = platform.system()
    os_info = (
        platform.platform(terse=True)
        if os_name != "Linux"
        else distro.name(pretty=True)
    )
    return os_info
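Standing up the planner outside the full agent takes only its default settings plus a provider; in this sketch `provider` stands in for a `LanguageModelProvider` built elsewhere (an assumption of the example):

```
import logging

from autogpt.core.planning.simple import SimplePlanner
from autogpt.core.resource.model_providers import ModelProviderName


def build_planner(provider) -> SimplePlanner:
    return SimplePlanner(
        settings=SimplePlanner.default_settings,
        logger=logging.getLogger("demo"),
        model_providers={ModelProviderName.OPENAI: provider},
    )


# response = await build_planner(provider).decide_name_and_goals("Grow a newsletter")
```
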
12 autogpt/core/planning/strategies/__init__.py Normal file
@@ -0,0 +1,12 @@
from autogpt.core.planning.strategies.initial_plan import (
    InitialPlan,
    InitialPlanConfiguration,
)
from autogpt.core.planning.strategies.name_and_goals import (
    NameAndGoals,
    NameAndGoalsConfiguration,
)
from autogpt.core.planning.strategies.next_ability import (
    NextAbility,
    NextAbilityConfiguration,
)
190 autogpt/core/planning/strategies/initial_plan.py Normal file
@@ -0,0 +1,190 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
    TaskType,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class InitialPlanConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_plan_function: dict = UserConfigurable()


class InitialPlan(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
        "You are an expert project planner. Your responsibility is to create work plans for autonomous agents. "
        "You will be given a name, a role, and a set of goals for the agent to accomplish. Your job is to "
        "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. "
        "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined "
        "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should "
        "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. "
        "You should create as many tasks as you think is necessary to accomplish the goals.\n\n"
        "System Info:\n{system_info}"
    )

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "You are {agent_name}, {agent_role}\n"
        "Your goals are:\n"
        "{agent_goals}"
    )

    DEFAULT_CREATE_PLAN_FUNCTION = {
        "name": "create_initial_agent_plan",
        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
        "parameters": {
            "type": "object",
            "properties": {
                "task_list": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "objective": {
                                "type": "string",
                                "description": "An imperative verb phrase that succinctly describes the task.",
                            },
                            "type": {
                                "type": "string",
                                "description": "A categorization for the task.",
                                "enum": [t.value for t in TaskType],
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
                                },
                            },
                            "priority": {
                                "type": "integer",
                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
                                "minimum": 1,
                                "maximum": 10,
                            },
                            "ready_criteria": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
                                },
                            },
                        },
                        "required": [
                            "objective",
                            "type",
                            "acceptance_criteria",
                            "priority",
                            "ready_criteria",
                        ],
                    },
                },
            },
        },
    }

    default_configuration = InitialPlanConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        create_plan_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._create_plan_function = create_plan_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        agent_name: str,
        agent_role: str,
        agent_goals: list[str],
        abilities: list[str],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "agent_name": agent_name,
            "agent_role": agent_role,
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }
        template_kwargs["agent_goals"] = to_numbered_list(
            agent_goals, **template_kwargs
        )
        template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
        template_kwargs["system_info"] = to_numbered_list(
            self._system_info, **template_kwargs
|
||||
)
|
||||
|
||||
system_prompt = LanguageModelMessage(
|
||||
role=MessageRole.SYSTEM,
|
||||
content=self._system_prompt_template.format(**template_kwargs),
|
||||
)
|
||||
user_prompt = LanguageModelMessage(
|
||||
role=MessageRole.USER,
|
||||
content=self._user_prompt_template.format(**template_kwargs),
|
||||
)
|
||||
create_plan_function = LanguageModelFunction(
|
||||
json_schema=self._create_plan_function,
|
||||
)
|
||||
|
||||
return LanguageModelPrompt(
|
||||
messages=[system_prompt, user_prompt],
|
||||
functions=[create_plan_function],
|
||||
# TODO:
|
||||
tokens_used=0,
|
||||
)
|
||||
|
||||
def parse_response_content(
|
||||
self,
|
||||
response_content: dict,
|
||||
) -> dict:
|
||||
"""Parse the actual text response from the objective model.
|
||||
|
||||
Args:
|
||||
response_content: The raw response content from the objective model.
|
||||
|
||||
Returns:
|
||||
The parsed response.
|
||||
|
||||
"""
|
||||
parsed_response = json_loads(response_content["function_call"]["arguments"])
|
||||
parsed_response["task_list"] = [
|
||||
Task.parse_obj(task) for task in parsed_response["task_list"]
|
||||
]
|
||||
return parsed_response
|
||||
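Note: a minimal sketch (not part of this diff; the payload values, including the task type, are invented for illustration) of the function-call response shape that parse_response_content above consumes:

# Illustrative only: a mock OpenAI-style function-call payload for InitialPlan.
mock_response_content = {
    "function_call": {
        "name": "create_initial_agent_plan",
        "arguments": (
            '{"task_list": [{"objective": "Draft the article outline", '
            '"type": "write", "acceptance_criteria": ["Outline saved to a file"], '
            '"priority": 1, "ready_criteria": ["Topic is known"]}]}'
        ),
    }
}
# strategy.parse_response_content(mock_response_content) returns a dict whose
# "task_list" entries have been parsed into Task models via Task.parse_obj.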
139  autogpt/core/planning/strategies/name_and_goals.py  Normal file
@@ -0,0 +1,139 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
)
from autogpt.core.planning.strategies.utils import json_loads
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NameAndGoalsConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt: str = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    create_agent_function: dict = UserConfigurable()


class NameAndGoals(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT = (
        "Your job is to respond to a user-defined task by invoking the `create_agent` function "
        "to generate an autonomous agent to complete the task. You should supply a role-based "
        "name for the agent, an informative description for what the agent does, and 1 to 5 "
        "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
        "Example Input:\n"
        "Help me with marketing my business\n\n"
        "Example Function Call:\n"
        "create_agent(name='CMOGPT', "
        "description='A professional digital marketer AI that assists Solopreneurs in "
        "growing their businesses by providing world-class expertise in solving "
        "marketing problems for SaaS, content products, agencies, and more.', "
        "goals=['Engage in effective problem-solving, prioritization, planning, and "
        "supporting execution to address your marketing needs as your virtual Chief "
        "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
        "help you make informed decisions without the use of platitudes or overly "
        "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
        "campaigns that maximize results with minimal time and budget investment.', "
        "'Proactively take the lead in guiding you and offering suggestions when faced "
        "with unclear information or uncertainty to ensure your marketing strategy "
        "remains on track.'])"
    )

    DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"

    DEFAULT_CREATE_AGENT_FUNCTION = {
        "name": "create_agent",
        "description": "Create a new autonomous AI agent to complete a given task.",
        "parameters": {
            "type": "object",
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "A short role-based name for an autonomous agent.",
                },
                "agent_role": {
                    "type": "string",
                    "description": "An informative one sentence description of what the AI agent does.",
                },
                "agent_goals": {
                    "type": "array",
                    "minItems": 1,
                    "maxItems": 5,
                    "items": {
                        "type": "string",
                    },
                    "description": (
                        "One to five highly effective goals that are optimally aligned with the completion of a "
                        "specific task. The number and complexity of the goals should correspond to the "
                        "complexity of the agent's primary objective."
                    ),
                },
            },
            "required": ["agent_name", "agent_role", "agent_goals"],
        },
    }

    default_configuration = NameAndGoalsConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = create_agent_function

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
        system_message = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_message,
        )
        user_message = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(
                user_objective=user_objective,
            ),
        )
        create_agent_function = LanguageModelFunction(
            json_schema=self._create_agent_function,
        )
        prompt = LanguageModelPrompt(
            messages=[system_message, user_message],
            functions=[create_agent_function],
            # TODO
            tokens_used=0,
        )
        return prompt

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        parsed_response = json_loads(response_content["function_call"]["arguments"])
        return parsed_response
183  autogpt/core/planning/strategies/next_ability.py  Normal file
@@ -0,0 +1,183 @@
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
    LanguageModelClassification,
    LanguageModelPrompt,
    Task,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
    LanguageModelFunction,
    LanguageModelMessage,
    MessageRole,
)


class NextAbilityConfiguration(SystemConfiguration):
    model_classification: LanguageModelClassification = UserConfigurable()
    system_prompt_template: str = UserConfigurable()
    system_info: list[str] = UserConfigurable()
    user_prompt_template: str = UserConfigurable()
    additional_ability_arguments: dict = UserConfigurable()


class NextAbility(PromptStrategy):
    DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"

    DEFAULT_SYSTEM_INFO = [
        "The OS you are running on is: {os_info}",
        "It takes money to let you run. Your API budget is ${api_budget:.3f}",
        "The current time and date is {current_time}",
    ]

    DEFAULT_USER_PROMPT_TEMPLATE = (
        "Your current task is {task_objective}.\n"
        "You have taken {cycle_count} actions on this task already. "
        "Here are the actions you have taken and their results:\n"
        "{action_history}\n\n"
        "Here is additional information that may be useful to you:\n"
        "{additional_info}\n\n"
        "Additionally, you should consider the following:\n"
        "{user_input}\n\n"
        "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n"
        "{acceptance_criteria}\n\n"
        "Please choose one of the provided functions to accomplish this task. "
        "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that "
        "you think is most appropriate for the current situation given your progress so far."
    )

    DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
        "motivation": {
            "type": "string",
            "description": "Your justification for choosing this function instead of a different one.",
        },
        "self_criticism": {
            "type": "string",
            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
        },
        "reasoning": {
            "type": "string",
            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
        },
    }

    default_configuration = NextAbilityConfiguration(
        model_classification=LanguageModelClassification.SMART_MODEL,
        system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
        system_info=DEFAULT_SYSTEM_INFO,
        user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
    )

    def __init__(
        self,
        model_classification: LanguageModelClassification,
        system_prompt_template: str,
        system_info: list[str],
        user_prompt_template: str,
        additional_ability_arguments: dict,
    ):
        self._model_classification = model_classification
        self._system_prompt_template = system_prompt_template
        self._system_info = system_info
        self._user_prompt_template = user_prompt_template
        self._additional_ability_arguments = additional_ability_arguments

    @property
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(
        self,
        task: Task,
        ability_schema: list[dict],
        os_info: str,
        api_budget: float,
        current_time: str,
        **kwargs,
    ) -> LanguageModelPrompt:
        template_kwargs = {
            "os_info": os_info,
            "api_budget": api_budget,
            "current_time": current_time,
            **kwargs,
        }

        for ability in ability_schema:
            ability["parameters"]["properties"].update(
                self._additional_ability_arguments
            )
            ability["parameters"]["required"] += list(
                self._additional_ability_arguments.keys()
            )

        template_kwargs["task_objective"] = task.objective
        template_kwargs["cycle_count"] = task.context.cycle_count
        template_kwargs["action_history"] = to_numbered_list(
            [action.summary() for action in task.context.prior_actions],
            no_items_response="You have not taken any actions yet.",
            **template_kwargs,
        )
        template_kwargs["additional_info"] = to_numbered_list(
            [memory.summary() for memory in task.context.memories]
            + [info for info in task.context.supplementary_info],
            no_items_response="There is no additional information available at this time.",
            **template_kwargs,
        )
        template_kwargs["user_input"] = to_numbered_list(
            [user_input for user_input in task.context.user_input],
            no_items_response="There are no additional considerations at this time.",
            **template_kwargs,
        )
        template_kwargs["acceptance_criteria"] = to_numbered_list(
            [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
            **template_kwargs,
        )

        template_kwargs["system_info"] = to_numbered_list(
            self._system_info,
            **template_kwargs,
        )

        system_prompt = LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content=self._system_prompt_template.format(**template_kwargs),
        )
        user_prompt = LanguageModelMessage(
            role=MessageRole.USER,
            content=self._user_prompt_template.format(**template_kwargs),
        )
        functions = [
            LanguageModelFunction(json_schema=ability) for ability in ability_schema
        ]

        return LanguageModelPrompt(
            messages=[system_prompt, user_prompt],
            functions=functions,
            # TODO:
            tokens_used=0,
        )

    def parse_response_content(
        self,
        response_content: dict,
    ) -> dict:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.

        """
        function_name = response_content["function_call"]["name"]
        function_arguments = json_loads(response_content["function_call"]["arguments"])
        parsed_response = {
            "motivation": function_arguments.pop("motivation"),
            "self_criticism": function_arguments.pop("self_criticism"),
            "reasoning": function_arguments.pop("reasoning"),
            "next_ability": function_name,
            "ability_arguments": function_arguments,
        }
        return parsed_response
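Note: build_prompt above mutates each ability schema in place so the model must also return the three reflection fields. A minimal sketch (the read_file schema is invented for illustration):

# Illustrative only: merging DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS into an ability schema.
ability = {
    "name": "read_file",
    "parameters": {
        "type": "object",
        "properties": {"filename": {"type": "string"}},
        "required": ["filename"],
    },
}
additional = NextAbility.DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS
ability["parameters"]["properties"].update(additional)
ability["parameters"]["required"] += list(additional.keys())
# The model must now also supply motivation, self_criticism, and reasoning,
# which parse_response_content pops back out of the function arguments.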
27  autogpt/core/planning/strategies/utils.py  Normal file
@@ -0,0 +1,27 @@
import ast
import json


def to_numbered_list(
    items: list[str], no_items_response: str = "", **template_args
) -> str:
    if items:
        return "\n".join(
            f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
        )
    else:
        return no_items_response


def json_loads(json_str: str):
    # TODO: this is a hack function for now. Trying to see what errors show up in testing.
    # Can hopefully just replace with a call to ast.literal_eval (the function api still
    # sometimes returns json strings with minor issues like trailing commas).
    try:
        return json.loads(json_str)
    except json.decoder.JSONDecodeError as e:
        try:
            print(f"json decode error {e}. trying literal eval")
            return ast.literal_eval(json_str)
        except Exception:
            breakpoint()
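Note: a quick usage sketch of the two helpers above (expected outputs shown as comments):

# to_numbered_list numbers the items and applies template args to each entry.
print(to_numbered_list(["Run {target}", "Review results"], target="tests"))
# 1. Run tests
# 2. Review results
print(to_numbered_list([], no_items_response="Nothing to do."))
# Nothing to do.

# json_loads falls back to ast.literal_eval for almost-JSON such as trailing commas.
print(json_loads('{"a": 1,}'))  # logs the decode error, then succeeds
# {'a': 1}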
102  autogpt/core/planning/templates.py  Normal file
@@ -0,0 +1,102 @@
# Rules of thumb:
# - Templates don't add new lines at the end of the string. This is the
#   responsibility of the consuming template.

####################
# Planner defaults #
####################


USER_OBJECTIVE = (
    "Write a wikipedia style article about the project: "
    "https://github.com/significant-gravitas/Auto-GPT"
)


ABILITIES = (
    'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
    'execute_python_file: Execute Python File, args: "filename": "<filename>"',
    'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
    'delete_file: Delete file, args: "filename": "<filename>"',
    'list_files: List Files in Directory, args: "directory": "<directory>"',
    'read_file: Read a file, args: "filename": "<filename>"',
    'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
    'google: Google Search, args: "query": "<query>"',
    'improve_code: Get Improved Code, args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
    'browse_website: Browse Website, args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"',
    'write_tests: Write Tests, args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
    'get_hyperlinks: Get hyperlinks, args: "url": "<url>"',
    'get_text_summary: Get text summary, args: "url": "<url>", "question": "<question>"',
    'task_complete: Task Complete (Shutdown), args: "reason": "<reason>"',
)


# Plan Prompt
# -----------


PLAN_PROMPT_CONSTRAINTS = (
    "~4000 word limit for short term memory. Your short term memory is short, so "
    "immediately save important information to files.",
    "If you are unsure how you previously did something or want to recall past "
    "events, thinking about similar events will help you remember.",
    "No user assistance",
    "Exclusively use the commands listed below e.g. command_name",
)

PLAN_PROMPT_RESOURCES = (
    "Internet access for searches and information gathering.",
    "Long-term memory management.",
    "File output.",
)

PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
    "Continuously review and analyze your actions to ensure you are performing to"
    " the best of your abilities.",
    "Constructively self-criticize your big-picture behavior constantly.",
    "Reflect on past decisions and strategies to refine your approach.",
    "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
    " the least number of steps.",
    "Write all code to a file",
)


PLAN_PROMPT_RESPONSE_DICT = {
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user",
    },
    "command": {"name": "command name", "args": {"arg name": "value"}},
}

PLAN_PROMPT_RESPONSE_FORMAT = (
    "You should only respond in JSON format as described below\n"
    "Response Format:\n"
    "{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)

PLAN_TRIGGERING_PROMPT = (
    "Determine which next command to use, and respond using the format specified above:"
)

PLAN_PROMPT_MAIN = (
    "{header}\n\n"
    "GOALS:\n\n{goals}\n\n"
    "Info:\n{info}\n\n"
    "Constraints:\n{constraints}\n\n"
    "Commands:\n{commands}\n\n"
    "Resources:\n{resources}\n\n"
    "Performance Evaluations:\n{performance_evaluations}\n\n"
    "You should only respond in JSON format as described below\n"
    "Response Format:\n{response_json_structure}\n"
    "Ensure the response can be parsed by Python json.loads"
)


###########################
# Parameterized templates #
###########################
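Note: a minimal sketch (not part of this diff) of how the response-format template composes with the response dict above:

import json

# Illustrative only: render the format instructions that get shown to the model.
rendered = PLAN_PROMPT_RESPONSE_FORMAT.format(
    response_json_structure=json.dumps(PLAN_PROMPT_RESPONSE_DICT, indent=4)
)
print(rendered)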
2  autogpt/core/plugin/__init__.py  Normal file
@@ -0,0 +1,2 @@
"""The plugin system allows the Agent to be extended with new functionality."""
from autogpt.core.plugin.base import PluginService
155  autogpt/core/plugin/base.py  Normal file
@@ -0,0 +1,155 @@
import abc
import enum
from typing import TYPE_CHECKING, Type

from pydantic import BaseModel

from autogpt.core.configuration import SystemConfiguration, UserConfigurable

if TYPE_CHECKING:
    from autogpt.core.ability import Ability, AbilityRegistry
    from autogpt.core.memory import Memory
    from autogpt.core.resource.model_providers import (
        EmbeddingModelProvider,
        LanguageModelProvider,
    )

# Expand to other types as needed. The names are quoted because they are only
# imported under TYPE_CHECKING above.
PluginType = (
    Type["Ability"]  # Swappable now
    | Type["AbilityRegistry"]  # Swappable maybe never
    | Type["LanguageModelProvider"]  # Swappable soon
    | Type["EmbeddingModelProvider"]  # Swappable soon
    | Type["Memory"]  # Swappable now
    # | Type["Planner"]  # Swappable soon
)


class PluginStorageFormat(str, enum.Enum):
    """Supported plugin storage formats.

    Plugins can be stored at one of these supported locations.

    """

    INSTALLED_PACKAGE = "installed_package"  # Required now, loads system defaults
    WORKSPACE = "workspace"  # Required now
    # OPENAPI_URL = "open_api_url"  # Soon (requires some tooling we don't have yet).
    # OTHER_FILE_PATH = "other_file_path"  # Maybe later (maybe now)
    # GIT = "git"  # Maybe later (or soon)
    # PYPI = "pypi"  # Maybe later
    # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service"  # Long term solution, requires design
    # AUTO = "auto"  # Feature for later maybe, automatically find plugin.


# Installed package example
# PluginLocation(
#     storage_format='installed_package',
#     storage_route='autogpt_plugins.twitter.SendTwitterMessage'
# )
# Workspace example
# PluginLocation(
#     storage_format='workspace',
#     storage_route='relative/path/to/plugin.pkl'
#     OR
#     storage_route='relative/path/to/plugin.py'
# )
# Git
# PluginLocation(
#     storage_format='git',
#     Exact format TBD.
#     storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
# )
# PyPI
# PluginLocation(
#     storage_format='pypi',
#     storage_route='package_name'
# )


# A plugin storage route.
#
# This is a string that specifies where to load a plugin from
# (e.g. an import path or file path).
PluginStorageRoute = str


class PluginLocation(SystemConfiguration):
    """A plugin location.

    This is a combination of a plugin storage format and a plugin storage route.
    It is used by the PluginService to load plugins.

    """

    storage_format: PluginStorageFormat = UserConfigurable()
    storage_route: PluginStorageRoute = UserConfigurable()


class PluginMetadata(BaseModel):
    """Metadata about a plugin."""

    name: str
    description: str
    location: PluginLocation


class PluginService(abc.ABC):
    """Base class for plugin service.

    The plugin service should be stateless. This defines the interface for
    loading plugins from various storage formats.

    """

    @staticmethod
    @abc.abstractmethod
    def get_plugin(plugin_location: PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        ...

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    @abc.abstractmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        ...

    @staticmethod
    @abc.abstractmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        ...

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    @abc.abstractmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        ...

    @staticmethod
    @abc.abstractmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an installed package."""
        ...
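Note: a minimal sketch of declaring a plugin location against this interface (the storage route reuses the installed-package example from the comments above):

# Illustrative only: a PluginLocation for an installed-package plugin.
location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt_plugins.twitter.SendTwitterMessage",
)
# A concrete PluginService (e.g. SimplePluginService below) resolves this
# to the plugin class: plugin_class = SimplePluginService.get_plugin(location)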
74  autogpt/core/plugin/simple.py  Normal file
@@ -0,0 +1,74 @@
from importlib import import_module
from typing import TYPE_CHECKING

from autogpt.core.plugin.base import (
    PluginLocation,
    PluginService,
    PluginStorageFormat,
    PluginStorageRoute,
)

if TYPE_CHECKING:
    from autogpt.core.plugin.base import PluginType


class SimplePluginService(PluginService):
    @staticmethod
    def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
        """Get a plugin from a plugin location."""
        if isinstance(plugin_location, dict):
            plugin_location = PluginLocation.parse_obj(plugin_location)
        if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
            return SimplePluginService.load_from_workspace(
                plugin_location.storage_route
            )
        elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
            return SimplePluginService.load_from_installed_package(
                plugin_location.storage_route
            )
        else:
            raise NotImplementedError(
                f"Plugin storage format {plugin_location.storage_format} is not implemented."
            )

    ####################################
    # Low-level storage format loaders #
    ####################################
    @staticmethod
    def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from a file path."""
        # TODO: Define an on disk storage format and implement this.
        #   Can pull from existing zip file loading implementation
        raise NotImplementedError("Loading from file path is not implemented.")

    @staticmethod
    def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from an import path."""
        module_path, _, class_name = plugin_route.rpartition(".")
        return getattr(import_module(module_path), class_name)

    @staticmethod
    def resolve_name_to_path(
        plugin_route: PluginStorageRoute, path_type: str
    ) -> PluginStorageRoute:
        """Resolve a plugin name to a plugin path."""
        # TODO: Implement a discovery system for finding plugins by name from known
        #   storage locations. E.g. if we know that path_type is a file path, we can
        #   search the workspace for it. If it's an import path, we can check the core
        #   system and the auto_gpt_plugins package.
        raise NotImplementedError("Resolving plugin name to path is not implemented.")

    #####################################
    # High-level storage format loaders #
    #####################################

    @staticmethod
    def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
        """Load a plugin from the workspace."""
        plugin = SimplePluginService.load_from_file_path(plugin_route)
        return plugin

    @staticmethod
    def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
        plugin = SimplePluginService.load_from_import_path(plugin_route)
        return plugin
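Note: load_from_import_path is just rpartition plus importlib; a self-contained sketch using a stdlib class as a stand-in route:

from importlib import import_module

# Illustrative only: any importable dotted path resolves the same way.
route = "collections.abc.Mapping"
module_path, _, class_name = route.rpartition(".")
cls = getattr(import_module(module_path), class_name)
assert cls.__name__ == "Mapping"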
7  autogpt/core/resource/__init__.py  Normal file
@@ -0,0 +1,7 @@
from autogpt.core.resource.schema import (
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)
44  autogpt/core/resource/model_providers/__init__.py  Normal file
@@ -0,0 +1,44 @@
from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_MODELS,
    OpenAIModelName,
    OpenAIProvider,
    OpenAISettings,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    MessageRole,
    ModelProvider,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderModelInfo,
    ModelProviderModelResponse,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

__all__ = [
    "ModelProvider",
    "ModelProviderName",
    "ModelProviderSettings",
    "EmbeddingModelProvider",
    "EmbeddingModelProviderModelResponse",
    "LanguageModelProvider",
    "LanguageModelProviderModelResponse",
    "LanguageModelFunction",
    "LanguageModelMessage",
    "MessageRole",
    "OpenAIModelName",
    "OPEN_AI_MODELS",
    "OpenAIProvider",
    "OpenAISettings",
]
373  autogpt/core/resource/model_providers/openai.py  Normal file
@@ -0,0 +1,373 @@
import enum
import functools
import logging
import math
import time
from typing import Callable, ParamSpec, TypeVar

import openai
from openai.error import APIError, RateLimitError

from autogpt.core.configuration import (
    Configurable,
    SystemConfiguration,
    UserConfigurable,
)
from autogpt.core.resource.model_providers.schema import (
    Embedding,
    EmbeddingModelProvider,
    EmbeddingModelProviderModelInfo,
    EmbeddingModelProviderModelResponse,
    LanguageModelFunction,
    LanguageModelMessage,
    LanguageModelProvider,
    LanguageModelProviderModelInfo,
    LanguageModelProviderModelResponse,
    ModelProviderBudget,
    ModelProviderCredentials,
    ModelProviderName,
    ModelProviderService,
    ModelProviderSettings,
    ModelProviderUsage,
)

OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
OpenAIChatParser = Callable[[str], dict]


class OpenAIModelName(str, enum.Enum):
    ADA = "text-embedding-ada-002"
    GPT3 = "gpt-3.5-turbo-0613"
    GPT3_16K = "gpt-3.5-turbo-16k-0613"
    GPT4 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k-0613"


OPEN_AI_EMBEDDING_MODELS = {
    OpenAIModelName.ADA: EmbeddingModelProviderModelInfo(
        name=OpenAIModelName.ADA,
        service=ModelProviderService.EMBEDDING,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0004,
        completion_token_cost=0.0,
        max_tokens=8191,
        embedding_dimensions=1536,
    ),
}


OPEN_AI_LANGUAGE_MODELS = {
    OpenAIModelName.GPT3: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.0015,
        completion_token_cost=0.002,
        max_tokens=4096,
    ),
    OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT3_16K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.003,
        completion_token_cost=0.002,
        max_tokens=16384,
    ),
    OpenAIModelName.GPT4: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.03,
        completion_token_cost=0.06,
        max_tokens=8192,
    ),
    OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo(
        name=OpenAIModelName.GPT4_32K,
        service=ModelProviderService.LANGUAGE,
        provider_name=ModelProviderName.OPENAI,
        prompt_token_cost=0.06,
        completion_token_cost=0.12,
        max_tokens=32768,
    ),
}


OPEN_AI_MODELS = {
    **OPEN_AI_LANGUAGE_MODELS,
    **OPEN_AI_EMBEDDING_MODELS,
}


class OpenAIConfiguration(SystemConfiguration):
    retries_per_request: int = UserConfigurable()


class OpenAIModelProviderBudget(ModelProviderBudget):
    graceful_shutdown_threshold: float = UserConfigurable()
    warning_threshold: float = UserConfigurable()


class OpenAISettings(ModelProviderSettings):
    configuration: OpenAIConfiguration
    credentials: ModelProviderCredentials
    budget: OpenAIModelProviderBudget


class OpenAIProvider(
    Configurable,
    LanguageModelProvider,
    EmbeddingModelProvider,
):
    default_settings = OpenAISettings(
        name="openai_provider",
        description="Provides access to OpenAI's API.",
        configuration=OpenAIConfiguration(
            retries_per_request=10,
        ),
        credentials=ModelProviderCredentials(),
        budget=OpenAIModelProviderBudget(
            total_budget=math.inf,
            total_cost=0.0,
            remaining_budget=math.inf,
            usage=ModelProviderUsage(
                prompt_tokens=0,
                completion_tokens=0,
                total_tokens=0,
            ),
            graceful_shutdown_threshold=0.005,
            warning_threshold=0.01,
        ),
    )

    def __init__(
        self,
        settings: OpenAISettings,
        logger: logging.Logger,
    ):
        self._configuration = settings.configuration
        self._credentials = settings.credentials
        self._budget = settings.budget

        self._logger = logger

        retry_handler = _OpenAIRetryHandler(
            logger=self._logger,
            num_retries=self._configuration.retries_per_request,
        )

        self._create_completion = retry_handler(_create_completion)
        self._create_embedding = retry_handler(_create_embedding)

    def get_token_limit(self, model_name: str) -> int:
        """Get the token limit for a given model."""
        return OPEN_AI_MODELS[model_name].max_tokens

    def get_remaining_budget(self) -> float:
        """Get the remaining budget."""
        return self._budget.remaining_budget

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: OpenAIModelName,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        """Create a completion using the OpenAI API."""
        completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
        response = await self._create_completion(
            messages=model_prompt,
            **completion_kwargs,
        )
        response_args = {
            "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }

        parsed_response = completion_parser(
            response.choices[0].message.to_dict_recursive()
        )
        response = LanguageModelProviderModelResponse(
            content=parsed_response, **response_args
        )
        self._budget.update_usage_and_cost(response)
        return response

    async def create_embedding(
        self,
        text: str,
        model_name: OpenAIModelName,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        """Create an embedding using the OpenAI API."""
        embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs)
        response = await self._create_embedding(text=text, **embedding_kwargs)

        response_args = {
            "model_info": OPEN_AI_EMBEDDING_MODELS[model_name],
            "prompt_tokens_used": response.usage.prompt_tokens,
            "completion_tokens_used": response.usage.completion_tokens,
        }
        response = EmbeddingModelProviderModelResponse(
            **response_args,
            embedding=embedding_parser(response.embeddings[0]),
        )
        self._budget.update_usage_and_cost(response)
        return response

    def _get_completion_kwargs(
        self,
        model_name: OpenAIModelName,
        functions: list[LanguageModelFunction],
        **kwargs,
    ) -> dict:
        """Get kwargs for completion API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the chat API call.

        """
        completion_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }
        if functions:
            completion_kwargs["functions"] = functions

        return completion_kwargs

    def _get_embedding_kwargs(
        self,
        model_name: OpenAIModelName,
        **kwargs,
    ) -> dict:
        """Get kwargs for embedding API call.

        Args:
            model: The model to use.
            kwargs: Keyword arguments to override the default values.

        Returns:
            The kwargs for the embedding API call.

        """
        embedding_kwargs = {
            "model": model_name,
            **kwargs,
            **self._credentials.unmasked(),
        }

        return embedding_kwargs

    def __repr__(self):
        return "OpenAIProvider()"


async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
    """Embed text using the OpenAI API.

    Args:
        text str: The text to embed.
        model_name str: The name of the model to use.

    Returns:
        str: The embedding.
    """
    return await openai.Embedding.acreate(
        input=[text],
        **kwargs,
    )


async def _create_completion(
    messages: list[LanguageModelMessage], *_, **kwargs
) -> openai.Completion:
    """Create a chat completion using the OpenAI API.

    Args:
        messages: The prompt to use.

    Returns:
        The completion.

    """
    messages = [message.dict() for message in messages]
    if "functions" in kwargs:
        kwargs["functions"] = [
            function.json_schema for function in kwargs["functions"]
        ]
    return await openai.ChatCompletion.acreate(
        messages=messages,
        **kwargs,
    )


_T = TypeVar("_T")
_P = ParamSpec("_P")


class _OpenAIRetryHandler:
    """Retry Handler for OpenAI API call.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """

    _retry_limit_msg = "Error: Reached rate limit, passing..."
    _api_key_error_msg = (
        "Please double check that you have setup a PAID OpenAI API Account. You can "
        "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
    )
    _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."

    def __init__(
        self,
        logger: logging.Logger,
        num_retries: int = 10,
        backoff_base: float = 2.0,
        warn_user: bool = True,
    ):
        self._logger = logger
        self._num_retries = num_retries
        self._backoff_base = backoff_base
        self._warn_user = warn_user

    def _log_rate_limit_error(self) -> None:
        self._logger.debug(self._retry_limit_msg)
        if self._warn_user:
            self._logger.warning(self._api_key_error_msg)
            self._warn_user = False

    def _backoff(self, attempt: int) -> None:
        backoff = self._backoff_base ** (attempt + 2)
        self._logger.debug(self._backoff_msg.format(backoff=backoff))
        time.sleep(backoff)

    def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
        @functools.wraps(func)
        async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            num_attempts = self._num_retries + 1  # +1 for the first attempt
            for attempt in range(1, num_attempts + 1):
                try:
                    return await func(*args, **kwargs)

                except RateLimitError:
                    if attempt == num_attempts:
                        raise
                    self._log_rate_limit_error()

                except APIError as e:
                    if (e.http_status != 502) or (attempt == num_attempts):
                        raise

                self._backoff(attempt)

        return _wrapped
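Note: the backoff schedule in _OpenAIRetryHandler._backoff grows as backoff_base ** (attempt + 2); a quick sketch of the resulting waits:

# Illustrative only: sleep durations for the default backoff_base of 2.0.
backoff_base = 2.0
for attempt in range(1, 4):
    print(attempt, backoff_base ** (attempt + 2))
# 1 8.0
# 2 16.0
# 3 32.0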
219  autogpt/core/resource/model_providers/schema.py  Normal file
@@ -0,0 +1,219 @@
import abc
import enum
from typing import Callable, ClassVar

from pydantic import BaseModel, Field, SecretStr, validator

from autogpt.core.configuration import UserConfigurable
from autogpt.core.resource.schema import (
    Embedding,
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)


class ModelProviderService(str, enum.Enum):
    """A ModelProviderService describes what kind of service the model provides."""

    EMBEDDING: str = "embedding"
    LANGUAGE: str = "language"
    TEXT: str = "text"


class ModelProviderName(str, enum.Enum):
    OPENAI: str = "openai"


class MessageRole(str, enum.Enum):
    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"


class LanguageModelMessage(BaseModel):
    role: MessageRole
    content: str


class LanguageModelFunction(BaseModel):
    json_schema: dict


class ModelProviderModelInfo(BaseModel):
    """Struct for model information.

    Would be lovely to eventually get this directly from APIs, but needs to be
    scraped from websites for now.

    """

    name: str
    service: ModelProviderService
    provider_name: ModelProviderName
    prompt_token_cost: float = 0.0
    completion_token_cost: float = 0.0


class ModelProviderModelResponse(BaseModel):
    """Standard response struct for a response from a model."""

    prompt_tokens_used: int
    completion_tokens_used: int
    model_info: ModelProviderModelInfo


class ModelProviderCredentials(ProviderCredentials):
    """Credentials for a model provider."""

    api_key: SecretStr | None = UserConfigurable(default=None)
    api_type: SecretStr | None = UserConfigurable(default=None)
    api_base: SecretStr | None = UserConfigurable(default=None)
    api_version: SecretStr | None = UserConfigurable(default=None)
    deployment_id: SecretStr | None = UserConfigurable(default=None)

    def unmasked(self) -> dict:
        return unmask(self)

    class Config:
        extra = "ignore"


def unmask(model: BaseModel):
    unmasked_fields = {}
    for field_name, field in model.__fields__.items():
        value = getattr(model, field_name)
        if isinstance(value, SecretStr):
            unmasked_fields[field_name] = value.get_secret_value()
        else:
            unmasked_fields[field_name] = value
    return unmasked_fields


class ModelProviderUsage(ProviderUsage):
    """Usage for a particular model from a model provider."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens_used
        self.prompt_tokens += model_response.prompt_tokens_used
        self.total_tokens += (
            model_response.completion_tokens_used + model_response.prompt_tokens_used
        )


class ModelProviderBudget(ProviderBudget):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        model_info = model_response.model_info
        self.usage.update_usage(model_response)
        incremental_cost = (
            model_response.completion_tokens_used * model_info.completion_token_cost
            + model_response.prompt_tokens_used * model_info.prompt_token_cost
        ) / 1000.0
        self.total_cost += incremental_cost
        self.remaining_budget -= incremental_cost


class ModelProviderSettings(ProviderSettings):
    resource_type = ResourceType.MODEL
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget


class ModelProvider(abc.ABC):
    """A ModelProvider abstracts the details of a particular provider of models."""

    defaults: ClassVar[ModelProviderSettings]

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int:
        ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float:
        ...


####################
# Embedding Models #
####################


class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for embedding model information."""

    model_service = ModelProviderService.EMBEDDING
    embedding_dimensions: int


class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: Embedding = Field(default_factory=list)

    @classmethod
    @validator("completion_tokens_used")
    def _verify_no_completion_tokens_used(cls, v):
        if v > 0:
            raise ValueError("Embeddings should not have completion tokens used.")
        return v


class EmbeddingModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_embedding(
        self,
        text: str,
        model_name: str,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        ...


###################
# Language Models #
###################


class LanguageModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for language model information."""

    model_service = ModelProviderService.LANGUAGE
    max_tokens: int


class LanguageModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from a language model."""

    content: dict = None


class LanguageModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        ...
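Note: token costs in this module are per 1000 tokens, hence the division in update_usage_and_cost. A worked sketch using the GPT-4 rates defined in openai.py above:

# Illustrative only: the incremental cost for one GPT-4 response.
prompt_tokens_used = 1_200
completion_tokens_used = 300
prompt_token_cost = 0.03  # per 1K prompt tokens
completion_token_cost = 0.06  # per 1K completion tokens
incremental_cost = (
    completion_tokens_used * completion_token_cost
    + prompt_tokens_used * prompt_token_cost
) / 1000.0
print(f"${incremental_cost:.3f}")  # $0.054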
57  autogpt/core/resource/schema.py  Normal file
@@ -0,0 +1,57 @@
import abc
import enum

from pydantic import SecretBytes, SecretField, SecretStr

from autogpt.core.configuration import (
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)


class ResourceType(str, enum.Enum):
    """An enumeration of resource types."""

    MODEL = "model"
    MEMORY = "memory"


class ProviderUsage(SystemConfiguration, abc.ABC):
    @abc.abstractmethod
    def update_usage(self, *args, **kwargs) -> None:
        """Update the usage of the resource."""
        ...


class ProviderBudget(SystemConfiguration):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ProviderUsage

    @abc.abstractmethod
    def update_usage_and_cost(self, *args, **kwargs) -> None:
        """Update the usage and cost of the resource."""
        ...


class ProviderCredentials(SystemConfiguration):
    """Struct for credentials."""

    class Config:
        json_encoders = {
            SecretStr: lambda v: v.get_secret_value() if v else None,
            SecretBytes: lambda v: v.get_secret_value() if v else None,
            SecretField: lambda v: v.get_secret_value() if v else None,
        }


class ProviderSettings(SystemSettings):
    resource_type: ResourceType
    credentials: ProviderCredentials | None = None
    budget: ProviderBudget | None = None


# Used both by model providers and memory providers
Embedding = list[float]
3  autogpt/core/runner/__init__.py  Normal file
@@ -0,0 +1,3 @@
"""
This module contains the runner for the v2 agent server and client.
"""
47  autogpt/core/runner/cli_app/cli.py  Normal file
@@ -0,0 +1,47 @@
from pathlib import Path

import click
import yaml

from autogpt.core.runner.cli_app.main import run_auto_gpt
from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
)
from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@click.option(
    "--pdb",
    is_flag=True,
    help="Drop into a debugger if an error is raised.",
)
@coroutine
async def run(settings_file: str, pdb: bool) -> None:
    """Run the Auto-GPT agent."""
    click.echo("Running Auto-GPT agent...")
    settings_file = Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())
    main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
    await main(settings)


if __name__ == "__main__":
    autogpt()
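Note: a hedged invocation sketch for the CLI above (command names assume click's default underscore-to-dash conversion; the settings path is whatever DEFAULT_SETTINGS_FILE points at):

# Illustrative only:
#   python -m autogpt.core.runner.cli_app.cli make-settings
#   python -m autogpt.core.runner.cli_app.cli run --settings-file <path/to/settings.yml> --pdb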
69
autogpt/core/runner/cli_app/main.py
Normal file
69
autogpt/core/runner/cli_app/main.py
Normal file
@@ -0,0 +1,69 @@
import click

from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import get_client_logger
from autogpt.core.runner.client_lib.parser import (
    parse_ability_result,
    parse_agent_name_and_goals,
    parse_agent_plan,
    parse_next_ability,
)


async def run_auto_gpt(user_configuration: dict):
    """Run the Auto-GPT CLI client."""

    client_logger = get_client_logger()
    client_logger.debug("Getting agent settings")

    agent_workspace = (
        user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
    )

    if not agent_workspace:  # We don't have an agent yet.
        #################
        # Bootstrapping #
        #################
        # Step 1. Collate the user's settings with the default system settings.
        agent_settings: AgentSettings = SimpleAgent.compile_settings(
            client_logger,
            user_configuration,
        )

        # Step 2. Get a name and goals for the agent.
        # First we need to figure out what the user wants to do with the agent.
        # We'll do this by asking the user for a prompt.
        user_objective = click.prompt("What do you want Auto-GPT to do?")
        # Ask a language model to determine a name and goals for a suitable agent.
        name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
            user_objective,
            agent_settings,
            client_logger,
        )
        print(parse_agent_name_and_goals(name_and_goals))
        # Finally, update the agent settings with the name and goals.
        agent_settings.update_agent_name_and_goals(name_and_goals)

        # Step 3. Provision the agent.
        agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
        print("agent is provisioned")

    # launch agent interaction loop
    agent = SimpleAgent.from_workspace(
        agent_workspace,
        client_logger,
    )
    print("agent is loaded")

    plan = await agent.build_initial_plan()
    print(parse_agent_plan(plan))

    while True:
        current_task, next_ability = await agent.determine_next_ability(plan)
        print(parse_next_ability(current_task, next_ability))
        user_input = click.prompt(
            "Should the agent proceed with this ability?",
            default="y",
        )
        ability_result = await agent.execute_next_ability(user_input)
        print(parse_ability_result(ability_result))
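The workspace lookup at the top of run_auto_gpt implies a nested settings shape like the one below. This is a hedged illustration reverse-read from the chained .get(...) calls; the authoritative schema is produced elsewhere (e.g. by the make-settings command) and is not shown in this excerpt.

# Hypothetical configuration matching the lookup
#   user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
# An empty or missing "root" sends run_auto_gpt down the bootstrapping branch.
user_configuration = {
    "workspace": {
        "configuration": {
            "root": "/home/user/auto_gpt_workspace",  # illustrative path
        },
    },
}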
autogpt/core/runner/cli_web_app/cli.py (new file, 101 lines)
@@ -0,0 +1,101 @@
import contextlib
import pathlib
import shlex
import subprocess
import sys
import time

import click
import requests
import uvicorn
import yaml

from autogpt.core.runner.client_lib.shared_click_commands import (
    DEFAULT_SETTINGS_FILE,
    make_settings,
    status,
)
from autogpt.core.runner.client_lib.utils import coroutine


@click.group()
def autogpt():
    """Temporary command group for v2 commands."""
    pass


autogpt.add_command(make_settings)
autogpt.add_command(status)


@autogpt.command()
@click.option(
    "host",
    "--host",
    default="localhost",
    help="The host for the webserver.",
    type=click.STRING,
)
@click.option(
    "port",
    "--port",
    default=8080,
    help="The port of the webserver.",
    type=click.INT,
)
def server(host: str, port: int) -> None:
    """Run the Auto-GPT runner httpserver."""
    click.echo("Running Auto-GPT runner httpserver...")
    uvicorn.run(
        "autogpt.core.runner.cli_web_app.server.api:app",
        workers=1,
        host=host,
        port=port,
        reload=True,
    )


@autogpt.command()
@click.option(
    "--settings-file",
    type=click.Path(),
    default=DEFAULT_SETTINGS_FILE,
)
@coroutine
async def client(settings_file) -> None:
    """Run the Auto-GPT runner client."""
    settings_file = pathlib.Path(settings_file)
    settings = {}
    if settings_file.exists():
        settings = yaml.safe_load(settings_file.read_text())

    from autogpt.core.runner.cli_web_app.client.client import run

    with autogpt_server():
        run()


@contextlib.contextmanager
def autogpt_server():
    host = "localhost"
    port = 8080
    cmd = shlex.split(
        f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}"
    )
    server_process = subprocess.Popen(
        args=cmd,
    )
    started = False

    while not started:
        try:
            requests.get(f"http://{host}:{port}")
            started = True
        except requests.exceptions.ConnectionError:
            time.sleep(0.2)
    yield server_process
    server_process.terminate()


if __name__ == "__main__":
    autogpt()
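One caveat in autogpt_server: the readiness loop polls until the first successful GET, so if the child process dies during startup the client blocks forever. A hedged variant of the same polling pattern with a deadline and a liveness check follows; the helper name and signature are illustrative, not part of this diff.

# Illustrative alternative to the readiness loop above; not part of the diff.
import subprocess
import time

import requests


def wait_until_ready(url: str, process: subprocess.Popen, timeout: float = 30.0) -> None:
    """Poll `url` until it answers, the process exits, or `timeout` elapses."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if process.poll() is not None:  # server crashed during startup
            raise RuntimeError("server process exited before becoming ready")
        try:
            requests.get(url)
            return
        except requests.exceptions.ConnectionError:
            time.sleep(0.2)
    process.terminate()
    raise TimeoutError(f"server at {url} did not become ready within {timeout}s")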
autogpt/core/runner/cli_web_app/client/__init__.py (new file, 0 lines)
Some files were not shown because too many files have changed in this diff.