Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)

Compare commits: 407 commits

.dockerignore (new file, 40 lines)

@@ -0,0 +1,40 @@
# Ignore everything by default, selectively add things to context
classic/run

# AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/

# Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md

# Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md

# Frontend
!classic/frontend/build/web/

# Platform
!autogpt_platform/

# Explicitly re-ignore some folders
.*
**/__pycache__

autogpt_platform/frontend/.next/
autogpt_platform/frontend/node_modules
autogpt_platform/frontend/.env.example
autogpt_platform/frontend/.env.local
autogpt_platform/backend/.env
autogpt_platform/backend/.venv/

autogpt_platform/market/.env

.github/PULL_REQUEST_TEMPLATE.md (vendored, 43 changed lines)

@@ -1,23 +1,38 @@
### Background

<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->

### Checklist 📋

### Testing 🔍
> [!NOTE]
Only for the new autogpt platform, currently in autogpt_platform/
#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...

<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->
<details>
<summary>Example test plan</summary>

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>

- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly
#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

<details>
<summary>Examples of configuration changes</summary>

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>

.github/dependabot.yml (vendored, new file, 179 lines)

@@ -0,0 +1,179 @@
version: 2
updates:
# autogpt_libs (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/autogpt_libs"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

# backend (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/backend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# frontend (Next.js project)
- package-ecosystem: "npm"
directory: "autogpt_platform/frontend"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# infra (Terraform)
- package-ecosystem: "terraform"
directory: "autogpt_platform/infra"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# market (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/market"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# Docker
- package-ecosystem: "docker"
directory: "autogpt_platform/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# Submodules
- package-ecosystem: "gitsubmodule"
directory: "autogpt_platform/supabase"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"


# Docs
- package-ecosystem: 'pip'
directory: "docs/"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

.github/labeler.yml (vendored, 31 changed lines)

@@ -1,27 +1,32 @@
AutoGPT Agent:
Classic AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: classic/original_autogpt/**

Classic Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**

Classic Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**

Forge:
- changed-files:
- any-glob-to-any-file: classic/forge/**

Benchmark:
- changed-files:
- any-glob-to-any-file: classic/benchmark/**

Frontend:
- changed-files:
- any-glob-to-any-file: classic/frontend/**

documentation:
- changed-files:
- any-glob-to-any-file: docs/**

Builder:
platform/frontend:
- changed-files:
- any-glob-to-any-file: autogpt_platform/autogpt_builder/**
- any-glob-to-any-file: autogpt_platform/frontend/**

Server:
platform/backend:
- changed-files:
- any-glob-to-any-file: autogpt_platform/autogpt_server/**
- any-glob-to-any-file: autogpt_platform/backend/**
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'

platform/blocks:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**

.github/workflows/classic-autogpt-ci.yml (vendored, 4 changed lines)

@@ -2,12 +2,12 @@ name: Classic - AutoGPT CI

on:
push:
branches: [ master, development, ci-test* ]
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'

@@ -5,7 +5,7 @@ on:
- cron: 20 4 * * 1,4

env:
BASE_BRANCH: development
BASE_BRANCH: dev
IMAGE_NAME: auto-gpt

jobs:
@@ -15,46 +15,46 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}

build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}

prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:

new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
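
The base_branch value above is computed with GitHub Actions expression syntax, where `a && b || c` behaves like an if/else. A minimal bash sketch of the same branch-selection logic (variable names here are illustrative, not part of the workflow):

ref_name="$GITHUB_REF_NAME"   # e.g. "dev", "master", or a feature branch
if [ "$ref_name" != "master" ] && [ "$ref_name" != "dev" ]; then
  base_branch="dev"           # feature branches are reported against dev
else
  base_branch="master"        # master and dev themselves are reported against master
fi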

.github/workflows/classic-autogpt-docker-ci.yml (vendored, 94 changed lines)

@@ -2,13 +2,13 @@ name: Classic - AutoGPT Docker CI

on:
push:
branches: [ master, development ]
branches: [master, dev]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
@@ -34,58 +34,58 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- if: runner.debug
run: |
ls -al
du -hs *
- if: runner.debug
run: |
ls -al
du -hs *

- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}

build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}

prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

test:
runs-on: ubuntu-latest
@@ -117,16 +117,16 @@ jobs:

- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max

.github/workflows/classic-autogpt-docker-release.yml (vendored, 112 changed lines)

@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release

on:
release:
types: [ published, edited ]
types: [published, edited]

workflow_dispatch:
inputs:
@@ -19,69 +19,69 @@ jobs:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}

- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}

# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max

- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}

prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
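
The Sanitize image tag step relies on two bash parameter expansions. A quick illustration with a hypothetical tag name (not taken from any real workflow run):

raw_tag="autogpt-release/v0.5.0"   # hypothetical ref name
tag=${raw_tag//\//-}               # replace every "/" with "-"  -> "autogpt-release-v0.5.0"
echo "${tag#autogpt-}"             # strip one leading "autogpt-" -> "release-v0.5.0"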

.github/workflows/classic-autogpts-ci.yml (vendored, 4 changed lines)

@@ -5,7 +5,7 @@ on:
schedule:
- cron: '0 8 * * *'
push:
branches: [ master, development, ci-test* ]
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
@@ -16,7 +16,7 @@ on:
- 'classic/setup.py'
- '!**/*.md'
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'

.github/workflows/classic-benchmark-ci.yml (vendored, 28 changed lines)

@@ -2,13 +2,13 @@ name: Classic - AGBenchmark CI

on:
push:
branches: [ master, development, ci-test* ]
branches: [ master, dev, ci-test* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
@@ -102,7 +102,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ forge ]
agent-name: [forge]
fail-fast: false
timeout-minutes: 20
steps:
@@ -146,23 +146,23 @@ jobs:
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding

echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true

poetry run agbenchmark --mock
# poetry run agbenchmark --mock

CHANGED=$(git diff --name-only | grep -E '(agclassic/benchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ ! -z "$CHANGED" ]; then
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci

.github/workflows/classic-forge-ci.yml (vendored, 4 changed lines)

@@ -2,13 +2,13 @@ name: Classic - Forge CI

on:
push:
branches: [ master, development, ci-test* ]
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'

.github/workflows/classic-frontend-ci.yml (vendored, 68 changed lines)

@@ -4,15 +4,15 @@ on:
push:
branches:
- master
- development
- dev
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
- '.github/workflows/frontend-ci.yml'
- '.github/workflows/classic-frontend-ci.yml'
pull_request:
paths:
- 'classic/frontend/**'
- '.github/workflows/frontend-ci.yml'
- '.github/workflows/classic-frontend-ci.yml'

jobs:
build:
@@ -21,40 +21,40 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Checkout Repo
uses: actions/checkout@v4

- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'

- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/

# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}

- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

.github/workflows/classic-python-checks.yml (vendored, 10 changed lines)

@@ -2,18 +2,18 @@ name: Classic - Python checks

on:
push:
branches: [ master, development, ci-test* ]
branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/lint-ci.yml'
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
branches: [ master, dev, release-* ]
paths:
- '.github/workflows/lint-ci.yml'
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
@@ -21,7 +21,7 @@ on:
- '!classic/forge/tests/vcr_cassettes'

concurrency:
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:

.github/workflows/codeql.yml (vendored, new file, 98 lines)

@@ -0,0 +1,98 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
push:
branches: [ "master", "release-*", "dev" ]
pull_request:
branches: [ "master", "release-*", "dev" ]
merge_group:
schedule:
- cron: '15 4 * * 0'

jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write

# required to fetch internal or private CodeQL packs
packages: read

# only required for workflows in private repositories
actions: read
contents: read

strategy:
fail-fast: false
matrix:
include:
- language: typescript
build-mode: none
- language: python
build-mode: none
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
config: |
paths-ignore:
- classic/frontend/build/**

# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality

# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

.github/workflows/platform-autgpt-deploy-prod.yml (vendored, new file, 55 lines)

@@ -0,0 +1,55 @@
name: AutoGPT Platform - Deploy Prod Environment

on:
release:
types: [published]

permissions:
contents: 'read'
id-token: 'write'

jobs:
migrate:
environment: production
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma

- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_prod
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
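
The migration job boils down to installing the Prisma CLI and running `prisma migrate deploy` against the target database. A local sketch of the equivalent commands, assuming Python 3.11 and a checked-out repository as in the workflow (the DATABASE_URL value is a placeholder, not a real secret):

python -m pip install --upgrade pip
pip install prisma
cd autogpt_platform/backend
export DATABASE_URL="postgresql://user:password@localhost:5432/backend"   # placeholder connection string
python -m prisma migrate deploy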

(deleted workflow file, 41 lines)

@@ -1,41 +0,0 @@
name: Platform - AutoGPT Builder CI

on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'autogpt_platform/autogpt_builder/**'
pull_request:
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'autogpt_platform/autogpt_builder/**'

defaults:
run:
shell: bash
working-directory: autogpt_platform/autogpt_builder

jobs:

lint:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '21'

- name: Install dependencies
run: |
npm install

- name: Check formatting with Prettier
run: |
npx prettier --check .

- name: Run lint
run: |
npm run lint

.github/workflows/platform-autogpt-deploy-dev.yaml (vendored, new file, 57 lines)

@@ -0,0 +1,57 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'

permissions:
contents: 'read'
id-token: 'write'

jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma

- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'

.github/workflows/platform-autogpt-infra-ci.yml (vendored, deleted, 56 lines)

@@ -1,56 +0,0 @@
name: Platform - AutoGPT Builder Infra

on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'

defaults:
run:
shell: bash
working-directory: autogpt_platform/infra

jobs:
lint:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false

- name: Set up Helm
uses: azure/setup-helm@v4.2.0
with:
version: v3.14.4

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0

- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi

- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}

.github/workflows/platform-autogpt-server-ci.yml (vendored, deleted, 155 lines)

@@ -1,155 +0,0 @@
name: Platform - AutoGPT Server CI

on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "autogpt_platform/autogpt_server/**"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "autogpt_platform/autogpt_server/**"

concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
run:
shell: bash
working-directory: autogpt_platform/autogpt_server

jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

steps:
- name: Setup PostgreSQL
uses: ikalnytskyi/action-setup-postgres@v6
with:
username: ${{ secrets.DB_USER || 'postgres' }}
password: ${{ secrets.DB_PASS || 'postgres' }}
database: postgres
port: 5432
id: postgres

# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3

- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: "."
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd

- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &

# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429

- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/autogpt_server/poetry.lock') }}

- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -

if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

- name: Install Python dependencies
run: poetry install

- name: Generate Prisma Client
run: poetry run prisma generate

- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}

- id: lint
name: Run Linter
run: poetry run lint

- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
DB_USER: ${{ secrets.DB_USER || 'postgres' }}
DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
DB_NAME: postgres
DB_PORT: 5432
RUN_ENV: local
PORT: 8080
DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}

# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
# with:
# token: ${{ secrets.CODECOV_TOKEN }}
# flags: autogpt-server,${{ runner.os }}
.github/workflows/platform-backend-ci.yml (vendored, new file, 134 lines)
@@ -0,0 +1,134 @@
name: AutoGPT Platform - Backend CI

on:
  push:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - "autogpt_platform/backend/**"
  merge_group:

concurrency:
  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/backend

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
    runs-on: ubuntu-latest

    services:
      redis:
        image: bitnami/redis:6.2
        env:
          REDIS_PASSWORD: testpassword
        ports:
          - 6379:6379

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Setup Supabase
        uses: supabase/setup-cli@v1
        with:
          version: latest

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Install Poetry (Unix)
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - id: supabase
        name: Start Supabase
        working-directory: .
        run: |
          supabase init
          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
          # outputs:
          # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      - name: Run pytest with coverage
        run: |
          if [[ "${{ runner.debug }}" == "1" ]]; then
            poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
          else
            poetry run pytest -s -vv test
          fi
        if: success() || (failure() && steps.lint.outcome == 'failure')
        env:
          LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
          SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
          SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
          SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
          REDIS_HOST: 'localhost'
          REDIS_PORT: '6379'
          REDIS_PASSWORD: 'testpassword'

    env:
      CI: true
      PLAIN_OUTPUT: True
      RUN_ENV: local
      PORT: 8080
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

    # - name: Upload coverage reports to Codecov
    #   uses: codecov/codecov-action@v4
    #   with:
    #     token: ${{ secrets.CODECOV_TOKEN }}
    #     flags: backend,${{ runner.os }}
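
The backend job above also works as a recipe for running the test suite on a workstation. Below is a minimal sketch, not an official script: it assumes Poetry, Docker and the Supabase CLI are already installed, is run from the repository root, and approximates with `export` what the workflow does via `$GITHUB_OUTPUT`.

```
#!/usr/bin/env bash
set -euo pipefail

# Redis, mirroring the CI service container (bitnami image, same password).
docker run -d --name redis -p 6379:6379 -e REDIS_PASSWORD=testpassword bitnami/redis:6.2

# Start the same minimal Supabase stack the workflow uses and export its env output
# (DB_URL, API_URL, SERVICE_ROLE_KEY, JWT_SECRET, ...).
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
eval "$(supabase status -o env | sed 's/^/export /')"

cd autogpt_platform/backend
poetry install
poetry run prisma generate

# Apply migrations, lint, then run the tests against that stack.
DATABASE_URL="$DB_URL" poetry run prisma migrate dev --name updates
poetry run lint
DATABASE_URL="$DB_URL" SUPABASE_URL="$API_URL" \
  SUPABASE_SERVICE_ROLE_KEY="$SERVICE_ROLE_KEY" SUPABASE_JWT_SECRET="$JWT_SECRET" \
  REDIS_HOST=localhost REDIS_PORT=6379 REDIS_PASSWORD=testpassword \
  poetry run pytest -s -vv test
```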
.github/workflows/platform-frontend-ci.yml (vendored, new file, 101 lines)
@@ -0,0 +1,101 @@
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  pull_request:
    paths:
      - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
  merge_group:

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/frontend

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Install dependencies
        run: |
          yarn install --frozen-lockfile

      - name: Run lint
        run: |
          yarn lint

  test:
    runs-on: ubuntu-latest

    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          # this might remove tools that are actually needed,
          # if set to "true" but frees about 6 GB
          tool-cache: false

          # all of these default to true, but feel free to set to
          # "false" if necessary for your workflow
          android: false
          dotnet: false
          haskell: false
          large-packages: true
          docker-images: true
          swap-storage: true

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Copy default supabase .env
        run: |
          cp ../supabase/docker/.env.example ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.example ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml up -d

      - name: Install dependencies
        run: |
          yarn install --frozen-lockfile

      - name: Setup Builder .env
        run: |
          cp .env.example .env

      - name: Install Playwright Browsers
        run: yarn playwright install --with-deps

      - name: Run tests
        run: |
          yarn test

      - uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: playwright-report
          path: playwright-report/
          retention-days: 30
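
The `test` job above can be approximated locally. A rough sketch, run from `autogpt_platform/frontend` and assuming Docker, Node.js and Yarn are installed (the CI-only disk-space step is skipped):

```
cp ../supabase/docker/.env.example ../.env       # default supabase env
cp ../backend/.env.example ../backend/.env       # backend env
docker compose -f ../docker-compose.yml up -d    # backend services the E2E tests talk to

yarn install --frozen-lockfile
cp .env.example .env                             # builder env
yarn playwright install --with-deps
yarn test                                        # report is written to playwright-report/
```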
.github/workflows/platform-market-ci.yml (vendored, new file, 126 lines)
@@ -0,0 +1,126 @@
name: AutoGPT Platform - Backend CI

on:
  push:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-market-ci.yml"
      - "autogpt_platform/market/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-market-ci.yml"
      - "autogpt_platform/market/**"
  merge_group:

concurrency:
  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/market

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Setup Supabase
        uses: supabase/setup-cli@v1
        with:
          version: latest

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/market/poetry.lock') }}

      - name: Install Poetry (Unix)
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - id: supabase
        name: Start Supabase
        working-directory: .
        run: |
          supabase init
          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
          # outputs:
          # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      # Tests commented out because they do not work with the prisma mock, nor have they been updated since they were created
      # - name: Run pytest with coverage
      #   run: |
      #     if [[ "${{ runner.debug }}" == "1" ]]; then
      #       poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
      #     else
      #       poetry run pytest -s -vv test
      #     fi
      #   if: success() || (failure() && steps.lint.outcome == 'failure')
      #   env:
      #     LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
      #     DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
      #     SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
      #     SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
      #     SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
      #     REDIS_HOST: 'localhost'
      #     REDIS_PORT: '6379'
      #     REDIS_PASSWORD: 'testpassword'

    env:
      CI: true
      PLAIN_OUTPUT: True
      RUN_ENV: local
      PORT: 8080

    # - name: Upload coverage reports to Codecov
    #   uses: codecov/codecov-action@v4
    #   with:
    #     token: ${{ secrets.CODECOV_TOKEN }}
    #     flags: backend,${{ runner.os }}
.github/workflows/repo-pr-enforce-base-branch.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
name: Repo - Enforce dev as base branch
on:
  pull_request_target:
    branches: [ master ]
    types: [ opened ]

jobs:
  check_pr_target:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - name: Check if PR is from dev or hotfix
        if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
        run: |
          gh pr comment ${{ github.event.number }} --repo "$REPO" \
            --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
          gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
        env:
          GITHUB_TOKEN: ${{ github.token }}
          REPO: ${{ github.repository }}
.github/workflows/repo-pr-label.yml (vendored, 2 lines changed)
@@ -3,7 +3,7 @@ name: Repo - Pull Request auto-label
 on:
   # So that PRs touching the same files as the push are updated
   push:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths-ignore:
       - 'classic/forge/tests/vcr_cassettes'
       - 'classic/benchmark/reports/**'
.github/workflows/repo-workflow-checker.yml (vendored, 1 line changed)
@@ -2,6 +2,7 @@ name: Repo - PR Status Checker
 on:
   pull_request:
     types: [opened, synchronize, reopened]
+  merge_group:

 jobs:
   status-check:
@@ -5,6 +5,8 @@ import sys
 import time
 from typing import Dict, List, Tuple

+CHECK_INTERVAL = 30
+

 def get_environment_variables() -> Tuple[str, str, str, str, str]:
     """Retrieve and return necessary environment variables."""
@@ -12,7 +14,11 @@ def get_environment_variables() -> Tuple[str, str, str, str, str]:
     with open(os.environ["GITHUB_EVENT_PATH"]) as f:
         event = json.load(f)

-    sha = event["pull_request"]["head"]["sha"]
+    # Handle both PR and merge group events
+    if "pull_request" in event:
+        sha = event["pull_request"]["head"]["sha"]
+    else:
+        sha = os.environ["GITHUB_SHA"]

     return (
         os.environ["GITHUB_API_URL"],
@@ -93,9 +99,10 @@
             break

         print(
-            "Some check runs are still in progress. Waiting 3 minutes before checking again..."
+            "Some check runs are still in progress. "
+            f"Waiting {CHECK_INTERVAL} seconds before checking again..."
         )
-        time.sleep(180)
+        time.sleep(CHECK_INTERVAL)

     if all_others_passed:
         print("All other completed check runs have passed. This check passes.")
.gitignore (vendored, 6 lines changed)
@@ -157,7 +157,7 @@ openai/
 CURRENT_BULLETIN.md

 # AgBenchmark
-agclassic/benchmark/reports/
+classic/benchmark/agbenchmark/reports/

 # Nodejs
 package-lock.json
@@ -170,4 +170,6 @@ pri*
 ig*
 .github_access_token
 LICENSE.rtf
-autogpt_platform/autogpt_server/settings.py
+autogpt_platform/backend/settings.py
+/.auth
+/autogpt_platform/frontend/.auth
@@ -10,27 +10,126 @@ repos:
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        stages: [push]

  - repo: local
    # For proper type checking, all dependencies need to be up-to-date.
    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
    hooks:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
        entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
        entry: poetry -C classic/original_autogpt install
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
        entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
        entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.
    hooks:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
        language: system
        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]

      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort-autogpt
        name: Lint (isort) - AutoGPT
        entry: poetry -C classic/original_autogpt run isort
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -C autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - AutoGPT
        alias: isort-classic-autogpt
        entry: poetry -C classic/original_autogpt run isort -p autogpt
        files: ^classic/original_autogpt/
        types: [file, python]
        language: system

      - id: isort-forge
        name: Lint (isort) - Forge
        entry: poetry -C classic/forge run isort
      - id: isort
        name: Lint (isort) - Classic - Forge
        alias: isort-classic-forge
        entry: poetry -C classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system

      - id: isort-benchmark
        name: Lint (isort) - Benchmark
        entry: poetry -C classic/benchmark run isort
      - id: isort
        name: Lint (isort) - Classic - Benchmark
        alias: isort-classic-benchmark
        entry: poetry -C classic/benchmark run isort -p agbenchmark
        files: ^classic/benchmark/
        types: [file, python]
        language: system
@@ -42,7 +141,6 @@ repos:
    hooks:
      - id: black
        name: Lint (Black)
        language_version: python3.10

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
@@ -50,20 +148,20 @@
    # them separately.
    hooks:
      - id: flake8
        name: Lint (Flake8) - AutoGPT
        alias: flake8-autogpt
        name: Lint (Flake8) - Classic - AutoGPT
        alias: flake8-classic-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]

      - id: flake8
        name: Lint (Flake8) - Forge
        alias: flake8-forge
        name: Lint (Flake8) - Classic - Forge
        alias: flake8-classic-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]

      - id: flake8
        name: Lint (Flake8) - Benchmark
        alias: flake8-benchmark
        name: Lint (Flake8) - Classic - Benchmark
        alias: flake8-classic-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]
@@ -72,56 +170,89 @@
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT
        alias: pyright-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        args: [-p, autogpt, autogpt]
        name: Typecheck - AutoGPT Platform - Backend
        alias: pyright-platform-backend
        entry: poetry -C autogpt_platform/backend run pyright
        args: [-p, autogpt_platform/backend, autogpt_platform/backend]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Forge
        alias: pyright-forge
        name: Typecheck - AutoGPT Platform - Libs
        alias: pyright-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs run pyright
        args: [-p, autogpt_platform/autogpt_libs, autogpt_platform/autogpt_libs]
        files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - AutoGPT
        alias: pyright-classic-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        args: [-p, classic/original_autogpt, classic/original_autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Forge
        alias: pyright-classic-forge
        entry: poetry -C classic/forge run pyright
        args: [-p, forge, forge]
        files: ^classic/forge/(classic/forge/|poetry\.lock$)
        args: [-p, classic/forge, classic/forge]
        files: ^classic/forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Benchmark
        alias: pyright-benchmark
        name: Typecheck - Classic - Benchmark
        alias: pyright-classic-benchmark
        entry: poetry -C classic/benchmark run pyright
        args: [-p, benchmark, benchmark]
        files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
        args: [-p, classic/benchmark, classic/benchmark]
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: pytest-autogpt
        name: Run tests - AutoGPT (excl. slow tests)
      - id: pytest
        name: Run tests - AutoGPT Platform - Backend
        alias: pytest-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
        # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest
        name: Run tests - Classic - AutoGPT (excl. slow tests)
        alias: pytest-classic-autogpt
        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest-forge
        name: Run tests - Forge (excl. slow tests)
      - id: pytest
        name: Run tests - Classic - Forge (excl. slow tests)
        alias: pytest-classic-forge
        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
        files: ^classic/forge/(forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

      - id: pytest-benchmark
        name: Run tests - Benchmark
      - id: pytest
        name: Run tests - Classic - Benchmark
        alias: pytest-classic-benchmark
        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
        files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false
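
These hooks only take effect once pre-commit itself is wired up on the developer's machine; the config above does not install it. A minimal sketch of the local setup (the install commands are assumptions about the developer environment, not part of this config):

```
pipx install pre-commit                    # or: pip install pre-commit
pre-commit install                         # run the hooks on every commit
pre-commit install --hook-type pre-push    # the detect-secrets hook is staged for push
pre-commit run --all-files                 # one-off run of every hook across the repo
```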
.vscode/all-projects.code-workspace (vendored, 17 lines changed)
@@ -1,12 +1,12 @@
{
  "folders": [
    {
      "name": "autogpt_server",
      "path": "../autogpt_platform/autogpt_server"
      "name": "frontend",
      "path": "../autogpt_platform/frontend"
    },
    {
      "name": "autogpt_builder",
      "path": "../autogpt_platform/autogpt_builder"
      "name": "backend",
      "path": "../autogpt_platform/backend"
    },
    {
      "name": "market",
@@ -24,10 +24,7 @@
      "name": "docs",
      "path": "../docs"
    },
    {
      "name": "[root]",
      "path": ".."
    },

    {
      "name": "classic - autogpt",
      "path": "../classic/original_autogpt"
@@ -44,6 +41,10 @@
      "name": "classic - frontend",
      "path": "../classic/frontend"
    },
    {
      "name": "[root]",
      "path": ".."
    }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
.vscode/launch.json (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "yarn dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",

      "request": "launch",
      "command": "yarn dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithEdge"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      // "env": {
      //   "ENV": "dev"
      // },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}
@@ -10,6 +10,9 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1

## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.

## In short
1. Avoid duplicate work, issues, PRs etc.
2. We encourage you to collaborate with fellow community members on some of our bigger
LICENSE (8 lines changed)
@@ -1,7 +1,13 @@
All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the
Polyform Shield License.

MIT License

Copyright (c) 2023 Toran Bruce Richards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -9,9 +15,11 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
README.md (89 lines changed)
@@ -1,43 +1,71 @@
# AutoGPT: Build & Use AI Agents
# AutoGPT: Build, Deploy, and Run AI Agents

[](https://discord.gg/autogpt)
[](https://twitter.com/Auto_GPT)
[](https://opensource.org/licenses/MIT)

**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows.

## How to Get Started
## Hosting Options
- Download to self-host
- [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta

https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
## How to Setup for Self-Hosting
> [!NOTE]
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.

### 🧱 AutoGPT Builder
https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603

The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
This tutorial assumes you have Docker, VSCode, git and npm installed.

[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
### 🧱 AutoGPT Frontend

The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life:

**Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents.

**Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action.

**Deployment Controls:** Manage the lifecycle of your agents, from testing to production.

**Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately.

**Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface.

**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.

[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server

The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
The AutoGPT Server is the powerhouse of our platform. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly.

**Source Code:** The core logic that drives our agents and automation processes.

**Infrastructure:** Robust systems that ensure reliable and scalable performance.

**Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents.

### 🐙 Example Agents

Here are two examples of what you can do with AutoGPT:

1. **Reddit Marketing Agent**
   - This agent reads comments on Reddit.
   - It looks for people asking about your product.
   - It then automatically responds to them.
1. **Generate Viral Videos from Trending Topics**
   - This agent reads topics on Reddit.
   - It identifies trending topics.
   - It then automatically creates a short-form video based on the content.

2. **YouTube Content Repurposing Agent**
2. **Identify Top Quotes from Videos for Social Media**
   - This agent subscribes to your YouTube channel.
   - When you post a new video, it transcribes it.
   - It uses AI to write a search engine optimized blog post.
   - Then, it publishes this blog post to your Medium account.
   - It uses AI to identify the most impactful quotes to generate a summary.
   - Then, it writes a post to automatically publish to your social media.

These examples show just a glimpse of what you can achieve with AutoGPT!
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.

---
### Mission and Licensing
Our mission is to provide the tools, so that you can focus on what matters:

- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -50,20 +78,28 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
|
**🚀 [Contributing](CONTRIBUTING.md)**

**Licensing:**

MIT License: The majority of the AutoGPT repository is under the MIT License.

Polyform Shield License: This license applies to the autogpt_platform folder.

For more information, see https://agpt.co/blog/introducing-the-autogpt-platform

---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.

**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**

### 🏗️ Forge

**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.

🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) –
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge

### 🎯 Benchmark

@@ -71,9 +107,9 @@ This guide will walk you through the process of creating your own agent and usin

<!-- TODO: insert visual demonstrating the benchmark -->

📦 [`agbenchmark`](https://pypi.org/project/agclassic/benchmark/) on Pypi
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
|
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark

### 💻 UI

@@ -83,7 +119,7 @@ This guide will walk you through the process of creating your own agent and usin

The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!

📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend

### ⌨️ CLI

@@ -122,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

---

## Stars stats

<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
@@ -131,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
</picture>
</a>
</p>

## ⚡ Contributors

<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>
SECURITY.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
autogpt_platform/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*.ignore.*
*.ign.*
autogpt_platform/Contributor License Agreement (CLA).md (new file, 21 lines)
@@ -0,0 +1,21 @@
**Determinist Ltd**

**Contributor License Agreement (“Agreement”)**

Thank you for your interest in the AutoGPT open source project at [https://github.com/Significant-Gravitas/AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) stewarded by Determinist Ltd (“**Determinist**”), with offices at 3rd Floor 1 Ashley Road, Altrincham, Cheshire, WA14 2DT, United Kingdom. The form of license below is a document that clarifies the terms under which You, the person listed below, may contribute software code described below (the “**Contribution**”) to the project. We appreciate your participation in our project, and your help in improving our products, so we want you to understand what will be done with the Contributions. This license is for your protection as well as the protection of Determinist and its licensees; it does not change your rights to use your own Contributions for any other purpose.

By submitting a Pull Request which modifies the content of the “autogpt\_platform” folder at [https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt\_platform](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt_platform), You hereby agree:

1\. **You grant us the ability to use the Contributions in any way**. You hereby grant to Determinist a non-exclusive, irrevocable, worldwide, royalty-free, sublicenseable, transferable license under all of Your relevant intellectual property rights (including copyright, patent, and any other rights), to use, copy, prepare derivative works of, distribute and publicly perform and display the Contributions on any licensing terms, including without limitation: (a) open source licenses like the GNU General Public License (GPL), the GNU Lesser General Public License (LGPL), the Common Public License, or the Berkeley Science Division license (BSD); and (b) binary, proprietary, or commercial licenses.

2\. **Grant of Patent License**. You hereby grant to Determinist a worldwide, non-exclusive, royalty-free, irrevocable, license, under any rights you may have, now or in the future, in any patents or patent applications, to make, have made, use, offer to sell, sell, and import products containing the Contribution or portions of the Contribution. This license extends to patent claims that are infringed by the Contribution alone or by combination of the Contribution with other inventions.

4\. **Limitations on Licenses**. The licenses granted in this Agreement will continue for the duration of the applicable patent or intellectual property right under which such license is granted. The licenses granted in this Agreement will include the right to grant and authorize sublicenses, so long as the sublicenses are within the scope of the licenses granted in this Agreement. Except for the licenses granted herein, You reserve all right, title, and interest in and to the Contribution.

5\. **You are able to grant us these rights**. You represent that You are legally entitled to grant the above license. If Your employer has rights to intellectual property that You create, You represent that You are authorized to make the Contributions on behalf of that employer, or that Your employer has waived such rights for the Contributions.

3\. **The Contributions are your original work**. You represent that the Contributions are Your original works of authorship, and to Your knowledge, no other person claims, or has the right to claim, any right in any invention or patent related to the Contributions. You also represent that You are not legally obligated, whether by entering into an agreement or otherwise, in any way that conflicts with the terms of this license. For example, if you have signed an agreement requiring you to assign the intellectual property rights in the Contributions to an employer or customer, that would conflict with the terms of this license.

6\. **We determine the code that is in our products**. You understand that the decision to include the Contribution in any product or source repository is entirely that of Determinist, and this agreement does not guarantee that the Contributions will be included in any product.

7\. **No Implied Warranties.** Determinist acknowledges that, except as explicitly described in this Agreement, the Contribution is provided on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
autogpt_platform/LICENCE.txt (new file, 164 lines)
@@ -0,0 +1,164 @@
# PolyForm Shield License 1.0.0

<https://polyformproject.org/licenses/shield/1.0.0>

## Acceptance

In order to get any license under these terms, you must agree
to them as both strict obligations and conditions to all
your licenses.

## Copyright License

The licensor grants you a copyright license for the
software to do everything you might do with the software
that would otherwise infringe the licensor's copyright
in it for any permitted purpose. However, you may
only distribute the software according to [Distribution
License](#distribution-license) and make changes or new works
based on the software according to [Changes and New Works
License](#changes-and-new-works-license).

## Distribution License

The licensor grants you an additional copyright license
to distribute copies of the software. Your license
to distribute covers distributing the software with
changes and new works permitted by [Changes and New Works
License](#changes-and-new-works-license).

## Notices

You must ensure that anyone who gets a copy of any part of
the software from you also gets a copy of these terms or the
URL for them above, as well as copies of any plain-text lines
beginning with `Required Notice:` that the licensor provided
with the software. For example:

> Required Notice: Copyright Yoyodyne, Inc. (http://example.com)

## Changes and New Works License

The licensor grants you an additional copyright license to
make changes and new works based on the software for any
permitted purpose.

## Patent License

The licensor grants you a patent license for the software that
covers patent claims the licensor can license, or becomes able
to license, that you would infringe by using the software.

## Noncompete

Any purpose is a permitted purpose, except for providing any
product that competes with the software or any product the
licensor or any of its affiliates provides using the software.

## Competition

Goods and services compete even when they provide functionality
through different kinds of interfaces or for different technical
platforms. Applications can compete with services, libraries
with plugins, frameworks with development tools, and so on,
even if they're written in different programming languages
or for different computer architectures. Goods and services
compete even when provided free of charge. If you market a
product as a practical substitute for the software or another
product, it definitely competes.

## New Products

If you are using the software to provide a product that does
not compete, but the licensor or any of its affiliates brings
your product into competition by providing a new version of
the software or another product using the software, you may
continue using versions of the software available under these
terms beforehand to provide your competing product, but not
any later versions.

## Discontinued Products

You may begin using the software to compete with a product
or service that the licensor or any of its affiliates has
stopped providing, unless the licensor includes a plain-text
line beginning with `Licensor Line of Business:` with the
software that mentions that line of business. For example:

> Licensor Line of Business: YoyodyneCMS Content Management
System (http://example.com/cms)

## Sales of Business

If the licensor or any of its affiliates sells a line of
business developing the software or using the software
to provide a product, the buyer can also enforce
[Noncompete](#noncompete) for that product.

## Fair Use

You may have "fair use" rights for the software under the
law. These terms do not limit them.

## No Other Rights

These terms do not allow you to sublicense or transfer any of
your licenses to anyone else, or prevent the licensor from
granting licenses to anyone else. These terms do not imply
any other licenses.

## Patent Defense

If you make any written claim that the software infringes or
contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If
your company makes such a claim, your patent license ends
immediately for work on behalf of your company.

## Violations

The first time you are notified in writing that you have
violated any of these terms, or done anything with the software
not covered by your licenses, your licenses can nonetheless
continue if you come into full compliance with these terms,
and take practical steps to correct past violations, within
32 days of receiving notice. Otherwise, all your licenses
end immediately.

## No Liability

***As far as the law allows, the software comes as is, without
any warranty or condition, and the licensor will not be liable
to you for any damages arising out of these terms or the use
or nature of the software, under any kind of legal claim.***

## Definitions

The **licensor** is the individual or entity offering these
terms, and the **software** is the software the licensor makes
available under these terms.

A **product** can be a good or service, or a combination
of them.

**You** refers to the individual or entity agreeing to these
terms.

**Your company** is any legal entity, sole proprietorship,
or other kind of organization that you work for, plus all
its affiliates.

**Affiliates** means the other organizations than an
organization has control over, is under the control of, or is
under common control with.

**Control** means ownership of substantially all the assets of
an entity, or the power to direct its management and policies
by vote, contract, or otherwise. Control can be direct or
indirect.

**Your licenses** are all the licenses granted to you for the
software under these terms.

**Use** means anything you do with the software requiring one
of your licenses.
@@ -8,46 +8,67 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
|
||||
|
||||
- Docker
|
||||
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
|
||||
- Node.js & NPM (for running the frontend application)
|
||||
|
||||
### Running the System
|
||||
|
||||
To run the AutoGPT Platform, follow these steps:
|
||||
|
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
   ```
   git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
   cd AutoGPT/autogpt_platform
   ```

2. Run the following command:
   ```
   git submodule update --init --recursive
   ```
   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned into the `autogpt_platform` directory.

3. Run the following command:
   ```
   cp supabase/docker/.env.example .env
   ```
   This command will copy `supabase/docker/.env.example` to a new `.env` file in the current (`autogpt_platform`) directory. You can modify the `.env` file to add your own environment variables.

4. Run the following command:
   ```
   docker compose up -d
   ```
   This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

5. Navigate to `frontend` within the `autogpt_platform` directory:
   ```
   cd frontend
   ```
   You will need to run your frontend application separately on your local machine.

6. Run the following command:
   ```
   cp .env.example .env.local
   ```
   This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application (an example sketch follows this list).

7. Run the following command:
   ```
   npm install
   npm run dev
   ```
   This command will install the necessary dependencies and start the frontend application in development mode.
   If you are using Yarn, you can run the following commands instead:
   ```
   yarn install && yarn dev
   ```

8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.

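For step 6, here is a minimal sketch of what a frontend `.env.local` might contain. The `NEXT_PUBLIC_AGPT_*` values below mirror the defaults used by the frontend's API client; the Supabase values are placeholders that depend on your own Supabase instance, so treat every value here as an assumption to adapt:

```
# Supabase connection (placeholders - use the URL and anon key from your Supabase setup)
NEXT_PUBLIC_SUPABASE_URL=<your-supabase-url>
NEXT_PUBLIC_SUPABASE_ANON_KEY=<your-supabase-anon-key>

# Backend endpoints (these match the client's built-in defaults for a local setup)
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
```
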
### Docker Compose Commands

Here are some useful Docker Compose commands for managing your AutoGPT Platform:

- `docker compose up -d`: Start the services in detached mode.
- `docker compose stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.

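As a quick illustration of how these commands fit together, a typical update cycle might look like the sketch below; whether you need the `build` step depends on whether the images actually changed:

```
# pull the latest changes, including submodules
git pull && git submodule update --init --recursive

# rebuild the images and restart the services in the background
docker compose build
docker compose up -d

# follow the logs of the running services
docker compose logs -f
```
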
@@ -128,6 +149,3 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
3. Save the file and run `docker compose up -d` to apply the changes.

This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.

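For reference, a named-volume setup generally follows the shape sketched below. The service names and container paths in your `docker-compose.yml` may differ, so `postgres`, `redis`, and the mount paths here are assumptions to adapt rather than the file's actual contents:

```
services:
  postgres:
    volumes:
      - postgres-data:/var/lib/postgresql/data  # assumed service name and data path
  redis:
    volumes:
      - redis-data:/data  # assumed service name and data path

volumes:
  postgres-data:
  redis-data:
```
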
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"extends": "next/core-web-vitals"
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
# Base stage for both dev and prod
|
||||
FROM node:21-alpine AS base
|
||||
WORKDIR /app
|
||||
COPY autogpt_platform/autogpt_builder/package.json autogpt_platform/autogpt_builder/yarn.lock ./
|
||||
RUN yarn install --frozen-lockfile
|
||||
|
||||
# Dev stage
|
||||
FROM base AS dev
|
||||
ENV NODE_ENV=development
|
||||
COPY autogpt_platform/autogpt_builder/ .
|
||||
EXPOSE 3000
|
||||
CMD ["yarn", "run", "dev"]
|
||||
|
||||
# Build stage for prod
|
||||
FROM base AS build
|
||||
COPY autogpt_platform/autogpt_builder/ .
|
||||
RUN npm run build
|
||||
|
||||
# Prod stage
|
||||
FROM node:21-alpine AS prod
|
||||
ENV NODE_ENV=production
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=build /app/package.json /app/yarn.lock ./
|
||||
RUN yarn install --frozen-lockfile
|
||||
|
||||
COPY --from=build /app/.next ./.next
|
||||
COPY --from=build /app/public ./public
|
||||
COPY --from=build /app/next.config.mjs ./next.config.mjs
|
||||
|
||||
EXPOSE 3000
|
||||
CMD ["npm", "start"]
|
||||
@@ -1,41 +0,0 @@
|
||||
This is the frontend for AutoGPT's next generation
|
||||
|
||||
## Getting Started
|
||||
|
||||
Run the following installation once.
|
||||
|
||||
```bash
|
||||
npm install
|
||||
# or
|
||||
yarn install
|
||||
# or
|
||||
pnpm install
|
||||
# or
|
||||
bun install
|
||||
```
|
||||
|
||||
Next, run the development server:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
# or
|
||||
yarn dev
|
||||
# or
|
||||
pnpm dev
|
||||
# or
|
||||
bun dev
|
||||
```
|
||||
|
||||
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
|
||||
|
||||
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
|
||||
|
||||
For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`.
|
||||
|
||||
If the project is updated via git, you will need to `npm install` after each update.
|
||||
|
||||
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
|
||||
|
||||
## Deploy
|
||||
|
||||
TODO
|
||||
@@ -1,72 +0,0 @@
|
||||
{
|
||||
"name": "autogpt_builder",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "next dev",
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"format": "prettier --write ."
|
||||
},
|
||||
"dependencies": {
|
||||
"@hookform/resolvers": "^3.9.0",
|
||||
"@next/third-parties": "^14.2.5",
|
||||
"@radix-ui/react-avatar": "^1.1.0",
|
||||
"@radix-ui/react-checkbox": "^1.1.1",
|
||||
"@radix-ui/react-collapsible": "^1.1.0",
|
||||
"@radix-ui/react-dialog": "^1.1.1",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.1",
|
||||
"@radix-ui/react-icons": "^1.3.0",
|
||||
"@radix-ui/react-label": "^2.1.0",
|
||||
"@radix-ui/react-popover": "^1.1.1",
|
||||
"@radix-ui/react-scroll-area": "^1.1.0",
|
||||
"@radix-ui/react-select": "^2.1.1",
|
||||
"@radix-ui/react-separator": "^1.1.0",
|
||||
"@radix-ui/react-slot": "^1.1.0",
|
||||
"@radix-ui/react-switch": "^1.1.0",
|
||||
"@radix-ui/react-toast": "^1.2.1",
|
||||
"@radix-ui/react-tooltip": "^1.1.2",
|
||||
"@sentry/nextjs": "^8",
|
||||
"@supabase/ssr": "^0.4.0",
|
||||
"@supabase/supabase-js": "^2.45.0",
|
||||
"@tanstack/react-table": "^8.20.5",
|
||||
"@xyflow/react": "^12.1.0",
|
||||
"ajv": "^8.17.1",
|
||||
"class-variance-authority": "^0.7.0",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "1.0.0",
|
||||
"date-fns": "^3.6.0",
|
||||
"dotenv": "^16.4.5",
|
||||
"lucide-react": "^0.407.0",
|
||||
"moment": "^2.30.1",
|
||||
"next": "14.2.4",
|
||||
"next-themes": "^0.3.0",
|
||||
"react": "^18",
|
||||
"react-day-picker": "^8.10.1",
|
||||
"react-dom": "^18",
|
||||
"react-hook-form": "^7.52.1",
|
||||
"react-icons": "^5.2.1",
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-modal": "^3.16.1",
|
||||
"react-shepherd": "^6.1.1",
|
||||
"recharts": "^2.12.7",
|
||||
"tailwind-merge": "^2.3.0",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"uuid": "^10.0.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20",
|
||||
"@types/react": "^18",
|
||||
"@types/react-dom": "^18",
|
||||
"@types/react-modal": "^3.16.3",
|
||||
"eslint": "^8",
|
||||
"eslint-config-next": "14.2.4",
|
||||
"postcss": "^8",
|
||||
"prettier": "^3.3.3",
|
||||
"prettier-plugin-tailwindcss": "^0.6.6",
|
||||
"tailwindcss": "^3.4.1",
|
||||
"typescript": "^5"
|
||||
}
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useSearchParams } from "next/navigation";
|
||||
import FlowEditor from '@/components/Flow';
|
||||
|
||||
export default function Home() {
|
||||
const query = useSearchParams();
|
||||
|
||||
return (
|
||||
<FlowEditor
|
||||
className="flow-container w-full min-h-[86vh] border border-gray-300 dark:border-gray-700 rounded-lg"
|
||||
flowID={query.get("flowID") ?? query.get("templateID") ?? undefined}
|
||||
template={!!query.get("templateID")}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
"use client";
|
||||
import React, { useCallback, useEffect, useMemo, useState } from "react";
|
||||
|
||||
import AutoGPTServerAPI, {
|
||||
GraphMeta,
|
||||
NodeExecutionResult,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
|
||||
import { Card } from "@/components/ui/card";
|
||||
import { FlowRun } from "@/lib/types";
|
||||
import {
|
||||
AgentFlowList,
|
||||
FlowInfo,
|
||||
FlowRunInfo,
|
||||
FlowRunsList,
|
||||
FlowRunsStats,
|
||||
} from "@/components/monitor";
|
||||
|
||||
const Monitor = () => {
|
||||
const [flows, setFlows] = useState<GraphMeta[]>([]);
|
||||
const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
|
||||
const [selectedFlow, setSelectedFlow] = useState<GraphMeta | null>(null);
|
||||
const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
|
||||
|
||||
const api = useMemo(() => new AutoGPTServerAPI(), []);
|
||||
|
||||
const refreshFlowRuns = useCallback(
|
||||
(flowID: string) => {
|
||||
// Fetch flow run IDs
|
||||
api.listGraphRunIDs(flowID).then((runIDs) =>
|
||||
runIDs.map((runID) => {
|
||||
let run;
|
||||
if (
|
||||
(run = flowRuns.find((fr) => fr.id == runID)) &&
|
||||
!["waiting", "running"].includes(run.status)
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Fetch flow run
|
||||
api.getGraphExecutionInfo(flowID, runID).then((execInfo) =>
|
||||
setFlowRuns((flowRuns) => {
|
||||
if (execInfo.length == 0) return flowRuns;
|
||||
|
||||
const flowRunIndex = flowRuns.findIndex((fr) => fr.id == runID);
|
||||
const flowRun = flowRunFromNodeExecutionResults(execInfo);
|
||||
if (flowRunIndex > -1) {
|
||||
flowRuns.splice(flowRunIndex, 1, flowRun);
|
||||
} else {
|
||||
flowRuns.push(flowRun);
|
||||
}
|
||||
return [...flowRuns];
|
||||
}),
|
||||
);
|
||||
}),
|
||||
);
|
||||
},
|
||||
[api, flowRuns],
|
||||
);
|
||||
|
||||
const fetchFlowsAndRuns = useCallback(() => {
|
||||
api.listGraphs().then((flows) => {
|
||||
setFlows(flows);
|
||||
flows.map((flow) => refreshFlowRuns(flow.id));
|
||||
});
|
||||
}, [api, refreshFlowRuns]);
|
||||
|
||||
useEffect(() => fetchFlowsAndRuns(), [fetchFlowsAndRuns]);
|
||||
useEffect(() => {
|
||||
const intervalId = setInterval(
|
||||
() => flows.map((f) => refreshFlowRuns(f.id)),
|
||||
5000,
|
||||
);
|
||||
return () => clearInterval(intervalId);
|
||||
}, [flows, refreshFlowRuns]);
|
||||
|
||||
const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
|
||||
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
|
||||
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";
|
||||
|
||||
return (
|
||||
<div className="grid grid-cols-1 gap-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10">
|
||||
<AgentFlowList
|
||||
className={column1}
|
||||
flows={flows}
|
||||
flowRuns={flowRuns}
|
||||
selectedFlow={selectedFlow}
|
||||
onSelectFlow={(f) => {
|
||||
setSelectedRun(null);
|
||||
setSelectedFlow(f.id == selectedFlow?.id ? null : f);
|
||||
}}
|
||||
/>
|
||||
<FlowRunsList
|
||||
className={column2}
|
||||
flows={flows}
|
||||
runs={[
|
||||
...(selectedFlow
|
||||
? flowRuns.filter((v) => v.graphID == selectedFlow.id)
|
||||
: flowRuns),
|
||||
].sort((a, b) => Number(a.startTime) - Number(b.startTime))}
|
||||
selectedRun={selectedRun}
|
||||
onSelectRun={(r) => setSelectedRun(r.id == selectedRun?.id ? null : r)}
|
||||
/>
|
||||
{(selectedRun && (
|
||||
<FlowRunInfo
|
||||
flow={selectedFlow || flows.find((f) => f.id == selectedRun.graphID)!}
|
||||
flowRun={selectedRun}
|
||||
className={column3}
|
||||
/>
|
||||
)) ||
|
||||
(selectedFlow && (
|
||||
<FlowInfo
|
||||
flow={selectedFlow}
|
||||
flowRuns={flowRuns.filter((r) => r.graphID == selectedFlow.id)}
|
||||
className={column3}
|
||||
/>
|
||||
)) || (
|
||||
<Card className={`p-6 ${column3}`}>
|
||||
<FlowRunsStats flows={flows} flowRuns={flowRuns} />
|
||||
</Card>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
function flowRunFromNodeExecutionResults(
|
||||
nodeExecutionResults: NodeExecutionResult[],
|
||||
): FlowRun {
|
||||
// Determine overall status
|
||||
let status: "running" | "waiting" | "success" | "failed" = "success";
|
||||
for (const execution of nodeExecutionResults) {
|
||||
if (execution.status === "FAILED") {
|
||||
status = "failed";
|
||||
break;
|
||||
} else if (["QUEUED", "RUNNING"].includes(execution.status)) {
|
||||
status = "running";
|
||||
break;
|
||||
} else if (execution.status === "INCOMPLETE") {
|
||||
status = "waiting";
|
||||
}
|
||||
}
|
||||
|
||||
// Determine aggregate startTime, endTime, and totalRunTime
|
||||
const now = Date.now();
|
||||
const startTime = Math.min(
|
||||
...nodeExecutionResults.map((ner) => ner.add_time.getTime()),
|
||||
now,
|
||||
);
|
||||
const endTime = ["success", "failed"].includes(status)
|
||||
? Math.max(
|
||||
...nodeExecutionResults.map((ner) => ner.end_time?.getTime() || 0),
|
||||
startTime,
|
||||
)
|
||||
: now;
|
||||
const duration = (endTime - startTime) / 1000; // Convert to seconds
|
||||
const totalRunTime =
|
||||
nodeExecutionResults.reduce(
|
||||
(cum, node) =>
|
||||
cum +
|
||||
((node.end_time?.getTime() ?? now) -
|
||||
(node.start_time?.getTime() ?? now)),
|
||||
0,
|
||||
) / 1000;
|
||||
|
||||
return {
|
||||
id: nodeExecutionResults[0].graph_exec_id,
|
||||
graphID: nodeExecutionResults[0].graph_id,
|
||||
graphVersion: nodeExecutionResults[0].graph_version,
|
||||
status,
|
||||
startTime,
|
||||
endTime,
|
||||
duration,
|
||||
totalRunTime,
|
||||
nodeExecutionResults: nodeExecutionResults,
|
||||
};
|
||||
}
|
||||
|
||||
export default Monitor;
|
||||
@@ -1,33 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useSupabase } from "@/components/SupabaseProvider";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import useUser from "@/hooks/useUser";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { FaSpinner } from "react-icons/fa";
|
||||
|
||||
export default function PrivatePage() {
|
||||
const { user, isLoading, error } = useUser();
|
||||
const { supabase } = useSupabase();
|
||||
const router = useRouter();
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<div className="flex h-[80vh] items-center justify-center">
|
||||
<FaSpinner className="mr-2 h-16 w-16 animate-spin" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
if (error || !user || !supabase) {
|
||||
router.push("/login");
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<div>
|
||||
<p>Hello {user.email}</p>
|
||||
<Button onClick={() => supabase.auth.signOut()}>Log out</Button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,660 +0,0 @@
|
||||
import React, {
|
||||
useState,
|
||||
useEffect,
|
||||
useCallback,
|
||||
useRef,
|
||||
useContext,
|
||||
} from "react";
|
||||
import { NodeProps, useReactFlow, Node, Edge } from "@xyflow/react";
|
||||
import "@xyflow/react/dist/style.css";
|
||||
import "./customnode.css";
|
||||
import InputModalComponent from "./InputModalComponent";
|
||||
import OutputModalComponent from "./OutputModalComponent";
|
||||
import {
|
||||
BlockIORootSchema,
|
||||
BlockIOStringSubSchema,
|
||||
Category,
|
||||
NodeExecutionResult,
|
||||
BlockUIType,
|
||||
BlockCost,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { beautifyString, cn, setNestedProperty } from "@/lib/utils";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { Switch } from "@/components/ui/switch";
|
||||
import { Copy, Trash2 } from "lucide-react";
|
||||
import { history } from "./history";
|
||||
import NodeHandle from "./NodeHandle";
|
||||
import {
|
||||
NodeGenericInputField,
|
||||
NodeTextBoxInput,
|
||||
} from "./node-input-components";
|
||||
import SchemaTooltip from "./SchemaTooltip";
|
||||
import { getPrimaryCategoryColor } from "@/lib/utils";
|
||||
import { FlowContext } from "./Flow";
|
||||
import { Badge } from "./ui/badge";
|
||||
import DataTable from "./DataTable";
|
||||
|
||||
type ParsedKey = { key: string; index?: number };
|
||||
|
||||
export type ConnectionData = Array<{
|
||||
edge_id: string;
|
||||
source: string;
|
||||
sourceHandle: string;
|
||||
target: string;
|
||||
targetHandle: string;
|
||||
}>;
|
||||
|
||||
export type CustomNodeData = {
|
||||
blockType: string;
|
||||
blockCosts: BlockCost[];
|
||||
title: string;
|
||||
description: string;
|
||||
categories: Category[];
|
||||
inputSchema: BlockIORootSchema;
|
||||
outputSchema: BlockIORootSchema;
|
||||
hardcodedValues: { [key: string]: any };
|
||||
connections: ConnectionData;
|
||||
isOutputOpen: boolean;
|
||||
status?: NodeExecutionResult["status"];
|
||||
/** executionResults contains outputs across multiple executions
|
||||
* with the last element being the most recent output */
|
||||
executionResults?: {
|
||||
execId: string;
|
||||
data: NodeExecutionResult["output_data"];
|
||||
}[];
|
||||
block_id: string;
|
||||
backend_id?: string;
|
||||
errors?: { [key: string]: string };
|
||||
isOutputStatic?: boolean;
|
||||
uiType: BlockUIType;
|
||||
};
|
||||
|
||||
export type CustomNode = Node<CustomNodeData, "custom">;
|
||||
|
||||
export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
|
||||
const [isOutputOpen, setIsOutputOpen] = useState(data.isOutputOpen || false);
|
||||
const [isAdvancedOpen, setIsAdvancedOpen] = useState(false);
|
||||
const [isModalOpen, setIsModalOpen] = useState(false);
|
||||
const [activeKey, setActiveKey] = useState<string | null>(null);
|
||||
const [inputModalValue, setInputModalValue] = useState<string>("");
|
||||
const [isOutputModalOpen, setIsOutputModalOpen] = useState(false);
|
||||
const [isHovered, setIsHovered] = useState(false);
|
||||
const { updateNodeData, deleteElements, addNodes, getNode } = useReactFlow<
|
||||
CustomNode,
|
||||
Edge
|
||||
>();
|
||||
const isInitialSetup = useRef(true);
|
||||
const flowContext = useContext(FlowContext);
|
||||
|
||||
if (!flowContext) {
|
||||
throw new Error("FlowContext consumer must be inside FlowEditor component");
|
||||
}
|
||||
|
||||
const { setIsAnyModalOpen, getNextNodeId } = flowContext;
|
||||
|
||||
useEffect(() => {
|
||||
if (data.executionResults || data.status) {
|
||||
setIsOutputOpen(true);
|
||||
}
|
||||
}, [data.executionResults, data.status]);
|
||||
|
||||
useEffect(() => {
|
||||
setIsOutputOpen(data.isOutputOpen);
|
||||
}, [data.isOutputOpen]);
|
||||
|
||||
useEffect(() => {
|
||||
setIsAnyModalOpen?.(isModalOpen || isOutputModalOpen);
|
||||
}, [isModalOpen, isOutputModalOpen, data, setIsAnyModalOpen]);
|
||||
|
||||
useEffect(() => {
|
||||
isInitialSetup.current = false;
|
||||
}, []);
|
||||
|
||||
const setHardcodedValues = (values: any) => {
|
||||
updateNodeData(id, { hardcodedValues: values });
|
||||
};
|
||||
|
||||
const setErrors = (errors: { [key: string]: string }) => {
|
||||
updateNodeData(id, { errors });
|
||||
};
|
||||
|
||||
const toggleOutput = (checked: boolean) => {
|
||||
setIsOutputOpen(checked);
|
||||
};
|
||||
|
||||
const toggleAdvancedSettings = (checked: boolean) => {
|
||||
setIsAdvancedOpen(checked);
|
||||
};
|
||||
|
||||
const generateOutputHandles = (
|
||||
schema: BlockIORootSchema,
|
||||
nodeType: BlockUIType,
|
||||
) => {
|
||||
if (
|
||||
!schema?.properties ||
|
||||
nodeType === BlockUIType.OUTPUT ||
|
||||
nodeType === BlockUIType.NOTE
|
||||
)
|
||||
return null;
|
||||
const keys = Object.keys(schema.properties);
|
||||
return keys.map((key) => (
|
||||
<div key={key}>
|
||||
<NodeHandle
|
||||
keyName={key}
|
||||
isConnected={isHandleConnected(key)}
|
||||
schema={schema.properties[key]}
|
||||
side="right"
|
||||
/>
|
||||
</div>
|
||||
));
|
||||
};
|
||||
|
||||
const generateInputHandles = (
|
||||
schema: BlockIORootSchema,
|
||||
nodeType: BlockUIType,
|
||||
) => {
|
||||
if (!schema?.properties) return null;
|
||||
let keys = Object.entries(schema.properties);
|
||||
switch (nodeType) {
|
||||
case BlockUIType.INPUT:
|
||||
// For INPUT blocks, dont include connection handles
|
||||
return keys.map(([propKey, propSchema]) => {
|
||||
const isRequired = data.inputSchema.required?.includes(propKey);
|
||||
const isConnected = isHandleConnected(propKey);
|
||||
const isAdvanced = propSchema.advanced;
|
||||
return (
|
||||
(isRequired || isAdvancedOpen || !isAdvanced) && (
|
||||
<div key={propKey}>
|
||||
<span className="text-m green -mb-1 text-gray-900">
|
||||
{propSchema.title || beautifyString(propKey)}
|
||||
</span>
|
||||
<div key={propKey} onMouseOver={() => {}}>
|
||||
{!isConnected && (
|
||||
<NodeGenericInputField
|
||||
className="mb-2 mt-1"
|
||||
propKey={propKey}
|
||||
propSchema={propSchema}
|
||||
currentValue={getValue(propKey)}
|
||||
connections={data.connections}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
errors={data.errors ?? {}}
|
||||
displayName={propSchema.title || beautifyString(propKey)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
);
|
||||
});
|
||||
|
||||
case BlockUIType.NOTE:
|
||||
// For NOTE blocks, don't render any input handles
|
||||
const [noteKey, noteSchema] = keys[0];
|
||||
return (
|
||||
<div key={noteKey}>
|
||||
<NodeTextBoxInput
|
||||
className=""
|
||||
selfKey={noteKey}
|
||||
schema={noteSchema as BlockIOStringSubSchema}
|
||||
value={getValue(noteKey)}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
error={data.errors?.[noteKey] ?? ""}
|
||||
displayName={noteSchema.title || beautifyString(noteKey)}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
|
||||
case BlockUIType.OUTPUT:
|
||||
// For OUTPUT blocks, only show the 'value' property
|
||||
return keys.map(([propKey, propSchema]) => {
|
||||
const isRequired = data.inputSchema.required?.includes(propKey);
|
||||
const isConnected = isHandleConnected(propKey);
|
||||
const isAdvanced = propSchema.advanced;
|
||||
return (
|
||||
(isRequired || isAdvancedOpen || !isAdvanced) && (
|
||||
<div key={propKey} onMouseOver={() => {}}>
|
||||
{propKey !== "value" ? (
|
||||
<span className="text-m green -mb-1 text-gray-900">
|
||||
{propSchema.title || beautifyString(propKey)}
|
||||
</span>
|
||||
) : (
|
||||
<NodeHandle
|
||||
keyName={propKey}
|
||||
isConnected={isConnected}
|
||||
isRequired={isRequired}
|
||||
schema={propSchema}
|
||||
side="left"
|
||||
/>
|
||||
)}
|
||||
{!isConnected && (
|
||||
<NodeGenericInputField
|
||||
className="mb-2 mt-1"
|
||||
propKey={propKey}
|
||||
propSchema={propSchema}
|
||||
currentValue={getValue(propKey)}
|
||||
connections={data.connections}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
errors={data.errors ?? {}}
|
||||
displayName={propSchema.title || beautifyString(propKey)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
);
|
||||
});
|
||||
|
||||
default:
|
||||
return keys.map(([propKey, propSchema]) => {
|
||||
const isRequired = data.inputSchema.required?.includes(propKey);
|
||||
const isConnected = isHandleConnected(propKey);
|
||||
const isAdvanced = propSchema.advanced;
|
||||
return (
|
||||
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
|
||||
<div key={propKey} onMouseOver={() => {}}>
|
||||
<NodeHandle
|
||||
keyName={propKey}
|
||||
isConnected={isConnected}
|
||||
isRequired={isRequired}
|
||||
schema={propSchema}
|
||||
side="left"
|
||||
/>
|
||||
{!isConnected && (
|
||||
<NodeGenericInputField
|
||||
className="mb-2 mt-1"
|
||||
propKey={propKey}
|
||||
propSchema={propSchema}
|
||||
currentValue={getValue(propKey)}
|
||||
connections={data.connections}
|
||||
handleInputChange={handleInputChange}
|
||||
handleInputClick={handleInputClick}
|
||||
errors={data.errors ?? {}}
|
||||
displayName={propSchema.title || beautifyString(propKey)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
);
|
||||
});
|
||||
}
|
||||
};
|
||||
const handleInputChange = (path: string, value: any) => {
|
||||
const keys = parseKeys(path);
|
||||
const newValues = JSON.parse(JSON.stringify(data.hardcodedValues));
|
||||
let current = newValues;
|
||||
|
||||
for (let i = 0; i < keys.length - 1; i++) {
|
||||
const { key: currentKey, index } = keys[i];
|
||||
if (index !== undefined) {
|
||||
if (!current[currentKey]) current[currentKey] = [];
|
||||
if (!current[currentKey][index]) current[currentKey][index] = {};
|
||||
current = current[currentKey][index];
|
||||
} else {
|
||||
if (!current[currentKey]) current[currentKey] = {};
|
||||
current = current[currentKey];
|
||||
}
|
||||
}
|
||||
|
||||
const lastKey = keys[keys.length - 1];
|
||||
if (lastKey.index !== undefined) {
|
||||
if (!current[lastKey.key]) current[lastKey.key] = [];
|
||||
current[lastKey.key][lastKey.index] = value;
|
||||
} else {
|
||||
current[lastKey.key] = value;
|
||||
}
|
||||
|
||||
// console.log(`Updating hardcoded values for node ${id}:`, newValues);
|
||||
|
||||
if (!isInitialSetup.current) {
|
||||
history.push({
|
||||
type: "UPDATE_INPUT",
|
||||
payload: { nodeId: id, oldValues: data.hardcodedValues, newValues },
|
||||
undo: () => setHardcodedValues(data.hardcodedValues),
|
||||
redo: () => setHardcodedValues(newValues),
|
||||
});
|
||||
}
|
||||
|
||||
setHardcodedValues(newValues);
|
||||
const errors = data.errors || {};
|
||||
// Remove error with the same key
|
||||
setNestedProperty(errors, path, null);
|
||||
setErrors({ ...errors });
|
||||
};
|
||||
|
||||
// Helper function to parse keys with array indices
|
||||
//TODO move to utils
|
||||
const parseKeys = (key: string): ParsedKey[] => {
|
||||
const splits = key.split(/_@_|_#_|_\$_|\./);
|
||||
const keys: ParsedKey[] = [];
|
||||
let currentKey: string | null = null;
|
||||
|
||||
splits.forEach((split) => {
|
||||
const isInteger = /^\d+$/.test(split);
|
||||
if (!isInteger) {
|
||||
if (currentKey !== null) {
|
||||
keys.push({ key: currentKey });
|
||||
}
|
||||
currentKey = split;
|
||||
} else {
|
||||
if (currentKey !== null) {
|
||||
keys.push({ key: currentKey, index: parseInt(split, 10) });
|
||||
currentKey = null;
|
||||
} else {
|
||||
throw new Error("Invalid key format: array index without a key");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (currentKey !== null) {
|
||||
keys.push({ key: currentKey });
|
||||
}
|
||||
|
||||
return keys;
|
||||
};
|
||||
|
||||
const getValue = (key: string) => {
|
||||
const keys = parseKeys(key);
|
||||
return keys.reduce((acc, k) => {
|
||||
if (acc === undefined) return undefined;
|
||||
if (k.index !== undefined) {
|
||||
return Array.isArray(acc[k.key]) ? acc[k.key][k.index] : undefined;
|
||||
}
|
||||
return acc[k.key];
|
||||
}, data.hardcodedValues as any);
|
||||
};
|
||||
|
||||
const isHandleConnected = (key: string) => {
|
||||
return (
|
||||
data.connections &&
|
||||
data.connections.some((conn: any) => {
|
||||
if (typeof conn === "string") {
|
||||
const [source, target] = conn.split(" -> ");
|
||||
return (
|
||||
(target.includes(key) && target.includes(data.title)) ||
|
||||
(source.includes(key) && source.includes(data.title))
|
||||
);
|
||||
}
|
||||
return (
|
||||
(conn.target === id && conn.targetHandle === key) ||
|
||||
(conn.source === id && conn.sourceHandle === key)
|
||||
);
|
||||
})
|
||||
);
|
||||
};
|
||||
|
||||
const handleInputClick = (key: string) => {
|
||||
console.log(`Opening modal for key: ${key}`);
|
||||
setActiveKey(key);
|
||||
const value = getValue(key);
|
||||
setInputModalValue(
|
||||
typeof value === "object" ? JSON.stringify(value, null, 2) : value,
|
||||
);
|
||||
setIsModalOpen(true);
|
||||
};
|
||||
|
||||
const handleModalSave = (value: string) => {
|
||||
if (activeKey) {
|
||||
try {
|
||||
const parsedValue = JSON.parse(value);
|
||||
handleInputChange(activeKey, parsedValue);
|
||||
} catch (error) {
|
||||
handleInputChange(activeKey, value);
|
||||
}
|
||||
}
|
||||
setIsModalOpen(false);
|
||||
setActiveKey(null);
|
||||
};
|
||||
|
||||
const handleOutputClick = () => {
|
||||
setIsOutputModalOpen(true);
|
||||
};
|
||||
|
||||
const handleHovered = () => {
|
||||
setIsHovered(true);
|
||||
};
|
||||
|
||||
const handleMouseLeave = () => {
|
||||
setIsHovered(false);
|
||||
};
|
||||
|
||||
const deleteNode = useCallback(() => {
|
||||
console.log("Deleting node:", id);
|
||||
|
||||
// Remove the node
|
||||
deleteElements({ nodes: [{ id }] });
|
||||
}, [id, deleteElements]);
|
||||
|
||||
const copyNode = useCallback(() => {
|
||||
const newId = getNextNodeId();
|
||||
const currentNode = getNode(id);
|
||||
|
||||
if (!currentNode) {
|
||||
console.error("Cannot copy node: current node not found");
|
||||
return;
|
||||
}
|
||||
|
||||
const verticalOffset = height ?? 100;
|
||||
|
||||
const newNode: CustomNode = {
|
||||
id: newId,
|
||||
type: currentNode.type,
|
||||
position: {
|
||||
x: currentNode.position.x,
|
||||
y: currentNode.position.y - verticalOffset - 20,
|
||||
},
|
||||
data: {
|
||||
...data,
|
||||
title: `${data.title} (Copy)`,
|
||||
block_id: data.block_id,
|
||||
connections: [],
|
||||
isOutputOpen: false,
|
||||
},
|
||||
};
|
||||
|
||||
addNodes(newNode);
|
||||
|
||||
history.push({
|
||||
type: "ADD_NODE",
|
||||
payload: { node: newNode },
|
||||
undo: () => deleteElements({ nodes: [{ id: newId }] }),
|
||||
redo: () => addNodes(newNode),
|
||||
});
|
||||
}, [id, data, height, addNodes, deleteElements, getNode, getNextNodeId]);
|
||||
|
||||
const hasConfigErrors =
|
||||
data.errors &&
|
||||
Object.entries(data.errors).some(([_, value]) => value !== null);
|
||||
const outputData = data.executionResults?.at(-1)?.data;
|
||||
const hasOutputError =
|
||||
typeof outputData === "object" &&
|
||||
outputData !== null &&
|
||||
"error" in outputData;
|
||||
|
||||
useEffect(() => {
|
||||
if (hasConfigErrors) {
|
||||
const filteredErrors = Object.fromEntries(
|
||||
Object.entries(data.errors || {}).filter(
|
||||
([_, value]) => value !== null,
|
||||
),
|
||||
);
|
||||
console.error(
|
||||
"Block configuration errors for",
|
||||
data.title,
|
||||
":",
|
||||
filteredErrors,
|
||||
);
|
||||
}
|
||||
if (hasOutputError) {
|
||||
console.error(
|
||||
"Block output contains error for",
|
||||
data.title,
|
||||
":",
|
||||
outputData.error,
|
||||
);
|
||||
}
|
||||
}, [hasConfigErrors, hasOutputError, data.errors, outputData, data.title]);
|
||||
|
||||
const blockClasses = [
|
||||
"custom-node",
|
||||
"dark-theme",
|
||||
"rounded-xl",
|
||||
"border",
|
||||
"bg-white/[.9]",
|
||||
"shadow-md",
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(" ");
|
||||
|
||||
const errorClass =
|
||||
hasConfigErrors || hasOutputError ? "border-red-500 border-2" : "";
|
||||
|
||||
const statusClass =
|
||||
hasConfigErrors || hasOutputError
|
||||
? "failed"
|
||||
: (data.status?.toLowerCase() ?? "");
|
||||
|
||||
const hasAdvancedFields =
|
||||
data.inputSchema &&
|
||||
Object.entries(data.inputSchema.properties).some(([key, value]) => {
|
||||
return (
|
||||
value.advanced === true && !data.inputSchema.required?.includes(key)
|
||||
);
|
||||
});
|
||||
|
||||
const inputValues = data.hardcodedValues;
|
||||
const blockCost =
|
||||
data.blockCosts &&
|
||||
data.blockCosts.find((cost) =>
|
||||
Object.entries(cost.cost_filter).every(
|
||||
// Undefined, null, or empty values are considered equal
|
||||
([key, value]) =>
|
||||
value === inputValues[key] || (!value && !inputValues[key]),
|
||||
),
|
||||
);
|
||||
console.debug(`Block cost ${inputValues}|${data.blockCosts}=${blockCost}`);
|
||||
|
||||
return (
|
||||
<div
|
||||
className={`${data.uiType === BlockUIType.NOTE ? "w-[300px]" : "w-[500px]"} ${blockClasses} ${errorClass} ${statusClass} ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : "bg-white"}`}
|
||||
onMouseEnter={handleHovered}
|
||||
onMouseLeave={handleMouseLeave}
|
||||
data-id={`custom-node-${id}`}
|
||||
>
|
||||
<div
|
||||
className={`mb-2 p-3 ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
|
||||
>
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="font-roboto p-3 text-lg font-semibold">
|
||||
{beautifyString(
|
||||
data.blockType?.replace(/Block$/, "") || data.title,
|
||||
)}
|
||||
</div>
|
||||
<SchemaTooltip description={data.description} />
|
||||
</div>
|
||||
<div className="flex gap-[5px]">
|
||||
{isHovered && (
|
||||
<>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={copyNode}
|
||||
title="Copy node"
|
||||
>
|
||||
<Copy size={18} />
|
||||
</Button>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={deleteNode}
|
||||
title="Delete node"
|
||||
>
|
||||
<Trash2 size={18} />
|
||||
</Button>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{blockCost && (
|
||||
<div className="p-3 text-right font-semibold">
|
||||
Cost: {blockCost.cost_amount} / {blockCost.cost_type}
|
||||
</div>
|
||||
)}
|
||||
{data.uiType !== BlockUIType.NOTE ? (
|
||||
<div className="flex items-start justify-between p-3">
|
||||
<div>
|
||||
{data.inputSchema &&
|
||||
generateInputHandles(data.inputSchema, data.uiType)}
|
||||
</div>
|
||||
<div className="flex-none">
|
||||
{data.outputSchema &&
|
||||
generateOutputHandles(data.outputSchema, data.uiType)}
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<div>
|
||||
{data.inputSchema &&
|
||||
generateInputHandles(data.inputSchema, data.uiType)}
|
||||
</div>
|
||||
)}
|
||||
{isOutputOpen && data.uiType !== BlockUIType.NOTE && (
|
||||
<div
|
||||
data-id="latest-output"
|
||||
className="nodrag m-3 break-words rounded-md border-[1.5px] p-2"
|
||||
>
|
||||
{(data.executionResults?.length ?? 0) > 0 ? (
|
||||
<>
|
||||
<DataTable
|
||||
title="Latest Output"
|
||||
truncateLongData
|
||||
data={data.executionResults!.at(-1)?.data || {}}
|
||||
/>
|
||||
<div className="flex justify-end">
|
||||
<Button variant="ghost" onClick={handleOutputClick}>
|
||||
View More
|
||||
</Button>
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
<span>No outputs yet</span>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
{data.uiType !== BlockUIType.NOTE && (
|
||||
<div className="mt-2.5 flex items-center pb-4 pl-4">
|
||||
<Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
|
||||
<span className="m-1 mr-4">Output</span>
|
||||
{hasAdvancedFields && (
|
||||
<>
|
||||
<Switch onCheckedChange={toggleAdvancedSettings} />
|
||||
<span className="m-1">Advanced</span>
|
||||
</>
|
||||
)}
|
||||
{data.status && (
|
||||
<Badge
|
||||
variant="outline"
|
||||
data-id={`badge-${id}-${data.status}`}
|
||||
className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
|
||||
>
|
||||
{data.status}
|
||||
</Badge>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<InputModalComponent
|
||||
title={activeKey ? `Enter ${beautifyString(activeKey)}` : undefined}
|
||||
isOpen={isModalOpen}
|
||||
onClose={() => setIsModalOpen(false)}
|
||||
onSave={handleModalSave}
|
||||
defaultValue={inputModalValue}
|
||||
key={activeKey}
|
||||
/>
|
||||
<OutputModalComponent
|
||||
isOpen={isOutputModalOpen}
|
||||
onClose={() => setIsOutputModalOpen(false)}
|
||||
executionResults={data.executionResults?.toReversed() || []}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
import Link from "next/link";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import React from "react";
|
||||
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet";
|
||||
import Image from "next/image";
|
||||
import getServerUser from "@/hooks/getServerUser";
|
||||
import ProfileDropdown from "./ProfileDropdown";
|
||||
import {
|
||||
IconCircleUser,
|
||||
IconMenu,
|
||||
IconPackage2,
|
||||
IconRefresh,
|
||||
IconSquareActivity,
|
||||
IconWorkFlow,
|
||||
} from "@/components/ui/icons";
|
||||
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
|
||||
import CreditButton from "@/components/CreditButton";
|
||||
|
||||
export async function NavBar() {
|
||||
const isAvailable = Boolean(
|
||||
process.env.NEXT_PUBLIC_SUPABASE_URL &&
|
||||
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY,
|
||||
);
|
||||
const { user } = await getServerUser();
|
||||
|
||||
return (
|
||||
<header className="sticky top-0 z-50 flex h-16 items-center gap-4 border-b bg-background px-4 md:px-6">
|
||||
<div className="flex flex-1 items-center gap-4">
|
||||
<Sheet>
|
||||
<SheetTrigger asChild>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
className="shrink-0 md:hidden"
|
||||
>
|
||||
<IconMenu />
|
||||
<span className="sr-only">Toggle navigation menu</span>
|
||||
</Button>
|
||||
</SheetTrigger>
|
||||
<SheetContent side="left">
|
||||
<nav className="grid gap-6 text-lg font-medium">
|
||||
<Link
|
||||
href="/"
|
||||
className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconSquareActivity /> Monitor
|
||||
</Link>
|
||||
<Link
|
||||
href="/build"
|
||||
className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconWorkFlow /> Build
|
||||
</Link>
|
||||
<Link
|
||||
href="/marketplace"
|
||||
className="flex flex-row gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconPackage2 /> Marketplace
|
||||
</Link>
|
||||
</nav>
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
<nav className="hidden md:flex md:flex-row md:items-center md:gap-5 lg:gap-6">
|
||||
<Link
|
||||
href="/"
|
||||
className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconSquareActivity /> Monitor
|
||||
</Link>
|
||||
<Link
|
||||
href="/build"
|
||||
className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconWorkFlow /> Build
|
||||
</Link>
|
||||
<Link
|
||||
href="/marketplace"
|
||||
className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<IconPackage2 /> Marketplace
|
||||
</Link>
|
||||
</nav>
|
||||
</div>
|
||||
<div className="relative flex flex-1 justify-center">
|
||||
<a
|
||||
className="pointer-events-auto flex place-items-center gap-2"
|
||||
href="https://news.agpt.co/"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
>
|
||||
By{" "}
|
||||
<Image
|
||||
src="/AUTOgpt_Logo_dark.png"
|
||||
alt="AutoGPT Logo"
|
||||
width={100}
|
||||
height={20}
|
||||
priority
|
||||
/>
|
||||
</a>
|
||||
</div>
|
||||
<div className="flex flex-1 items-center justify-end gap-4">
|
||||
{isAvailable && user && <CreditButton />}
|
||||
|
||||
{isAvailable && !user && (
|
||||
<Link
|
||||
href="/login"
|
||||
className="flex flex-row items-center gap-2 text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
Log In
|
||||
<IconCircleUser />
|
||||
</Link>
|
||||
)}
|
||||
{isAvailable && user && <ProfileDropdown />}
|
||||
</div>
|
||||
</header>
|
||||
);
|
||||
}
|
||||
@@ -1,171 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import { Card, CardContent, CardHeader } from "@/components/ui/card";
|
||||
import { Label } from "@/components/ui/label";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { ToyBrick } from "lucide-react";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { ScrollArea } from "@/components/ui/scroll-area";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/ui/popover";
|
||||
import { Block } from "@/lib/autogpt-server-api";
|
||||
import { PlusIcon } from "@radix-ui/react-icons";
|
||||
import { IconToyBrick } from "@/components/ui/icons";
|
||||
import SchemaTooltip from "@/components/SchemaTooltip";
|
||||
import { getPrimaryCategoryColor } from "@/lib/utils";
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
|
||||
interface BlocksControlProps {
|
||||
blocks: Block[];
|
||||
addBlock: (id: string, name: string) => void;
|
||||
pinBlocksPopover: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* A React functional component that displays a control for managing blocks.
|
||||
*
|
||||
* @component
|
||||
* @param {Object} BlocksControlProps - The properties for the BlocksControl component.
|
||||
* @param {Block[]} BlocksControlProps.blocks - An array of blocks to be displayed and filtered.
|
||||
* @param {(id: string, name: string) => void} BlocksControlProps.addBlock - A function to call when a block is added.
|
||||
* @returns The rendered BlocksControl component.
|
||||
*/
|
||||
export const BlocksControl: React.FC<BlocksControlProps> = ({
|
||||
blocks,
|
||||
addBlock,
|
||||
pinBlocksPopover,
|
||||
}) => {
|
||||
const [searchQuery, setSearchQuery] = useState("");
|
||||
const [selectedCategory, setSelectedCategory] = useState<string | null>(null);
|
||||
|
||||
// Extract unique categories from blocks
|
||||
const categories = Array.from(
|
||||
new Set(
|
||||
blocks.flatMap((block) => block.categories.map((cat) => cat.category)),
|
||||
),
|
||||
);
|
||||
|
||||
const filteredBlocks = blocks.filter(
|
||||
(block: Block) =>
|
||||
(block.name.toLowerCase().includes(searchQuery.toLowerCase()) ||
|
||||
beautifyString(block.name)
|
||||
.toLowerCase()
|
||||
.includes(searchQuery.toLowerCase())) &&
|
||||
(!selectedCategory ||
|
||||
block.categories.some((cat) => cat.category === selectedCategory)),
|
||||
);
|
||||
|
||||
return (
|
||||
<Popover open={pinBlocksPopover ? true : undefined}>
|
||||
<Tooltip delayDuration={500}>
|
||||
<TooltipTrigger asChild>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
data-id="blocks-control-popover-trigger"
|
||||
>
|
||||
<IconToyBrick />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="right">Blocks</TooltipContent>
|
||||
</Tooltip>
|
||||
<PopoverContent
|
||||
side="right"
|
||||
sideOffset={22}
|
||||
align="start"
|
||||
className="w-[30rem] p-0"
|
||||
data-id="blocks-control-popover-content"
|
||||
>
|
||||
<Card className="border-none shadow-md">
|
||||
<CardHeader className="flex flex-col gap-x-8 gap-y-2 p-3 px-2">
|
||||
<div className="items-center justify-between">
|
||||
<Label
|
||||
htmlFor="search-blocks"
|
||||
className="whitespace-nowrap border-b-2 border-violet-500 text-base font-semibold text-black 2xl:text-xl"
|
||||
data-id="blocks-control-label"
|
||||
>
|
||||
Blocks
|
||||
</Label>
|
||||
</div>
|
||||
<Input
|
||||
id="search-blocks"
|
||||
type="text"
|
||||
placeholder="Search blocks..."
|
||||
value={searchQuery}
|
||||
onChange={(e) => setSearchQuery(e.target.value)}
|
||||
data-id="blocks-control-search-input"
|
||||
/>
|
||||
<div className="mt-2 flex flex-wrap gap-2">
|
||||
{categories.map((category) => (
|
||||
<Badge
|
||||
key={category}
|
||||
variant={
|
||||
selectedCategory === category ? "default" : "outline"
|
||||
}
|
||||
className={`cursor-pointer ${getPrimaryCategoryColor([{ category, description: "" }])}`}
|
||||
onClick={() =>
|
||||
setSelectedCategory(
|
||||
selectedCategory === category ? null : category,
|
||||
)
|
||||
}
|
||||
>
|
||||
{beautifyString(category)}
|
||||
</Badge>
|
||||
))}
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent className="p-1">
|
||||
<ScrollArea
|
||||
className="h-[60vh]"
|
||||
data-id="blocks-control-scroll-area"
|
||||
>
|
||||
{filteredBlocks.map((block) => (
|
||||
<Card
|
||||
key={block.id}
|
||||
className={`m-2 ${getPrimaryCategoryColor(block.categories)}`}
|
||||
data-id={`block-card-${block.id}`}
|
||||
>
|
||||
<div className="m-3 flex items-center justify-between">
|
||||
<div className="mr-2 min-w-0 flex-1">
|
||||
<span
|
||||
className="block truncate font-medium"
|
||||
data-id={`block-name-${block.id}`}
|
||||
>
|
||||
{beautifyString(block.name)}
|
||||
</span>
|
||||
</div>
|
||||
<SchemaTooltip description={block.description} />
|
||||
<div
|
||||
className="flex flex-shrink-0 items-center gap-1"
|
||||
data-id={`block-tooltip-${block.id}`}
|
||||
>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
onClick={() => addBlock(block.id, block.name)}
|
||||
aria-label="Add block"
|
||||
data-id={`add-block-button-${block.id}`}
|
||||
>
|
||||
<PlusIcon />
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
))}
|
||||
</ScrollArea>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
};
|
||||
@@ -1,175 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import Link from "next/link";
|
||||
import {
|
||||
ArrowLeft,
|
||||
Download,
|
||||
Calendar,
|
||||
Tag,
|
||||
ChevronDown,
|
||||
ChevronUp,
|
||||
} from "lucide-react";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import {
|
||||
AgentDetailResponse,
|
||||
InstallationLocation,
|
||||
} from "@/lib/marketplace-api";
|
||||
import dynamic from "next/dynamic";
|
||||
import { Node, Edge } from "@xyflow/react";
|
||||
import MarketplaceAPI from "@/lib/marketplace-api";
|
||||
import AutoGPTServerAPI, { GraphCreatable } from "@/lib/autogpt-server-api";
|
||||
|
||||
const ReactFlow = dynamic(
|
||||
() => import("@xyflow/react").then((mod) => mod.ReactFlow),
|
||||
{ ssr: false },
|
||||
);
|
||||
const Controls = dynamic(
|
||||
() => import("@xyflow/react").then((mod) => mod.Controls),
|
||||
{ ssr: false },
|
||||
);
|
||||
const Background = dynamic(
|
||||
() => import("@xyflow/react").then((mod) => mod.Background),
|
||||
{ ssr: false },
|
||||
);
|
||||
|
||||
import "@xyflow/react/dist/style.css";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { makeAnalyticsEvent } from "./actions";
|
||||
|
||||
function convertGraphToReactFlow(graph: any): { nodes: Node[]; edges: Edge[] } {
|
||||
const nodes: Node[] = graph.nodes.map((node: any) => {
|
||||
let label = node.block_id || "Unknown";
|
||||
try {
|
||||
label = beautifyString(label);
|
||||
} catch (error) {
|
||||
console.error("Error beautifying node label:", error);
|
||||
}
|
||||
|
||||
return {
|
||||
id: node.id,
|
||||
position: node.metadata.position || { x: 0, y: 0 },
|
||||
data: {
|
||||
label,
|
||||
blockId: node.block_id,
|
||||
inputDefault: node.input_default || {},
|
||||
...node, // Include all other node data
|
||||
},
|
||||
type: "custom",
|
||||
};
|
||||
});
|
||||
|
||||
const edges: Edge[] = graph.links.map((link: any) => ({
|
||||
id: `${link.source_id}-${link.sink_id}`,
|
||||
source: link.source_id,
|
||||
target: link.sink_id,
|
||||
sourceHandle: link.source_name,
|
||||
targetHandle: link.sink_name,
|
||||
type: "custom",
|
||||
data: {
|
||||
sourceId: link.source_id,
|
||||
targetId: link.sink_id,
|
||||
sourceName: link.source_name,
|
||||
targetName: link.sink_name,
|
||||
isStatic: link.is_static,
|
||||
},
|
||||
}));
|
||||
|
||||
return { nodes, edges };
|
||||
}
|
||||
|
||||
async function installGraph(id: string): Promise<void> {
|
||||
const apiUrl =
|
||||
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
|
||||
"http://localhost:8015/api/v1/market";
|
||||
const api = new MarketplaceAPI(apiUrl);
|
||||
|
||||
const serverAPIUrl = process.env.AGPT_SERVER_API_URL;
|
||||
const serverAPI = new AutoGPTServerAPI(serverAPIUrl);
|
||||
try {
|
||||
console.log(`Installing agent with id: ${id}`);
|
||||
let agent = await api.downloadAgent(id);
|
||||
console.log(`Agent downloaded:`, agent);
|
||||
const data: GraphCreatable = {
|
||||
id: agent.id,
|
||||
version: agent.version,
|
||||
is_active: true,
|
||||
is_template: false,
|
||||
name: agent.name,
|
||||
description: agent.description,
|
||||
nodes: agent.graph.nodes,
|
||||
links: agent.graph.links,
|
||||
};
|
||||
const result = await serverAPI.createTemplate(data);
|
||||
makeAnalyticsEvent({
|
||||
event_name: "agent_installed_from_marketplace",
|
||||
event_data: {
|
||||
marketplace_agent_id: id,
|
||||
installed_agent_id: result.id,
|
||||
installation_location: InstallationLocation.CLOUD,
|
||||
},
|
||||
});
|
||||
console.log(`Agent installed successfully`, result);
|
||||
} catch (error) {
|
||||
console.error(`Error installing agent:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function AgentDetailContent({ agent }: { agent: AgentDetailResponse }) {
|
||||
const [isGraphExpanded, setIsGraphExpanded] = useState(false);
|
||||
const { nodes, edges } = convertGraphToReactFlow(agent.graph);
|
||||
|
||||
return (
|
||||
<div className="mx-auto max-w-7xl px-4 py-4 sm:px-6 lg:px-8">
|
||||
<div className="mb-4 flex items-center justify-between">
|
||||
<Link
|
||||
href="/marketplace"
|
||||
className="inline-flex items-center text-indigo-600 hover:text-indigo-500"
|
||||
>
|
||||
<ArrowLeft className="mr-2" size={20} />
|
||||
Back to Marketplace
|
||||
</Link>
|
||||
<Button
|
||||
onClick={() => installGraph(agent.id)}
|
||||
className="inline-flex items-center rounded-md border border-transparent bg-indigo-600 px-4 py-2 text-sm font-medium text-white shadow-sm hover:bg-indigo-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2"
|
||||
>
|
||||
<Download className="mr-2" size={16} />
|
||||
Download Agent
|
||||
</Button>
|
||||
</div>
|
||||
<div className="overflow-hidden bg-white shadow sm:rounded-lg">
|
||||
<div className="px-4 py-5 sm:px-6">
|
||||
<h1 className="text-3xl font-bold text-gray-900">{agent.name}</h1>
|
||||
<p className="mt-1 max-w-2xl text-sm text-gray-500">
|
||||
{agent.description}
|
||||
</p>
|
||||
</div>
|
||||
<div className="border-t border-gray-200 px-4 py-5 sm:p-0">
|
||||
<dl className="sm:divide-y sm:divide-gray-200">
|
||||
<div className="py-4 sm:grid sm:grid-cols-3 sm:gap-4 sm:px-6 sm:py-5">
|
||||
<dt className="flex items-center text-sm font-medium text-gray-500">
|
||||
<Calendar className="mr-2" size={16} />
|
||||
Last Updated
|
||||
</dt>
|
||||
<dd className="mt-1 text-sm text-gray-900 sm:col-span-2 sm:mt-0">
|
||||
{new Date(agent.updatedAt).toLocaleDateString()}
|
||||
</dd>
|
||||
</div>
|
||||
<div className="py-4 sm:grid sm:grid-cols-3 sm:gap-4 sm:px-6 sm:py-5">
|
||||
<dt className="flex items-center text-sm font-medium text-gray-500">
|
||||
<Tag className="mr-2" size={16} />
|
||||
Categories
|
||||
</dt>
|
||||
<dd className="mt-1 text-sm text-gray-900 sm:col-span-2 sm:mt-0">
|
||||
{agent.categories.join(", ")}
|
||||
</dd>
|
||||
</div>
|
||||
</dl>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default AgentDetailContent;
|
||||
@@ -1,79 +0,0 @@
|
||||
import React, { useCallback } from "react";
|
||||
import AutoGPTServerAPI, { GraphMeta } from "@/lib/autogpt-server-api";
|
||||
import { FlowRun } from "@/lib/types";
|
||||
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
|
||||
import Link from "next/link";
|
||||
import { Button, buttonVariants } from "@/components/ui/button";
|
||||
import { IconSquare } from "@/components/ui/icons";
|
||||
import { Pencil2Icon } from "@radix-ui/react-icons";
|
||||
import moment from "moment/moment";
|
||||
import { FlowRunStatusBadge } from "@/components/monitor/FlowRunStatusBadge";
|
||||
|
||||
export const FlowRunInfo: React.FC<
|
||||
React.HTMLAttributes<HTMLDivElement> & {
|
||||
flow: GraphMeta;
|
||||
flowRun: FlowRun;
|
||||
}
|
||||
> = ({ flow, flowRun, ...props }) => {
|
||||
if (flowRun.graphID != flow.id) {
|
||||
throw new Error(
|
||||
`FlowRunInfo can't be used with non-matching flowRun.flowID and flow.id`,
|
||||
);
|
||||
}
|
||||
|
||||
const handleStopRun = useCallback(() => {
|
||||
const api = new AutoGPTServerAPI();
|
||||
api.stopGraphExecution(flow.id, flowRun.id);
|
||||
}, [flow.id, flowRun.id]);
|
||||
|
||||
return (
|
||||
<Card {...props}>
|
||||
<CardHeader className="flex-row items-center justify-between space-x-3 space-y-0">
|
||||
<div>
|
||||
<CardTitle>
|
||||
{flow.name} <span className="font-light">v{flow.version}</span>
|
||||
</CardTitle>
|
||||
<p className="mt-2">
|
||||
Agent ID: <code>{flow.id}</code>
|
||||
</p>
|
||||
<p className="mt-1">
|
||||
Run ID: <code>{flowRun.id}</code>
|
||||
</p>
|
||||
</div>
|
||||
<div className="flex space-x-2">
|
||||
{flowRun.status === "running" && (
|
||||
<Button onClick={handleStopRun} variant="destructive">
|
||||
<IconSquare className="mr-2" /> Stop Run
|
||||
</Button>
|
||||
)}
|
||||
<Link
|
||||
className={buttonVariants({ variant: "outline" })}
|
||||
href={`/build?flowID=${flow.id}`}
|
||||
>
|
||||
<Pencil2Icon className="mr-2" /> Edit Agent
|
||||
</Link>
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<p>
|
||||
<strong>Status:</strong>{" "}
|
||||
<FlowRunStatusBadge status={flowRun.status} />
|
||||
</p>
|
||||
<p>
|
||||
<strong>Started:</strong>{" "}
|
||||
{moment(flowRun.startTime).format("YYYY-MM-DD HH:mm:ss")}
|
||||
</p>
|
||||
<p>
|
||||
<strong>Finished:</strong>{" "}
|
||||
{moment(flowRun.endTime).format("YYYY-MM-DD HH:mm:ss")}
|
||||
</p>
|
||||
<p>
|
||||
<strong>Duration (run time):</strong> {flowRun.duration} (
|
||||
{flowRun.totalRunTime}) seconds
|
||||
</p>
|
||||
{/* <p><strong>Total cost:</strong> €1,23</p> */}
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
export default FlowRunInfo;
|
||||
@@ -1,26 +0,0 @@
|
||||
import * as React from "react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
export interface InputProps
|
||||
extends React.InputHTMLAttributes<HTMLInputElement> {}
|
||||
|
||||
const Input = React.forwardRef<HTMLInputElement, InputProps>(
|
||||
({ className, type, ...props }, ref) => {
|
||||
return (
|
||||
<input
|
||||
type={type}
|
||||
className={cn(
|
||||
"flex h-9 w-full rounded-md border border-gray-200 bg-transparent px-3 py-1 text-sm shadow-sm transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-gray-500 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-gray-400 disabled:cursor-not-allowed disabled:opacity-50 dark:border-gray-800 dark:placeholder:text-gray-400 dark:focus-visible:ring-gray-300",
|
||||
type == "file" ? "pb-0.5 pt-1.5" : "", // fix alignment
|
||||
className,
|
||||
)}
|
||||
ref={ref}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
},
|
||||
);
|
||||
Input.displayName = "Input";
|
||||
|
||||
export { Input };
|
||||
@@ -1,321 +0,0 @@
import { SupabaseClient } from "@supabase/supabase-js";
import {
  Block,
  Graph,
  GraphCreatable,
  GraphUpdateable,
  GraphMeta,
  GraphExecuteResponse,
  NodeExecutionResult,
  User,
  AnalyticsMetrics,
  AnalyticsDetails,
} from "./types";

export default class BaseAutoGPTServerAPI {
  private baseUrl: string;
  private wsUrl: string;
  private webSocket: WebSocket | null = null;
  private wsConnecting: Promise<void> | null = null;
  private wsMessageHandlers: Record<string, Set<(data: any) => void>> = {};
  private supabaseClient: SupabaseClient | null = null;

  constructor(
    baseUrl: string = process.env.NEXT_PUBLIC_AGPT_SERVER_URL ||
      "http://localhost:8006/api",
    wsUrl: string = process.env.NEXT_PUBLIC_AGPT_WS_SERVER_URL ||
      "ws://localhost:8001/ws",
    supabaseClient: SupabaseClient | null = null,
  ) {
    this.baseUrl = baseUrl;
    this.wsUrl = wsUrl;
    this.supabaseClient = supabaseClient;
  }

  async createUser(): Promise<User> {
    return this._request("POST", "/auth/user", {});
  }

  async getUserCredit(): Promise<{ credits: number }> {
    return this._get(`/credits`);
  }

  async getBlocks(): Promise<Block[]> {
    return await this._get("/blocks");
  }

  async listGraphs(): Promise<GraphMeta[]> {
    return this._get("/graphs");
  }

  async listTemplates(): Promise<GraphMeta[]> {
    return this._get("/templates");
  }

  async getGraph(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/graphs/${id}` + query);
  }

  async getTemplate(id: string, version?: number): Promise<Graph> {
    const query = version !== undefined ? `?version=${version}` : "";
    return this._get(`/templates/${id}` + query);
  }

  async getGraphAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/graphs/${id}/versions`);
  }

  async getTemplateAllVersions(id: string): Promise<Graph[]> {
    return this._get(`/templates/${id}/versions`);
  }

  async createGraph(graphCreateBody: GraphCreatable): Promise<Graph>;
  async createGraph(
    fromTemplateID: string,
    templateVersion: number,
  ): Promise<Graph>;
  async createGraph(
    graphOrTemplateID: GraphCreatable | string,
    templateVersion?: number,
  ): Promise<Graph> {
    let requestBody: GraphCreateRequestBody;

    if (typeof graphOrTemplateID == "string") {
      if (templateVersion == undefined) {
        throw new Error("templateVersion not specified");
      }
      requestBody = {
        template_id: graphOrTemplateID,
        template_version: templateVersion,
      };
    } else {
      requestBody = { graph: graphOrTemplateID };
    }

    return this._request("POST", "/graphs", requestBody);
  }

  async createTemplate(templateCreateBody: GraphCreatable): Promise<Graph> {
    const requestBody: GraphCreateRequestBody = { graph: templateCreateBody };
    return this._request("POST", "/templates", requestBody);
  }

  async updateGraph(id: string, graph: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/graphs/${id}`, graph);
  }

  async updateTemplate(id: string, template: GraphUpdateable): Promise<Graph> {
    return await this._request("PUT", `/templates/${id}`, template);
  }

  async setGraphActiveVersion(id: string, version: number): Promise<Graph> {
    return this._request("PUT", `/graphs/${id}/versions/active`, {
      active_graph_version: version,
    });
  }

  async executeGraph(
    id: string,
    inputData: { [key: string]: any } = {},
  ): Promise<GraphExecuteResponse> {
    return this._request("POST", `/graphs/${id}/execute`, inputData);
  }

  async listGraphRunIDs(
    graphID: string,
    graphVersion?: number,
  ): Promise<string[]> {
    const query =
      graphVersion !== undefined ? `?graph_version=${graphVersion}` : "";
    return this._get(`/graphs/${graphID}/executions` + query);
  }

  async getGraphExecutionInfo(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (await this._get(`/graphs/${graphID}/executions/${runID}`)).map(
      parseNodeExecutionResultTimestamps,
    );
  }

  async stopGraphExecution(
    graphID: string,
    runID: string,
  ): Promise<NodeExecutionResult[]> {
    return (
      await this._request("POST", `/graphs/${graphID}/executions/${runID}/stop`)
    ).map(parseNodeExecutionResultTimestamps);
  }

  async logMetric(metric: AnalyticsMetrics) {
    return this._request("POST", "/analytics/log_raw_metric", metric);
  }

  async logAnalytic(analytic: AnalyticsDetails) {
    return this._request("POST", "/analytics/log_raw_analytics", analytic);
  }

  private async _get(path: string) {
    return this._request("GET", path);
  }

  private async _request(
    method: "GET" | "POST" | "PUT" | "PATCH",
    path: string,
    payload?: { [key: string]: any },
  ) {
    if (method != "GET") {
      console.debug(`${method} ${path} payload:`, payload);
    }

    const token =
      (await this.supabaseClient?.auth.getSession())?.data.session
        ?.access_token || "";

    const response = await fetch(this.baseUrl + path, {
      method,
      headers:
        method != "GET"
          ? {
              "Content-Type": "application/json",
              Authorization: token ? `Bearer ${token}` : "",
            }
          : {
              Authorization: token ? `Bearer ${token}` : "",
            },
      body: JSON.stringify(payload),
    });
    const response_data = await response.json();

    if (!response.ok) {
      console.warn(
        `${method} ${path} returned non-OK response:`,
        response_data.detail,
        response,
      );
      throw new Error(`HTTP error ${response.status}! ${response_data.detail}`);
    }
    return response_data;
  }

  async connectWebSocket(): Promise<void> {
    this.wsConnecting ??= new Promise(async (resolve, reject) => {
      try {
        const token =
          (await this.supabaseClient?.auth.getSession())?.data.session
            ?.access_token || "";

        const wsUrlWithToken = `${this.wsUrl}?token=${token}`;
        this.webSocket = new WebSocket(wsUrlWithToken);

        this.webSocket.onopen = () => {
          console.debug("WebSocket connection established");
          resolve();
        };

        this.webSocket.onclose = (event) => {
          console.debug("WebSocket connection closed", event);
          this.webSocket = null;
        };

        this.webSocket.onerror = (error) => {
          console.error("WebSocket error:", error);
          reject(error);
        };

        this.webSocket.onmessage = (event) => {
          const message: WebsocketMessage = JSON.parse(event.data);
          if (message.method == "execution_event") {
            message.data = parseNodeExecutionResultTimestamps(message.data);
          }
          this.wsMessageHandlers[message.method]?.forEach((handler) =>
            handler(message.data),
          );
        };
      } catch (error) {
        console.error("Error connecting to WebSocket:", error);
        reject(error);
      }
    });
    return this.wsConnecting;
  }

  disconnectWebSocket() {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.close();
    }
  }

  sendWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    data: WebsocketMessageTypeMap[M],
    callCount = 0,
  ) {
    if (this.webSocket && this.webSocket.readyState === WebSocket.OPEN) {
      this.webSocket.send(JSON.stringify({ method, data }));
    } else {
      this.connectWebSocket().then(() => {
        callCount == 0
          ? this.sendWebSocketMessage(method, data, callCount + 1)
          : setTimeout(
              () => {
                this.sendWebSocketMessage(method, data, callCount + 1);
              },
              2 ** (callCount - 1) * 1000,
            );
      });
    }
  }

  onWebSocketMessage<M extends keyof WebsocketMessageTypeMap>(
    method: M,
    handler: (data: WebsocketMessageTypeMap[M]) => void,
  ): () => void {
    this.wsMessageHandlers[method] ??= new Set();
    this.wsMessageHandlers[method].add(handler);

    // Return detacher
    return () => this.wsMessageHandlers[method].delete(handler);
  }

  subscribeToExecution(graphId: string) {
    this.sendWebSocketMessage("subscribe", { graph_id: graphId });
  }
}

/* *** UTILITY TYPES *** */

type GraphCreateRequestBody =
  | {
      template_id: string;
      template_version: number;
    }
  | {
      graph: GraphCreatable;
    };

type WebsocketMessageTypeMap = {
  subscribe: { graph_id: string };
  execution_event: NodeExecutionResult;
};

type WebsocketMessage = {
  [M in keyof WebsocketMessageTypeMap]: {
    method: M;
    data: WebsocketMessageTypeMap[M];
  };
}[keyof WebsocketMessageTypeMap];

/* *** HELPER FUNCTIONS *** */

function parseNodeExecutionResultTimestamps(result: any): NodeExecutionResult {
  return {
    ...result,
    add_time: new Date(result.add_time),
    queue_time: result.queue_time ? new Date(result.queue_time) : undefined,
    start_time: result.start_time ? new Date(result.start_time) : undefined,
    end_time: result.end_time ? new Date(result.end_time) : undefined,
  };
}
@@ -1,204 +0,0 @@
import { type ClassValue, clsx } from "clsx";
import { twMerge } from "tailwind-merge";
import { Category } from "./autogpt-server-api/types";

export function cn(...inputs: ClassValue[]) {
  return twMerge(clsx(inputs));
}

/** Derived from https://stackoverflow.com/a/7616484 */
export function hashString(str: string): number {
  let hash = 0,
    chr: number;
  if (str.length === 0) return hash;
  for (let i = 0; i < str.length; i++) {
    chr = str.charCodeAt(i);
    hash = (hash << 5) - hash + chr;
    hash |= 0; // Convert to 32bit integer
  }
  return hash;
}

/** Derived from https://stackoverflow.com/a/32922084 */
export function deepEquals(x: any, y: any): boolean {
  const ok = Object.keys,
    tx = typeof x,
    ty = typeof y;

  const res =
    x &&
    y &&
    tx === ty &&
    (tx === "object"
      ? ok(x).length === ok(y).length &&
        ok(x).every((key) => deepEquals(x[key], y[key]))
      : x === y);
  return res;
}

/** Get tailwind text color class from type name */
export function getTypeTextColor(type: string | null): string {
  if (type === null) return "text-gray-500";
  return (
    {
      string: "text-green-500",
      number: "text-blue-500",
      boolean: "text-yellow-500",
      object: "text-purple-500",
      array: "text-indigo-500",
      null: "text-gray-500",
      any: "text-gray-500",
      "": "text-gray-500",
    }[type] || "text-gray-500"
  );
}

/** Get tailwind bg color class from type name */
export function getTypeBgColor(type: string | null): string {
  if (type === null) return "border-gray-500";
  return (
    {
      string: "border-green-500",
      number: "border-blue-500",
      boolean: "border-yellow-500",
      object: "border-purple-500",
      array: "border-indigo-500",
      null: "border-gray-500",
      any: "border-gray-500",
      "": "border-gray-500",
    }[type] || "border-gray-500"
  );
}

export function getTypeColor(type: string | null): string {
  if (type === null) return "#6b7280";
  return (
    {
      string: "#22c55e",
      number: "#3b82f6",
      boolean: "#eab308",
      object: "#a855f7",
      array: "#6366f1",
      null: "#6b7280",
      any: "#6b7280",
      "": "#6b7280",
    }[type] || "#6b7280"
  );
}

export function beautifyString(name: string): string {
  // Regular expression to identify places to split, considering acronyms
  const result = name
    .replace(/([a-z])([A-Z])/g, "$1 $2") // Add space before capital letters
    .replace(/([A-Z])([A-Z][a-z])/g, "$1 $2") // Add space between acronyms and next word
    .replace(/_/g, " ") // Replace underscores with spaces
    .replace(/\b\w/g, (char) => char.toUpperCase()); // Capitalize the first letter of each word

  return applyExceptions(result);
}

const exceptionMap: Record<string, string> = {
  "Auto GPT": "AutoGPT",
  Gpt: "GPT",
  Creds: "Credentials",
  Id: "ID",
  Openai: "OpenAI",
  Api: "API",
  Url: "URL",
  Http: "HTTP",
  Json: "JSON",
};

const applyExceptions = (str: string): string => {
  Object.keys(exceptionMap).forEach((key) => {
    const regex = new RegExp(`\\b${key}\\b`, "g");
    str = str.replace(regex, exceptionMap[key]);
  });
  return str;
};

export function exportAsJSONFile(obj: object, filename: string): void {
  // Create downloadable blob
  const jsonString = JSON.stringify(obj, null, 2);
  const blob = new Blob([jsonString], { type: "application/json" });
  const url = URL.createObjectURL(blob);

  // Trigger the browser to download the blob to a file
  const link = document.createElement("a");
  link.href = url;
  link.download = filename;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);

  // Clean up
  URL.revokeObjectURL(url);
}

export function setNestedProperty(obj: any, path: string, value: any) {
  const keys = path.split(/[\/.]/); // Split by / or .
  let current = obj;

  for (let i = 0; i < keys.length - 1; i++) {
    const key = keys[i];
    if (!current[key] || typeof current[key] !== "object") {
      current[key] = {};
    }
    current = current[key];
  }

  current[keys[keys.length - 1]] = value;
}

export function removeEmptyStringsAndNulls(obj: any): any {
  if (Array.isArray(obj)) {
    // If obj is an array, recursively remove empty strings and nulls from its elements
    return obj
      .map((item) => removeEmptyStringsAndNulls(item))
      .filter(
        (item) =>
          item !== null && (typeof item !== "string" || item.trim() !== ""),
      );
  } else if (typeof obj === "object" && obj !== null) {
    // If obj is an object, recursively remove empty strings and nulls from its properties
    for (const key in obj) {
      if (obj.hasOwnProperty(key)) {
        const value = obj[key];
        if (
          value === null ||
          (typeof value === "string" && value.trim() === "")
        ) {
          delete obj[key];
        } else {
          obj[key] = removeEmptyStringsAndNulls(value);
        }
      }
    }
  }
  return obj;
}

export const categoryColorMap: Record<string, string> = {
  AI: "bg-orange-300/[.7]",
  SOCIAL: "bg-yellow-300/[.7]",
  TEXT: "bg-green-300/[.7]",
  SEARCH: "bg-blue-300/[.7]",
  BASIC: "bg-purple-300/[.7]",
  INPUT: "bg-cyan-300/[.7]",
  OUTPUT: "bg-red-300/[.7]",
  LOGIC: "bg-teal-300/[.7]",
};

export function getPrimaryCategoryColor(categories: Category[]): string {
  if (categories.length === 0) {
    return "bg-gray-300/[.7]";
  }
  return categoryColorMap[categories[0].category] || "bg-gray-300/[.7]";
}

export function filterBlocksByType<T>(
  blocks: T[],
  predicate: (block: T) => boolean,
): T[] {
  return blocks.filter(predicate);
}
File diff suppressed because it is too large
@@ -0,0 +1,34 @@
import hashlib
import secrets
from typing import NamedTuple


class APIKeyContainer(NamedTuple):
    """Container for API key parts."""

    raw: str
    prefix: str
    postfix: str
    hash: str


class APIKeyManager:
    PREFIX: str = "agpt_"
    PREFIX_LENGTH: int = 8
    POSTFIX_LENGTH: int = 8

    def generate_api_key(self) -> APIKeyContainer:
        """Generate a new API key with all its parts."""
        raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
        return APIKeyContainer(
            raw=raw_key,
            prefix=raw_key[: self.PREFIX_LENGTH],
            postfix=raw_key[-self.POSTFIX_LENGTH :],
            hash=hashlib.sha256(raw_key.encode()).hexdigest(),
        )

    def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
        """Verify if a provided API key matches the stored hash."""
        if not provided_key.startswith(self.PREFIX):
            return False
        return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
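A minimal usage sketch for the APIKeyManager above (an illustration, not part of the diff; variable names are made up): generate_api_key() hands out the raw key once, and only the SHA-256 hash needs to be stored for later verification.

    manager = APIKeyManager()
    key = manager.generate_api_key()
    # Persist key.hash (plus key.prefix / key.postfix for display); show key.raw to the caller only once.
    assert manager.verify_api_key(key.raw, key.hash)
    assert not manager.verify_api_key("not_an_agpt_key", key.hash)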
@@ -1,7 +1,8 @@
import fastapi

from .config import Settings
from .middleware import auth_middleware
from .models import User
from .models import DEFAULT_USER_ID, User


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
@@ -16,8 +17,12 @@ def requires_admin_user(

def verify_user(payload: dict | None, admin_only: bool) -> User:
    if not payload:
        if Settings.ENABLE_AUTH:
            raise fastapi.HTTPException(
                status_code=401, detail="Authorization header is missing"
            )
        # This handles the case when authentication is disabled
        payload = {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"}
        payload = {"sub": DEFAULT_USER_ID, "role": "admin"}

    user_id = payload.get("sub")
@@ -7,12 +7,13 @@ from .config import settings
from .jwt_utils import parse_jwt_token

security = HTTPBearer()
logger = logging.getLogger(__name__)


async def auth_middleware(request: Request):
    if not settings.ENABLE_AUTH:
        # If authentication is disabled, allow the request to proceed
        logging.warn("Auth disabled")
        logger.warn("Auth disabled")
        return {}

    security = HTTPBearer()
@@ -24,7 +25,7 @@ async def auth_middleware(request: Request):
    try:
        payload = parse_jwt_token(credentials.credentials)
        request.state.user = payload
        logging.info("Token decoded successfully")
        logger.debug("Token decoded successfully")
    except ValueError as e:
        raise HTTPException(status_code=401, detail=str(e))
    return payload
@@ -1,5 +1,8 @@
from dataclasses import dataclass

DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"


# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
@@ -0,0 +1,167 @@
import asyncio
import contextlib
import logging
from functools import wraps
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast

import ldclient
from fastapi import HTTPException
from ldclient import Context, LDClient
from ldclient.config import Config
from typing_extensions import ParamSpec

from .config import SETTINGS

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

P = ParamSpec("P")
T = TypeVar("T")


def get_client() -> LDClient:
    """Get the LaunchDarkly client singleton."""
    return ldclient.get()


def initialize_launchdarkly() -> None:
    sdk_key = SETTINGS.launch_darkly_sdk_key
    logger.debug(
        f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
    )

    if not sdk_key:
        logger.warning("LaunchDarkly SDK key not configured")
        return

    config = Config(sdk_key)
    ldclient.set_config(config)

    if ldclient.get().is_initialized():
        logger.info("LaunchDarkly client initialized successfully")
    else:
        logger.error("LaunchDarkly client failed to initialize")


def shutdown_launchdarkly() -> None:
    """Shutdown the LaunchDarkly client."""
    if ldclient.get().is_initialized():
        ldclient.get().close()
        logger.info("LaunchDarkly client closed successfully")


def create_context(
    user_id: str, additional_attributes: Optional[Dict[str, Any]] = None
) -> Context:
    """Create LaunchDarkly context with optional additional attributes."""
    builder = Context.builder(str(user_id)).kind("user")
    if additional_attributes:
        for key, value in additional_attributes.items():
            builder.set(key, value)
    return builder.build()


def feature_flag(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """
    Decorator for feature flag protected endpoints.
    """

    def decorator(
        func: Callable[P, Union[T, Awaitable[T]]]
    ) -> Callable[P, Union[T, Awaitable[T]]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                result = func(*args, **kwargs)
                if asyncio.iscoroutine(result):
                    return await result
                return cast(T, result)
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        @wraps(func)
        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                return cast(T, func(*args, **kwargs))
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        return cast(
            Callable[P, Union[T, Awaitable[T]]],
            async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper,
        )

    return decorator


def percentage_rollout(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for percentage-based rollouts."""
    return feature_flag(flag_key, default)


def beta_feature(
    flag_key: Optional[str] = None,
    unauthorized_response: Any = {"message": "Not available in beta"},
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for beta features."""
    actual_key = f"beta-{flag_key}" if flag_key else "beta"
    return feature_flag(actual_key, False)


@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
    """Context manager for testing feature flags."""
    original_variation = get_client().variation
    get_client().variation = lambda key, context, default: (
        return_value if key == flag_key else original_variation(key, context, default)
    )
    try:
        yield
    finally:
        get_client().variation = original_variation
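A hedged usage sketch of the feature_flag decorator defined above (not part of the diff; the flag and handler names are made up): the wrapped handler must receive user_id as a keyword argument, and a disabled flag surfaces as an HTTP 404.

    @feature_flag("new-dashboard", default=False)
    async def get_dashboard(user_id: str) -> dict:
        # Only reached when "new-dashboard" evaluates to True for this user context.
        return {"dashboard": "ok"}

    # In tests, mock_flag_variation can pin a flag's value:
    # with mock_flag_variation("new-dashboard", True):
    #     ...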
@@ -0,0 +1,44 @@
import pytest
from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation
from ldclient import LDClient


@pytest.fixture
def ld_client(mocker):
    client = mocker.Mock(spec=LDClient)
    mocker.patch("ldclient.get", return_value=client)
    client.is_initialized.return_value = True
    return client


@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
    ld_client.variation.return_value = True

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == "success"
    ld_client.variation.assert_called_once()


@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
    ld_client.variation.return_value = False

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    result = test_function(user_id="test-user")
    assert result == {"error": "disabled"}


def test_mock_flag_variation(ld_client):
    with mock_flag_variation("test-flag", True):
        assert ld_client.variation("test-flag", None, False)

    with mock_flag_variation("test-flag", False):
        assert ld_client.variation("test-flag", None, False)
@@ -0,0 +1,15 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    launch_darkly_sdk_key: str = Field(
        default="",
        description="The Launch Darkly SDK key",
        validation_alias="LAUNCH_DARKLY_SDK_KEY"
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


SETTINGS = Settings()
@@ -6,6 +6,7 @@ from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter, StructuredLoggingFormatter
@@ -1,8 +0,0 @@
from .store import SupabaseIntegrationCredentialsStore
from .types import APIKeyCredentials, OAuth2Credentials

__all__ = [
    "SupabaseIntegrationCredentialsStore",
    "APIKeyCredentials",
    "OAuth2Credentials",
]
@@ -1,145 +0,0 @@
import secrets
from datetime import datetime, timedelta, timezone
from typing import cast

from supabase import Client

from .types import (
    Credentials,
    OAuth2Credentials,
    OAuthState,
    UserMetadata,
    UserMetadataRaw,
)


class SupabaseIntegrationCredentialsStore:
    def __init__(self, supabase: Client):
        self.supabase = supabase

    def add_creds(self, user_id: str, credentials: Credentials) -> None:
        if self.get_creds_by_id(user_id, credentials.id):
            raise ValueError(
                f"Can not re-create existing credentials with ID {credentials.id} "
                f"for user with ID {user_id}"
            )
        self._set_user_integration_creds(
            user_id, [*self.get_all_creds(user_id), credentials]
        )

    def get_all_creds(self, user_id: str) -> list[Credentials]:
        user_metadata = self._get_user_metadata(user_id)
        return UserMetadata.model_validate(user_metadata).integration_credentials

    def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
        credentials = self.get_all_creds(user_id)
        return next((c for c in credentials if c.id == credentials_id), None)

    def get_creds_by_provider(self, user_id: str, provider: str) -> list[Credentials]:
        credentials = self.get_all_creds(user_id)
        return [c for c in credentials if c.provider == provider]

    def get_authorized_providers(self, user_id: str) -> list[str]:
        credentials = self.get_all_creds(user_id)
        return list(set(c.provider for c in credentials))

    def update_creds(self, user_id: str, updated: Credentials) -> None:
        current = self.get_creds_by_id(user_id, updated.id)
        if not current:
            raise ValueError(
                f"Credentials with ID {updated.id} "
                f"for user with ID {user_id} not found"
            )
        if type(current) is not type(updated):
            raise TypeError(
                f"Can not update credentials with ID {updated.id} "
                f"from type {type(current)} "
                f"to type {type(updated)}"
            )

        # Ensure no scopes are removed when updating credentials
        if (
            isinstance(updated, OAuth2Credentials)
            and isinstance(current, OAuth2Credentials)
            and not set(updated.scopes).issuperset(current.scopes)
        ):
            raise ValueError(
                f"Can not update credentials with ID {updated.id} "
                f"and scopes {current.scopes} "
                f"to more restrictive set of scopes {updated.scopes}"
            )

        # Update the credentials
        updated_credentials_list = [
            updated if c.id == updated.id else c for c in self.get_all_creds(user_id)
        ]
        self._set_user_integration_creds(user_id, updated_credentials_list)

    def delete_creds_by_id(self, user_id: str, credentials_id: str) -> None:
        filtered_credentials = [
            c for c in self.get_all_creds(user_id) if c.id != credentials_id
        ]
        self._set_user_integration_creds(user_id, filtered_credentials)

    async def store_state_token(self, user_id: str, provider: str) -> str:
        token = secrets.token_urlsafe(32)
        expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)

        state = OAuthState(
            token=token, provider=provider, expires_at=int(expires_at.timestamp())
        )

        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])
        oauth_states.append(state.model_dump())
        user_metadata["integration_oauth_states"] = oauth_states

        self.supabase.auth.admin.update_user_by_id(
            user_id, {"user_metadata": user_metadata}
        )

        return token

    async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])

        now = datetime.now(timezone.utc)
        valid_state = next(
            (
                state
                for state in oauth_states
                if state["token"] == token
                and state["provider"] == provider
                and state["expires_at"] > now.timestamp()
            ),
            None,
        )

        if valid_state:
            # Remove the used state
            oauth_states.remove(valid_state)
            user_metadata["integration_oauth_states"] = oauth_states
            self.supabase.auth.admin.update_user_by_id(
                user_id, {"user_metadata": user_metadata}
            )
            return True

        return False

    def _set_user_integration_creds(
        self, user_id: str, credentials: list[Credentials]
    ) -> None:
        raw_metadata = self._get_user_metadata(user_id)
        raw_metadata.update(
            {"integration_credentials": [c.model_dump() for c in credentials]}
        )
        self.supabase.auth.admin.update_user_by_id(
            user_id, {"user_metadata": raw_metadata}
        )

    def _get_user_metadata(self, user_id: str) -> UserMetadataRaw:
        response = self.supabase.auth.admin.get_user_by_id(user_id)
        if not response.user:
            raise ValueError(f"User with ID {user_id} not found")
        return cast(UserMetadataRaw, response.user.user_metadata)
@@ -1,60 +0,0 @@
from typing import Annotated, Any, Literal, Optional, TypedDict
from uuid import uuid4

from pydantic import BaseModel, Field, SecretStr, field_serializer


class _BaseCredentials(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))
    provider: str
    title: Optional[str]

    @field_serializer("*")
    def dump_secret_strings(value: Any, _info):
        if isinstance(value, SecretStr):
            return value.get_secret_value()
        return value


class OAuth2Credentials(_BaseCredentials):
    type: Literal["oauth2"] = "oauth2"
    username: Optional[str]
    """Username of the third-party service user that these credentials belong to"""
    access_token: SecretStr
    access_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the access token expires (if at all)"""
    refresh_token: Optional[SecretStr]
    refresh_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
    scopes: list[str]
    metadata: dict[str, Any] = Field(default_factory=dict)


class APIKeyCredentials(_BaseCredentials):
    type: Literal["api_key"] = "api_key"
    api_key: SecretStr
    expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""


Credentials = Annotated[
    OAuth2Credentials | APIKeyCredentials,
    Field(discriminator="type"),
]


class OAuthState(BaseModel):
    token: str
    provider: str
    expires_at: int
    """Unix timestamp (seconds) indicating when this OAuth state expires"""


class UserMetadata(BaseModel):
    integration_credentials: list[Credentials] = Field(default_factory=list)
    integration_oauth_states: list[OAuthState] = Field(default_factory=list)


class UserMetadataRaw(TypedDict, total=False):
    integration_credentials: list[dict]
    integration_oauth_states: list[dict]
autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import threading
from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
    thread_local = threading.local()

    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        cache = getattr(thread_local, "cache", None)
        if cache is None:
            cache = thread_local.cache = {}
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
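A small illustration of thread_cached (not part of the diff; the function name is hypothetical): results are memoized per thread and per argument tuple, so repeated calls with the same arguments on the same thread reuse the first result.

    @thread_cached
    def load_config(path: str) -> dict:
        print(f"loading {path}")  # runs once per thread for each distinct path
        return {"path": path}

    load_config("settings.json")  # computes and caches
    load_config("settings.json")  # served from the per-thread cache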
@@ -0,0 +1,56 @@
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any

from expiringdict import ExpiringDict

if TYPE_CHECKING:
    from redis import Redis
    from redis.lock import Lock as RedisLock


class RedisKeyedMutex:
    """
    This class provides a mutex that can be locked and unlocked by a specific key,
    using Redis as a distributed locking provider.
    It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
    in case the key is not unlocked for a specified duration, to prevent memory leaks.
    """

    def __init__(self, redis: "Redis", timeout: int | None = 60):
        self.redis = redis
        self.timeout = timeout
        self.locks: dict[Any, "RedisLock"] = ExpiringDict(
            max_len=6000, max_age_seconds=self.timeout
        )
        self.locks_lock = Lock()

    @contextmanager
    def locked(self, key: Any):
        lock = self.acquire(key)
        try:
            yield
        finally:
            lock.release()

    def acquire(self, key: Any) -> "RedisLock":
        """Acquires and returns a lock with the given key"""
        with self.locks_lock:
            if key not in self.locks:
                self.locks[key] = self.redis.lock(
                    str(key), self.timeout, thread_local=False
                )
            lock = self.locks[key]
        lock.acquire()
        return lock

    def release(self, key: Any):
        if lock := self.locks.get(key):
            lock.release()

    def release_all_locks(self):
        """Call this on process termination to ensure all locks are released"""
        self.locks_lock.acquire(blocking=False)
        for lock in self.locks.values():
            if lock.locked() and lock.owned():
                lock.release()
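A usage sketch for RedisKeyedMutex (not part of the diff; assumes a reachable Redis instance at localhost, which is an assumption for the example): the locked() context manager serializes critical sections across processes for the same key.

    from redis import Redis

    mutex = RedisKeyedMutex(Redis(host="localhost", port=6379))
    with mutex.locked(("graph", "abc-123")):
        ...  # only one holder at a time runs this block for the key ("graph", "abc-123")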
autogpt_platform/autogpt_libs/poetry.lock (generated, 440 lines)
@@ -377,6 +377,20 @@ files = [
|
||||
[package.extras]
|
||||
test = ["pytest (>=6)"]
|
||||
|
||||
[[package]]
|
||||
name = "expiringdict"
|
||||
version = "1.2.2"
|
||||
description = "Dictionary with auto-expiring values for caching purposes"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"},
|
||||
{file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
tests = ["coverage", "coveralls", "dill", "mock", "nose"]
|
||||
|
||||
[[package]]
|
||||
name = "frozenlist"
|
||||
version = "1.4.1"
|
||||
@@ -569,13 +583,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]
|
||||
|
||||
[[package]]
|
||||
name = "google-cloud-logging"
|
||||
version = "3.11.2"
|
||||
version = "3.11.3"
|
||||
description = "Stackdriver Logging API client library"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "google_cloud_logging-3.11.2-py2.py3-none-any.whl", hash = "sha256:0a755f04f184fbe77ad608258dc283a032485ebb4d0e2b2501964059ee9c898f"},
|
||||
{file = "google_cloud_logging-3.11.2.tar.gz", hash = "sha256:4897441c2b74f6eda9181c23a8817223b6145943314a821d64b729d30766cb2b"},
|
||||
{file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"},
|
||||
{file = "google_cloud_logging-3.11.3.tar.gz", hash = "sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -612,17 +626,17 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
|
||||
|
||||
[[package]]
|
||||
name = "gotrue"
|
||||
version = "2.8.1"
|
||||
version = "2.10.0"
|
||||
description = "Python Client Library for Supabase Auth"
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.8"
|
||||
python-versions = "<4.0,>=3.9"
|
||||
files = [
|
||||
{file = "gotrue-2.8.1-py3-none-any.whl", hash = "sha256:97dff077d71cca629f046c35ba34fae132b69c55fe271651766ddcf6d8132468"},
|
||||
{file = "gotrue-2.8.1.tar.gz", hash = "sha256:644d0096c4c390f7e36d9cb05271a7091c01e7dc6d506eb117b8fe8fc48eb8d9"},
|
||||
{file = "gotrue-2.10.0-py3-none-any.whl", hash = "sha256:768e58207488e5184ffbdc4351b7280d913daf97962f4e9f2cca05c80004b042"},
|
||||
{file = "gotrue-2.10.0.tar.gz", hash = "sha256:4edf4c251da3535f2b044e23deba221e848ca1210c17d0c7a9b19f79a1e3f3c0"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
|
||||
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
|
||||
pydantic = ">=1.10,<3"
|
||||
|
||||
[[package]]
|
||||
@@ -840,6 +854,17 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke
|
||||
perf = ["ipython"]
|
||||
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "iniconfig"
|
||||
version = "2.0.0"
|
||||
description = "brain-dead simple config-ini parsing"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
|
||||
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "multidict"
|
||||
version = "6.1.0"
|
||||
@@ -970,22 +995,37 @@ files = [
|
||||
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.5.0"
|
||||
description = "plugin and hook calling mechanisms for python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
|
||||
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dev = ["pre-commit", "tox"]
|
||||
testing = ["pytest", "pytest-benchmark"]
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "0.16.11"
|
||||
version = "0.18.0"
|
||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.8"
|
||||
python-versions = "<4.0,>=3.9"
|
||||
files = [
|
||||
{file = "postgrest-0.16.11-py3-none-any.whl", hash = "sha256:22fb6b817ace1f68aa648fd4ce0f56d2786c9260fa4ed2cb9046191231a682b8"},
|
||||
{file = "postgrest-0.16.11.tar.gz", hash = "sha256:10af51b4c39e288ad7df2db92d6a61fb3c4683131b40561f473e3de116e83fa5"},
|
||||
{file = "postgrest-0.18.0-py3-none-any.whl", hash = "sha256:200baad0d23fee986b3a0ffd3e07bfe0cdd40e09760f11e8e13a6c0c2376d5fa"},
|
||||
{file = "postgrest-0.18.0.tar.gz", hash = "sha256:29c1a94801a17eb9ad590189993fe5a7a6d8c1bfc11a3c9d0ce7ba146454ebb3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
deprecation = ">=2.1.0,<3.0.0"
|
||||
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
|
||||
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
|
||||
pydantic = ">=1.9,<3.0"
|
||||
strenum = ">=0.4.9,<0.5.0"
|
||||
strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""}
|
||||
|
||||
[[package]]
|
||||
name = "proto-plus"
|
||||
@@ -1031,6 +1071,7 @@ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
|
||||
{file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
|
||||
]
|
||||
|
||||
@@ -1041,6 +1082,7 @@ description = "A collection of ASN.1-based protocols modules"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
|
||||
{file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
|
||||
]
|
||||
|
||||
@@ -1049,22 +1091,19 @@ pyasn1 = ">=0.4.6,<0.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.9.1"
|
||||
version = "2.10.2"
|
||||
description = "Data validation using Python type hints"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
|
||||
{file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
|
||||
{file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"},
|
||||
{file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
annotated-types = ">=0.6.0"
|
||||
pydantic-core = "2.23.3"
|
||||
typing-extensions = [
|
||||
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
|
||||
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
|
||||
]
|
||||
pydantic-core = "2.27.1"
|
||||
typing-extensions = ">=4.12.2"
|
||||
|
||||
[package.extras]
|
||||
email = ["email-validator (>=2.0.0)"]
|
||||
@@ -1072,100 +1111,111 @@ timezone = ["tzdata"]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic-core"
|
||||
version = "2.23.3"
|
||||
version = "2.27.1"
|
||||
description = "Core functionality for Pydantic validation and serialization"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
|
||||
{file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
|
||||
{file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
|
||||
{file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
|
||||
{file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
|
||||
{file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
|
||||
{file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
|
||||
{file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
|
||||
{file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
|
||||
{file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
|
||||
{file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
|
||||
{file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
|
||||
{file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
|
||||
{file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
|
||||
{file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
|
||||
{file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
|
||||
{file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
|
||||
{file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
|
||||
{file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
|
||||
{file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
|
||||
{file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
|
||||
{file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
|
||||
{file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
|
||||
{file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
|
||||
{file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
|
||||
{file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
|
||||
{file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
|
||||
]

[package.dependencies]
@@ -1173,13 +1223,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-settings"
version = "2.5.2"
version = "2.6.1"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"},
{file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"},
{file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"},
{file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"},
]

[package.dependencies]
@@ -1193,13 +1243,13 @@ yaml = ["pyyaml (>=6.0.1)"]

[[package]]
name = "pyjwt"
version = "2.9.0"
version = "2.10.0"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
files = [
{file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"},
{file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"},
{file = "PyJWT-2.10.0-py3-none-any.whl", hash = "sha256:543b77207db656de204372350926bed5a86201c4cbff159f623f79c7bb487a15"},
{file = "pyjwt-2.10.0.tar.gz", hash = "sha256:7628a7eb7938959ac1b26e819a1df0fd3259505627b575e4bad6d08f76db695c"},
]

[package.extras]
@@ -1208,6 +1258,63 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]

[[package]]
name = "pytest"
version = "8.3.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
{file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
]

[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}

[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

[[package]]
name = "pytest-asyncio"
version = "0.24.0"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"},
{file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"},
]

[package.dependencies]
pytest = ">=8.2,<9"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]

[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]

[package.dependencies]
pytest = ">=6.2.5"

[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]

[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -1253,6 +1360,24 @@ python-dateutil = ">=2.8.1,<3.0.0"
typing-extensions = ">=4.12.2,<5.0.0"
websockets = ">=11,<13"

[[package]]
name = "redis"
version = "5.2.0"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.8"
files = [
{file = "redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897"},
{file = "redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0"},
]

[package.dependencies]
async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}

[package.extras]
hiredis = ["hiredis (>=3.0.0)"]
ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"]

[[package]]
name = "requests"
version = "2.32.3"
@@ -1288,6 +1413,33 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.8.0"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.8.0-py3-none-linux_armv6l.whl", hash = "sha256:fcb1bf2cc6706adae9d79c8d86478677e3bbd4ced796ccad106fd4776d395fea"},
{file = "ruff-0.8.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:295bb4c02d58ff2ef4378a1870c20af30723013f441c9d1637a008baaf928c8b"},
{file = "ruff-0.8.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7b1f1c76b47c18fa92ee78b60d2d20d7e866c55ee603e7d19c1e991fad933a9a"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb0d4f250a7711b67ad513fde67e8870109e5ce590a801c3722580fe98c33a99"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e55cce9aa93c5d0d4e3937e47b169035c7e91c8655b0974e61bb79cf398d49c"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f4cd64916d8e732ce6b87f3f5296a8942d285bbbc161acee7fe561134af64f9"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c5c1466be2a2ebdf7c5450dd5d980cc87c8ba6976fb82582fea18823da6fa362"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2dabfd05b96b7b8f2da00d53c514eea842bff83e41e1cceb08ae1966254a51df"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:facebdfe5a5af6b1588a1d26d170635ead6892d0e314477e80256ef4a8470cf3"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87a8e86bae0dbd749c815211ca11e3a7bd559b9710746c559ed63106d382bd9c"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:85e654f0ded7befe2d61eeaf3d3b1e4ef3894469cd664ffa85006c7720f1e4a2"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:83a55679c4cb449fa527b8497cadf54f076603cc36779b2170b24f704171ce70"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:812e2052121634cf13cd6fddf0c1871d0ead1aad40a1a258753c04c18bb71bbd"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:780d5d8523c04202184405e60c98d7595bdb498c3c6abba3b6d4cdf2ca2af426"},
{file = "ruff-0.8.0-py3-none-win32.whl", hash = "sha256:5fdb6efecc3eb60bba5819679466471fd7d13c53487df7248d6e27146e985468"},
{file = "ruff-0.8.0-py3-none-win_amd64.whl", hash = "sha256:582891c57b96228d146725975fbb942e1f30a0c4ba19722e692ca3eb25cc9b4f"},
{file = "ruff-0.8.0-py3-none-win_arm64.whl", hash = "sha256:ba93e6294e9a737cd726b74b09a6972e36bb511f9a102f1d9a7e1ce94dd206a6"},
{file = "ruff-0.8.0.tar.gz", hash = "sha256:a7ccfe6331bf8c8dad715753e157457faf7351c2b69f62f32c165c2dbcbacd44"},
]

[[package]]
name = "six"
version = "1.16.0"
@@ -1312,19 +1464,18 @@ files = [

[[package]]
name = "storage3"
version = "0.7.7"
version = "0.9.0"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
{file = "storage3-0.7.7-py3-none-any.whl", hash = "sha256:ed80a2546cd0b5c22e2c30ea71096db6c99268daf2958c603488e7d72efb8426"},
{file = "storage3-0.7.7.tar.gz", hash = "sha256:9fba680cf761d139ad764f43f0e91c245d1ce1af2cc3afe716652f835f48f83e"},
{file = "storage3-0.9.0-py3-none-any.whl", hash = "sha256:8b2fb91f0c61583a2f4eac74a8bae67e00d41ff38095c8a6cd3f2ce5e0ab76e7"},
{file = "storage3-0.9.0.tar.gz", hash = "sha256:e16697f60894c94e1d9df0d2e4af783c1b3f7dd08c9013d61978825c624188c4"},
]

[package.dependencies]
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"

[[package]]
name = "strenum"
@@ -1344,36 +1495,47 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]

[[package]]
name = "supabase"
version = "2.7.4"
version = "2.10.0"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "supabase-2.7.4-py3-none-any.whl", hash = "sha256:01815fbc30cac753933d4a44a2529fd13cb7634b56c705c65b12a02c8e75982b"},
{file = "supabase-2.7.4.tar.gz", hash = "sha256:5a979c7711b3c5ce688514fa0afc015780522569494e1a9a9d25d03b7c3d654b"},
{file = "supabase-2.10.0-py3-none-any.whl", hash = "sha256:183fb23c04528593f8f81c24ceb8178f3a56bff40fec7ed873b6c55ebc2e420a"},
{file = "supabase-2.10.0.tar.gz", hash = "sha256:9ac095f8947bf60780e67c0edcbab53e2db3f6f3f022329397b093500bf2607c"},
]

[package.dependencies]
gotrue = ">=1.3,<3.0"
httpx = ">=0.24,<0.28"
postgrest = ">=0.14,<0.17.0"
gotrue = ">=2.10.0,<3.0.0"
httpx = ">=0.26,<0.28"
postgrest = ">=0.18,<0.19"
realtime = ">=2.0.0,<3.0.0"
storage3 = ">=0.5.3,<0.8.0"
supafunc = ">=0.3.1,<0.6.0"
storage3 = ">=0.9.0,<0.10.0"
supafunc = ">=0.7.0,<0.8.0"

[[package]]
name = "supafunc"
version = "0.5.1"
version = "0.7.0"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
{file = "supafunc-0.5.1-py3-none-any.whl", hash = "sha256:b05e99a2b41270211a3f90ec843c04c5f27a5618f2d2d2eb8e07f41eb962a910"},
{file = "supafunc-0.5.1.tar.gz", hash = "sha256:1ae9dce6bd935939c561650e86abb676af9665ecf5d4ffc1c7ec3c4932c84334"},
{file = "supafunc-0.7.0-py3-none-any.whl", hash = "sha256:4160260dc02bdd906be1e2ffd7cb3ae8b74ae437c892bb475352b6a99d9ff8eb"},
{file = "supafunc-0.7.0.tar.gz", hash = "sha256:5b1c415fba1395740b2b4eedd1d786384bd58b98f6333a11ba7889820a48b6a7"},
]

[package.dependencies]
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}

[[package]]
name = "tomli"
version = "2.1.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
]

[[package]]
name = "typing-extensions"
@@ -1690,4 +1852,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "e9b6e5d877eeb9c9f1ebc69dead1985d749facc160afbe61f3bf37e9a6e35aa5"
content-hash = "54bf6e076ec4d09be2307f07240018459dd6594efdc55a2dc2dc1d673184587e"

@@ -8,14 +8,27 @@ packages = [{ include = "autogpt_libs" }]

[tool.poetry.dependencies]
colorama = "^0.4.6"
google-cloud-logging = "^3.8.0"
pydantic = "^2.8.2"
pydantic-settings = "^2.5.2"
pyjwt = "^2.8.0"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.11.3"
pydantic = "^2.10.2"
pydantic-settings = "^2.6.1"
pyjwt = "^2.10.0"
pytest-asyncio = "^0.24.0"
pytest-mock = "^3.14.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.7.2"
supabase = "^2.10.0"

[tool.poetry.group.dev.dependencies]
redis = "^5.2.0"
ruff = "^0.8.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
line-length = 88

[tool.ruff.lint]
extend-select = ["I"] # sort dependencies

@@ -1,64 +0,0 @@
DB_USER=agpt_user
DB_PASS=pass123
DB_NAME=agpt_local
DB_PORT=5433
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}"
PRISMA_SCHEMA="postgres/schema.prisma"

REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_AUTH=false
ENABLE_CREDIT=false
APP_ENV="local"
PYRO_HOST=localhost
SENTRY_DSN=
# This is needed when ENABLE_AUTH is true
SUPABASE_JWT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=

# Reddit
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USERNAME=
REDDIT_PASSWORD=

# Discord
DISCORD_BOT_TOKEN=

# SMTP/Email
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# D-ID
DID_API_KEY=

# Open Weather Map
OPENWEATHERMAP_API_KEY=

# SMTP
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# Medium
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=


# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs
@@ -1,75 +0,0 @@
import glob
import importlib
import os
import re
from pathlib import Path

from autogpt_server.data.block import Block

# Dynamically load all modules under autogpt_server.blocks
AVAILABLE_MODULES = []
current_dir = os.path.dirname(__file__)
modules = glob.glob(os.path.join(current_dir, "*.py"))
modules = [
    Path(f).stem
    for f in modules
    if os.path.isfile(f) and f.endswith(".py") and not f.endswith("__init__.py")
]
for module in modules:
    if not re.match("^[a-z_]+$", module):
        raise ValueError(
            f"Block module {module} error: module name must be lowercase, separated by underscores, and contain only alphabet characters"
        )

    importlib.import_module(f".{module}", package=__name__)
    AVAILABLE_MODULES.append(module)

# Load all Block instances from the available modules
AVAILABLE_BLOCKS = {}


def all_subclasses(clz):
    subclasses = clz.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses


for cls in all_subclasses(Block):
    name = cls.__name__

    if cls.__name__.endswith("Base"):
        continue

    if not cls.__name__.endswith("Block"):
        raise ValueError(
            f"Block class {cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
        )

    block = cls()

    if not isinstance(block.id, str) or len(block.id) != 36:
        raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")

    if block.id in AVAILABLE_BLOCKS:
        raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")

    # Prevent duplicate field name in input_schema and output_schema
    duplicate_field_names = set(block.input_schema.model_fields.keys()) & set(
        block.output_schema.model_fields.keys()
    )
    if duplicate_field_names:
        raise ValueError(
            f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
        )

    for field in block.input_schema.model_fields.values():
        if field.annotation is bool and field.default not in (True, False):
            raise ValueError(f"{block.name} has a boolean field with no default value")

    if block.disabled:
        continue

    AVAILABLE_BLOCKS[block.id] = block

__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
@@ -1,57 +0,0 @@
import json
from enum import Enum

import requests

from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema


class HttpMethod(Enum):
    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"
    OPTIONS = "OPTIONS"
    HEAD = "HEAD"


class SendWebRequestBlock(Block):
    class Input(BlockSchema):
        url: str
        method: HttpMethod = HttpMethod.POST
        headers: dict[str, str] = {}
        body: object = {}

    class Output(BlockSchema):
        response: object
        client_error: object
        server_error: object

    def __init__(self):
        super().__init__(
            id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
            description="This block makes an HTTP request to the given URL.",
            categories={BlockCategory.OUTPUT},
            input_schema=SendWebRequestBlock.Input,
            output_schema=SendWebRequestBlock.Output,
        )

    def run(self, input_data: Input) -> BlockOutput:
        if isinstance(input_data.body, str):
            input_data.body = json.loads(input_data.body)

        response = requests.request(
            input_data.method.value,
            input_data.url,
            headers=input_data.headers,
            json=input_data.body,
        )
        if response.status_code // 100 == 2:
            yield "response", response.json()
        elif response.status_code // 100 == 4:
            yield "client_error", response.json()
        elif response.status_code // 100 == 5:
            yield "server_error", response.json()
        else:
            raise ValueError(f"Unexpected status code: {response.status_code}")
@@ -1,36 +0,0 @@
from typing import Any, List, Tuple

from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import SchemaField


class ListIteratorBlock(Block):
    class Input(BlockSchema):
        items: List[Any] = SchemaField(
            description="The list of items to iterate over",
            placeholder="[1, 2, 3, 4, 5]",
        )

    class Output(BlockSchema):
        item: Tuple[int, Any] = SchemaField(
            description="A tuple with the index and current item in the iteration"
        )

    def __init__(self):
        super().__init__(
            id="f8e7d6c5-b4a3-2c1d-0e9f-8g7h6i5j4k3l",
            input_schema=ListIteratorBlock.Input,
            output_schema=ListIteratorBlock.Output,
            categories={BlockCategory.LOGIC},
            test_input={"items": [1, "two", {"three": 3}, [4, 5]]},
            test_output=[
                ("item", (0, 1)),
                ("item", (1, "two")),
                ("item", (2, {"three": 3})),
                ("item", (3, [4, 5])),
            ],
        )

    def run(self, input_data: Input) -> BlockOutput:
        for index, item in enumerate(input_data.items):
            yield "item", (index, item)
@@ -1,585 +0,0 @@
|
||||
import logging
|
||||
from enum import Enum
|
||||
from json import JSONDecodeError
|
||||
from typing import Any, List, NamedTuple
|
||||
|
||||
import anthropic
|
||||
import ollama
|
||||
import openai
|
||||
from groq import Groq
|
||||
|
||||
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
|
||||
from autogpt_server.util import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
LlmApiKeys = {
|
||||
"openai": BlockSecret("openai_api_key"),
|
||||
"anthropic": BlockSecret("anthropic_api_key"),
|
||||
"groq": BlockSecret("groq_api_key"),
|
||||
"ollama": BlockSecret(value=""),
|
||||
}
|
||||
|
||||
|
||||
class ModelMetadata(NamedTuple):
|
||||
provider: str
|
||||
context_window: int
|
||||
cost_factor: int
|
||||
|
||||
|
||||
class LlmModel(str, Enum):
|
||||
# OpenAI models
|
||||
GPT4O_MINI = "gpt-4o-mini"
|
||||
GPT4O = "gpt-4o"
|
||||
GPT4_TURBO = "gpt-4-turbo"
|
||||
GPT3_5_TURBO = "gpt-3.5-turbo"
|
||||
# Anthropic models
|
||||
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
# Groq models
|
||||
LLAMA3_8B = "llama3-8b-8192"
|
||||
LLAMA3_70B = "llama3-70b-8192"
|
||||
MIXTRAL_8X7B = "mixtral-8x7b-32768"
|
||||
GEMMA_7B = "gemma-7b-it"
|
||||
GEMMA2_9B = "gemma2-9b-it"
|
||||
# New Groq models (Preview)
|
||||
LLAMA3_1_405B = "llama-3.1-405b-reasoning"
|
||||
LLAMA3_1_70B = "llama-3.1-70b-versatile"
|
||||
LLAMA3_1_8B = "llama-3.1-8b-instant"
|
||||
# Ollama models
|
||||
OLLAMA_LLAMA3_8B = "llama3"
|
||||
OLLAMA_LLAMA3_405B = "llama3.1:405b"
|
||||
|
||||
@property
|
||||
def metadata(self) -> ModelMetadata:
|
||||
return MODEL_METADATA[self]
|
||||
|
||||
|
||||
MODEL_METADATA = {
|
||||
LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10),
|
||||
LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12),
|
||||
LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11),
|
||||
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8),
|
||||
LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14),
|
||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13),
|
||||
LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6),
|
||||
LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9),
|
||||
LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7),
|
||||
LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6),
|
||||
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7),
|
||||
LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10),
|
||||
# Limited to 16k during preview
|
||||
LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15),
|
||||
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13),
|
||||
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7),
|
||||
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11),
|
||||
}
|
||||
|
||||
for model in LlmModel:
|
||||
if model not in MODEL_METADATA:
|
||||
raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
|
||||
|
||||
|
||||
class AIStructuredResponseGeneratorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
prompt: str
|
||||
expected_format: dict[str, str]
|
||||
model: LlmModel = LlmModel.GPT4_TURBO
|
||||
api_key: BlockSecret = SecretField(value="")
|
||||
sys_prompt: str = ""
|
||||
retry: int = 3
|
||||
prompt_values: dict[str, str] = SchemaField(
|
||||
advanced=False, default={}, description="Values used to fill in the prompt."
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: dict[str, Any]
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
|
||||
description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIStructuredResponseGeneratorBlock.Input,
|
||||
output_schema=AIStructuredResponseGeneratorBlock.Output,
|
||||
test_input={
|
||||
"model": LlmModel.GPT4_TURBO,
|
||||
"api_key": "fake-api",
|
||||
"expected_format": {
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
},
|
||||
"prompt": "User prompt",
|
||||
},
|
||||
test_output=("response", {"key1": "key1Value", "key2": "key2Value"}),
|
||||
test_mock={
|
||||
"llm_call": lambda *args, **kwargs: json.dumps(
|
||||
{
|
||||
"key1": "key1Value",
|
||||
"key2": "key2Value",
|
||||
}
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def llm_call(
|
||||
api_key: str, model: LlmModel, prompt: list[dict], json_format: bool
|
||||
) -> str:
|
||||
provider = model.metadata.provider
|
||||
|
||||
if provider == "openai":
|
||||
openai.api_key = api_key
|
||||
response_format = {"type": "json_object"} if json_format else None
|
||||
response = openai.chat.completions.create(
|
||||
model=model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
)
|
||||
return response.choices[0].message.content or ""
|
||||
elif provider == "anthropic":
|
||||
system_messages = [p["content"] for p in prompt if p["role"] == "system"]
|
||||
sysprompt = " ".join(system_messages)
|
||||
|
||||
messages = []
|
||||
last_role = None
|
||||
for p in prompt:
|
||||
if p["role"] in ["user", "assistant"]:
|
||||
if p["role"] != last_role:
|
||||
messages.append({"role": p["role"], "content": p["content"]})
|
||||
last_role = p["role"]
|
||||
else:
|
||||
# If the role is the same as the last one, combine the content
|
||||
messages[-1]["content"] += "\n" + p["content"]
|
||||
|
||||
client = anthropic.Anthropic(api_key=api_key)
|
||||
try:
|
||||
response = client.messages.create(
|
||||
model=model.value,
|
||||
max_tokens=4096,
|
||||
system=sysprompt,
|
||||
messages=messages,
|
||||
)
|
||||
return response.content[0].text if response.content else ""
|
||||
except anthropic.APIError as e:
|
||||
error_message = f"Anthropic API error: {str(e)}"
|
||||
logger.error(error_message)
|
||||
raise ValueError(error_message)
|
||||
elif provider == "groq":
|
||||
client = Groq(api_key=api_key)
|
||||
response_format = {"type": "json_object"} if json_format else None
|
||||
response = client.chat.completions.create(
|
||||
model=model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
)
|
||||
return response.choices[0].message.content or ""
|
||||
elif provider == "ollama":
|
||||
response = ollama.generate(
|
||||
model=model.value,
|
||||
prompt=prompt[0]["content"],
|
||||
)
|
||||
return response["response"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported LLM provider: {provider}")
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
prompt = []
|
||||
|
||||
def trim_prompt(s: str) -> str:
|
||||
lines = s.strip().split("\n")
|
||||
return "\n".join([line.strip().lstrip("|") for line in lines])
|
||||
|
||||
values = input_data.prompt_values
|
||||
if values:
|
||||
input_data.prompt = input_data.prompt.format(**values)
|
||||
input_data.sys_prompt = input_data.sys_prompt.format(**values)
|
||||
|
||||
if input_data.sys_prompt:
|
||||
prompt.append({"role": "system", "content": input_data.sys_prompt})
|
||||
|
||||
if input_data.expected_format:
|
||||
expected_format = [
|
||||
f'"{k}": "{v}"' for k, v in input_data.expected_format.items()
|
||||
]
|
||||
format_prompt = ",\n ".join(expected_format)
|
||||
sys_prompt = trim_prompt(
|
||||
f"""
|
||||
|Reply in json format:
|
||||
|{{
|
||||
| {format_prompt}
|
||||
|}}
|
||||
"""
|
||||
)
|
||||
prompt.append({"role": "system", "content": sys_prompt})
|
||||
|
||||
prompt.append({"role": "user", "content": input_data.prompt})
|
||||
|
||||
def parse_response(resp: str) -> tuple[dict[str, Any], str | None]:
|
||||
try:
|
||||
parsed = json.loads(resp)
|
||||
if not isinstance(parsed, dict):
|
||||
return {}, f"Expected a dictionary, but got {type(parsed)}"
|
||||
miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys())
|
||||
if miss_keys:
|
||||
return parsed, f"Missing keys: {miss_keys}"
|
||||
return parsed, None
|
||||
except JSONDecodeError as e:
|
||||
return {}, f"JSON decode error: {e}"
|
||||
|
||||
logger.info(f"LLM request: {prompt}")
|
||||
retry_prompt = ""
|
||||
model = input_data.model
|
||||
api_key = (
|
||||
input_data.api_key.get_secret_value()
|
||||
or LlmApiKeys[model.metadata.provider].get_secret_value()
|
||||
)
|
||||
|
||||
for retry_count in range(input_data.retry):
|
||||
try:
|
||||
response_text = self.llm_call(
|
||||
api_key=api_key,
|
||||
model=model,
|
||||
prompt=prompt,
|
||||
json_format=bool(input_data.expected_format),
|
||||
)
|
||||
logger.info(f"LLM attempt-{retry_count} response: {response_text}")
|
||||
|
||||
if input_data.expected_format:
|
||||
parsed_dict, parsed_error = parse_response(response_text)
|
||||
if not parsed_error:
|
||||
yield "response", {
|
||||
k: (
|
||||
json.loads(v)
|
||||
if isinstance(v, str)
|
||||
and v.startswith("[")
|
||||
and v.endswith("]")
|
||||
else (", ".join(v) if isinstance(v, list) else v)
|
||||
)
|
||||
for k, v in parsed_dict.items()
|
||||
}
|
||||
return
|
||||
else:
|
||||
yield "response", {"response": response_text}
|
||||
return
|
||||
|
||||
retry_prompt = trim_prompt(
|
||||
f"""
|
||||
|This is your previous error response:
|
||||
|--
|
||||
|{response_text}
|
||||
|--
|
||||
|
|
||||
|And this is the error:
|
||||
|--
|
||||
|{parsed_error}
|
||||
|--
|
||||
"""
|
||||
)
|
||||
prompt.append({"role": "user", "content": retry_prompt})
|
||||
except Exception as e:
|
||||
logger.error(f"Error calling LLM: {e}")
|
||||
retry_prompt = f"Error calling LLM: {e}"
|
||||
|
||||
yield "error", retry_prompt
|
||||
|
||||
|
||||
class AITextGeneratorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
prompt: str
|
||||
model: LlmModel = LlmModel.GPT4_TURBO
|
||||
api_key: BlockSecret = SecretField(value="")
|
||||
sys_prompt: str = ""
|
||||
retry: int = 3
|
||||
prompt_values: dict[str, str] = SchemaField(
|
||||
advanced=False, default={}, description="Values used to fill in the prompt."
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: str
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
|
||||
description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AITextGeneratorBlock.Input,
|
||||
output_schema=AITextGeneratorBlock.Output,
|
||||
test_input={"prompt": "User prompt"},
|
||||
test_output=("response", "Response text"),
|
||||
test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def llm_call(input_data: AIStructuredResponseGeneratorBlock.Input) -> str:
|
||||
object_block = AIStructuredResponseGeneratorBlock()
|
||||
for output_name, output_data in object_block.run(input_data):
|
||||
if output_name == "response":
|
||||
return output_data["response"]
|
||||
else:
|
||||
raise output_data
|
||||
raise ValueError("Failed to get a response from the LLM.")
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
object_input_data = AIStructuredResponseGeneratorBlock.Input(
|
||||
**{attr: getattr(input_data, attr) for attr in input_data.model_fields},
|
||||
expected_format={},
|
||||
)
|
||||
yield "response", self.llm_call(object_input_data)
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class AITextSummarizerBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
text: str
|
||||
model: LlmModel = LlmModel.GPT4_TURBO
|
||||
api_key: BlockSecret = SecretField(value="")
|
||||
# TODO: Make this dynamic
|
||||
max_tokens: int = 4000 # Adjust based on the model's context window
|
||||
chunk_overlap: int = 100 # Overlap between chunks to maintain context
|
||||
|
||||
class Output(BlockSchema):
|
||||
summary: str
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
|
||||
description="Utilize a Large Language Model (LLM) to summarize a long text.",
|
||||
categories={BlockCategory.AI, BlockCategory.TEXT},
|
||||
input_schema=AITextSummarizerBlock.Input,
|
||||
output_schema=AITextSummarizerBlock.Output,
|
||||
test_input={"text": "Lorem ipsum..." * 100},
|
||||
test_output=("summary", "Final summary of a long text"),
|
||||
test_mock={
|
||||
"llm_call": lambda input_data: (
|
||||
{"final_summary": "Final summary of a long text"}
|
||||
if "final_summary" in input_data.expected_format
|
||||
else {"summary": "Summary of a chunk of text"}
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
for output in self._run(input_data):
|
||||
yield output
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
def _run(self, input_data: Input) -> BlockOutput:
|
||||
chunks = self._split_text(
|
||||
input_data.text, input_data.max_tokens, input_data.chunk_overlap
|
||||
)
|
||||
summaries = []
|
||||
|
||||
for chunk in chunks:
|
||||
chunk_summary = self._summarize_chunk(chunk, input_data)
|
||||
summaries.append(chunk_summary)
|
||||
|
||||
final_summary = self._combine_summaries(summaries, input_data)
|
||||
yield "summary", final_summary
|
||||
|
||||
@staticmethod
|
||||
def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]:
|
||||
words = text.split()
|
||||
chunks = []
|
||||
chunk_size = max_tokens - overlap
|
||||
|
||||
for i in range(0, len(words), chunk_size):
|
||||
chunk = " ".join(words[i : i + max_tokens])
|
||||
chunks.append(chunk)
|
||||
|
||||
return chunks
|
||||
|
||||
@staticmethod
|
||||
def llm_call(
|
||||
input_data: AIStructuredResponseGeneratorBlock.Input,
|
||||
) -> dict[str, str]:
|
||||
llm_block = AIStructuredResponseGeneratorBlock()
|
||||
for output_name, output_data in llm_block.run(input_data):
|
||||
if output_name == "response":
|
||||
return output_data
|
||||
raise ValueError("Failed to get a response from the LLM.")
|
||||
|
||||
def _summarize_chunk(self, chunk: str, input_data: Input) -> str:
|
||||
prompt = f"Summarize the following text concisely:\n\n{chunk}"
|
||||
|
||||
llm_response = self.llm_call(
|
||||
AIStructuredResponseGeneratorBlock.Input(
|
||||
prompt=prompt,
|
||||
api_key=input_data.api_key,
|
||||
model=input_data.model,
|
||||
expected_format={"summary": "The summary of the given text."},
|
||||
)
|
||||
)
|
||||
|
||||
return llm_response["summary"]
|
||||
|
||||
def _combine_summaries(self, summaries: list[str], input_data: Input) -> str:
|
||||
combined_text = " ".join(summaries)
|
||||
|
||||
if len(combined_text.split()) <= input_data.max_tokens:
|
||||
prompt = (
|
||||
"Provide a final, concise summary of the following summaries:\n\n"
|
||||
+ combined_text
|
||||
)
|
||||
|
||||
llm_response = self.llm_call(
|
||||
AIStructuredResponseGeneratorBlock.Input(
|
||||
prompt=prompt,
|
||||
api_key=input_data.api_key,
|
||||
model=input_data.model,
|
||||
expected_format={
|
||||
"final_summary": "The final summary of all provided summaries."
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
return llm_response["final_summary"]
|
||||
else:
|
||||
# If combined summaries are still too long, recursively summarize
|
||||
return self._run(
|
||||
AITextSummarizerBlock.Input(
|
||||
text=combined_text,
|
||||
api_key=input_data.api_key,
|
||||
model=input_data.model,
|
||||
max_tokens=input_data.max_tokens,
|
||||
chunk_overlap=input_data.chunk_overlap,
|
||||
)
|
||||
).send(None)[
|
||||
1
|
||||
] # Get the first yielded value
|
||||
|
||||
|
||||
class MessageRole(str, Enum):
|
||||
SYSTEM = "system"
|
||||
USER = "user"
|
||||
ASSISTANT = "assistant"
|
||||
|
||||
|
||||
class Message(BlockSchema):
|
||||
role: MessageRole
|
||||
content: str
|
||||
|
||||
|
||||
class AIConversationBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
messages: List[Message] = SchemaField(
|
||||
description="List of messages in the conversation.", min_length=1
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
default=LlmModel.GPT4_TURBO,
|
||||
description="The language model to use for the conversation.",
|
||||
)
|
||||
api_key: BlockSecret = SecretField(
|
||||
value="", description="API key for the chosen language model provider."
|
||||
)
|
||||
max_tokens: int | None = SchemaField(
|
||||
default=None,
|
||||
description="The maximum number of tokens to generate in the chat completion.",
|
||||
ge=1,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: str = SchemaField(
|
||||
description="The model's response to the conversation."
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the API call failed.")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c3d4e5f6-g7h8-i9j0-k1l2-m3n4o5p6q7r8",
|
||||
description="Advanced LLM call that takes a list of messages and sends them to the language model.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIConversationBlock.Input,
|
||||
output_schema=AIConversationBlock.Output,
|
||||
test_input={
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": "Who won the world series in 2020?"},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "The Los Angeles Dodgers won the World Series in 2020.",
|
||||
},
|
||||
{"role": "user", "content": "Where was it played?"},
|
||||
],
|
||||
"model": LlmModel.GPT4_TURBO,
|
||||
"api_key": "test_api_key",
|
||||
},
|
||||
test_output=(
|
||||
"response",
|
||||
"The 2020 World Series was played at Globe Life Field in Arlington, Texas.",
|
||||
),
|
||||
test_mock={
|
||||
"llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas."
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def llm_call(
|
||||
api_key: str,
|
||||
model: LlmModel,
|
||||
messages: List[dict[str, str]],
|
||||
max_tokens: int | None = None,
|
||||
) -> str:
|
||||
provider = model.metadata.provider
|
||||
|
||||
if provider == "openai":
|
||||
openai.api_key = api_key
|
||||
response = openai.chat.completions.create(
|
||||
model=model.value,
|
||||
messages=messages, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
return response.choices[0].message.content or ""
|
||||
elif provider == "anthropic":
|
||||
client = anthropic.Anthropic(api_key=api_key)
|
||||
response = client.messages.create(
|
||||
model=model.value,
|
||||
max_tokens=max_tokens or 4096,
|
||||
messages=messages, # type: ignore
|
||||
)
|
||||
return response.content[0].text if response.content else ""
|
||||
elif provider == "groq":
|
||||
client = Groq(api_key=api_key)
|
||||
response = client.chat.completions.create(
|
||||
model=model.value,
|
||||
messages=messages, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
return response.choices[0].message.content or ""
|
||||
elif provider == "ollama":
|
||||
response = ollama.chat(
|
||||
model=model.value,
|
||||
messages=messages, # type: ignore
|
||||
stream=False, # type: ignore
|
||||
)
|
||||
return response["message"]["content"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported LLM provider: {provider}")
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
api_key = (
|
||||
input_data.api_key.get_secret_value()
|
||||
or LlmApiKeys[input_data.model.metadata.provider].get_secret_value()
|
||||
)
|
||||
|
||||
messages = [message.model_dump() for message in input_data.messages]
|
||||
|
||||
response = self.llm_call(
|
||||
api_key=api_key,
|
||||
model=input_data.model,
|
||||
messages=messages,
|
||||
max_tokens=input_data.max_tokens,
|
||||
)
|
||||
|
||||
yield "response", response
|
||||
except Exception as e:
|
||||
yield "error", f"Error calling LLM: {str(e)}"
|
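To illustrate how `AIConversationBlock.run` turns the typed `Message` models into the plain dicts that the provider SDKs expect, here is a minimal self-contained Pydantic sketch (the `Role`/`Msg` names are stand-ins for `MessageRole`/`Message` above; no API call is made):

```python
from enum import Enum

from pydantic import BaseModel


class Role(str, Enum):  # stand-in for MessageRole
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"


class Msg(BaseModel):  # stand-in for Message
    role: Role
    content: str


conversation = [
    Msg(role=Role.SYSTEM, content="You are a helpful assistant."),
    Msg(role=Role.USER, content="Who won the world series in 2020?"),
]

# The block itself uses plain model_dump(), which works because MessageRole is a str subclass;
# mode="json" is used here only to show the fully plain payload.
payload = [m.model_dump(mode="json") for m in conversation]
print(payload)  # [{'role': 'system', 'content': '...'}, {'role': 'user', 'content': '...'}]
```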
||||
@@ -1,192 +0,0 @@
|
||||
from typing import Any
|
||||
from urllib.parse import quote
|
||||
|
||||
import requests
|
||||
|
||||
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from autogpt_server.data.model import BlockSecret, SecretField
|
||||
|
||||
|
||||
class GetRequest:
|
||||
@classmethod
|
||||
def get_request(cls, url: str, json=False) -> Any:
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
return response.json() if json else response.text
|
||||
|
||||
|
||||
class GetWikipediaSummaryBlock(Block, GetRequest):
|
||||
class Input(BlockSchema):
|
||||
topic: str
|
||||
|
||||
class Output(BlockSchema):
|
||||
summary: str
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
|
||||
description="This block fetches the summary of a given topic from Wikipedia.",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=GetWikipediaSummaryBlock.Input,
|
||||
output_schema=GetWikipediaSummaryBlock.Output,
|
||||
test_input={"topic": "Artificial Intelligence"},
|
||||
test_output=("summary", "summary content"),
|
||||
test_mock={"get_request": lambda url, json: {"extract": "summary content"}},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
topic = input_data.topic
|
||||
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
|
||||
response = self.get_request(url, json=True)
|
||||
yield "summary", response["extract"]
|
||||
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
yield "error", f"HTTP error occurred: {http_err}"
|
||||
|
||||
except requests.RequestException as e:
|
||||
yield "error", f"Request to Wikipedia failed: {e}"
|
||||
|
||||
except KeyError as e:
|
||||
yield "error", f"Error parsing Wikipedia response: {e}"
|
||||
|
||||
|
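The `test_mock` entry above replaces `get_request` during block tests so no HTTP request is made. A similar stub can be applied by hand when exercising the block offline; this sketch assumes the block classes above are importable and is not part of the original test suite:

```python
# Offline check of GetWikipediaSummaryBlock with the HTTP call stubbed out.
block = GetWikipediaSummaryBlock()

# Shadow the GetRequest.get_request classmethod with a canned response on this instance.
block.get_request = lambda url, json=False: {"extract": "summary content"}

for name, value in block.run(GetWikipediaSummaryBlock.Input(topic="Artificial Intelligence")):
    print(name, value)  # -> summary summary content
```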
||||
class SearchTheWebBlock(Block, GetRequest):
|
||||
class Input(BlockSchema):
|
||||
query: str # The search query
|
||||
|
||||
class Output(BlockSchema):
|
||||
results: str # The search results including content from top 5 URLs
|
||||
error: str # Error message if the search fails
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7",
|
||||
description="This block searches the internet for the given search query.",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=SearchTheWebBlock.Input,
|
||||
output_schema=SearchTheWebBlock.Output,
|
||||
test_input={"query": "Artificial Intelligence"},
|
||||
test_output=("results", "search content"),
|
||||
test_mock={"get_request": lambda url, json: "search content"},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
# Encode the search query
|
||||
encoded_query = quote(input_data.query)
|
||||
|
||||
# Prepend the Jina Search URL to the encoded query
|
||||
jina_search_url = f"https://s.jina.ai/{encoded_query}"
|
||||
|
||||
# Make the request to Jina Search
|
||||
response = self.get_request(jina_search_url, json=False)
|
||||
|
||||
# Output the search results
|
||||
yield "results", response
|
||||
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
yield "error", f"HTTP error occurred: {http_err}"
|
||||
|
||||
except requests.RequestException as e:
|
||||
yield "error", f"Request to Jina Search failed: {e}"
|
||||
|
||||
|
||||
class ExtractWebsiteContentBlock(Block, GetRequest):
|
||||
class Input(BlockSchema):
|
||||
url: str # The URL to scrape
|
||||
|
||||
class Output(BlockSchema):
|
||||
content: str # The scraped content from the URL
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a1b2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6", # Unique ID for the block
|
||||
description="This block scrapes the content from the given web URL.",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=ExtractWebsiteContentBlock.Input,
|
||||
output_schema=ExtractWebsiteContentBlock.Output,
|
||||
test_input={"url": "https://en.wikipedia.org/wiki/Artificial_intelligence"},
|
||||
test_output=("content", "scraped content"),
|
||||
test_mock={"get_request": lambda url, json: "scraped content"},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
# Prepend the Jina-ai Reader URL to the input URL
|
||||
jina_url = f"https://r.jina.ai/{input_data.url}"
|
||||
|
||||
# Make the request to Jina-ai Reader
|
||||
response = self.get_request(jina_url, json=False)
|
||||
|
||||
# Output the scraped content
|
||||
yield "content", response
|
||||
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
yield "error", f"HTTP error occurred: {http_err}"
|
||||
|
||||
except requests.RequestException as e:
|
||||
yield "error", f"Request to Jina-ai Reader failed: {e}"
|
||||
|
||||
|
||||
class GetWeatherInformationBlock(Block, GetRequest):
|
||||
class Input(BlockSchema):
|
||||
location: str
|
||||
api_key: BlockSecret = SecretField(key="openweathermap_api_key")
|
||||
use_celsius: bool = True
|
||||
|
||||
class Output(BlockSchema):
|
||||
temperature: str
|
||||
humidity: str
|
||||
condition: str
|
||||
error: str
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="f7a8b2c3-6d4e-5f8b-9e7f-6d4e5f8b9e7f",
|
||||
input_schema=GetWeatherInformationBlock.Input,
|
||||
output_schema=GetWeatherInformationBlock.Output,
|
||||
test_input={
|
||||
"location": "New York",
|
||||
"api_key": "YOUR_API_KEY",
|
||||
"use_celsius": True,
|
||||
},
|
||||
test_output=[
|
||||
("temperature", "21.66"),
|
||||
("humidity", "32"),
|
||||
("condition", "overcast clouds"),
|
||||
],
|
||||
test_mock={
|
||||
"get_request": lambda url, json: {
|
||||
"main": {"temp": 21.66, "humidity": 32},
|
||||
"weather": [{"description": "overcast clouds"}],
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input) -> BlockOutput:
|
||||
try:
|
||||
units = "metric" if input_data.use_celsius else "imperial"
|
||||
api_key = input_data.api_key.get_secret_value()
|
||||
location = input_data.location
|
||||
url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
|
||||
weather_data = self.get_request(url, json=True)
|
||||
|
||||
if "main" in weather_data and "weather" in weather_data:
|
||||
yield "temperature", str(weather_data["main"]["temp"])
|
||||
yield "humidity", str(weather_data["main"]["humidity"])
|
||||
yield "condition", weather_data["weather"][0]["description"]
|
||||
else:
|
||||
yield "error", f"Expected keys not found in response: {weather_data}"
|
||||
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
if http_err.response.status_code == 403:
|
||||
yield "error", "Request to weather API failed: 403 Forbidden. Check your API key and permissions."
|
||||
else:
|
||||
yield "error", f"HTTP error occurred: {http_err}"
|
||||
except requests.RequestException as e:
|
||||
yield "error", f"Request to weather API failed: {e}"
|
||||
except KeyError as e:
|
||||
yield "error", f"Error processing weather data: {e}"
|
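Blocks such as `GetWeatherInformationBlock` yield several named outputs from a single `run` call. A small standalone sketch of grouping such a `(name, value)` stream into a dict of lists, mirroring how `execute_graph_block` aggregates outputs later in this diff:

```python
from collections import defaultdict
from typing import Any, Iterable, Tuple


def collect_outputs(outputs: Iterable[Tuple[str, Any]]) -> dict[str, list[Any]]:
    """Group (name, value) pairs yielded by a block's run() into name -> [values]."""
    grouped: dict[str, list[Any]] = defaultdict(list)
    for name, value in outputs:
        grouped[name].append(value)
    return dict(grouped)


# Canned pairs resembling the weather block's yields:
print(collect_outputs([("temperature", "21.66"), ("humidity", "32"), ("condition", "overcast clouds")]))
```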
||||
@@ -1,59 +0,0 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from contextlib import asynccontextmanager
|
||||
from uuid import uuid4
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from prisma import Prisma
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
load_dotenv()
|
||||
|
||||
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
|
||||
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
|
||||
|
||||
prisma, conn_id = Prisma(auto_register=True), ""
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def connect(call_count=0):
|
||||
global conn_id
|
||||
if not conn_id:
|
||||
conn_id = str(uuid4())
|
||||
|
||||
try:
|
||||
logger.info(f"[Prisma-{conn_id}] Acquiring connection..")
|
||||
if not prisma.is_connected():
|
||||
await prisma.connect()
|
||||
logger.info(f"[Prisma-{conn_id}] Connection acquired!")
|
||||
except Exception as e:
|
||||
if call_count <= 5:
|
||||
logger.info(f"[Prisma-{conn_id}] Connection failed: {e}. Retrying now..")
|
||||
await asyncio.sleep(2**call_count)
|
||||
await connect(call_count + 1)
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
||||
async def disconnect():
|
||||
if prisma.is_connected():
|
||||
logger.info(f"[Prisma-{conn_id}] Releasing connection.")
|
||||
await prisma.disconnect()
|
||||
logger.info(f"[Prisma-{conn_id}] Connection released.")
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def transaction():
|
||||
async with prisma.tx() as tx:
|
||||
yield tx
|
||||
|
||||
|
||||
class BaseDbModel(BaseModel):
|
||||
id: str = Field(default_factory=lambda: str(uuid4()))
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
def set_model_id(cls, id: str) -> str:
|
||||
# In case an empty ID is submitted
|
||||
return id or str(uuid4())
|
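`connect()` above retries with exponential backoff, sleeping `2**call_count` seconds and giving up after five retries. A generic standalone sketch of that pattern, with a caller-supplied coroutine standing in for `prisma.connect()`:

```python
import asyncio


async def connect_with_backoff(do_connect, max_retries: int = 5) -> None:
    """Retry `do_connect()` with delays of 1, 2, 4, 8, 16 seconds before re-raising."""
    for attempt in range(max_retries + 1):
        try:
            await do_connect()
            return
        except Exception:
            if attempt >= max_retries:
                raise
            await asyncio.sleep(2**attempt)


# Usage (placeholder coroutine; the real code awaits prisma.connect()):
# asyncio.run(connect_with_backoff(some_async_connect))
```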
||||
@@ -1,552 +0,0 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Any, Literal
|
||||
|
||||
import prisma.types
|
||||
from prisma.models import AgentGraph, AgentNode, AgentNodeLink
|
||||
from pydantic import BaseModel, PrivateAttr
|
||||
from pydantic_core import PydanticUndefinedType
|
||||
|
||||
from autogpt_server.blocks.basic import AgentInputBlock, AgentOutputBlock
|
||||
from autogpt_server.data.block import BlockInput, get_block, get_blocks
|
||||
from autogpt_server.data.db import BaseDbModel, transaction
|
||||
from autogpt_server.data.user import DEFAULT_USER_ID
|
||||
from autogpt_server.util import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InputSchemaItem(BaseModel):
|
||||
node_id: str
|
||||
description: str | None = None
|
||||
title: str | None = None
|
||||
|
||||
|
||||
class Link(BaseDbModel):
|
||||
source_id: str
|
||||
sink_id: str
|
||||
source_name: str
|
||||
sink_name: str
|
||||
is_static: bool = False
|
||||
|
||||
@staticmethod
|
||||
def from_db(link: AgentNodeLink):
|
||||
return Link(
|
||||
id=link.id,
|
||||
source_name=link.sourceName,
|
||||
source_id=link.agentNodeSourceId,
|
||||
sink_name=link.sinkName,
|
||||
sink_id=link.agentNodeSinkId,
|
||||
is_static=link.isStatic,
|
||||
)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.source_id, self.sink_id, self.source_name, self.sink_name))
|
||||
|
||||
|
||||
class Node(BaseDbModel):
|
||||
block_id: str
|
||||
input_default: BlockInput = {} # dict[input_name, default_value]
|
||||
metadata: dict[str, Any] = {}
|
||||
|
||||
_input_links: list[Link] = PrivateAttr(default=[])
|
||||
_output_links: list[Link] = PrivateAttr(default=[])
|
||||
|
||||
@property
|
||||
def input_links(self) -> list[Link]:
|
||||
return self._input_links
|
||||
|
||||
@property
|
||||
def output_links(self) -> list[Link]:
|
||||
return self._output_links
|
||||
|
||||
@staticmethod
|
||||
def from_db(node: AgentNode):
|
||||
if not node.AgentBlock:
|
||||
raise ValueError(f"Invalid node {node.id}, invalid AgentBlock.")
|
||||
obj = Node(
|
||||
id=node.id,
|
||||
block_id=node.AgentBlock.id,
|
||||
input_default=json.loads(node.constantInput),
|
||||
metadata=json.loads(node.metadata),
|
||||
)
|
||||
obj._input_links = [Link.from_db(link) for link in node.Input or []]
|
||||
obj._output_links = [Link.from_db(link) for link in node.Output or []]
|
||||
return obj
|
||||
|
||||
|
||||
class GraphMeta(BaseDbModel):
|
||||
version: int = 1
|
||||
is_active: bool = True
|
||||
is_template: bool = False
|
||||
|
||||
name: str
|
||||
description: str
|
||||
|
||||
@staticmethod
|
||||
def from_db(graph: AgentGraph):
|
||||
return GraphMeta(
|
||||
id=graph.id,
|
||||
version=graph.version,
|
||||
is_active=graph.isActive,
|
||||
is_template=graph.isTemplate,
|
||||
name=graph.name or "",
|
||||
description=graph.description or "",
|
||||
)
|
||||
|
||||
|
||||
class Graph(GraphMeta):
|
||||
nodes: list[Node]
|
||||
links: list[Link]
|
||||
subgraphs: dict[str, list[str]] = {} # subgraph_id -> [node_id]
|
||||
|
||||
@property
|
||||
def starting_nodes(self) -> list[Node]:
|
||||
outbound_nodes = {link.sink_id for link in self.links}
|
||||
input_nodes = {
|
||||
v.id
|
||||
for v in self.nodes
|
||||
if isinstance(get_block(v.block_id), AgentInputBlock)
|
||||
}
|
||||
return [
|
||||
node
|
||||
for node in self.nodes
|
||||
if node.id not in outbound_nodes or node.id in input_nodes
|
||||
]
|
||||
|
||||
@property
|
||||
def ending_nodes(self) -> list[Node]:
|
||||
return [
|
||||
v for v in self.nodes if isinstance(get_block(v.block_id), AgentOutputBlock)
|
||||
]
|
||||
|
||||
@property
|
||||
def subgraph_map(self) -> dict[str, str]:
|
||||
"""
|
||||
Returns a mapping of node_id to subgraph_id.
|
||||
A node in the main graph will be mapped to the graph's id.
|
||||
"""
|
||||
subgraph_map = {
|
||||
node_id: subgraph_id
|
||||
for subgraph_id, node_ids in self.subgraphs.items()
|
||||
for node_id in node_ids
|
||||
}
|
||||
subgraph_map.update(
|
||||
{node.id: self.id for node in self.nodes if node.id not in subgraph_map}
|
||||
)
|
||||
return subgraph_map
|
||||
|
||||
def reassign_ids(self, reassign_graph_id: bool = False):
|
||||
"""
|
||||
Reassigns all IDs in the graph to new UUIDs.
|
||||
This method can be used before storing a new graph to the database.
|
||||
"""
|
||||
self.validate_graph()
|
||||
|
||||
id_map = {
|
||||
**{node.id: str(uuid.uuid4()) for node in self.nodes},
|
||||
**{subgraph_id: str(uuid.uuid4()) for subgraph_id in self.subgraphs},
|
||||
}
|
||||
|
||||
if reassign_graph_id:
|
||||
self.id = str(uuid.uuid4())
|
||||
|
||||
for node in self.nodes:
|
||||
node.id = id_map[node.id]
|
||||
|
||||
for link in self.links:
|
||||
link.source_id = id_map[link.source_id]
|
||||
link.sink_id = id_map[link.sink_id]
|
||||
|
||||
self.subgraphs = {
|
||||
id_map[subgraph_id]: [id_map[node_id] for node_id in node_ids]
|
||||
for subgraph_id, node_ids in self.subgraphs.items()
|
||||
}
|
||||
|
||||
def validate_graph(self, for_run: bool = False):
|
||||
def sanitize(name):
|
||||
return name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
|
||||
|
||||
# Nodes: required fields are filled or connected, except for InputBlock.
|
||||
for node in self.nodes:
|
||||
block = get_block(node.block_id)
|
||||
if block is None:
|
||||
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
|
||||
|
||||
if not for_run:
|
||||
continue # Skip input completion validation, unless when executing.
|
||||
|
||||
provided_inputs = set(
|
||||
[sanitize(name) for name in node.input_default]
|
||||
+ [sanitize(link.sink_name) for link in node.input_links]
|
||||
)
|
||||
for name in block.input_schema.get_required_fields():
|
||||
if name not in provided_inputs and not isinstance(
|
||||
block, AgentInputBlock
|
||||
):
|
||||
raise ValueError(
|
||||
f"Node {block.name} #{node.id} required input missing: `{name}`"
|
||||
)
|
||||
node_map = {v.id: v for v in self.nodes}
|
||||
|
||||
def is_static_output_block(nid: str) -> bool:
|
||||
bid = node_map[nid].block_id
|
||||
b = get_block(bid)
|
||||
return b.static_output if b else False
|
||||
|
||||
def is_input_output_block(nid: str) -> bool:
|
||||
bid = node_map[nid].block_id
|
||||
b = get_block(bid)
|
||||
return isinstance(b, AgentInputBlock) or isinstance(b, AgentOutputBlock)
|
||||
|
||||
# subgraphs: all nodes in subgraph must be present in the graph.
|
||||
for subgraph_id, node_ids in self.subgraphs.items():
|
||||
for node_id in node_ids:
|
||||
if node_id not in node_map:
|
||||
raise ValueError(f"Subgraph {subgraph_id}'s node {node_id} invalid")
|
||||
subgraph_map = self.subgraph_map
|
||||
|
||||
# Links: links are connected and the connected pin data type are compatible.
|
||||
for link in self.links:
|
||||
source = (link.source_id, link.source_name)
|
||||
sink = (link.sink_id, link.sink_name)
|
||||
suffix = f"Link {source} <-> {sink}"
|
||||
|
||||
for i, (node_id, name) in enumerate([source, sink]):
|
||||
node = node_map.get(node_id)
|
||||
if not node:
|
||||
raise ValueError(
|
||||
f"{suffix}, {node_id} is invalid node id, available nodes: {node_map.keys()}"
|
||||
)
|
||||
|
||||
block = get_block(node.block_id)
|
||||
if not block:
|
||||
blocks = {v.id: v.name for v in get_blocks().values()}
|
||||
raise ValueError(
|
||||
f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}"
|
||||
)
|
||||
|
||||
sanitized_name = sanitize(name)
|
||||
if i == 0:
|
||||
fields = f"Valid output fields: {block.output_schema.get_fields()}"
|
||||
else:
|
||||
fields = f"Valid input fields: {block.input_schema.get_fields()}"
|
||||
if sanitized_name not in fields:
|
||||
raise ValueError(f"{suffix}, `{name}` invalid, {fields}")
|
||||
|
||||
if (
|
||||
subgraph_map.get(link.source_id) != subgraph_map.get(link.sink_id)
|
||||
and not is_input_output_block(link.source_id)
|
||||
and not is_input_output_block(link.sink_id)
|
||||
):
|
||||
raise ValueError(f"{suffix}, Connecting nodes from different subgraph.")
|
||||
|
||||
if is_static_output_block(link.source_id):
|
||||
link.is_static = True # Each value block output should be static.
|
||||
|
||||
# TODO: Add type compatibility check here.
|
||||
|
||||
def get_input_schema(self) -> list[InputSchemaItem]:
|
||||
"""
|
||||
Walks the graph and returns all inputs that are neither:
|
||||
- static, nor
|
||||
- provided by a parent node.
|
||||
"""
|
||||
input_schema = []
|
||||
for node in self.nodes:
|
||||
block = get_block(node.block_id)
|
||||
if not block:
|
||||
continue
|
||||
|
||||
for input_name, input_schema_item in (
|
||||
block.input_schema.jsonschema().get("properties", {}).items()
|
||||
):
|
||||
# Include the input if it has no constant default, no incoming link, and no schema default
|
||||
if (
|
||||
input_name not in node.input_default
|
||||
and not any(
|
||||
link.sink_name == input_name for link in node.input_links
|
||||
)
|
||||
and isinstance(
|
||||
block.input_schema.model_fields.get(input_name).default,
|
||||
PydanticUndefinedType,
|
||||
)
|
||||
):
|
||||
|
||||
input_schema.append(
|
||||
InputSchemaItem(
|
||||
node_id=node.id,
|
||||
description=input_schema_item.get("description"),
|
||||
title=input_schema_item.get("title"),
|
||||
)
|
||||
)
|
||||
|
||||
return input_schema
|
||||
|
||||
@staticmethod
|
||||
def from_db(graph: AgentGraph):
|
||||
nodes = [
|
||||
*(graph.AgentNodes or []),
|
||||
*(
|
||||
node
|
||||
for subgraph in graph.AgentSubGraphs or []
|
||||
for node in subgraph.AgentNodes or []
|
||||
),
|
||||
]
|
||||
return Graph(
|
||||
**GraphMeta.from_db(graph).model_dump(),
|
||||
nodes=[Node.from_db(node) for node in nodes],
|
||||
links=list(
|
||||
{
|
||||
Link.from_db(link)
|
||||
for node in nodes
|
||||
for link in (node.Input or []) + (node.Output or [])
|
||||
}
|
||||
),
|
||||
subgraphs={
|
||||
subgraph.id: [node.id for node in subgraph.AgentNodes or []]
|
||||
for subgraph in graph.AgentSubGraphs or []
|
||||
},
|
||||
)
|
||||
|
||||
|
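`Graph.starting_nodes` treats a node as a starting point when no link feeds into it, and always includes `AgentInputBlock` nodes even if they do have inbound links. A toy sketch of the same selection over plain ids (the ids below are illustrative):

```python
def starting_node_ids(
    node_ids: set[str],
    links: list[tuple[str, str]],  # (source_id, sink_id) pairs
    input_node_ids: set[str],      # nodes backed by AgentInputBlock
) -> set[str]:
    sinks = {sink for _source, sink in links}
    return {n for n in node_ids if n not in sinks or n in input_node_ids}


# "a" feeds "b" and "c"; "c" is an input block, so it still counts as a start.
print(starting_node_ids({"a", "b", "c"}, [("a", "b"), ("a", "c")], {"c"}))  # {'a', 'c'}
```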
||||
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
|
||||
"Input": True,
|
||||
"Output": True,
|
||||
"AgentBlock": True,
|
||||
}
|
||||
|
||||
__SUBGRAPH_INCLUDE = {"AgentNodes": {"include": AGENT_NODE_INCLUDE}}
|
||||
|
||||
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
|
||||
**__SUBGRAPH_INCLUDE,
|
||||
"AgentSubGraphs": {"include": __SUBGRAPH_INCLUDE}, # type: ignore
|
||||
}
|
||||
|
||||
|
||||
# --------------------- Model functions --------------------- #
|
||||
|
||||
|
||||
async def get_node(node_id: str) -> Node:
|
||||
node = await AgentNode.prisma().find_unique_or_raise(
|
||||
where={"id": node_id},
|
||||
include=AGENT_NODE_INCLUDE,
|
||||
)
|
||||
return Node.from_db(node)
|
||||
|
||||
|
||||
async def get_graphs_meta(
|
||||
filter_by: Literal["active", "template"] | None = "active",
|
||||
user_id: str | None = None,
|
||||
) -> list[GraphMeta]:
|
||||
"""
|
||||
Retrieves graph metadata objects.
|
||||
Default behaviour is to get all currently active graphs.
|
||||
|
||||
Args:
|
||||
filter_by: An optional filter to either select templates or active graphs.
|
||||
user_id: Optionally restrict results to graphs owned by this user (ignored when filtering templates).
|
||||
|
||||
Returns:
|
||||
list[GraphMeta]: A list of objects representing the retrieved graph metadata.
|
||||
"""
|
||||
where_clause: prisma.types.AgentGraphWhereInput = {}
|
||||
|
||||
if filter_by == "active":
|
||||
where_clause["isActive"] = True
|
||||
elif filter_by == "template":
|
||||
where_clause["isTemplate"] = True
|
||||
|
||||
if user_id and filter_by != "template":
|
||||
where_clause["userId"] = user_id
|
||||
|
||||
graphs = await AgentGraph.prisma().find_many(
|
||||
where=where_clause,
|
||||
distinct=["id"],
|
||||
order={"version": "desc"},
|
||||
)
|
||||
|
||||
if not graphs:
|
||||
return []
|
||||
|
||||
return [GraphMeta.from_db(graph) for graph in graphs]
|
||||
|
||||
|
||||
async def get_graph(
|
||||
graph_id: str,
|
||||
version: int | None = None,
|
||||
template: bool = False,
|
||||
user_id: str | None = None,
|
||||
) -> Graph | None:
|
||||
"""
|
||||
Retrieves a graph from the DB.
|
||||
Defaults to the version with `is_active` if `version` is not passed,
|
||||
or the latest version with `is_template` if `template=True`.
|
||||
|
||||
Returns `None` if the record is not found.
|
||||
"""
|
||||
where_clause: prisma.types.AgentGraphWhereInput = {
|
||||
"id": graph_id,
|
||||
"isTemplate": template,
|
||||
}
|
||||
if version is not None:
|
||||
where_clause["version"] = version
|
||||
elif not template:
|
||||
where_clause["isActive"] = True
|
||||
|
||||
if user_id and not template:
|
||||
where_clause["userId"] = user_id
|
||||
|
||||
graph = await AgentGraph.prisma().find_first(
|
||||
where=where_clause,
|
||||
include=AGENT_GRAPH_INCLUDE,
|
||||
order={"version": "desc"},
|
||||
)
|
||||
return Graph.from_db(graph) if graph else None
|
||||
|
||||
|
||||
async def set_graph_active_version(graph_id: str, version: int, user_id: str) -> None:
|
||||
# Check if the graph belongs to the user
|
||||
graph = await AgentGraph.prisma().find_first(
|
||||
where={
|
||||
"id": graph_id,
|
||||
"version": version,
|
||||
"userId": user_id,
|
||||
}
|
||||
)
|
||||
if not graph:
|
||||
raise Exception(f"Graph #{graph_id} v{version} not found or not owned by user")
|
||||
|
||||
updated_graph = await AgentGraph.prisma().update(
|
||||
data={"isActive": True},
|
||||
where={
|
||||
"graphVersionId": {"id": graph_id, "version": version},
|
||||
},
|
||||
)
|
||||
if not updated_graph:
|
||||
raise Exception(f"Graph #{graph_id} v{version} not found")
|
||||
|
||||
# Deactivate all other versions
|
||||
await AgentGraph.prisma().update_many(
|
||||
data={"isActive": False},
|
||||
where={"id": graph_id, "version": {"not": version}, "userId": user_id},
|
||||
)
|
||||
|
||||
|
||||
async def get_graph_all_versions(graph_id: str, user_id: str) -> list[Graph]:
|
||||
graph_versions = await AgentGraph.prisma().find_many(
|
||||
where={"id": graph_id, "userId": user_id},
|
||||
order={"version": "desc"},
|
||||
include=AGENT_GRAPH_INCLUDE,
|
||||
)
|
||||
|
||||
if not graph_versions:
|
||||
return []
|
||||
|
||||
return [Graph.from_db(graph) for graph in graph_versions]
|
||||
|
||||
|
||||
async def create_graph(graph: Graph, user_id: str) -> Graph:
|
||||
async with transaction() as tx:
|
||||
await __create_graph(tx, graph, user_id)
|
||||
|
||||
if created_graph := await get_graph(
|
||||
graph.id, graph.version, graph.is_template, user_id=user_id
|
||||
):
|
||||
return created_graph
|
||||
|
||||
raise ValueError(f"Created graph {graph.id} v{graph.version} is not in DB")
|
||||
|
||||
|
||||
async def __create_graph(tx, graph: Graph, user_id: str):
|
||||
await AgentGraph.prisma(tx).create(
|
||||
data={
|
||||
"id": graph.id,
|
||||
"version": graph.version,
|
||||
"name": graph.name,
|
||||
"description": graph.description,
|
||||
"isTemplate": graph.is_template,
|
||||
"isActive": graph.is_active,
|
||||
"userId": user_id,
|
||||
}
|
||||
)
|
||||
|
||||
await asyncio.gather(
|
||||
*[
|
||||
AgentGraph.prisma(tx).create(
|
||||
data={
|
||||
"id": subgraph_id,
|
||||
"agentGraphParentId": graph.id,
|
||||
"version": graph.version,
|
||||
"name": f"SubGraph of {graph.name}",
|
||||
"description": f"Sub-Graph of {graph.id}",
|
||||
"isTemplate": graph.is_template,
|
||||
"isActive": graph.is_active,
|
||||
"userId": user_id,
|
||||
}
|
||||
)
|
||||
for subgraph_id in graph.subgraphs
|
||||
]
|
||||
)
|
||||
|
||||
subgraph_map = graph.subgraph_map
|
||||
|
||||
await asyncio.gather(
|
||||
*[
|
||||
AgentNode.prisma(tx).create(
|
||||
{
|
||||
"id": node.id,
|
||||
"agentBlockId": node.block_id,
|
||||
"agentGraphId": subgraph_map.get(node.id, graph.id),
|
||||
"agentGraphVersion": graph.version,
|
||||
"constantInput": json.dumps(node.input_default),
|
||||
"metadata": json.dumps(node.metadata),
|
||||
}
|
||||
)
|
||||
for node in graph.nodes
|
||||
]
|
||||
)
|
||||
|
||||
await asyncio.gather(
|
||||
*[
|
||||
AgentNodeLink.prisma(tx).create(
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
"sourceName": link.source_name,
|
||||
"sinkName": link.sink_name,
|
||||
"agentNodeSourceId": link.source_id,
|
||||
"agentNodeSinkId": link.sink_id,
|
||||
"isStatic": link.is_static,
|
||||
}
|
||||
)
|
||||
for link in graph.links
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
# --------------------- Helper functions --------------------- #
|
||||
|
||||
|
||||
TEMPLATES_DIR = Path(__file__).parent.parent.parent / "graph_templates"
|
||||
|
||||
|
||||
async def import_packaged_templates() -> None:
|
||||
templates_in_db = await get_graphs_meta(filter_by="template")
|
||||
|
||||
logging.info("Loading templates...")
|
||||
for template_file in TEMPLATES_DIR.glob("*.json"):
|
||||
template_data = json.loads(template_file.read_bytes())
|
||||
|
||||
template = Graph.model_validate(template_data)
|
||||
if not template.is_template:
|
||||
logging.warning(
|
||||
f"pre-packaged graph file {template_file} is not a template"
|
||||
)
|
||||
continue
|
||||
if (
|
||||
exists := next((t for t in templates_in_db if t.id == template.id), None)
|
||||
) and exists.version >= template.version:
|
||||
continue
|
||||
await create_graph(template, DEFAULT_USER_ID)
|
||||
logging.info(f"Loaded template '{template.name}' ({template.id})")
|
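`import_packaged_templates` only imports a packaged template when the database does not already hold that template at the same or a newer version. A minimal sketch of that guard over plain `(id, version)` pairs:

```python
def should_import(template_id: str, template_version: int, in_db: dict[str, int]) -> bool:
    """`in_db` maps template id -> highest version already stored."""
    existing_version = in_db.get(template_id)
    return existing_version is None or existing_version < template_version


print(should_import("t1", 2, {"t1": 2}))  # False: same version already present
print(should_import("t1", 3, {"t1": 2}))  # True: the packaged file is newer
print(should_import("t2", 1, {"t1": 2}))  # True: not in the database yet
```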
||||
@@ -1,140 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Callable, ClassVar, Optional, TypeVar
|
||||
|
||||
from pydantic import BaseModel, Field, GetCoreSchemaHandler
|
||||
from pydantic_core import (
|
||||
CoreSchema,
|
||||
PydanticUndefined,
|
||||
PydanticUndefinedType,
|
||||
core_schema,
|
||||
)
|
||||
|
||||
from autogpt_server.util.settings import Secrets
|
||||
|
||||
T = TypeVar("T")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlockSecret:
|
||||
def __init__(self, key: Optional[str] = None, value: Optional[str] = None):
|
||||
if value is not None:
|
||||
trimmed_value = value.strip()
|
||||
if value != trimmed_value:
|
||||
logger.debug(BlockSecret.TRIMMING_VALUE_MSG)
|
||||
self._value = trimmed_value
|
||||
return
|
||||
|
||||
self._value = self.__get_secret(key)
|
||||
if self._value is None:
|
||||
raise ValueError(f"Secret {key} not found.")
|
||||
trimmed_value = self._value.strip()
|
||||
if self._value != trimmed_value:
|
||||
logger.debug(BlockSecret.TRIMMING_VALUE_MSG)
|
||||
self._value = trimmed_value
|
||||
|
||||
TRIMMING_VALUE_MSG: ClassVar[str] = "Provided secret value got trimmed."
|
||||
STR: ClassVar[str] = "<secret>"
|
||||
SECRETS: ClassVar[Secrets] = Secrets()
|
||||
|
||||
def __repr__(self):
|
||||
return BlockSecret.STR
|
||||
|
||||
def __str__(self):
|
||||
return BlockSecret.STR
|
||||
|
||||
@staticmethod
|
||||
def __get_secret(key: str | None):
|
||||
if not key or not hasattr(BlockSecret.SECRETS, key):
|
||||
return None
|
||||
return getattr(BlockSecret.SECRETS, key)
|
||||
|
||||
def get_secret_value(self):
|
||||
trimmed_value = str(self._value).strip()
|
||||
if self._value != trimmed_value:
|
||||
logger.info(BlockSecret.TRIMMING_VALUE_MSG)
|
||||
return trimmed_value
|
||||
|
||||
@classmethod
|
||||
def parse_value(cls, value: Any) -> BlockSecret:
|
||||
if isinstance(value, BlockSecret):
|
||||
return value
|
||||
return BlockSecret(value=value)
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_json_schema__(
|
||||
cls, source_type: Any, handler: GetCoreSchemaHandler
|
||||
) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "string",
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, source_type: Any, handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
validate_fun = core_schema.no_info_plain_validator_function(cls.parse_value)
|
||||
return core_schema.json_or_python_schema(
|
||||
json_schema=validate_fun,
|
||||
python_schema=validate_fun,
|
||||
serialization=core_schema.plain_serializer_function_ser_schema(
|
||||
lambda val: BlockSecret.STR
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def SecretField(
|
||||
value: Optional[str] = None,
|
||||
key: Optional[str] = None,
|
||||
title: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
placeholder: Optional[str] = None,
|
||||
**kwargs,
|
||||
) -> BlockSecret:
|
||||
return SchemaField(
|
||||
BlockSecret(key=key, value=value),
|
||||
title=title,
|
||||
description=description,
|
||||
placeholder=placeholder,
|
||||
secret=True,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
def SchemaField(
|
||||
default: T | PydanticUndefinedType = PydanticUndefined,
|
||||
*args,
|
||||
default_factory: Optional[Callable[[], T]] = None,
|
||||
title: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
placeholder: Optional[str] = None,
|
||||
advanced: Optional[bool] = None,
|
||||
secret: bool = False,
|
||||
exclude: bool = False,
|
||||
**kwargs,
|
||||
) -> T:
|
||||
json_extra = {
|
||||
k: v
|
||||
for k, v in {
|
||||
"placeholder": placeholder,
|
||||
"secret": secret,
|
||||
"advanced": advanced,
|
||||
}.items()
|
||||
if v is not None
|
||||
}
|
||||
|
||||
return Field(
|
||||
default,
|
||||
*args,
|
||||
default_factory=default_factory,
|
||||
title=title,
|
||||
description=description,
|
||||
exclude=exclude,
|
||||
json_schema_extra=json_extra,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
class ContributorDetails(BaseModel):
|
||||
name: str = Field(title="Name", description="The name of the contributor.")
|
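`SchemaField` forwards `placeholder`, `secret`, and `advanced` into Pydantic's `json_schema_extra`, dropping any that are `None`. A standalone sketch of the same packing with a plain `Field` (the model and field names are illustrative):

```python
from pydantic import BaseModel, Field

extras = {
    k: v
    for k, v in {"placeholder": "Enter a key", "secret": True, "advanced": None}.items()
    if v is not None
}


class ExampleInput(BaseModel):
    api_key: str = Field("", description="API key for the provider.", json_schema_extra=extras)


# The extras surface in the generated JSON schema for the field;
# "advanced" is absent because it was None.
print(ExampleInput.model_json_schema()["properties"]["api_key"])
```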
||||
@@ -1,78 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from autogpt_server.data.execution import ExecutionResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DateTimeEncoder(json.JSONEncoder):
|
||||
def default(self, o):
|
||||
if isinstance(o, datetime):
|
||||
return o.isoformat()
|
||||
return super().default(o)
|
||||
|
||||
|
||||
class AsyncEventQueue(ABC):
|
||||
@abstractmethod
|
||||
async def connect(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def close(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def put(self, execution_result: ExecutionResult):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get(self) -> ExecutionResult | None:
|
||||
pass
|
||||
|
||||
|
||||
class AsyncRedisEventQueue(AsyncEventQueue):
|
||||
|
||||
def __init__(self):
|
||||
self.host = os.getenv("REDIS_HOST", "localhost")
|
||||
self.port = int(os.getenv("REDIS_PORT", "6379"))
|
||||
self.password = os.getenv("REDIS_PASSWORD", "password")
|
||||
self.queue_name = os.getenv("REDIS_QUEUE", "execution_events")
|
||||
self.connection = None
|
||||
|
||||
async def connect(self):
|
||||
if not self.connection:
|
||||
self.connection = Redis(
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
password=self.password,
|
||||
decode_responses=True,
|
||||
)
|
||||
await self.connection.ping()
|
||||
logger.info(f"Connected to Redis on {self.host}:{self.port}")
|
||||
|
||||
async def put(self, execution_result: ExecutionResult):
|
||||
if self.connection:
|
||||
message = json.dumps(execution_result.model_dump(), cls=DateTimeEncoder)
|
||||
logger.info(f"Putting execution result to Redis {message}")
|
||||
await self.connection.lpush(self.queue_name, message) # type: ignore
|
||||
|
||||
async def get(self) -> ExecutionResult | None:
|
||||
if self.connection:
|
||||
message = await self.connection.rpop(self.queue_name) # type: ignore
|
||||
if message is not None and isinstance(message, (str, bytes, bytearray)):
|
||||
data = json.loads(message)
|
||||
logger.info(f"Getting execution result from Redis {data}")
|
||||
return ExecutionResult(**data)
|
||||
return None
|
||||
|
||||
async def close(self):
|
||||
if self.connection:
|
||||
await self.connection.close()
|
||||
self.connection = None
|
||||
logger.info("Closed connection to Redis")
|
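`DateTimeEncoder` lets `json.dumps` serialize the `datetime` fields in an `ExecutionResult` dump as ISO-8601 strings. A standalone usage sketch with an arbitrary payload:

```python
import json
from datetime import datetime


class DateTimeEncoder(json.JSONEncoder):  # same idea as the encoder above
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        return super().default(o)


payload = {"status": "COMPLETED", "ended_at": datetime(2024, 8, 1, 12, 30)}
print(json.dumps(payload, cls=DateTimeEncoder))
# {"status": "COMPLETED", "ended_at": "2024-08-01T12:30:00"}
```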
||||
@@ -1,81 +0,0 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
from prisma.models import AgentGraphExecutionSchedule
|
||||
|
||||
from autogpt_server.data.block import BlockInput
|
||||
from autogpt_server.data.db import BaseDbModel
|
||||
from autogpt_server.util import json
|
||||
|
||||
|
||||
class ExecutionSchedule(BaseDbModel):
|
||||
graph_id: str
|
||||
user_id: str
|
||||
graph_version: int
|
||||
schedule: str
|
||||
is_enabled: bool
|
||||
input_data: BlockInput
|
||||
last_updated: Optional[datetime] = None
|
||||
|
||||
def __init__(self, is_enabled: Optional[bool] = None, **kwargs):
|
||||
kwargs["is_enabled"] = (is_enabled is None) or is_enabled
|
||||
super().__init__(**kwargs)
|
||||
|
||||
@staticmethod
|
||||
def from_db(schedule: AgentGraphExecutionSchedule):
|
||||
return ExecutionSchedule(
|
||||
id=schedule.id,
|
||||
graph_id=schedule.agentGraphId,
|
||||
user_id=schedule.userId,
|
||||
graph_version=schedule.agentGraphVersion,
|
||||
schedule=schedule.schedule,
|
||||
is_enabled=schedule.isEnabled,
|
||||
last_updated=schedule.lastUpdated.replace(tzinfo=None),
|
||||
input_data=json.loads(schedule.inputData),
|
||||
)
|
||||
|
||||
|
||||
async def get_active_schedules(last_fetch_time: datetime) -> list[ExecutionSchedule]:
|
||||
query = AgentGraphExecutionSchedule.prisma().find_many(
|
||||
where={"isEnabled": True, "lastUpdated": {"gt": last_fetch_time}},
|
||||
order={"lastUpdated": "asc"},
|
||||
)
|
||||
return [ExecutionSchedule.from_db(schedule) for schedule in await query]
|
||||
|
||||
|
||||
async def disable_schedule(schedule_id: str):
|
||||
await AgentGraphExecutionSchedule.prisma().update(
|
||||
where={"id": schedule_id}, data={"isEnabled": False}
|
||||
)
|
||||
|
||||
|
||||
async def get_schedules(graph_id: str, user_id: str) -> list[ExecutionSchedule]:
|
||||
query = AgentGraphExecutionSchedule.prisma().find_many(
|
||||
where={
|
||||
"isEnabled": True,
|
||||
"agentGraphId": graph_id,
|
||||
"userId": user_id,
|
||||
},
|
||||
)
|
||||
return [ExecutionSchedule.from_db(schedule) for schedule in await query]
|
||||
|
||||
|
||||
async def add_schedule(schedule: ExecutionSchedule) -> ExecutionSchedule:
|
||||
obj = await AgentGraphExecutionSchedule.prisma().create(
|
||||
data={
|
||||
"id": schedule.id,
|
||||
"userId": schedule.user_id,
|
||||
"agentGraphId": schedule.graph_id,
|
||||
"agentGraphVersion": schedule.graph_version,
|
||||
"schedule": schedule.schedule,
|
||||
"isEnabled": schedule.is_enabled,
|
||||
"inputData": json.dumps(schedule.input_data),
|
||||
}
|
||||
)
|
||||
return ExecutionSchedule.from_db(obj)
|
||||
|
||||
|
||||
async def update_schedule(schedule_id: str, is_enabled: bool, user_id: str):
|
||||
await AgentGraphExecutionSchedule.prisma().update(
|
||||
where={"id": schedule_id}, data={"isEnabled": is_enabled}
|
||||
)
|
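`ExecutionSchedule.__init__` defaults `is_enabled` to enabled unless the caller passes an explicit `False`; the expression `(is_enabled is None) or is_enabled` covers the three cases. Spelled out:

```python
def resolve_is_enabled(is_enabled):
    # Mirrors `(is_enabled is None) or is_enabled` in ExecutionSchedule.__init__
    return (is_enabled is None) or is_enabled


print(resolve_is_enabled(None))   # True  (not specified -> enabled)
print(resolve_is_enabled(True))   # True
print(resolve_is_enabled(False))  # False (only an explicit False disables the schedule)
```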
||||
@@ -1,51 +0,0 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import HTTPException
|
||||
from prisma.models import User
|
||||
|
||||
from autogpt_server.data.db import prisma
|
||||
|
||||
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
|
||||
DEFAULT_EMAIL = "default@example.com"
|
||||
|
||||
|
||||
async def get_or_create_user(user_data: dict) -> User:
|
||||
|
||||
user_id = user_data.get("sub")
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=401, detail="User ID not found in token")
|
||||
|
||||
user_email = user_data.get("email")
|
||||
if not user_email:
|
||||
raise HTTPException(status_code=401, detail="Email not found in token")
|
||||
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
if not user:
|
||||
user = await prisma.user.create(
|
||||
data={
|
||||
"id": user_id,
|
||||
"email": user_email,
|
||||
"name": user_data.get("user_metadata", {}).get("name"),
|
||||
}
|
||||
)
|
||||
return User.model_validate(user)
|
||||
|
||||
|
||||
async def get_user_by_id(user_id: str) -> Optional[User]:
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
return User.model_validate(user) if user else None
|
||||
|
||||
|
||||
async def create_default_user(enable_auth: str) -> Optional[User]:
|
||||
if not enable_auth.lower() == "true":
|
||||
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
|
||||
if not user:
|
||||
user = await prisma.user.create(
|
||||
data={
|
||||
"id": DEFAULT_USER_ID,
|
||||
"email": "default@example.com",
|
||||
"name": "Default User",
|
||||
}
|
||||
)
|
||||
return User.model_validate(user)
|
||||
return None
|
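`get_or_create_user` reads the `sub` and `email` claims, plus an optional `user_metadata.name`, from the decoded token payload. For reference, a hypothetical payload shape that would pass its checks (the exact claim set depends on the auth provider in use):

```python
# Illustrative only; the values are placeholders, not real credentials.
user_data = {
    "sub": "00000000-0000-0000-0000-000000000000",  # becomes User.id
    "email": "user@example.com",                     # becomes User.email
    "user_metadata": {"name": "Example User"},       # optional display name
}
# await get_or_create_user(user_data)  # creates the row if it does not already exist
```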
||||
@@ -1,95 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
|
||||
from autogpt_server.data import schedule as model
|
||||
from autogpt_server.data.block import BlockInput
|
||||
from autogpt_server.executor.manager import ExecutionManager
|
||||
from autogpt_server.util.service import AppService, expose, get_service_client
|
||||
from autogpt_server.util.settings import Config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def log(msg, **kwargs):
|
||||
logger.warning("[ExecutionScheduler] " + msg, **kwargs)
|
||||
|
||||
|
||||
class ExecutionScheduler(AppService):
|
||||
def __init__(self, refresh_interval=10):
|
||||
super().__init__(port=Config().execution_scheduler_port)
|
||||
self.use_db = True
|
||||
self.last_check = datetime.min
|
||||
self.refresh_interval = refresh_interval
|
||||
self.use_redis = False
|
||||
|
||||
@property
|
||||
def execution_manager_client(self) -> ExecutionManager:
|
||||
return get_service_client(ExecutionManager, Config().execution_manager_port)
|
||||
|
||||
def run_service(self):
|
||||
scheduler = BackgroundScheduler()
|
||||
scheduler.start()
|
||||
while True:
|
||||
self.__refresh_jobs_from_db(scheduler)
|
||||
time.sleep(self.refresh_interval)
|
||||
|
||||
def __refresh_jobs_from_db(self, scheduler: BackgroundScheduler):
|
||||
schedules = self.run_and_wait(model.get_active_schedules(self.last_check))
|
||||
for schedule in schedules:
|
||||
if schedule.last_updated:
|
||||
self.last_check = max(self.last_check, schedule.last_updated)
|
||||
|
||||
if not schedule.is_enabled:
|
||||
log(f"Removing recurring job {schedule.id}: {schedule.schedule}")
|
||||
scheduler.remove_job(schedule.id)
|
||||
continue
|
||||
|
||||
log(f"Adding recurring job {schedule.id}: {schedule.schedule}")
|
||||
scheduler.add_job(
|
||||
self.__execute_graph,
|
||||
CronTrigger.from_crontab(schedule.schedule),
|
||||
id=schedule.id,
|
||||
args=[schedule.graph_id, schedule.input_data, schedule.user_id],
|
||||
replace_existing=True,
|
||||
)
|
||||
|
||||
def __execute_graph(self, graph_id: str, input_data: dict, user_id: str):
|
||||
try:
|
||||
log(f"Executing recurring job for graph #{graph_id}")
|
||||
execution_manager = self.execution_manager_client
|
||||
execution_manager.add_execution(graph_id, input_data, user_id)
|
||||
except Exception as e:
|
||||
logger.exception(f"Error executing graph {graph_id}: {e}")
|
||||
|
||||
@expose
|
||||
def update_schedule(self, schedule_id: str, is_enabled: bool, user_id: str) -> str:
|
||||
self.run_and_wait(model.update_schedule(schedule_id, is_enabled, user_id))
|
||||
return schedule_id
|
||||
|
||||
@expose
|
||||
def add_execution_schedule(
|
||||
self,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
cron: str,
|
||||
input_data: BlockInput,
|
||||
user_id: str,
|
||||
) -> str:
|
||||
schedule = model.ExecutionSchedule(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
graph_version=graph_version,
|
||||
schedule=cron,
|
||||
input_data=input_data,
|
||||
)
|
||||
return self.run_and_wait(model.add_schedule(schedule)).id
|
||||
|
||||
@expose
|
||||
def get_execution_schedules(self, graph_id: str, user_id: str) -> dict[str, str]:
|
||||
query = model.get_schedules(graph_id, user_id=user_id)
|
||||
schedules: list[model.ExecutionSchedule] = self.run_and_wait(query)
|
||||
return {v.id: v.schedule for v in schedules}
|
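`__refresh_jobs_from_db` registers each enabled schedule as an APScheduler cron job. A minimal standalone sketch of the same APScheduler calls; the crontab string, job id, and `tick` callback are placeholders:

```python
import time

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger


def tick() -> None:
    print("would call execution_manager.add_execution(...) here")


scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(
    tick,
    CronTrigger.from_crontab("*/5 * * * *"),  # every five minutes
    id="example-schedule-id",
    replace_existing=True,
)
time.sleep(1)          # keep the process alive briefly for the demo
scheduler.shutdown()
```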
||||
@@ -1,4 +0,0 @@
|
||||
from .rest_api import AgentServer
|
||||
from .ws_api import WebsocketServer
|
||||
|
||||
__all__ = ["AgentServer", "WebsocketServer"]
|
||||
@@ -1,648 +0,0 @@
|
||||
import inspect
|
||||
from collections import defaultdict
|
||||
from contextlib import asynccontextmanager
|
||||
from functools import wraps
|
||||
from typing import Annotated, Any, Dict
|
||||
|
||||
import uvicorn
|
||||
from autogpt_libs.auth.middleware import auth_middleware
|
||||
from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from autogpt_server.data import block, db
|
||||
from autogpt_server.data import execution as execution_db
|
||||
from autogpt_server.data import graph as graph_db
|
||||
from autogpt_server.data import user as user_db
|
||||
from autogpt_server.data.block import BlockInput, CompletedBlockOutput
|
||||
from autogpt_server.data.credit import get_block_costs, get_user_credit_model
|
||||
from autogpt_server.data.queue import AsyncEventQueue, AsyncRedisEventQueue
|
||||
from autogpt_server.data.user import get_or_create_user
|
||||
from autogpt_server.executor import ExecutionManager, ExecutionScheduler
|
||||
from autogpt_server.server.model import CreateGraph, SetGraphActiveVersion
|
||||
from autogpt_server.util.lock import KeyedMutex
|
||||
from autogpt_server.util.service import AppService, expose, get_service_client
|
||||
from autogpt_server.util.settings import Config, Settings
|
||||
|
||||
from .utils import get_user_id
|
||||
|
||||
settings = Settings()
|
||||
|
||||
|
||||
class AgentServer(AppService):
|
||||
mutex = KeyedMutex()
|
||||
use_redis = True
|
||||
_test_dependency_overrides = {}
|
||||
_user_credit_model = get_user_credit_model()
|
||||
|
||||
def __init__(self, event_queue: AsyncEventQueue | None = None):
|
||||
super().__init__(port=Config().agent_server_port)
|
||||
self.event_queue = event_queue or AsyncRedisEventQueue()
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(self, _: FastAPI):
|
||||
await db.connect()
|
||||
self.run_and_wait(self.event_queue.connect())
|
||||
await block.initialize_blocks()
|
||||
if await user_db.create_default_user(settings.config.enable_auth):
|
||||
await graph_db.import_packaged_templates()
|
||||
yield
|
||||
await self.event_queue.close()
|
||||
await db.disconnect()
|
||||
|
||||
def run_service(self):
|
||||
app = FastAPI(
|
||||
title="AutoGPT Agent Server",
|
||||
description=(
|
||||
"This server is used to execute agents that are created by the "
|
||||
"AutoGPT system."
|
||||
),
|
||||
summary="AutoGPT Agent Server",
|
||||
version="0.1",
|
||||
lifespan=self.lifespan,
|
||||
)
|
||||
|
||||
if self._test_dependency_overrides:
|
||||
app.dependency_overrides.update(self._test_dependency_overrides)
|
||||
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=["*"], # Allows all origins
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"], # Allows all methods
|
||||
allow_headers=["*"], # Allows all headers
|
||||
)
|
||||
|
||||
# Define the API routes
|
||||
api_router = APIRouter(prefix="/api")
|
||||
api_router.dependencies.append(Depends(auth_middleware))
|
||||
|
||||
# Import & Attach sub-routers
|
||||
import autogpt_server.server.routers.analytics
|
||||
import autogpt_server.server.routers.integrations
|
||||
|
||||
api_router.include_router(
|
||||
autogpt_server.server.routers.integrations.router,
|
||||
prefix="/integrations",
|
||||
tags=["integrations"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
|
||||
api_router.include_router(
|
||||
autogpt_server.server.routers.analytics.router,
|
||||
prefix="/analytics",
|
||||
tags=["analytics"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
|
||||
api_router.add_api_route(
|
||||
path="/auth/user",
|
||||
endpoint=self.get_or_create_user_route,
|
||||
methods=["POST"],
|
||||
tags=["auth"],
|
||||
)
|
||||
|
||||
api_router.add_api_route(
|
||||
path="/blocks",
|
||||
endpoint=self.get_graph_blocks,
|
||||
methods=["GET"],
|
||||
tags=["blocks"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/blocks/{block_id}/execute",
|
||||
endpoint=self.execute_graph_block,
|
||||
methods=["POST"],
|
||||
tags=["blocks"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs",
|
||||
endpoint=self.get_graphs,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/templates",
|
||||
endpoint=self.get_templates,
|
||||
methods=["GET"],
|
||||
tags=["templates", "graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs",
|
||||
endpoint=self.create_new_graph,
|
||||
methods=["POST"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/templates",
|
||||
endpoint=self.create_new_template,
|
||||
methods=["POST"],
|
||||
tags=["templates", "graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}",
|
||||
endpoint=self.get_graph,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/templates/{graph_id}",
|
||||
endpoint=self.get_template,
|
||||
methods=["GET"],
|
||||
tags=["templates", "graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}",
|
||||
endpoint=self.update_graph,
|
||||
methods=["PUT"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/templates/{graph_id}",
|
||||
endpoint=self.update_graph,
|
||||
methods=["PUT"],
|
||||
tags=["templates", "graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/versions",
|
||||
endpoint=self.get_graph_all_versions,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/templates/{graph_id}/versions",
|
||||
endpoint=self.get_graph_all_versions,
|
||||
methods=["GET"],
|
||||
tags=["templates", "graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/versions/{version}",
|
||||
endpoint=self.get_graph,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/versions/active",
|
||||
endpoint=self.set_graph_active_version,
|
||||
methods=["PUT"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/input_schema",
|
||||
endpoint=self.get_graph_input_schema,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/execute",
|
||||
endpoint=self.execute_graph,
|
||||
methods=["POST"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/executions",
|
||||
endpoint=self.list_graph_runs,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/executions/{graph_exec_id}",
|
||||
endpoint=self.get_graph_run_node_execution_results,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/executions/{graph_exec_id}/stop",
|
||||
endpoint=self.stop_graph_run,
|
||||
methods=["POST"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/schedules",
|
||||
endpoint=self.create_schedule,
|
||||
methods=["POST"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/{graph_id}/schedules",
|
||||
endpoint=self.get_execution_schedules,
|
||||
methods=["GET"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/graphs/schedules/{schedule_id}",
|
||||
endpoint=self.update_schedule,
|
||||
methods=["PUT"],
|
||||
tags=["graphs"],
|
||||
)
|
||||
api_router.add_api_route(
|
||||
path="/credits",
|
||||
endpoint=self.get_user_credits,
|
||||
methods=["GET"],
|
||||
)
|
||||
|
||||
api_router.add_api_route(
|
||||
path="/settings",
|
||||
endpoint=self.update_configuration,
|
||||
methods=["POST"],
|
||||
tags=["settings"],
|
||||
)
|
||||
|
||||
app.add_exception_handler(500, self.handle_internal_http_error)
|
||||
|
||||
app.include_router(api_router)
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=Config().agent_api_port, log_config=None)
|
||||
|
||||
def set_test_dependency_overrides(self, overrides: dict):
|
||||
self._test_dependency_overrides = overrides
|
||||
|
||||
def _apply_overrides_to_methods(self):
|
||||
for attr_name in dir(self):
|
||||
attr = getattr(self, attr_name)
|
||||
if callable(attr) and hasattr(attr, "__annotations__"):
|
||||
setattr(self, attr_name, self._override_method(attr))
|
||||
|
||||
# TODO: fix this with some proper refactoring of the server
|
||||
def _override_method(self, method):
|
||||
@wraps(method)
|
||||
async def wrapper(*args, **kwargs):
|
||||
sig = inspect.signature(method)
|
||||
for param_name, param in sig.parameters.items():
|
||||
if param.annotation is inspect.Parameter.empty:
|
||||
continue
|
||||
if isinstance(param.annotation, Depends) or ( # type: ignore
|
||||
isinstance(param.annotation, type) and issubclass(param.annotation, Depends) # type: ignore
|
||||
):
|
||||
dependency = param.annotation.dependency if isinstance(param.annotation, Depends) else param.annotation # type: ignore
|
||||
if dependency in self._test_dependency_overrides:
|
||||
kwargs[param_name] = self._test_dependency_overrides[
|
||||
dependency
|
||||
]()
|
||||
return await method(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
@property
|
||||
def execution_manager_client(self) -> ExecutionManager:
|
||||
return get_service_client(ExecutionManager, Config().execution_manager_port)
|
||||
|
||||
@property
|
||||
def execution_scheduler_client(self) -> ExecutionScheduler:
|
||||
return get_service_client(ExecutionScheduler, Config().execution_scheduler_port)
|
||||
|
||||
@classmethod
|
||||
def handle_internal_http_error(cls, request: Request, exc: Exception):
|
||||
return JSONResponse(
|
||||
content={
|
||||
"message": f"{request.method} {request.url.path} failed",
|
||||
"error": str(exc),
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def get_or_create_user_route(cls, user_data: dict = Depends(auth_middleware)):
|
||||
user = await get_or_create_user(user_data)
|
||||
return user.model_dump()
|
||||
|
||||
@classmethod
|
||||
def get_graph_blocks(cls) -> list[dict[Any, Any]]:
|
||||
blocks = block.get_blocks()
|
||||
costs = get_block_costs()
|
||||
return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks.values()]
|
||||
|
||||
@classmethod
|
||||
def execute_graph_block(
|
||||
cls, block_id: str, data: BlockInput
|
||||
) -> CompletedBlockOutput:
|
||||
obj = block.get_block(block_id)
|
||||
if not obj:
|
||||
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
|
||||
|
||||
output = defaultdict(list)
|
||||
for name, output_data in obj.execute(data):
|
||||
output[name].append(output_data)
|
||||
return output
|
||||
|
||||
@classmethod
|
||||
async def get_graphs(
|
||||
cls, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> list[graph_db.GraphMeta]:
|
||||
return await graph_db.get_graphs_meta(filter_by="active", user_id=user_id)
|
||||
|
||||
@classmethod
|
||||
async def get_templates(cls) -> list[graph_db.GraphMeta]:
|
||||
return await graph_db.get_graphs_meta(filter_by="template")
|
||||
|
||||
@classmethod
|
||||
async def get_graph(
|
||||
cls,
|
||||
graph_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
version: int | None = None,
|
||||
) -> graph_db.Graph:
|
||||
graph = await graph_db.get_graph(graph_id, version, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
return graph
|
||||
|
||||
@classmethod
|
||||
async def get_template(
|
||||
cls, graph_id: str, version: int | None = None
|
||||
) -> graph_db.Graph:
|
||||
graph = await graph_db.get_graph(graph_id, version, template=True)
|
||||
if not graph:
|
||||
raise HTTPException(
|
||||
status_code=404, detail=f"Template #{graph_id} not found."
|
||||
)
|
||||
return graph
|
||||
|
||||
@classmethod
|
||||
async def get_graph_all_versions(
|
||||
cls, graph_id: str, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> list[graph_db.Graph]:
|
||||
graphs = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
|
||||
if not graphs:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
return graphs
|
||||
|
||||
@classmethod
|
||||
async def create_new_graph(
|
||||
cls, create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> graph_db.Graph:
|
||||
return await cls.create_graph(create_graph, is_template=False, user_id=user_id)
|
||||
|
||||
@classmethod
|
||||
async def create_new_template(
|
||||
cls, create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> graph_db.Graph:
|
||||
return await cls.create_graph(create_graph, is_template=True, user_id=user_id)
|
||||
|
||||
@classmethod
|
||||
async def create_graph(
|
||||
cls,
|
||||
create_graph: CreateGraph,
|
||||
is_template: bool,
|
||||
# user_id doesn't have to be annotated like on other endpoints,
|
||||
# because create_graph isn't used directly as an endpoint
|
||||
user_id: str,
|
||||
) -> graph_db.Graph:
|
||||
if create_graph.graph:
|
||||
graph = create_graph.graph
|
||||
elif create_graph.template_id:
|
||||
# Create a new graph from a template
|
||||
graph = await graph_db.get_graph(
|
||||
create_graph.template_id,
|
||||
create_graph.template_version,
|
||||
template=True,
|
||||
user_id=user_id,
|
||||
)
|
||||
if not graph:
|
||||
raise HTTPException(
|
||||
400, detail=f"Template #{create_graph.template_id} not found"
|
||||
)
|
||||
graph.version = 1
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400, detail="Either graph or template_id must be provided."
|
||||
)
|
||||
|
||||
graph.is_template = is_template
|
||||
graph.is_active = not is_template
|
||||
graph.reassign_ids(reassign_graph_id=True)
|
||||
|
||||
return await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
@classmethod
|
||||
async def update_graph(
|
||||
cls,
|
||||
graph_id: str,
|
||||
graph: graph_db.Graph,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> graph_db.Graph:
|
||||
# Sanity check
|
||||
if graph.id and graph.id != graph_id:
|
||||
raise HTTPException(400, detail="Graph ID does not match ID in URI")
|
||||
|
||||
# Determine new version
|
||||
existing_versions = await graph_db.get_graph_all_versions(
|
||||
graph_id, user_id=user_id
|
||||
)
|
||||
if not existing_versions:
|
||||
raise HTTPException(404, detail=f"Graph #{graph_id} not found")
|
||||
latest_version_number = max(g.version for g in existing_versions)
|
||||
graph.version = latest_version_number + 1
|
||||
|
||||
latest_version_graph = next(
|
||||
v for v in existing_versions if v.version == latest_version_number
|
||||
)
|
||||
if latest_version_graph.is_template != graph.is_template:
|
||||
raise HTTPException(
|
||||
400, detail="Changing is_template on an existing graph is forbidden"
|
||||
)
|
||||
graph.is_active = not graph.is_template
|
||||
graph.reassign_ids()
|
||||
|
||||
new_graph_version = await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
if new_graph_version.is_active:
|
||||
# Ensure new version is the only active version
|
||||
await graph_db.set_graph_active_version(
|
||||
graph_id=graph_id, version=new_graph_version.version, user_id=user_id
|
||||
)
|
||||
|
||||
return new_graph_version
|
||||
|
||||
@classmethod
|
||||
async def set_graph_active_version(
|
||||
cls,
|
||||
graph_id: str,
|
||||
request_body: SetGraphActiveVersion,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
):
|
||||
new_active_version = request_body.active_graph_version
|
||||
if not await graph_db.get_graph(graph_id, new_active_version, user_id=user_id):
|
||||
raise HTTPException(
|
||||
404, f"Graph #{graph_id} v{new_active_version} not found"
|
||||
)
|
||||
await graph_db.set_graph_active_version(
|
||||
graph_id=graph_id,
|
||||
version=request_body.active_graph_version,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
async def execute_graph(
|
||||
self,
|
||||
graph_id: str,
|
||||
node_input: dict[Any, Any],
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[str, Any]: # FIXME: add proper return type
|
||||
try:
|
||||
graph_exec = self.execution_manager_client.add_execution(
|
||||
graph_id, node_input, user_id=user_id
|
||||
)
|
||||
return {"id": graph_exec["graph_exec_id"]}
|
||||
except Exception as e:
|
||||
msg = e.__str__().encode().decode("unicode_escape")
|
||||
raise HTTPException(status_code=400, detail=msg)
|
||||
|
||||
async def stop_graph_run(
|
||||
self, graph_exec_id: str, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> list[execution_db.ExecutionResult]:
|
||||
if not await execution_db.get_graph_execution(graph_exec_id, user_id):
|
||||
raise HTTPException(
|
||||
404, detail=f"Agent execution #{graph_exec_id} not found"
|
||||
)
|
||||
|
||||
self.execution_manager_client.cancel_execution(graph_exec_id)
|
||||
|
||||
# Retrieve & return canceled graph execution in its final state
|
||||
return await execution_db.get_execution_results(graph_exec_id)
|
||||
|
||||
@classmethod
|
||||
async def get_graph_input_schema(
|
||||
cls,
|
||||
graph_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> list[graph_db.InputSchemaItem]:
|
||||
try:
|
||||
graph = await graph_db.get_graph(graph_id, user_id=user_id)
|
||||
return graph.get_input_schema() if graph else []
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
|
||||
@classmethod
|
||||
async def list_graph_runs(
|
||||
cls,
|
||||
graph_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
graph_version: int | None = None,
|
||||
) -> list[str]:
|
||||
graph = await graph_db.get_graph(graph_id, graph_version, user_id=user_id)
|
||||
if not graph:
|
||||
rev = "" if graph_version is None else f" v{graph_version}"
|
||||
raise HTTPException(
|
||||
status_code=404, detail=f"Agent #{graph_id}{rev} not found."
|
||||
)
|
||||
|
||||
return await execution_db.list_executions(graph_id, graph_version)
|
||||
|
||||
@classmethod
|
||||
async def get_graph_run_status(
|
||||
cls,
|
||||
graph_id: str,
|
||||
graph_exec_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> execution_db.ExecutionStatus:
|
||||
graph = await graph_db.get_graph(graph_id, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
|
||||
execution = await execution_db.get_graph_execution(graph_exec_id, user_id)
|
||||
if not execution:
|
||||
raise HTTPException(
|
||||
status_code=404, detail=f"Execution #{graph_exec_id} not found."
|
||||
)
|
||||
|
||||
return execution.executionStatus
|
||||
|
||||
@classmethod
|
||||
async def get_graph_run_node_execution_results(
|
||||
cls,
|
||||
graph_id: str,
|
||||
graph_exec_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> list[execution_db.ExecutionResult]:
|
||||
graph = await graph_db.get_graph(graph_id, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
|
||||
return await execution_db.get_execution_results(graph_exec_id)
|
||||
|
||||
async def create_schedule(
|
||||
self,
|
||||
graph_id: str,
|
||||
cron: str,
|
||||
input_data: dict[Any, Any],
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[Any, Any]:
|
||||
graph = await graph_db.get_graph(graph_id, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
execution_scheduler = self.execution_scheduler_client
|
||||
return {
|
||||
"id": execution_scheduler.add_execution_schedule(
|
||||
graph_id, graph.version, cron, input_data, user_id=user_id
|
||||
)
|
||||
}
|
||||
|
||||
def update_schedule(
|
||||
self,
|
||||
schedule_id: str,
|
||||
input_data: dict[Any, Any],
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[Any, Any]:
|
||||
execution_scheduler = self.execution_scheduler_client
|
||||
is_enabled = input_data.get("is_enabled", False)
|
||||
execution_scheduler.update_schedule(schedule_id, is_enabled, user_id=user_id)
|
||||
return {"id": schedule_id}
|
||||
|
||||
async def get_user_credits(
|
||||
self, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> dict[str, int]:
|
||||
return {"credits": await self._user_credit_model.get_or_refill_credit(user_id)}
|
||||
|
||||
def get_execution_schedules(
|
||||
self, graph_id: str, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> dict[str, str]:
|
||||
execution_scheduler = self.execution_scheduler_client
|
||||
return execution_scheduler.get_execution_schedules(graph_id, user_id)
|
||||
|
||||
@expose
|
||||
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
|
||||
execution_result = execution_db.ExecutionResult(**execution_result_dict)
|
||||
self.run_and_wait(self.event_queue.put(execution_result))
|
||||
|
||||
@expose
|
||||
def acquire_lock(self, key: Any):
|
||||
self.mutex.lock(key)
|
||||
|
||||
@expose
|
||||
def release_lock(self, key: Any):
|
||||
self.mutex.unlock(key)
|
||||
|
||||
@classmethod
|
||||
def update_configuration(
|
||||
cls,
|
||||
updated_settings: Annotated[
|
||||
Dict[str, Any],
|
||||
Body(
|
||||
examples=[
|
||||
{
|
||||
"config": {
|
||||
"num_graph_workers": 10,
|
||||
"num_node_workers": 10,
|
||||
}
|
||||
}
|
||||
]
|
||||
),
|
||||
],
|
||||
):
|
||||
settings = Settings()
|
||||
try:
|
||||
updated_fields: dict[Any, Any] = {"config": [], "secrets": []}
|
||||
for key, value in updated_settings.get("config", {}).items():
|
||||
if hasattr(settings.config, key):
|
||||
setattr(settings.config, key, value)
|
||||
updated_fields["config"].append(key)
|
||||
for key, value in updated_settings.get("secrets", {}).items():
|
||||
if hasattr(settings.secrets, key):
|
||||
setattr(settings.secrets, key, value)
|
||||
updated_fields["secrets"].append(key)
|
||||
settings.save()
|
||||
return {
|
||||
"message": "Settings updated successfully",
|
||||
"updated_fields": updated_fields,
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
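For reference, the payload accepted by update_configuration mirrors the Body example above: optional "config" and "secrets" objects whose keys must match existing fields on Settings; unknown keys are skipped. A minimal sketch of exercising it directly — the enclosing class is referred to as AgentServer here, which is an assumption about its name:

payload = {
    "config": {
        "num_graph_workers": 10,
        "num_node_workers": 10,
    },
    # Keys that don't exist on settings.config / settings.secrets are silently ignored.
    "secrets": {},
}

result = AgentServer.update_configuration(payload)
# -> {"message": "Settings updated successfully",
#     "updated_fields": {"config": ["num_graph_workers", "num_node_workers"], "secrets": []}}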
@@ -1,152 +0,0 @@
import logging
from typing import Annotated, Literal

from autogpt_libs.supabase_integration_credentials_store import (
    SupabaseIntegrationCredentialsStore,
)
from autogpt_libs.supabase_integration_credentials_store.types import (
    Credentials,
    OAuth2Credentials,
)
from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request
from pydantic import BaseModel
from supabase import Client

from autogpt_server.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler
from autogpt_server.util.settings import Settings

from ..utils import get_supabase, get_user_id

logger = logging.getLogger(__name__)
settings = Settings()
router = APIRouter()


def get_store(supabase: Client = Depends(get_supabase)):
    return SupabaseIntegrationCredentialsStore(supabase)


class LoginResponse(BaseModel):
    login_url: str


@router.get("/{provider}/login")
async def login(
    provider: Annotated[str, Path(title="The provider to initiate an OAuth flow for")],
    user_id: Annotated[str, Depends(get_user_id)],
    request: Request,
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
    scopes: Annotated[
        str, Query(title="Comma-separated list of authorization scopes")
    ] = "",
) -> LoginResponse:
    handler = _get_provider_oauth_handler(request, provider)

    # Generate and store a secure random state token
    state = await store.store_state_token(user_id, provider)

    requested_scopes = scopes.split(",") if scopes else []
    login_url = handler.get_login_url(requested_scopes, state)

    return LoginResponse(login_url=login_url)


class CredentialsMetaResponse(BaseModel):
    id: str
    type: Literal["oauth2", "api_key"]
    title: str | None
    scopes: list[str] | None
    username: str | None


@router.post("/{provider}/callback")
async def callback(
    provider: Annotated[str, Path(title="The target provider for this OAuth exchange")],
    code: Annotated[str, Body(title="Authorization code acquired by user login")],
    state_token: Annotated[str, Body(title="Anti-CSRF nonce")],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
    user_id: Annotated[str, Depends(get_user_id)],
    request: Request,
) -> CredentialsMetaResponse:
    handler = _get_provider_oauth_handler(request, provider)

    # Verify the state token
    if not await store.verify_state_token(user_id, state_token, provider):
        raise HTTPException(status_code=400, detail="Invalid or expired state token")

    try:
        credentials = handler.exchange_code_for_tokens(code)
    except Exception as e:
        logger.warning(f"Code->Token exchange failed for provider {provider}: {e}")
        raise HTTPException(status_code=400, detail=str(e))

    # TODO: Allow specifying `title` to set on `credentials`
    store.add_creds(user_id, credentials)
    return CredentialsMetaResponse(
        id=credentials.id,
        type=credentials.type,
        title=credentials.title,
        scopes=credentials.scopes,
        username=credentials.username,
    )


@router.get("/{provider}/credentials")
async def list_credentials(
    provider: Annotated[str, Path(title="The provider to list credentials for")],
    user_id: Annotated[str, Depends(get_user_id)],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> list[CredentialsMetaResponse]:
    credentials = store.get_creds_by_provider(user_id, provider)
    return [
        CredentialsMetaResponse(
            id=cred.id,
            type=cred.type,
            title=cred.title,
            scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
        )
        for cred in credentials
    ]


@router.get("/{provider}/credentials/{cred_id}")
async def get_credential(
    provider: Annotated[str, Path(title="The provider to retrieve credentials for")],
    cred_id: Annotated[str, Path(title="The ID of the credentials to retrieve")],
    user_id: Annotated[str, Depends(get_user_id)],
    store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> Credentials:
    credential = store.get_creds_by_id(user_id, cred_id)
    if not credential:
        raise HTTPException(status_code=404, detail="Credentials not found")
    if credential.provider != provider:
        raise HTTPException(
            status_code=404, detail="Credentials do not match the specified provider"
        )
    return credential


# -------- UTILITIES --------- #


def _get_provider_oauth_handler(req: Request, provider_name: str) -> BaseOAuthHandler:
    if provider_name not in HANDLERS_BY_NAME:
        raise HTTPException(
            status_code=404, detail=f"Unknown provider '{provider_name}'"
        )

    client_id = getattr(settings.secrets, f"{provider_name}_client_id")
    client_secret = getattr(settings.secrets, f"{provider_name}_client_secret")
    if not (client_id and client_secret):
        raise HTTPException(
            status_code=501,
            detail=f"Integration with provider '{provider_name}' is not configured",
        )

    handler_class = HANDLERS_BY_NAME[provider_name]
    return handler_class(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=str(req.url_for("callback", provider=provider_name)),
    )
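A minimal sketch of how a router like this is typically mounted on the FastAPI app; the import path and the /integrations prefix below are assumptions for illustration, not taken from this diff:

from fastapi import FastAPI

# Module path and prefix are assumptions for illustration.
from autogpt_server.server.integrations.router import router as integrations_router

app = FastAPI()
app.include_router(integrations_router, prefix="/integrations", tags=["integrations"])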
@@ -1,23 +0,0 @@
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import Depends, HTTPException
from supabase import Client, create_client

from autogpt_server.data.user import DEFAULT_USER_ID
from autogpt_server.util.settings import Settings

settings = Settings()


def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
    if not payload:
        # This handles the case when authentication is disabled
        return DEFAULT_USER_ID

    user_id = payload.get("sub")
    if not user_id:
        raise HTTPException(status_code=401, detail="User ID not found in token")
    return user_id


def get_supabase() -> Client:
    return create_client(settings.secrets.supabase_url, settings.secrets.supabase_key)
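These helpers are meant to be wired in as FastAPI dependencies; a small illustrative endpoint (not part of this diff) that resolves the caller's user ID through get_user_id:

from typing import Annotated

from fastapi import APIRouter, Depends

example_router = APIRouter()  # illustrative only


@example_router.get("/me")
async def whoami(user_id: Annotated[str, Depends(get_user_id)]) -> dict[str, str]:
    # Returns DEFAULT_USER_ID when auth is disabled, otherwise the JWT "sub" claim.
    return {"user_id": user_id}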
@@ -1,14 +0,0 @@
import json

from fastapi.encoders import jsonable_encoder


def to_dict(data) -> dict:
    return jsonable_encoder(data)


def dumps(data) -> str:
    return json.dumps(jsonable_encoder(data))


loads = json.loads
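A quick illustration of these helpers, assuming a Pydantic model instance as input (the Example model is hypothetical):

from pydantic import BaseModel


class Example(BaseModel):  # hypothetical model, for illustration only
    id: int
    name: str


item = Example(id=1, name="demo")
assert to_dict(item) == {"id": 1, "name": "demo"}       # jsonable_encoder handles the model
assert loads(dumps(item)) == {"id": 1, "name": "demo"}  # round-trips through JSON text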
@@ -1,31 +0,0 @@
from threading import Lock
from typing import Any

from expiringdict import ExpiringDict


class KeyedMutex:
    """
    This class provides a mutex that can be locked and unlocked by a specific key.
    It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
    in case the key is not unlocked for a specified duration, to prevent memory leaks.
    """

    def __init__(self):
        self.locks: dict[Any, tuple[Lock, int]] = ExpiringDict(
            max_len=6000, max_age_seconds=60
        )
        self.locks_lock = Lock()

    def lock(self, key: Any):
        with self.locks_lock:
            lock, request_count = self.locks.get(key, (Lock(), 0))
            self.locks[key] = (lock, request_count + 1)
        lock.acquire()

    def unlock(self, key: Any):
        with self.locks_lock:
            lock, request_count = self.locks.pop(key)
            if request_count > 1:
                self.locks[key] = (lock, request_count - 1)
        lock.release()
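A minimal usage sketch for KeyedMutex; the key value and the surrounding code are illustrative only:

mutex = KeyedMutex()

key = ("graph_exec", "some-execution-id")  # any hashable key works; this one is made up
mutex.lock(key)
try:
    # Critical section: only one holder of this key at a time.
    ...
finally:
    mutex.unlock(key)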
@@ -1,7 +0,0 @@
from tenacity import retry, stop_after_attempt, wait_exponential

conn_retry = retry(
    stop=stop_after_attempt(30),
    wait=wait_exponential(multiplier=1, min=1, max=30),
    reraise=True,
)
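conn_retry retries a call up to 30 times with exponential backoff capped at 30 seconds and re-raises the final error. A minimal sketch of wrapping a connection function with it (the function below is hypothetical):

import random


@conn_retry
def connect_to_broker() -> str:  # hypothetical target for the retry decorator
    # Each raised exception triggers another attempt, up to 30 in total,
    # with exponential backoff between 1 and 30 seconds; the last error is re-raised.
    if random.random() < 0.5:
        raise ConnectionError("broker not ready yet")
    return "connected"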
@@ -1,133 +0,0 @@
import asyncio
import logging
import os
import threading
import time
from abc import abstractmethod
from typing import Any, Callable, Coroutine, Type, TypeVar, cast

import Pyro5.api
from Pyro5 import api as pyro

from autogpt_server.data import db
from autogpt_server.data.queue import AsyncEventQueue, AsyncRedisEventQueue
from autogpt_server.util.process import AppProcess
from autogpt_server.util.retry import conn_retry
from autogpt_server.util.settings import Config

logger = logging.getLogger(__name__)
T = TypeVar("T")
C = TypeVar("C", bound=Callable)

pyro_host = Config().pyro_host


def expose(func: C) -> C:
    """
    Decorator to mark a method or class to be exposed for remote calls.

    ## ⚠️ Gotcha
    The types on the exposed function signature are respected **as long as they are
    fully picklable**. This is not the case for Pydantic models, so if you really need
    to pass a model, try dumping the model and passing the resulting dict instead.
    """

    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            msg = f"Error in {func.__name__}: {e.__str__()}"
            logger.exception(msg)
            raise Exception(msg, e)

    return pyro.expose(wrapper)  # type: ignore


class AppService(AppProcess):
    shared_event_loop: asyncio.AbstractEventLoop
    event_queue: AsyncEventQueue = AsyncRedisEventQueue()
    use_db: bool = False
    use_redis: bool = False

    def __init__(self, port):
        self.port = port
        self.uri = None

    @classmethod
    @property
    def service_name(cls) -> str:
        return cls.__name__

    @abstractmethod
    def run_service(self):
        while True:
            time.sleep(10)

    def __run_async(self, coro: Coroutine[Any, Any, T]):
        return asyncio.run_coroutine_threadsafe(coro, self.shared_event_loop)

    def run_and_wait(self, coro: Coroutine[Any, Any, T]) -> T:
        future = self.__run_async(coro)
        return future.result()

    def run(self):
        self.shared_event_loop = asyncio.get_event_loop()
        if self.use_db:
            self.shared_event_loop.run_until_complete(db.connect())
        if self.use_redis:
            self.shared_event_loop.run_until_complete(self.event_queue.connect())

        # Initialize the async loop.
        async_thread = threading.Thread(target=self.__start_async_loop)
        async_thread.daemon = True
        async_thread.start()

        # Initialize pyro service
        daemon_thread = threading.Thread(target=self.__start_pyro)
        daemon_thread.daemon = True
        daemon_thread.start()

        # Run the main service (if it's not implemented, just sleep).
        self.run_service()

    def cleanup(self):
        if self.use_db:
            logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting DB...")
            self.run_and_wait(db.disconnect())
        if self.use_redis:
            logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting Redis...")
            self.run_and_wait(self.event_queue.close())

    @conn_retry
    def __start_pyro(self):
        host = Config().pyro_host
        daemon = Pyro5.api.Daemon(host=host, port=self.port)
        self.uri = daemon.register(self, objectId=self.service_name)
        logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}")
        daemon.requestLoop()

    def __start_async_loop(self):
        self.shared_event_loop.run_forever()


AS = TypeVar("AS", bound=AppService)


def get_service_client(service_type: Type[AS], port: int) -> AS:
    service_name = service_type.service_name

    class DynamicClient:
        @conn_retry
        def __init__(self):
            host = os.environ.get(f"{service_name.upper()}_HOST", "localhost")
            uri = f"PYRO:{service_type.service_name}@{host}:{port}"
            logger.debug(f"Connecting to service [{service_name}]. URI = {uri}")
            self.proxy = Pyro5.api.Proxy(uri)
            # Attempt to bind to ensure the connection is established
            self.proxy._pyroBind()
            logger.debug(f"Successfully connected to service [{service_name}]")

        def __getattr__(self, name: str) -> Callable[..., Any]:
            return getattr(self.proxy, name)

    return cast(AS, DynamicClient())
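Putting the pieces above together: a hedged sketch of a service with an exposed method and a client obtained via get_service_client. The service name, port, and model below are illustrative, and, per the gotcha on expose, the Pydantic model is dumped to a dict before crossing the Pyro boundary:

from pydantic import BaseModel


class NodeStatus(BaseModel):  # hypothetical model, for illustration only
    node_id: str
    status: str


class StatusService(AppService):  # hypothetical service following the pattern above
    def __init__(self):
        super().__init__(port=8005)  # port chosen arbitrarily for this sketch

    def run_service(self):
        super().run_service()  # default loop: sleep until the process is stopped

    @expose
    def report_status(self, status_dict: dict) -> str:
        # Rebuild the model from the plain dict that crossed the Pyro boundary.
        status = NodeStatus(**status_dict)
        return f"{status.node_id}: {status.status}"


# Client side: the proxy forwards attribute access to the remote object,
# so exposed methods are called like local ones (requires a running daemon).
# client = get_service_client(StatusService, 8005)
# client.report_status(NodeStatus(node_id="node_1", status="COMPLETED").model_dump())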
@@ -1,128 +0,0 @@
import json
from typing import Any, Type, TypeVar, get_origin


class ConversionError(Exception):
    pass


def __convert_list(value: Any) -> list:
    if isinstance(value, (list, tuple, set)):
        return list(value)
    elif isinstance(value, dict):
        return list(value.items())
    elif isinstance(value, str):
        value = value.strip()
        if value.startswith("[") and value.endswith("]"):
            try:
                return json.loads(value)
            except json.JSONDecodeError:
                return [value]
        else:
            return [value]
    else:
        return [value]


def __convert_dict(value: Any) -> dict:
    if isinstance(value, str):
        try:
            result = json.loads(value)
            if isinstance(result, dict):
                return result
            else:
                return {"value": result}
        except json.JSONDecodeError:
            return {"value": value}  # Fallback conversion
    elif isinstance(value, list):
        return {i: value[i] for i in range(len(value))}
    elif isinstance(value, tuple):
        return {i: value[i] for i in range(len(value))}
    elif isinstance(value, dict):
        return value
    else:
        return {"value": value}


def __convert_tuple(value: Any) -> tuple:
    if isinstance(value, (str, list, set)):
        return tuple(value)
    elif isinstance(value, dict):
        return tuple(value.items())
    elif isinstance(value, (int, float, bool)):
        return (value,)
    elif isinstance(value, tuple):
        return value
    else:
        return (value,)


def __convert_set(value: Any) -> set:
    if isinstance(value, (str, list, tuple)):
        return set(value)
    elif isinstance(value, dict):
        return set(value.items())
    elif isinstance(value, set):
        return value
    else:
        return {value}


def __convert_str(value: Any) -> str:
    if isinstance(value, str):
        return value
    else:
        return json.dumps(value)


NUM = TypeVar("NUM", int, float)


def __convert_num(value: Any, num_type: Type[NUM]) -> NUM:
    if isinstance(value, (list, dict, tuple, set)):
        return num_type(len(value))
    elif isinstance(value, num_type):
        return value
    else:
        try:
            return num_type(float(value))
        except (ValueError, TypeError):
            return num_type(0)  # Fallback conversion


def __convert_bool(value: Any) -> bool:
    if isinstance(value, bool):
        return value
    elif isinstance(value, str):
        if value.lower() in ["true", "1"]:
            return True
        else:
            return False
    else:
        return bool(value)


def convert(value: Any, target_type: Type):
    target_type = get_origin(target_type) or target_type
    if target_type not in [list, dict, tuple, str, set, int, float, bool]:
        return value
    if isinstance(value, target_type):
        return value
    if target_type is list:
        return __convert_list(value)
    elif target_type is dict:
        return __convert_dict(value)
    elif target_type is tuple:
        return __convert_tuple(value)
    elif target_type is str:
        return __convert_str(value)
    elif target_type is set:
        return __convert_set(value)
    elif target_type is int:
        return __convert_num(value, int)
    elif target_type is float:
        return __convert_num(value, float)
    elif target_type is bool:
        return __convert_bool(value)
    else:
        return value
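A few illustrative calls showing how convert() behaves under the fallback rules above (the input values are chosen for illustration):

assert convert("[1, 2, 3]", list) == [1, 2, 3]   # JSON-looking strings are parsed
assert convert("hello", list) == ["hello"]       # other strings are wrapped
assert convert('{"a": 1}', dict) == {"a": 1}     # dict parsed from JSON text
assert convert([1, 2, 3], int) == 3              # collections convert to their length
assert convert("true", bool) is True             # "true" / "1" map to True
assert convert({"a": 1}, str) == '{"a": 1}'      # everything else goes through json.dumps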
@@ -1,5 +0,0 @@
{
    "num_graph_workers": 10,
    "num_node_workers": 5,
    "num_user_credits_refill": 1500
}
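Assuming this JSON backs Settings().config (which is how update_configuration above reads and writes these fields via getattr/setattr), the values surface as attributes; a minimal sketch:

settings = Settings()
# The keys in the JSON above appear as attributes on settings.config
# (assumption: this file is the backing store for Settings().config).
print(settings.config.num_graph_workers)  # -> 10
print(settings.config.num_node_workers)   # -> 5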
@@ -1,117 +0,0 @@
[tool.poetry]
name = "autogpt_server"
version = "0.1.0"
description = "An Agentic Experience"
authors = [
    "SwiftyOS <craig.swift@agpt.co>",
    "Nicholas Tindle <nicholas.tindle@agpt.co>",
]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.10"
aio-pika = "^9.4.3"
anthropic = "^0.25.1"
apscheduler = "^3.10.4"
autogpt-libs = { path = "../autogpt_libs", develop = true }
click = "^8.1.7"
croniter = "^2.0.5"
discord-py = "^2.4.0"
expiringdict = "^1.2.2"
fastapi = "^0.109.0"
feedparser = "^6.0.11"
flake8 = "^7.0.0"
google-api-python-client = "^2.142.0"
google-auth-oauthlib = "^1.2.1"
groq = "^0.8.0"
jinja2 = "^3.1.4"
jsonref = "^1.1.0"
jsonschema = "^4.22.0"
ollama = "^0.3.0"
openai = "^1.35.7"
praw = "^7.7.1"
prisma = "^0.13.1"
psutil = "^5.9.8"
pydantic = "^2.7.2"
pydantic-settings = "^2.3.4"
pyro5 = "^5.15"
pytest = "^8.2.1"
pytest-asyncio = "^0.23.7"
python-dotenv = "^1.0.1"
redis = "^5.0.8"
sentry-sdk = "1.45.0"
supabase = "^2.7.2"
tenacity = "^8.3.0"
uvicorn = { extras = ["standard"], version = "^0.30.1" }
websockets = "^12.0"
youtube-transcript-api = "^0.6.2"

[tool.poetry.group.dev.dependencies]
poethepoet = "^0.26.1"
httpx = "^0.27.0"
pytest-watcher = "^0.4.2"
requests = "^2.32.3"
ruff = "^0.5.2"
pyright = "^1.1.371"
isort = "^5.13.2"
black = "^24.4.2"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.scripts]
app = "autogpt_server.app:main"
rest = "autogpt_server.rest:main"
ws = "autogpt_server.ws:main"
executor = "autogpt_server.exec:main"
cli = "autogpt_server.cli:main"
format = "linter:format"
lint = "linter:lint"
test = "run_tests:test"
# https://poethepoet.natn.io/index.html

[tool.poe]
poetry_command = ""

# poetry run poe xxx
[tool.poe.tasks]
test = "pytest"
build = ["test", "_dbuild"]

# This might break your python install :)
install = ["build", "_dinstall"]

# https://cx-freeze.readthedocs.io/en/stable/index.html
[tool.poe.tasks._dbuild]
cmd = "python setup.py build"

[tool.poe.tasks.dist_app]
cmd = "python setup.py bdist_app"

[tool.poe.tasks.dist_dmg]
cmd = "python setup.py bdist_dmg"

[tool.poe.tasks.dist_msi]
cmd = "python setup.py bdist_msi"

[tool.poe.tasks.dist_appimage]
cmd = "python setup.py bdist_appimage"

[tool.poe.tasks.dist_deb]
cmd = "python setup.py bdist_deb"

[tool.poe.tasks._dinstall]
cmd = "python setup.py install"

[tool.pytest-watcher]
now = false
clear = true
delay = 0.2
runner = "pytest"
runner_args = []
patterns = ["*.py"]
ignore_patterns = []

[tool.pytest.ini_options]
asyncio_mode = "auto"
@@ -1,7 +0,0 @@
from autogpt_server.data.block import get_blocks
from autogpt_server.util.test import execute_block_test


def test_available_blocks():
    for block in get_blocks().values():
        execute_block_test(type(block)())
@@ -1,9 +0,0 @@
import pytest

from autogpt_server.util.test import SpinTestServer


@pytest.fixture(scope="session")
async def server():
    async with SpinTestServer() as server:
        yield server
@@ -1,208 +0,0 @@
from uuid import UUID

import pytest

from autogpt_server.blocks.basic import AgentInputBlock, StoreValueBlock
from autogpt_server.data.graph import Graph, Link, Node
from autogpt_server.data.user import DEFAULT_USER_ID
from autogpt_server.server.model import CreateGraph
from autogpt_server.util.test import SpinTestServer


@pytest.mark.asyncio(scope="session")
async def test_graph_creation(server: SpinTestServer):
    """
    Test the creation of a graph with nodes and links.

    This test ensures that:
    1. Nodes from different subgraphs cannot be directly connected.
    2. A graph can be successfully created with valid connections.
    3. The created graph has the correct structure and properties.

    Args:
        server (SpinTestServer): The test server instance.
    """
    value_block = StoreValueBlock().id
    input_block = AgentInputBlock().id

    graph = Graph(
        id="test_graph",
        name="TestGraph",
        description="Test graph",
        nodes=[
            Node(id="node_1", block_id=value_block),
            Node(id="node_2", block_id=input_block),
            Node(id="node_3", block_id=value_block),
        ],
        links=[
            Link(
                source_id="node_1",
                sink_id="node_3",
                source_name="output",
                sink_name="input",
            ),
        ],
        subgraphs={"subgraph_1": ["node_2", "node_3"]},
    )
    create_graph = CreateGraph(graph=graph)

    try:
        await server.agent_server.create_graph(create_graph, False, DEFAULT_USER_ID)
        assert False, "Should not be able to connect nodes from different subgraphs"
    except ValueError as e:
        assert "different subgraph" in str(e)

    # Change node_1 <-> node_3 link to node_1 <-> node_2 (input for subgraph_1)
    graph.links[0].sink_id = "node_2"
    created_graph = await server.agent_server.create_graph(
        create_graph, False, DEFAULT_USER_ID
    )

    assert UUID(created_graph.id)
    assert created_graph.name == "TestGraph"

    assert len(created_graph.nodes) == 3
    assert UUID(created_graph.nodes[0].id)
    assert UUID(created_graph.nodes[1].id)
    assert UUID(created_graph.nodes[2].id)

    nodes = created_graph.nodes
    links = created_graph.links
    assert len(links) == 1
    assert links[0].source_id != links[0].sink_id
    assert links[0].source_id in {nodes[0].id, nodes[1].id, nodes[2].id}
    assert links[0].sink_id in {nodes[0].id, nodes[1].id, nodes[2].id}

    assert len(created_graph.subgraphs) == 1
    assert len(created_graph.subgraph_map) == len(created_graph.nodes) == 3


@pytest.mark.asyncio(scope="session")
async def test_get_input_schema(server: SpinTestServer):
    """
    Test the get_input_schema method of a created graph.

    This test ensures that:
    1. A graph can be created with a single node.
    2. The input schema of the created graph is correctly generated.
    3. The input schema contains the expected input name and node id.

    Args:
        server (SpinTestServer): The test server instance.
    """
    value_block = StoreValueBlock().id

    graph = Graph(
        name="TestInputSchema",
        description="Test input schema",
        nodes=[
            Node(id="node_1", block_id=value_block),
        ],
        links=[],
    )

    create_graph = CreateGraph(graph=graph)
    created_graph = await server.agent_server.create_graph(
        create_graph, False, DEFAULT_USER_ID
    )

    input_schema = created_graph.get_input_schema()

    assert len(input_schema) == 1

    assert input_schema[0].title == "Input"
    assert input_schema[0].node_id == created_graph.nodes[0].id


@pytest.mark.asyncio(scope="session")
async def test_get_input_schema_none_required(server: SpinTestServer):
    """
    Test the get_input_schema method when no inputs are required.

    This test ensures that:
    1. A graph can be created with a node that has a default input value.
    2. The input schema of the created graph is empty when all inputs have default values.

    Args:
        server (SpinTestServer): The test server instance.
    """
    value_block = StoreValueBlock().id

    graph = Graph(
        name="TestInputSchema",
        description="Test input schema",
        nodes=[
            Node(id="node_1", block_id=value_block, input_default={"input": "value"}),
        ],
        links=[],
    )

    create_graph = CreateGraph(graph=graph)
    created_graph = await server.agent_server.create_graph(
        create_graph, False, DEFAULT_USER_ID
    )

    input_schema = created_graph.get_input_schema()

    assert input_schema == []


@pytest.mark.asyncio(scope="session")
async def test_get_input_schema_with_linked_blocks(server: SpinTestServer):
    """
    Test the get_input_schema method with linked blocks.

    This test ensures that:
    1. A graph can be created with multiple nodes and links between them.
    2. The input schema correctly identifies required inputs for linked blocks.
    3. Inputs that are satisfied by links are not included in the input schema.

    Args:
        server (SpinTestServer): The test server instance.
    """
    value_block = StoreValueBlock().id

    graph = Graph(
        name="TestInputSchemaLinkedBlocks",
        description="Test input schema with linked blocks",
        nodes=[
            Node(id="node_1", block_id=value_block),
            Node(id="node_2", block_id=value_block),
        ],
        links=[
            Link(
                source_id="node_1",
                sink_id="node_2",
                source_name="output",
                sink_name="data",
            ),
        ],
    )

    create_graph = CreateGraph(graph=graph)
    created_graph = await server.agent_server.create_graph(
        create_graph, False, DEFAULT_USER_ID
    )

    input_schema = created_graph.get_input_schema()

    assert len(input_schema) == 2

    node_1_input = next(
        (item for item in input_schema if item.node_id == created_graph.nodes[0].id),
        None,
    )
    node_2_input = next(
        (item for item in input_schema if item.node_id == created_graph.nodes[1].id),
        None,
    )

    assert node_1_input is not None
    assert node_2_input is not None
    assert node_1_input.title == "Input"
    assert node_2_input.title == "Input"

    assert not any(
        item.title == "data" and item.node_id == created_graph.nodes[1].id
        for item in input_schema
    )
@@ -1,39 +0,0 @@
import pytest

from autogpt_server.data import db, graph
from autogpt_server.executor import ExecutionScheduler
from autogpt_server.usecases.sample import create_test_graph, create_test_user
from autogpt_server.util.service import get_service_client
from autogpt_server.util.settings import Config
from autogpt_server.util.test import SpinTestServer


@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
    await db.connect()
    test_user = await create_test_user()
    test_graph = await graph.create_graph(create_test_graph(), user_id=test_user.id)

    scheduler = get_service_client(
        ExecutionScheduler, Config().execution_scheduler_port
    )

    schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
    assert len(schedules) == 0

    schedule_id = scheduler.add_execution_schedule(
        graph_id=test_graph.id,
        user_id=test_user.id,
        graph_version=1,
        cron="0 0 * * *",
        input_data={"input": "data"},
    )
    assert schedule_id

    schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
    assert len(schedules) == 1
    assert schedules[schedule_id] == "0 0 * * *"

    scheduler.update_schedule(schedule_id, is_enabled=False, user_id=test_user.id)
    schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
    assert len(schedules) == 0
autogpt_platform/backend/.env.example (new file, 114 lines)
@@ -0,0 +1,114 @@
DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres
DB_PORT=5432
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}?connect_timeout=60&schema=platform"
PRISMA_SCHEMA="postgres/schema.prisma"

BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='

REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000

## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=https://your-public-url-here

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
# integration to work.

# For the OAuth callback URL, use <your_frontend_url>/auth/integrations/oauth_callback,
# e.g. http://localhost:3000/auth/integrations/oauth_callback

# GitHub OAuth App server credentials - https://github.com/settings/developers
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=

# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable gmail api and set scopes
# https://console.cloud.google.com/apis/credentials/consent?project=<your_project_id>

# You'll need to add/enable the following scopes (minimum):
# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview?project=<your_project_id>
# https://console.cloud.google.com/apis/library/sheets.googleapis.com/?project=<your_project_id>
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=

# Reddit
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USERNAME=
REDDIT_PASSWORD=

# Discord
DISCORD_BOT_TOKEN=

# SMTP/Email
SMTP_SERVER=
SMTP_PORT=
SMTP_USERNAME=
SMTP_PASSWORD=

# D-ID
DID_API_KEY=

# Open Weather Map
OPENWEATHERMAP_API_KEY=

# Medium
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=

# Google Maps
GOOGLE_MAPS_API_KEY=

# Replicate
REPLICATE_API_KEY=

# Ideogram
IDEOGRAM_API_KEY=

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs
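As noted next to ENCRYPTION_KEY above, a fresh key can be generated with cryptography's Fernet; a runnable form of that one-liner:

from cryptography.fernet import Fernet

# Generate a new Fernet key and print it in the format expected by ENCRYPTION_KEY.
print(Fernet.generate_key().decode())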
Some files were not shown because too many files have changed in this diff.