Compare commits

...

89 Commits

Author SHA1 Message Date
Swifty
6301f8c34e Merge branch 'repo-restructure' into fix-path 2024-09-20 14:11:07 +02:00
Reinier van der Leer
f711c057da unbreak links in README 2024-09-20 14:09:01 +02:00
Reinier van der Leer
0c6b08d882 fix agbenchmark replace-all 2024-09-20 14:08:17 +02:00
SwiftyOS
5b08913ab9 fixing path issues 2024-09-20 14:04:24 +02:00
Reinier van der Leer
815cf8b4ac fix issue with webdriver-manager 2024-09-20 13:46:57 +02:00
SwiftyOS
3abe821533 Update docker file formatting 2024-09-20 13:43:51 +02:00
Reinier van der Leer
6ae6c711b7 fix Dockerfile.autogpt 2024-09-20 13:32:49 +02:00
Reinier van der Leer
c8f55bc518 fix .pre-commit-config.yml 2024-09-20 13:27:50 +02:00
Reinier van der Leer
df2126c1a8 fix Dockerfile.autogpt 2024-09-20 13:23:15 +02:00
Reinier van der Leer
dfcfd003df fix original_autogpt path in classic/cli.py 2024-09-20 13:21:36 +02:00
Reinier van der Leer
4e33399d31 fix code workspace file (vol. 2) 2024-09-20 13:11:05 +02:00
Reinier van der Leer
369b1d9023 fix code workspace file 2024-09-20 13:10:00 +02:00
Reinier van der Leer
241f21ab5f fix classic-autogpts-ci.yml 2024-09-20 13:04:36 +02:00
Reinier van der Leer
7551782cd1 fix classic-autogpts-ci.yml 2024-09-20 13:00:57 +02:00
Reinier van der Leer
430835e539 fix classic-forge-ci.yml 2024-09-20 12:59:12 +02:00
Reinier van der Leer
f5040fa3ab fix classic-benchmark-ci.yml 2024-09-20 12:58:59 +02:00
Reinier van der Leer
6ced85d203 fix classic docker CI workflows 2024-09-20 12:53:12 +02:00
Reinier van der Leer
5e1a3d5717 fix classic-autogpts-ci.yml 2024-09-20 12:50:05 +02:00
Reinier van der Leer
d35b91cde4 delete Classic AutoGPTs Nightly Benchmark 2024-09-20 12:44:27 +02:00
Reinier van der Leer
aeab5aac67 unbreak Classic AutoGPT Docker Release workflow 2024-09-20 12:42:20 +02:00
Reinier van der Leer
31cd6dc652 move back .pre-commit-config.yaml 2024-09-20 12:39:01 +02:00
Reinier van der Leer
13b82c86f5 unbreak Classic AutoGPT CI workflows 2024-09-20 12:37:56 +02:00
SwiftyOS
ff11d00f74 isort fixes 2024-09-20 12:35:53 +02:00
SwiftyOS
9d7dfb0a6d fix type errors 2024-09-20 12:33:39 +02:00
SwiftyOS
f1bf7f269b Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure 2024-09-20 12:25:02 +02:00
SwiftyOS
46cc8ae3ea more linting fixes 2024-09-20 12:24:55 +02:00
Reinier van der Leer
43bf6f2349 unbreak classic/.dockerignore 2024-09-20 12:23:49 +02:00
SwiftyOS
2582eb1ee8 Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure 2024-09-20 12:18:04 +02:00
SwiftyOS
10cefc149f linitng fixes 2024-09-20 12:17:56 +02:00
Reinier van der Leer
d62fe001b8 move back .pre-commit-config.yaml 2024-09-20 12:15:51 +02:00
SwiftyOS
f583a15fd0 formatting changes 2024-09-20 12:13:33 +02:00
SwiftyOS
2cad2093eb updaing python checks ci 2024-09-20 12:04:11 +02:00
SwiftyOS
4e569f4562 add flake8 path to python checks ci 2024-09-20 11:58:52 +02:00
SwiftyOS
7f514c10cf Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure 2024-09-20 11:54:56 +02:00
SwiftyOS
d7aba4f6c0 updaate python checks ci 2024-09-20 11:54:17 +02:00
Swifty
ba30aa2fce Merge branch 'master' into repo-restructure 2024-09-20 11:43:19 +02:00
SwiftyOS
efeba4400e update symbolic links 2024-09-20 11:40:38 +02:00
Aarushi
2dfc927f03 tweak(docs) add to docs supabase submodule steps (#8115)
add to docs
2024-09-20 10:39:52 +01:00
SwiftyOS
ba206e3bec docs update 2024-09-20 11:32:38 +02:00
SwiftyOS
be16fd90d4 fixing repo workflow checker CI 2024-09-20 11:20:03 +02:00
SwiftyOS
d10167ceab renamed all CI's to make it clear which subproject they are for 2024-09-20 11:14:54 +02:00
SwiftyOS
d593f76437 updating CI's 2024-09-20 11:07:14 +02:00
SwiftyOS
bda938422e fix frontend paths 2024-09-20 11:02:23 +02:00
SwiftyOS
8397b78ec2 update frontend ci 2024-09-20 10:56:10 +02:00
SwiftyOS
0d7342826b Restructureing Repo 2024-09-20 10:48:08 +02:00
Aarushi
e3f35d79c7 tweak(.github): Update pr template wording (#8103)
* update pr template wording

* add what and how

* Update .github/PULL_REQUEST_TEMPLATE.md

---------

Co-authored-by: Toran Bruce Richards <toran.richards@gmail.com>
2024-09-19 12:44:50 +00:00
Aarushi
0040495143 tweak(.github): Update PR template (#8100)
* update PR template

* Update .github/PULL_REQUEST_TEMPLATE.md

Co-authored-by: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com>

* add note

* typo

---------

Co-authored-by: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com>
2024-09-19 13:00:16 +01:00
Aarushi
d3eac86f9a fix(frontend): Update REST API port (#8096)
update server port to 8006

Co-authored-by: Zamil Majdy <zamil.majdy@agpt.co>
2024-09-19 01:06:04 +02:00
Zamil Majdy
c3cb90ac20 feat(rnd): Add initial block execution credit accounting UI on AutoGPT Builder (#8078) 2024-09-19 04:21:40 +07:00
matanm
9b5bf81d7c Fix typo in Groq setup docs (#8018)
Co-authored-by: Nicholas Tindle <nicholas.tindle@agpt.co>
2024-09-18 20:22:57 +00:00
Nicholas Tindle
86db4deef9 feat(server): backend analytics endpoints (#8030) 2024-09-18 18:23:20 +00:00
Aarushi
d8f989daf8 docs(rnd): Update submodules info in readme (#8095)
update submodules info in readme
2024-09-18 18:59:23 +01:00
Aarushi
00f2b134cb tweak(rnd): add env var to docker compose so no messing with .env (#8091)
add env var to docker compose so no messing with .env
2024-09-18 16:39:15 +01:00
Aarushi
a3959712dc tweak(builder): Update .env.example server url with right port (#8090)
update server url with right port
2024-09-18 15:51:44 +01:00
Aarushi
8477b25c5a tweak(builder) Add local supabse credentials (#8089)
add local supabse credentials
2024-09-18 15:45:09 +01:00
Swifty
f133c9c1ef fix(rnd): incorrect docker image for migrate (#8086)
fix incorrect docker image for migrate
2024-09-18 15:21:38 +02:00
Aarushi
dc72ec97bc feat(rnd): Add support for supabase locally (#8077)
* add just auth for now

* add supabase script

* add to docker compose

* update docker compose

* tweak(rnd) Add prefix in logs (#8001)

* add prefix

* fix typos

* fix conflicts

* feat(rnd): Reduce container size remove dep with forge and autogpt (#8040)

* Remove forge and autogpt

* update lock files

* Update build process to reduce image size

* Reduced built image size

* fixed docker compose watch

* Updated logging

* updated env.example

* formatting

* linting issue

* linting not working in github actions..

* trying to get around github action linting issue

* updated version

* sleep for prisma issues

* add exp backoff on connection issues

* updated config based on review comments

* Sorting alphabetical

* updated default config

* updated depends checks

* fixed missing prisma binaries

* remove dead layer

* remove try

* remove dead layer

* updated lock file

* add to docker compose

* update for init

* add local supabase variables to docker compose

* wip supbase connectioon

* subabase submodule

* combined docker file wth new supbase url pointing to kong

* updated combined

* ngix

* updated docker compose without frontend

* updated docker compose

* update to remove frontend

* update docs

* update newline

* remove unescessary change

---------

Co-authored-by: Swifty <craigswift13@gmail.com>
2024-09-18 09:50:39 +01:00
Nicholas Tindle
0c915cb558 feat(server): anthropic updates, csv, sampling, and code blocks (#7803)
Co-authored-by: Bentlybro <tomnoon9@gmail.com>
2024-09-17 21:29:35 -05:00
Nicholas Tindle
f6ab15db47 feat(market): add filters to the market queries (#8064) 2024-09-17 14:59:25 +00:00
Krzysztof Czerwinski
80161decb9 feat(server): Add credentials API endpoints (#8024)
- Add two endpoints to OAuth `integrations.py`:
  - `GET /integrations/{provider}/credentials` - list all credentials for a provider, without secrets (metadata only)
   - `GET /integrations/{provider}/credentials/{cred_id}` - retrieve a set of credentials (including secrets)

- Add `username` property to `Credentials` types
   - Add logic to populate `username` in OAuth handlers

- Expand `CredentialsMetaResponse` and remove `credentials_` prefix from properties

- Fix `autogpt_libs` dependency caching issue

- Remove accidentally duplicated OAuth handler files in `autogpt_server/integrations`
2024-09-17 11:16:16 +00:00
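For orientation, exercising the two endpoints described in this commit could look like the sketch below; the base URL, port, and bearer-token auth are assumptions, only the paths come from the commit description:

```bash
# List a provider's credentials: metadata only, no secrets (hypothetical host/port)
curl -s http://localhost:8006/api/integrations/github/credentials \
  -H "Authorization: Bearer $ACCESS_TOKEN"

# Fetch one credential set by id, including secrets
curl -s "http://localhost:8006/api/integrations/github/credentials/$CRED_ID" \
  -H "Authorization: Bearer $ACCESS_TOKEN"
```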
Swifty
0bf8edcd96 fix(autogpt_server): Fix vulnerability in Dockerfile (#8071) 2024-09-17 11:37:22 +01:00
Zamil Majdy
b1347a92de fix(rnd): Fix execution error on non-saved agent (#8054) 2024-09-16 19:35:31 +00:00
Nicholas Tindle
22ce8e0047 feat(builder): sentry integration (#8053) 2024-09-16 23:19:52 +07:00
Bently
5a7193cfb7 Feat(Builder): Add Runner input and ouput screens (#8038)
* Feat(Builder): Add Runner input and ouput screens

* Fix run button not working

* prettier

* prettier again -- forgot flow

* fix input scaling + auto close on run

* removed "Runner Input" button to make it auto open runner input if input block is  + Fixed issue with output not showing in output UI

* replaced runner output icon and added a new icon for it

* replaced IconOutput icon with LogOut from lucide-react

* prettier

* fix type safety issue + add error handling for formatOutput

* Updates based on comments

* prettier for utils
2024-09-16 13:05:07 +02:00
Zamil Majdy
c1f301ab8b feat(rnd): Add initial credit accounting system for block execution (#8047)
### Background

We need a way to set an execution quota per user for each block execution.

### Changes 🏗️

* Introduced a `UserBlockCredit`, a transaction table tracking the block usage along with it cost/quota.
* The tracking is toggled by `ENABLE_CREDIT` config, default = false.
* Introduced  `BLOCK_COSTS` | `GET /blocks/costs` as a source of information for the cost on each block depending on the input configuration.

Improvements:
* Refactor logging in manager.py to always print a prefix and pass the metadata.
* Make executionStatus on AgentNodeExecution prisma enum. And add executionStatus on AgentGraphExecution.
* Use executionStatus from AgentGraphExecution to improve waiting logic on test_manager.py.
2024-09-14 23:47:28 +07:00
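A sketch of poking the new cost source from the outside; the `/api` prefix, host, and port are assumptions, while the flag and path come from the commit description above:

```bash
# Credit tracking is off by default; enable it via config before starting the server
export ENABLE_CREDIT=true

# Query the per-block cost table exposed by the new endpoint
curl -s http://localhost:8006/api/blocks/costs
```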
Zamil Majdy
f32244a112 fix(rnd): Fix broken save feature on Agent Builder (#8052) 2024-09-13 18:04:51 -05:00
Aarushi
9395706841 fix(rnd,market): Fix docker issues with market, and DB connection (#8050)
fix docker issues with market, and DB connection
2024-09-13 16:15:06 +01:00
SwiftyOS
a98677b79d Revert "updated lock file"
This reverts commit 056eb46c0f.
2024-09-12 17:59:39 +02:00
SwiftyOS
056eb46c0f updated lock file 2024-09-12 16:18:13 +02:00
Swifty
6fde030c37 feat(rnd): Reduce container size remove dep with forge and autogpt (#8040)
* Remove forge and autogpt

* update lock files

* Update build process to reduce image size

* Reduced built image size

* fixed docker compose watch

* Updated logging

* updated env.example

* formatting

* linting issue

* linting not working in github actions..

* trying to get around github action linting issue

* updated version

* sleep for prisma issues

* add exp backoff on connection issues

* updated config based on review comments

* Sorting alphabetical

* updated default config

* updated depends checks

* fixed missing prisma binaries

* remove dead layer

* remove try

* remove dead layer
2024-09-12 13:03:37 +02:00
Aarushi
bf1e01d423 tweak(rnd) Add prefix in logs (#8001)
* add prefix

* fix typos

* fix conflicts
2024-09-12 11:48:47 +01:00
Zamil Majdy
52c731abd6 fix(rnd): Fix decorator function type hint (#8043) 2024-09-12 05:35:33 +07:00
Aarushi
c8fbce643e fix(rnd): Add connection timeout (#8041)
add connection timeout
2024-09-11 18:32:07 +01:00
Swifty
6c001bd595 Create Input Node Custom UI Node (#8016) 2024-09-11 14:14:03 +02:00
Aarushi
f5b89672f8 feat(rnd): Add k8s default health check (#8037)
add k8s default health check
2024-09-11 12:30:34 +01:00
Aarushi
76480ffa03 fix(rnd): Update port in market (#8036)
update port
2024-09-11 12:19:11 +01:00
Aarushi
ab60a57379 tweak(rnd): Ignore .env in market (#8035)
ignore .env
2024-09-11 11:01:34 +01:00
Aarushi
1d9b01fc77 tweak(rnd): Use docker compose not docker-compose (#8034)
* use docker compose not docker-compose

* linting
2024-09-11 10:20:31 +01:00
Swifty
e81d9f9f0b docker nits (#8033) 2024-09-11 10:31:12 +02:00
Bentlybro
0d5d0270ea Merge branch 'master' of https://github.com/Significant-Gravitas/AutoGPT 2024-09-10 18:24:53 +01:00
SwiftyOS
bd25f9223c expose schedular port and fix marketplace port 2024-09-10 17:09:39 +02:00
SwiftyOS
07305b55ff fix(rnd) use migrate deploy 2024-09-10 16:18:42 +02:00
SwiftyOS
cdfe3e5fbc Updated rnd/README.md 2024-09-10 16:16:16 +02:00
Swifty
e992cdf8c2 Fixing docker setup for local testing (#8026)
* Fixing docker setup

* Updated docker compose setup

* update helm charts

* Corrected agent server host name
2024-09-10 15:46:22 +02:00
Aarushi
ebd2ecd84c docs(server): Update docs (#8031)
update docs
2024-09-10 10:24:54 +01:00
Aarushi
0b919522ae feat(rnd): Split Execution Manager (#8008)
* split execution manager and removed ns and use direct uri with k8s and docker specific dns

* formating

* split execution manager

* refactor(builder): Fix linting warning and errors (#8021)

* Fix lint errors

* Fix dependency loop

* address feedback

* docker compose

* remove ns entirely

* remove yarn lock changes

* update readme

* remove ref

* dockerfile and log

* update log

* debug

* rename to executor

* remove execution from rest

* exec.py

* linting

* udpate tests to use config

* fix test

---------

Co-authored-by: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com>
2024-09-10 10:05:31 +01:00
Nicholas Tindle
ef691359b7 feat: document the use of isolation better (#8028) 2024-09-09 20:55:05 +00:00
Aarushi
f8815c3053 fix(builder): Use escaped apostrophe (#8027)
* use escaped apostrophe

* linter
2024-09-09 18:08:32 +00:00
Reinier van der Leer
a60ed21404 feat(server): Add OAuth flow endpoints for integrations (#7872)
- feat(server): Initial draft of OAuth init and exchange endpoints
  - Add `supabase` dependency
  - Add Supabase credentials to `Secrets`
  - Add `get_supabase` utility to `.server.utils`
  - Add `.server.integrations` API segment with initial implementations for OAuth init and exchange endpoints
- Move integration OAuth handlers to `autogpt_server.integrations.oauth`
- Change constructor of `SupabaseIntegrationCredentialsStore` to take a Supabase client
- Fix type issues in `GoogleOAuthHandler`
2024-09-09 17:21:56 +02:00
2834 changed files with 88317 additions and 20581 deletions

.gitattributes (4 changes)

@@ -1,10 +1,10 @@
-frontend/build/** linguist-generated
+classic/frontend/build/** linguist-generated
 **/poetry.lock linguist-generated
 docs/_javascript/** linguist-vendored

 # Exclude VCR cassettes from stats
-forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
+classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated

 * text=auto

.github/CODEOWNERS (8 changes)

@@ -1,7 +1,7 @@
 * @Significant-Gravitas/maintainers
 .github/workflows/ @Significant-Gravitas/devops
-forge/ @Significant-Gravitas/forge-maintainers
-benchmark/ @Significant-Gravitas/benchmark-maintainers
-frontend/ @Significant-Gravitas/frontend-maintainers
-rnd/infra @Significant-Gravitas/devops
+classic/forge/ @Significant-Gravitas/forge-maintainers
+classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
+classic/frontend/ @Significant-Gravitas/frontend-maintainers
+autogpt_platform/infra @Significant-Gravitas/devops
 .github/CODEOWNERS @Significant-Gravitas/admins

.github/PULL_REQUEST_TEMPLATE.md

@@ -6,26 +6,18 @@
 <!-- Concisely describe all of the changes made in this pull request: -->

-### PR Quality Scorecard ✨
+### Testing 🔍
+> [!NOTE]
+> Only for the new autogpt platform, currently in autogpt_platform/

 <!--
-Check out our contribution guide:
-https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
-1. Avoid duplicate work, issues, PRs etc.
-2. Also consider contributing something other than code; see the [contribution guide]
-   for options.
-3. Clearly explain your changes.
-4. Avoid making unnecessary changes, especially if they're purely based on personal
-   preferences. Doing so is the maintainers' job. ;-)
+Please make sure your changes have been tested and are in good working condition.
+Here is a list of our critical paths, if you need some inspiration on what and how to test:
 -->

-- [x] Have you used the PR description template? &ensp; `+2 pts`
-- [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
-- [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
-- [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
-- [ ] Have you changed or added a feature? &ensp; `-4 pts`
-- [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
-- [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
-- [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
-- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`
+- Create from scratch and execute an agent with at least 3 blocks
+- Import an agent from file upload, and confirm it executes correctly
+- Upload agent to marketplace
+- Import an agent from marketplace and confirm it executes correctly
+- Edit an agent from monitor, and confirm it executes correctly

.github/labeler.yml (12 changes)

@@ -1,18 +1,18 @@
 AutoGPT Agent:
 - changed-files:
-  - any-glob-to-any-file: autogpt/**
+  - any-glob-to-any-file: classic/original_autogpt/**

 Forge:
 - changed-files:
-  - any-glob-to-any-file: forge/**
+  - any-glob-to-any-file: classic/forge/**

 Benchmark:
 - changed-files:
-  - any-glob-to-any-file: benchmark/**
+  - any-glob-to-any-file: classic/benchmark/**

 Frontend:
 - changed-files:
-  - any-glob-to-any-file: frontend/**
+  - any-glob-to-any-file: classic/frontend/**

 documentation:
 - changed-files:

@@ -20,8 +20,8 @@ documentation:
 Builder:
 - changed-files:
-  - any-glob-to-any-file: rnd/autogpt_builder/**
+  - any-glob-to-any-file: autogpt_platform/autogpt_builder/**

 Server:
 - changed-files:
-  - any-glob-to-any-file: rnd/autogpt_server/**
+  - any-glob-to-any-file: autogpt_platform/autogpt_server/**


@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}
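An aside on the exit-code shim in the workflow deleted above: after `[ $? -eq 0 ]` fails, `$?` already holds that test's own status (1), so `[ $? -eq 1 ]` is always true and the `else` branch is unreachable; the real agbenchmark exit code is lost either way. A sketch of the evidently intended behavior, capturing the status first:

```bash
poetry run agbenchmark run -N 3 --test=ReadFile   # (test list abridged)
status=$?            # capture immediately, before any other command clobbers $?
if [ "$status" -eq 0 ] || [ "$status" -eq 1 ]; then
  exit 0             # treat "some challenges failed" (exit 1) as success
else
  exit "$status"     # propagate real failures
fi
```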

.github/workflows/classic-autogpt-ci.yml

@@ -1,25 +1,25 @@
-name: AutoGPT CI
+name: Classic - AutoGPT CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/autogpt-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-ci.yml'
+      - 'classic/original_autogpt/**'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpt-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-ci.yml'
+      - 'classic/original_autogpt/**'

 concurrency:
-  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

 defaults:
   run:
     shell: bash
-    working-directory: autogpt
+    working-directory: classic/original_autogpt

 jobs:
   test:

@@ -86,7 +86,7 @@ jobs:
       uses: actions/cache@v4
       with:
         path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-        key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
+        key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'

@@ -135,4 +135,4 @@ jobs:
       uses: actions/upload-artifact@v4
       with:
         name: test-logs
-        path: autogpt/logs/
+        path: classic/original_autogpt/logs/

@@ -1,4 +1,4 @@
-name: Purge Auto-GPT Docker CI cache
+name: Classic - Purge Auto-GPT Docker CI cache

 on:
   schedule:

@@ -25,7 +25,8 @@
     - name: Build image
       uses: docker/build-push-action@v5
       with:
-        file: Dockerfile.autogpt
+        context: classic/
+        file: classic/Dockerfile.autogpt
         build-args: BUILD_TYPE=${{ matrix.build-type }}
         load: true # save to docker images
         # use GHA cache as read-only

.github/workflows/classic-autogpt-docker-ci.yml

@@ -1,24 +1,26 @@
-name: AutoGPT Docker CI
+name: Classic - AutoGPT Docker CI

 on:
   push:
     branches: [ master, development ]
     paths:
-      - '.github/workflows/autogpt-docker-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-docker-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpt-docker-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-docker-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'

 concurrency:
-  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ github.event_name == 'pull_request' }}

 defaults:
   run:
-    working-directory: autogpt
+    working-directory: classic/original_autogpt

 env:
   IMAGE_NAME: auto-gpt

@@ -47,7 +49,8 @@ jobs:
     - name: Build image
       uses: docker/build-push-action@v5
       with:
-        file: Dockerfile.autogpt
+        context: classic/
+        file: classic/Dockerfile.autogpt
         build-args: BUILD_TYPE=${{ matrix.build-type }}
         tags: ${{ env.IMAGE_NAME }}
         labels: GIT_REVISION=${{ github.sha }}

@@ -116,7 +119,8 @@ jobs:
     - name: Build image
       uses: docker/build-push-action@v5
       with:
-        file: Dockerfile.autogpt
+        context: classic/
+        file: classic/Dockerfile.autogpt
         build-args: BUILD_TYPE=dev # include pytest
         tags: >
           ${{ env.IMAGE_NAME }},

@@ -1,4 +1,4 @@
-name: AutoGPT Docker Release
+name: Classic - AutoGPT Docker Release

 on:
   release:

@@ -44,6 +44,7 @@ jobs:
     - name: Build image
       uses: docker/build-push-action@v5
       with:
+        context: classic/
         file: Dockerfile.autogpt
         build-args: BUILD_TYPE=release
         load: true # save to docker images

.github/workflows/classic-autogpts-ci.yml

@@ -1,4 +1,4 @@
-name: Agent smoke tests
+name: Classic - Agent smoke tests

 on:
   workflow_dispatch:

@@ -7,32 +7,37 @@ on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/autogpts-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
-      - 'run'
-      - 'cli.py'
-      - 'setup.py'
+      - '.github/workflows/classic-autogpts-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpts-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
-      - 'run'
-      - 'cli.py'
-      - 'setup.py'
+      - '.github/workflows/classic-autogpts-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'

 defaults:
   run:
     shell: bash
+    working-directory: classic

 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        agent-name: [ autogpt ]
+        agent-name: [ original_autogpt ]
       fail-fast: false
     timeout-minutes: 20
     env:

@@ -50,7 +55,7 @@ jobs:
         python-version: ${{ env.min-python-version }}
     - name: Install Poetry
-      working-directory: ./${{ matrix.agent-name }}/
+      working-directory: ./classic/${{ matrix.agent-name }}/
       run: |
         curl -sSL https://install.python-poetry.org | python -

.github/workflows/classic-benchmark-ci.yml

@@ -1,18 +1,18 @@
-name: AGBenchmark CI
+name: Classic - AGBenchmark CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - 'benchmark/**'
-      - .github/workflows/benchmark-ci.yml
-      - '!benchmark/reports/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
+      - .github/workflows/classic-benchmark-ci.yml
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - 'benchmark/**'
-      - '!benchmark/reports/**'
-      - .github/workflows/benchmark-ci.yml
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
+      - .github/workflows/classic-benchmark-ci.yml

 concurrency:
   group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -39,7 +39,7 @@ jobs:
   defaults:
     run:
       shell: bash
-      working-directory: benchmark
+      working-directory: classic/benchmark
   steps:
     - name: Checkout repository
       uses: actions/checkout@v4

@@ -58,7 +58,7 @@
       uses: actions/cache@v4
       with:
         path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-        key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
+        key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'

@@ -122,7 +122,7 @@
         curl -sSL https://install.python-poetry.org | python -
     - name: Run regression tests
-      working-directory: .
+      working-directory: classic
       run: |
         ./run agent start ${{ matrix.agent-name }}
         cd ${{ matrix.agent-name }}

@@ -155,7 +155,7 @@
         poetry run agbenchmark --mock
-        CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
+        CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
         if [ ! -z "$CHANGED" ]; then
           echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
           echo "$CHANGED"

@@ -1,4 +1,4 @@
-name: Publish to PyPI
+name: Classic - Publish to PyPI

 on:
   workflow_dispatch:

@@ -21,21 +21,21 @@
         python-version: 3.8
     - name: Install Poetry
-      working-directory: ./benchmark/
+      working-directory: ./classic/benchmark/
       run: |
         curl -sSL https://install.python-poetry.org | python3 -
         echo "$HOME/.poetry/bin" >> $GITHUB_PATH
     - name: Build project for distribution
-      working-directory: ./benchmark/
+      working-directory: ./classic/benchmark/
       run: poetry build
     - name: Install dependencies
-      working-directory: ./benchmark/
+      working-directory: ./classic/benchmark/
       run: poetry install
     - name: Check Version
-      working-directory: ./benchmark/
+      working-directory: ./classic/benchmark/
       id: check-version
       run: |
         echo version=$(poetry version --short) >> $GITHUB_OUTPUT

@@ -43,7 +43,7 @@
     - name: Create Release
       uses: ncipollo/release-action@v1
       with:
-        artifacts: "benchmark/dist/*"
+        artifacts: "classic/benchmark/dist/*"
         token: ${{ secrets.GITHUB_TOKEN }}
         draft: false
         generateReleaseNotes: false

@@ -51,5 +51,5 @@
         commit: master
     - name: Build and publish
-      working-directory: ./benchmark/
+      working-directory: ./classic/benchmark/
       run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}

.github/workflows/classic-forge-ci.yml

@@ -1,18 +1,18 @@
-name: Forge CI
+name: Classic - Forge CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/forge-ci.yml'
-      - 'forge/**'
-      - '!forge/tests/vcr_cassettes'
+      - '.github/workflows/classic-forge-ci.yml'
+      - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/forge-ci.yml'
-      - 'forge/**'
-      - '!forge/tests/vcr_cassettes'
+      - '.github/workflows/classic-forge-ci.yml'
+      - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'

 concurrency:
   group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -21,7 +21,7 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: forge
+    working-directory: classic/forge

 jobs:
   test:

@@ -110,7 +110,7 @@ jobs:
       uses: actions/cache@v4
       with:
         path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-        key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
+        key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'

@@ -233,4 +233,4 @@ jobs:
       uses: actions/upload-artifact@v4
       with:
         name: test-logs
-        path: forge/logs/
+        path: classic/forge/logs/

@@ -1,4 +1,4 @@
-name: Frontend CI/CD
+name: Classic - Frontend CI/CD

 on:
   push:

@@ -7,11 +7,11 @@ on:
       - development
      - 'ci-test*' # This will match any branch that starts with "ci-test"
     paths:
-      - 'frontend/**'
+      - 'classic/frontend/**'
       - '.github/workflows/frontend-ci.yml'
   pull_request:
     paths:
-      - 'frontend/**'
+      - 'classic/frontend/**'
       - '.github/workflows/frontend-ci.yml'

 jobs:

@@ -34,7 +34,7 @@ jobs:
     - name: Build Flutter to Web
       run: |
-        cd frontend
+        cd classic/frontend
         flutter build web --base-href /app/

     # - name: Commit and Push to ${{ env.BUILD_BRANCH }}

@@ -42,7 +42,7 @@
     #   run: |
     #     git config --local user.email "action@github.com"
     #     git config --local user.name "GitHub Action"
-    #     git add frontend/build/web
+    #     git add classic/frontend/build/web
     #     git checkout -B ${{ env.BUILD_BRANCH }}
     #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
     #     git push -f origin ${{ env.BUILD_BRANCH }}

@@ -51,7 +51,7 @@
       if: github.event_name == 'push'
       uses: peter-evans/create-pull-request@v6
       with:
-        add-paths: frontend/build/web
+        add-paths: classic/frontend/build/web
         base: ${{ github.ref_name }}
         branch: ${{ env.BUILD_BRANCH }}
         delete-branch: true

@@ -1,24 +1,24 @@
-name: Python checks
+name: Classic - Python checks

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
       - '.github/workflows/lint-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
       - '**.py'
-      - '!forge/tests/vcr_cassettes'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
       - '.github/workflows/lint-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
       - '**.py'
-      - '!forge/tests/vcr_cassettes'
+      - '!classic/forge/tests/vcr_cassettes'

 concurrency:
   group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -40,18 +40,18 @@ jobs:
       uses: dorny/paths-filter@v3
       with:
         filters: |
-          autogpt:
-            - autogpt/autogpt/**
-            - autogpt/tests/**
-            - autogpt/poetry.lock
+          original_autogpt:
+            - classic/original_autogpt/autogpt/**
+            - classic/original_autogpt/tests/**
+            - classic/original_autogpt/poetry.lock
           forge:
-            - forge/forge/**
-            - forge/tests/**
-            - forge/poetry.lock
+            - classic/forge/forge/**
+            - classic/forge/tests/**
+            - classic/forge/poetry.lock
           benchmark:
-            - benchmark/agbenchmark/**
-            - benchmark/tests/**
-            - benchmark/poetry.lock
+            - classic/benchmark/agbenchmark/**
+            - classic/benchmark/tests/**
+            - classic/benchmark/poetry.lock
   outputs:
     changed-parts: ${{ steps.changes-in.outputs.changes }}

@@ -89,23 +89,23 @@ jobs:
     # Install dependencies
     - name: Install Python dependencies
-      run: poetry -C ${{ matrix.sub-package }} install
+      run: poetry -C classic/${{ matrix.sub-package }} install

     # Lint
     - name: Lint (isort)
       run: poetry run isort --check .
-      working-directory: ${{ matrix.sub-package }}
+      working-directory: classic/${{ matrix.sub-package }}
     - name: Lint (Black)
       if: success() || failure()
       run: poetry run black --check .
-      working-directory: ${{ matrix.sub-package }}
+      working-directory: classic/${{ matrix.sub-package }}
     - name: Lint (Flake8)
       if: success() || failure()
       run: poetry run flake8 .
-      working-directory: ${{ matrix.sub-package }}
+      working-directory: classic/${{ matrix.sub-package }}

   types:
     needs: get-changed-parts

@@ -141,11 +141,11 @@ jobs:
     # Install dependencies
     - name: Install Python dependencies
-      run: poetry -C ${{ matrix.sub-package }} install
+      run: poetry -C classic/${{ matrix.sub-package }} install

     # Typecheck
     - name: Typecheck
       if: success() || failure()
       run: poetry run pyright
-      working-directory: ${{ matrix.sub-package }}
+      working-directory: classic/${{ matrix.sub-package }}


@@ -1,133 +0,0 @@
name: Hackathon
on:
workflow_dispatch:
inputs:
agents:
description: "Agents to run (comma-separated)"
required: false
default: "autogpt" # Default agents if none are specified
jobs:
matrix-setup:
runs-on: ubuntu-latest
# Service containers to run with `matrix-setup`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env-name: ${{ steps.set-matrix.outputs.env-name }}
steps:
- id: set-matrix
run: |
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::[ 'irrelevant']"
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::$matrix_string"
else
echo "::set-output name=env-name::testing"
echo "::set-output name=matrix::[ 'irrelevant' ]"
fi
tests:
environment:
name: "${{ needs.matrix-setup.outputs.env-name }}"
needs: matrix-setup
env:
min-python-version: "3.10"
name: "${{ matrix.agent-name }}"
runs-on: ubuntu-latest
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
timeout-minutes: 50
strategy:
fail-fast: false
matrix:
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
steps:
- name: Print Environment Name
run: |
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
- name: Check Docker Container
id: check
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: v18.15
- name: Run benchmark
run: |
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
AGENT_NAME: ${{ matrix.agent-name }}

@@ -1,20 +1,20 @@
-name: AutoGPT Builder CI
+name: Platform - AutoGPT Builder CI

 on:
   push:
     branches: [ master ]
     paths:
       - '.github/workflows/autogpt-builder-ci.yml'
-      - 'rnd/autogpt_builder/**'
+      - 'autogpt_platform/autogpt_builder/**'
   pull_request:
     paths:
       - '.github/workflows/autogpt-builder-ci.yml'
-      - 'rnd/autogpt_builder/**'
+      - 'autogpt_platform/autogpt_builder/**'

 defaults:
   run:
     shell: bash
-    working-directory: rnd/autogpt_builder
+    working-directory: autogpt_platform/autogpt_builder

 jobs:

@@ -1,20 +1,20 @@
-name: AutoGPT Builder Infra
+name: Platform - AutoGPT Builder Infra

 on:
   push:
     branches: [ master ]
     paths:
       - '.github/workflows/autogpt-infra-ci.yml'
-      - 'rnd/infra/**'
+      - 'autogpt_platform/infra/**'
   pull_request:
     paths:
       - '.github/workflows/autogpt-infra-ci.yml'
-      - 'rnd/infra/**'
+      - 'autogpt_platform/infra/**'

 defaults:
   run:
     shell: bash
-    working-directory: rnd/infra
+    working-directory: autogpt_platform/infra

 jobs:
   lint:

@@ -1,16 +1,16 @@
-name: AutoGPT Server CI
+name: Platform - AutoGPT Server CI

 on:
   push:
     branches: [master, development, ci-test*]
     paths:
       - ".github/workflows/autogpt-server-ci.yml"
-      - "rnd/autogpt_server/**"
+      - "autogpt_platform/autogpt_server/**"
   pull_request:
     branches: [master, development, release-*]
     paths:
       - ".github/workflows/autogpt-server-ci.yml"
-      - "rnd/autogpt_server/**"
+      - "autogpt_platform/autogpt_server/**"

 concurrency:
   group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -19,7 +19,7 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: rnd/autogpt_server
+    working-directory: autogpt_platform/autogpt_server

 jobs:
   test:

@@ -90,7 +90,7 @@ jobs:
       uses: actions/cache@v4
       with:
         path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-        key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
+        key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/autogpt_server/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'

@@ -1,4 +1,4 @@
-name: 'Close stale issues'
+name: Repo - Close stale issues

 on:
   schedule:
     - cron: '30 1 * * *'

@@ -1,12 +1,12 @@
-name: "Pull Request auto-label"
+name: Repo - Pull Request auto-label

 on:
   # So that PRs touching the same files as the push are updated
   push:
     branches: [ master, development, release-* ]
     paths-ignore:
-      - 'forge/tests/vcr_cassettes'
-      - 'benchmark/reports/**'
+      - 'classic/forge/tests/vcr_cassettes'
+      - 'classic/benchmark/reports/**'
   # So that the `dirtyLabel` is removed if conflicts are resolve
   # We recommend `pull_request_target` so that github secrets are available.
   # In `pull_request` we wouldn't be able to change labels of fork PRs

@@ -1,4 +1,4 @@
-name: github-repo-stats
+name: Repo - Github Stats

 on:
   schedule:

@@ -1,4 +1,4 @@
-name: PR Status Checker
+name: Repo - PR Status Checker

 on:
   pull_request:
     types: [opened, synchronize, reopened]

@@ -26,6 +26,6 @@ jobs:
         echo "Current directory before running Python script:"
         pwd
         echo "Attempting to run Python script:"
-        python check_actions_status.py
+        python .github/workflows/scripts/check_actions_status.py
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (8 changes)

@@ -1,7 +1,7 @@
 ## Original ignores
 .github_access_token
-autogpt/keys.py
-autogpt/*.json
+classic/original_autogpt/keys.py
+classic/original_autogpt/*.json
 auto_gpt_workspace/*
 *.mpeg
 .env

@@ -157,7 +157,7 @@ openai/
 CURRENT_BULLETIN.md

 # AgBenchmark
-agbenchmark/reports/
+classic/benchmark/agbenchmark/reports/

 # Nodejs
 package-lock.json

@@ -170,4 +170,4 @@ pri*
 ig*
 .github_access_token
 LICENSE.rtf
-rnd/autogpt_server/settings.py
+autogpt_platform/autogpt_server/settings.py

.gitmodules (7 changes)

@@ -1,3 +1,6 @@
-[submodule "forge/tests/vcr_cassettes"]
-    path = forge/tests/vcr_cassettes
+[submodule "classic/forge/tests/vcr_cassettes"]
+    path = classic/forge/tests/vcr_cassettes
     url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
+[submodule "autogpt_platform/supabase"]
+    path = autogpt_platform/supabase
+    url = https://github.com/supabase/supabase.git
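Because this hunk both moves the vcr_cassettes submodule and adds the supabase one, existing clones need their submodule state refreshed; a minimal sketch:

```bash
# Fresh clone: fetch all submodules up front
git clone --recurse-submodules https://github.com/Significant-Gravitas/AutoGPT.git

# Existing checkout: re-read .gitmodules (renamed path), then init the new submodule
git submodule sync --recursive
git submodule update --init --recursive
```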

.pre-commit-config.yaml

@@ -16,22 +16,22 @@ repos:
     hooks:
       - id: isort-autogpt
         name: Lint (isort) - AutoGPT
-        entry: poetry -C autogpt run isort
-        files: ^autogpt/
+        entry: poetry -C classic/original_autogpt run isort
+        files: ^classic/original_autogpt/
         types: [file, python]
         language: system

       - id: isort-forge
         name: Lint (isort) - Forge
-        entry: poetry -C forge run isort
-        files: ^forge/
+        entry: poetry -C classic/forge run isort
+        files: ^classic/forge/
         types: [file, python]
         language: system

       - id: isort-benchmark
         name: Lint (isort) - Benchmark
-        entry: poetry -C benchmark run isort
-        files: ^benchmark/
+        entry: poetry -C classic/benchmark run isort
+        files: ^classic/benchmark/
         types: [file, python]
         language: system

@@ -52,20 +52,20 @@ repos:
       - id: flake8
         name: Lint (Flake8) - AutoGPT
         alias: flake8-autogpt
-        files: ^autogpt/(autogpt|scripts|tests)/
-        args: [--config=autogpt/.flake8]
+        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
+        args: [--config=classic/original_autogpt/.flake8]

       - id: flake8
         name: Lint (Flake8) - Forge
         alias: flake8-forge
-        files: ^forge/(forge|tests)/
-        args: [--config=forge/.flake8]
+        files: ^classic/forge/(forge|tests)/
+        args: [--config=classic/forge/.flake8]

       - id: flake8
         name: Lint (Flake8) - Benchmark
         alias: flake8-benchmark
-        files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
-        args: [--config=benchmark/.flake8]
+        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
+        args: [--config=classic/benchmark/.flake8]

   - repo: local
     # To have watertight type checking, we check *all* the files in an affected

@@ -74,10 +74,10 @@ repos:
       - id: pyright
         name: Typecheck - AutoGPT
         alias: pyright-autogpt
-        entry: poetry -C autogpt run pyright
+        entry: poetry -C classic/original_autogpt run pyright
         args: [-p, autogpt, autogpt]
         # include forge source (since it's a path dependency) but exclude *_test.py files:
-        files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
         types: [file]
         language: system
         pass_filenames: false

@@ -85,9 +85,9 @@ repos:
       - id: pyright
         name: Typecheck - Forge
         alias: pyright-forge
-        entry: poetry -C forge run pyright
+        entry: poetry -C classic/forge run pyright
         args: [-p, forge, forge]
-        files: ^forge/(forge/|poetry\.lock$)
+        files: ^classic/forge/(classic/forge/|poetry\.lock$)
         types: [file]
         language: system
         pass_filenames: false

@@ -95,9 +95,9 @@ repos:
       - id: pyright
         name: Typecheck - Benchmark
         alias: pyright-benchmark
-        entry: poetry -C benchmark run pyright
+        entry: poetry -C classic/benchmark run pyright
         args: [-p, benchmark, benchmark]
-        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
+        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
         types: [file]
         language: system
         pass_filenames: false

@@ -106,22 +106,22 @@ repos:
     hooks:
       - id: pytest-autogpt
         name: Run tests - AutoGPT (excl. slow tests)
-        entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
+        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
         # include forge source (since it's a path dependency) but exclude *_test.py files:
-        files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
         language: system
         pass_filenames: false

       - id: pytest-forge
         name: Run tests - Forge (excl. slow tests)
-        entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
-        files: ^forge/(forge/|tests/|poetry\.lock$)
+        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
+        files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
         language: system
         pass_filenames: false

       - id: pytest-benchmark
         name: Run tests - Benchmark
-        entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
-        files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
+        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
+        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
         language: system
         pass_filenames: false
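With every `entry`, `files`, and `args` pattern rewritten for the classic/ prefix, a one-off full run is a cheap way to confirm the hooks still match anything; a sketch using the aliases defined above:

```bash
# Run the whole suite against the tree, not just staged files
pre-commit run --all-files

# Or exercise a single rewritten hook by its alias
pre-commit run flake8-forge --all-files
pre-commit run pyright-benchmark --all-files
```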

@@ -1,49 +1,49 @@
 {
   "folders": [
     {
-      "name": "autogpt",
-      "path": "../autogpt"
+      "name": "autogpt_server",
+      "path": "../autogpt_platform/autogpt_server"
     },
     {
-      "name": "benchmark",
-      "path": "../benchmark"
+      "name": "autogpt_builder",
+      "path": "../autogpt_platform/autogpt_builder"
     },
     {
+      "name": "market",
+      "path": "../autogpt_platform/market"
+    },
+    {
+      "name": "lib",
+      "path": "../autogpt_platform/autogpt_libs"
+    },
+    {
+      "name": "infra",
+      "path": "../autogpt_platform/infra"
+    },
+    {
       "name": "docs",
       "path": "../docs"
     },
     {
-      "name": "forge",
-      "path": "../forge"
-    },
-    {
-      "name": "frontend",
-      "path": "../frontend"
-    },
-    {
-      "name": "autogpt_server",
-      "path": "../rnd/autogpt_server"
-    },
-    {
-      "name": "autogpt_builder",
-      "path": "../rnd/autogpt_builder"
-    },
-    {
-      "name": "market",
-      "path": "../rnd/market"
-    },
-    {
-      "name": "lib",
-      "path": "../rnd/autogpt_libs"
-    },
-    {
-      "name": "infra",
-      "path": "../rnd/infra"
-    },
-    {
       "name": "[root]",
       "path": ".."
-    }
+    },
+    {
+      "name": "classic - autogpt",
+      "path": "../classic/original_autogpt"
+    },
+    {
+      "name": "classic - benchmark",
+      "path": "../classic/benchmark"
+    },
+    {
+      "name": "classic - forge",
+      "path": "../classic/forge"
+    },
+    {
+      "name": "classic - frontend",
+      "path": "../classic/frontend"
+    },
   ],
   "settings": {
     "python.analysis.typeCheckingMode": "basic"

README.md

@@ -55,15 +55,16 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
 ## 🤖 AutoGPT Classic
 > Below is information about the classic version of AutoGPT.

-**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
+**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**

 ### 🏗️ Forge

-**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
+**Forge your own agent!** &ndash; Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.

-🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) &ndash;
+🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) &ndash;
 This guide will walk you through the process of creating your own agent and using the benchmark and user interface.

-📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge

 ### 🎯 Benchmark

@@ -83,7 +84,7 @@ This guide will walk you through the process of creating your own agent and usin
 The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!

-📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend

 ### ⌨️ CLI


@@ -1,3 +0,0 @@
{
"python.analysis.typeCheckingMode": "basic",
}

autogpt_platform/README.md (new file, 133 lines)

@@ -0,0 +1,133 @@
# AutoGPT Platform
Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.
## Getting Started
### Prerequisites
- Docker
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)
### Running the System
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine.
2. Navigate to autogpt_platform/supabase
3. Run the following command:
```
git submodule update --init --recursive
```
4. Navigate back to rnd (cd ..)
5. Run the following command:
```
cp supabase/docker/.env.example .env
```
6. Run the following command:
```
docker compose -f docker-compose.combined.yml up -d
```
This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode.
7. Navigate to autogpt_platform/autogpt_builder.
8. Run the following command:
```
cp .env.example .env.local
```
9. Run the following command:
```
yarn dev
```
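Condensed, the nine steps above amount to roughly this shell session (a sketch: the submodule update is run from the repository root rather than inside autogpt_platform/supabase, and step 4's "rnd" refers to the directory now named autogpt_platform):

```bash
git clone https://github.com/Significant-Gravitas/AutoGPT.git
cd AutoGPT
git submodule update --init --recursive               # pulls autogpt_platform/supabase

cd autogpt_platform
cp supabase/docker/.env.example .env
docker compose -f docker-compose.combined.yml up -d   # backend services, detached

cd autogpt_builder
cp .env.example .env.local
yarn dev                                              # frontend dev server
```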
### Docker Compose Commands
Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose -f docker-compose.combined.yml up -d`: Start the services in detached mode.
- `docker compose -f docker-compose.combined.yml stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
```
docker compose ps
```
This shows the current status of all services defined in your docker-compose.yml file.
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
1. Open the `docker-compose.yml` file in a text editor.
2. Add volume configurations for PostgreSQL and Redis services:
```yaml
services:
postgres:
# ... other configurations ...
volumes:
- postgres_data:/var/lib/postgresql/data
redis:
# ... other configurations ...
volumes:
- redis_data:/data
volumes:
postgres_data:
redis_data:
```
3. Save the file and run `docker compose up -d` to apply the changes.
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
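To check that the change took, restart the stack and confirm the named volumes outlive the containers; a sketch (Compose prefixes volume names with the project name, so the exact names will vary):

```bash
docker compose up -d
docker volume ls | grep -E '(postgres|redis)_data'   # named volumes exist

docker compose down      # removes containers and networks but keeps named volumes
docker compose up -d     # PostgreSQL and Redis come back with their data intact
```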


@@ -0,0 +1,15 @@
NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback
NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
## Supabase credentials
NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
## OAuth Callback URL
## This should be {domain}/auth/callback
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL=http://localhost:3000/auth/callback
GA_MEASUREMENT_ID=G-FH2XK2W4GN
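A quick smoke test that these values point at a running local stack; a sketch (the `/auth/v1/health` route is the usual GoTrue health check behind Supabase's Kong gateway, which is an assumption here, not something this diff shows):

```bash
# Export the example values, then probe the Supabase gateway and the AGPT server
set -a; source .env.local; set +a

curl -s "$NEXT_PUBLIC_SUPABASE_URL/auth/v1/health" \
  -H "apikey: $NEXT_PUBLIC_SUPABASE_ANON_KEY"

curl -s "$NEXT_PUBLIC_AGPT_SERVER_URL"               # REST API on port 8006
```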

@@ -34,3 +34,6 @@ yarn-error.log*
 # typescript
 *.tsbuildinfo
 next-env.d.ts
+
+# Sentry Config File
+.env.sentry-build-plugin

@@ -1,19 +1,19 @@
 # Base stage for both dev and prod
 FROM node:21-alpine AS base
 WORKDIR /app
-COPY autogpt_builder/package.json autogpt_builder/yarn.lock ./
+COPY autogpt_platform/autogpt_builder/package.json autogpt_platform/autogpt_builder/yarn.lock ./
 RUN yarn install --frozen-lockfile

 # Dev stage
 FROM base AS dev
 ENV NODE_ENV=development
-COPY autogpt_builder/ .
+COPY autogpt_platform/autogpt_builder/ .
 EXPOSE 3000
-CMD ["npm", "run", "dev"]
+CMD ["yarn", "run", "dev"]

 # Build stage for prod
 FROM base AS build
-COPY autogpt_builder/ .
+COPY autogpt_platform/autogpt_builder/ .
 RUN npm run build

 # Prod stage

@@ -0,0 +1,84 @@
import { withSentryConfig } from "@sentry/nextjs";
import dotenv from "dotenv";
// Load environment variables
dotenv.config();
/** @type {import('next').NextConfig} */
const nextConfig = {
env: {
NEXT_PUBLIC_AGPT_SERVER_URL: process.env.NEXT_PUBLIC_AGPT_SERVER_URL,
NEXT_PUBLIC_AGPT_MARKETPLACE_URL:
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL,
},
images: {
domains: ["images.unsplash.com"],
},
async redirects() {
return [
{
source: "/monitor", // FIXME: Remove after 2024-09-01
destination: "/",
permanent: false,
},
];
},
// TODO: Re-enable TypeScript checks once current issues are resolved
typescript: {
ignoreBuildErrors: true,
},
};
export default withSentryConfig(nextConfig, {
// For all available options, see:
// https://github.com/getsentry/sentry-webpack-plugin#options
org: "significant-gravitas",
project: "builder",
// Only print logs for uploading source maps in CI
silent: !process.env.CI,
// For all available options, see:
// https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
// Upload a larger set of source maps for prettier stack traces (increases build time)
widenClientFileUpload: true,
// Automatically annotate React components to show their full name in breadcrumbs and session replay
reactComponentAnnotation: {
enabled: true,
},
// Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
// This can increase your server load as well as your hosting bill.
// Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
// side errors will fail.
tunnelRoute: "/monitoring",
// Hides source maps from generated client bundles
hideSourceMaps: true,
// Automatically tree-shake Sentry logger statements to reduce bundle size
disableLogger: true,
// Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.)
// See the following for more information:
// https://docs.sentry.io/product/crons/
// https://vercel.com/docs/cron-jobs
automaticVercelMonitors: true,
async headers() {
return [
{
source: "/:path*",
headers: [
{
key: "Document-Policy",
value: "js-profiling",
},
],
},
];
},
});
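
As the note on tunnelRoute above warns, the configured route must not be matched by the app's middleware. One way to guard against that, sketched here only since the project's actual middleware is not shown in this diff, is to exclude the route in the middleware matcher:

// middleware.ts (hypothetical; adjust to the project's real middleware, if any)
export const config = {
  // Skip the Sentry tunnel route and Next.js internals so client error reports pass through
  matcher: ["/((?!monitoring|_next/static|_next/image|favicon.ico).*)"],
};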

View File

@@ -27,6 +27,7 @@
"@radix-ui/react-switch": "^1.1.0",
"@radix-ui/react-toast": "^1.2.1",
"@radix-ui/react-tooltip": "^1.1.2",
"@sentry/nextjs": "^8",
"@supabase/ssr": "^0.4.0",
"@supabase/supabase-js": "^2.45.0",
"@tanstack/react-table": "^8.20.5",

View File

Binary image file; size 29 KiB before and after.

View File

Binary image file; size 28 KiB before and after.

View File

Binary image file; size 15 KiB before and after.

View File

@@ -0,0 +1,57 @@
// This file configures the initialization of Sentry on the client.
// The config you add here will be used whenever a user loads a page in their browser.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
// Add optional integrations for additional features
integrations: [
Sentry.replayIntegration(),
Sentry.httpClientIntegration(),
Sentry.replayCanvasIntegration(),
Sentry.reportingObserverIntegration(),
Sentry.browserProfilingIntegration(),
// Sentry.feedbackIntegration({
// // Additional SDK configuration goes in here, for example:
// colorScheme: "system",
// }),
],
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled
tracePropagationTargets: [
"localhost",
/^https:\/\/dev\-builder\.agpt\.co\/api/,
],
beforeSend(event, hint) {
// Check if it is an exception, and if so, show the report dialog
if (event.exception && event.event_id) {
Sentry.showReportDialog({ eventId: event.event_id });
}
return event;
},
// Define how likely Replay events are sampled.
// This sets the sample rate to be 10%. You may want this to be 100% while
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// Define how likely Replay events are sampled when an error occurs.
replaysOnErrorSampleRate: 1.0,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Set profilesSampleRate to 1.0 to profile every transaction.
// Since profilesSampleRate is relative to tracesSampleRate,
// the final profiling rate can be computed as tracesSampleRate * profilesSampleRate
// For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would
// result in 25% of transactions being profiled (0.5*0.5=0.25)
profilesSampleRate: 1.0,
});

View File

@@ -0,0 +1,16 @@
// This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on).
// The config you add here will be used whenever one of the edge features is loaded.
// Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import * as Sentry from "@sentry/nextjs";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
});

View File

@@ -0,0 +1,23 @@
// This file configures the initialization of Sentry on the server.
// The config you add here will be used whenever the server handles a request.
// https://docs.sentry.io/platforms/javascript/guides/nextjs/
import * as Sentry from "@sentry/nextjs";
// import { NodeProfilingIntegration } from "@sentry/profiling-node";
Sentry.init({
dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288",
// Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control.
tracesSampleRate: 1,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
// Integrations
integrations: [
Sentry.anrIntegration(),
// NodeProfilingIntegration,
// Sentry.fsIntegration(),
],
});

View File

@@ -0,0 +1,27 @@
"use client";
import * as Sentry from "@sentry/nextjs";
import NextError from "next/error";
import { useEffect } from "react";
export default function GlobalError({
error,
}: {
error: Error & { digest?: string };
}) {
useEffect(() => {
Sentry.captureException(error);
}, [error]);
return (
<html>
<body>
{/* `NextError` is the default Next.js error page component. Its type
definition requires a `statusCode` prop. However, since the App Router
does not expose status codes for errors, we simply pass 0 to render a
generic error message. */}
<NextError statusCode={0} />
</body>
</html>
);
}

View File

@@ -0,0 +1,64 @@
"use server";
import { revalidatePath } from "next/cache";
import { redirect } from "next/navigation";
import { createServerClient } from "@/lib/supabase/server";
import { z } from "zod";
import * as Sentry from "@sentry/nextjs";
const loginFormSchema = z.object({
email: z.string().email().min(2).max(64),
password: z.string().min(6).max(64),
});
export async function login(values: z.infer<typeof loginFormSchema>) {
return await Sentry.withServerActionInstrumentation("login", {}, async () => {
const supabase = createServerClient();
if (!supabase) {
redirect("/error");
}
// We are sure that the values are of the correct type because zod validates the form
const { data, error } = await supabase.auth.signInWithPassword(values);
if (error) {
return error.message;
}
if (data.session) {
await supabase.auth.setSession(data.session);
}
revalidatePath("/", "layout");
redirect("/profile");
});
}
export async function signup(values: z.infer<typeof loginFormSchema>) {
"use server";
return await Sentry.withServerActionInstrumentation(
"signup",
{},
async () => {
const supabase = createServerClient();
if (!supabase) {
redirect("/error");
}
// We are sure that the values are of the correct type because zod validates the form
const { data, error } = await supabase.auth.signUp(values);
if (error) {
return error.message;
}
if (data.session) {
await supabase.auth.setSession(data.session);
}
revalidatePath("/", "layout");
redirect("/profile");
},
);
}
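
Note that loginFormSchema is defined but never invoked in these actions; the "zod validates the form" comments refer to client-side form validation. If server-side validation were wanted as defense in depth, a minimal sketch (an assumption, not part of this diff) would be:

// Re-validate on the server before hitting Supabase (hypothetical addition)
const parsed = loginFormSchema.safeParse(values);
if (!parsed.success) {
  // Return the first validation message, mirroring how auth errors are returned above
  return parsed.error.issues[0].message;
}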

View File

@@ -7,7 +7,7 @@ import AgentDetailContent from "@/components/marketplace/AgentDetailContent";
async function getAgentDetails(id: string): Promise<AgentDetailResponse> {
const apiUrl =
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
"http://localhost:8001/api/v1/market";
"http://localhost:8015/api/v1/market";
const api = new MarketplaceAPI(apiUrl);
try {
console.log(`Fetching agent details for id: ${id}`);

View File

@@ -185,7 +185,7 @@ const Pagination: React.FC<{
const Marketplace: React.FC = () => {
const apiUrl =
process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL ||
"http://localhost:8001/api/v1/market";
"http://localhost:8015/api/v1/market";
const api = useMemo(() => new MarketplaceAPI(apiUrl), [apiUrl]);
const [searchValue, setSearchValue] = useState("");

View File

@@ -0,0 +1,32 @@
"use client";
import { useState, useEffect } from "react";
import { Button } from "@/components/ui/button";
import { IconRefresh } from "@/components/ui/icons";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
export default function CreditButton() {
const [credit, setCredit] = useState<number | null>(null);
const api = new AutoGPTServerAPI();
const fetchCredit = async () => {
const response = await api.getUserCredit();
setCredit(response.credits);
};
useEffect(() => {
fetchCredit();
}, [api]);
return (
credit !== null && (
<Button
onClick={fetchCredit}
variant="outline"
className="flex items-center space-x-2 text-muted-foreground"
>
<span>Credits: {credit}</span>
<IconRefresh />
</Button>
)
);
}
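
One caveat in CreditButton: new AutoGPTServerAPI() runs on every render, and since api is in the effect's dependency array, the effect re-runs on each render. A minimal fix, assuming the constructor takes no per-render arguments, is to memoize the client (with useMemo added to the react import):

// Sketch: stabilize the client instance so the effect runs once on mount
const api = useMemo(() => new AutoGPTServerAPI(), []);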

View File

@@ -12,8 +12,11 @@ import InputModalComponent from "./InputModalComponent";
import OutputModalComponent from "./OutputModalComponent";
import {
BlockIORootSchema,
BlockIOStringSubSchema,
Category,
NodeExecutionResult,
BlockUIType,
BlockCost,
} from "@/lib/autogpt-server-api/types";
import { beautifyString, cn, setNestedProperty } from "@/lib/utils";
import { Button } from "@/components/ui/button";
@@ -21,7 +24,10 @@ import { Switch } from "@/components/ui/switch";
import { Copy, Trash2 } from "lucide-react";
import { history } from "./history";
import NodeHandle from "./NodeHandle";
import { NodeGenericInputField } from "./node-input-components";
import {
NodeGenericInputField,
NodeTextBoxInput,
} from "./node-input-components";
import SchemaTooltip from "./SchemaTooltip";
import { getPrimaryCategoryColor } from "@/lib/utils";
import { FlowContext } from "./Flow";
@@ -40,6 +46,7 @@ export type ConnectionData = Array<{
export type CustomNodeData = {
blockType: string;
blockCosts: BlockCost[];
title: string;
description: string;
categories: Category[];
@@ -59,6 +66,7 @@ export type CustomNodeData = {
backend_id?: string;
errors?: { [key: string]: string };
isOutputStatic?: boolean;
uiType: BlockUIType;
};
export type CustomNode = Node<CustomNodeData, "custom">;
@@ -118,8 +126,16 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
setIsAdvancedOpen(checked);
};
const generateOutputHandles = (schema: BlockIORootSchema) => {
if (!schema?.properties) return null;
const generateOutputHandles = (
schema: BlockIORootSchema,
nodeType: BlockUIType,
) => {
if (
!schema?.properties ||
nodeType === BlockUIType.OUTPUT ||
nodeType === BlockUIType.NOTE
)
return null;
const keys = Object.keys(schema.properties);
return keys.map((key) => (
<div key={key}>
@@ -133,6 +149,137 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
));
};
const generateInputHandles = (
schema: BlockIORootSchema,
nodeType: BlockUIType,
) => {
if (!schema?.properties) return null;
let keys = Object.entries(schema.properties);
switch (nodeType) {
case BlockUIType.INPUT:
// For INPUT blocks, don't include connection handles
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey}>
<span className="text-m green -mb-1 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
<div key={propKey} onMouseOver={() => {}}>
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
</div>
)
);
});
case BlockUIType.NOTE:
// For NOTE blocks, don't render any input handles
const [noteKey, noteSchema] = keys[0];
return (
<div key={noteKey}>
<NodeTextBoxInput
className=""
selfKey={noteKey}
schema={noteSchema as BlockIOStringSubSchema}
value={getValue(noteKey)}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
error={data.errors?.[noteKey] ?? ""}
displayName={noteSchema.title || beautifyString(noteKey)}
/>
</div>
);
case BlockUIType.OUTPUT:
// For OUTPUT blocks, only show the 'value' property
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey} onMouseOver={() => {}}>
{propKey !== "value" ? (
<span className="text-m green -mb-1 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
) : (
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
)}
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
)
);
});
default:
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
<div key={propKey} onMouseOver={() => {}}>
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
)
);
});
}
};
const handleInputChange = (path: string, value: any) => {
const keys = parseKeys(path);
const newValues = JSON.parse(JSON.stringify(data.hardcodedValues));
@@ -376,15 +523,27 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
);
});
const inputValues = data.hardcodedValues;
const blockCost =
data.blockCosts &&
data.blockCosts.find((cost) =>
Object.entries(cost.cost_filter).every(
// Undefined, null, or empty values are considered equal
([key, value]) =>
value === inputValues[key] || (!value && !inputValues[key]),
),
);
console.debug(`Block cost ${inputValues}|${data.blockCosts}=${blockCost}`);
return (
<div
className={`${blockClasses} ${errorClass} ${statusClass}`}
className={`${data.uiType === BlockUIType.NOTE ? "w-[300px]" : "w-[500px]"} ${blockClasses} ${errorClass} ${statusClass} ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : "bg-white"}`}
onMouseEnter={handleHovered}
onMouseLeave={handleMouseLeave}
data-id={`custom-node-${id}`}
>
<div
className={`mb-2 p-3 ${getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
className={`mb-2 p-3 ${data.uiType === BlockUIType.NOTE ? "bg-yellow-100" : getPrimaryCategoryColor(data.categories)} rounded-t-xl`}
>
<div className="flex items-center justify-between">
<div className="font-roboto p-3 text-lg font-semibold">
@@ -417,53 +576,29 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
)}
</div>
</div>
<div className="flex items-start justify-between gap-2 p-3">
{blockCost && (
<div className="p-3 text-right font-semibold">
Cost: {blockCost.cost_amount} / {blockCost.cost_type}
</div>
)}
{data.uiType !== BlockUIType.NOTE ? (
<div className="flex items-start justify-between p-3">
<div>
{data.inputSchema &&
generateInputHandles(data.inputSchema, data.uiType)}
</div>
<div className="flex-none">
{data.outputSchema &&
generateOutputHandles(data.outputSchema, data.uiType)}
</div>
</div>
) : (
<div>
{data.inputSchema &&
Object.entries(data.inputSchema.properties).map(
([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired ||
isAdvancedOpen ||
isConnected ||
!isAdvanced) && (
<div key={propKey} onMouseOver={() => {}}>
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
{!isConnected && (
<NodeGenericInputField
className="mb-2 mt-1"
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={
propSchema.title || beautifyString(propKey)
}
/>
)}
</div>
)
);
},
)}
generateInputHandles(data.inputSchema, data.uiType)}
</div>
<div className="flex-none">
{data.outputSchema && generateOutputHandles(data.outputSchema)}
</div>
</div>
{isOutputOpen && (
)}
{isOutputOpen && data.uiType !== BlockUIType.NOTE && (
<div
data-id="latest-output"
className="nodrag m-3 break-words rounded-md border-[1.5px] p-2"
@@ -486,25 +621,27 @@ export function CustomNode({ data, id, width, height }: NodeProps<CustomNode>) {
)}
</div>
)}
<div className="mt-2.5 flex items-center pb-4 pl-4">
<Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
<span className="m-1 mr-4">Output</span>
{hasAdvancedFields && (
<>
<Switch onCheckedChange={toggleAdvancedSettings} />
<span className="m-1">Advanced</span>
</>
)}
{data.status && (
<Badge
variant="outline"
data-id={`badge-${id}-${data.status}`}
className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
>
{data.status}
</Badge>
)}
</div>
{data.uiType !== BlockUIType.NOTE && (
<div className="mt-2.5 flex items-center pb-4 pl-4">
<Switch checked={isOutputOpen} onCheckedChange={toggleOutput} />
<span className="m-1 mr-4">Output</span>
{hasAdvancedFields && (
<>
<Switch onCheckedChange={toggleAdvancedSettings} />
<span className="m-1">Advanced</span>
</>
)}
{data.status && (
<Badge
variant="outline"
data-id={`badge-${id}-${data.status}`}
className={cn(data.status.toLowerCase(), "ml-auto mr-5")}
>
{data.status}
</Badge>
)}
</div>
)}
<InputModalComponent
title={activeKey ? `Enter ${beautifyString(activeKey)}` : undefined}
isOpen={isModalOpen}
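
The blockCost lookup in this file treats undefined, null, and empty values in cost_filter as interchangeable with missing input values. As a standalone illustration of that predicate (hypothetical values, same logic as the find callback above):

// A filter { model: "gpt-4" } matches inputs { model: "gpt-4", prompt: "" };
// a filter { model: null } also matches when inputs.model is undefined, null, or "".
const matchesCostFilter = (
  filter: Record<string, any>,
  inputs: Record<string, any>,
) =>
  Object.entries(filter).every(
    ([key, value]) => value === inputs[key] || (!value && !inputs[key]),
  );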

View File

@@ -27,7 +27,7 @@ import "@xyflow/react/dist/style.css";
import { CustomNode } from "./CustomNode";
import "./flow.css";
import { Link } from "@/lib/autogpt-server-api";
import { getTypeColor } from "@/lib/utils";
import { getTypeColor, filterBlocksByType } from "@/lib/utils";
import { history } from "./history";
import { CustomEdge } from "./CustomEdge";
import ConnectionLine from "./ConnectionLine";
@@ -36,14 +36,19 @@ import { SaveControl } from "@/components/edit/control/SaveControl";
import { BlocksControl } from "@/components/edit/control/BlocksControl";
import {
IconPlay,
IconUndo2,
IconRedo2,
IconSquare,
IconUndo2,
IconOutput,
} from "@/components/ui/icons";
import { startTutorial } from "./tutorial";
import useAgentGraph from "@/hooks/useAgentGraph";
import { v4 as uuidv4 } from "uuid";
import { useRouter, usePathname, useSearchParams } from "next/navigation";
import { LogOut } from "lucide-react";
import RunnerUIWrapper, {
RunnerUIWrapperRef,
} from "@/components/RunnerUIWrapper";
// This is for the history: the minimum distance a block must move before the move is logged.
// It helps to prevent spamming the history with small movements, especially when pressing on an input in a block.
@@ -101,6 +106,8 @@ const FlowEditor: React.FC<{
// State to control if blocks menu should be pinned open
const [pinBlocksPopover, setPinBlocksPopover] = useState(false);
const runnerUIRef = useRef<RunnerUIWrapperRef>(null);
useEffect(() => {
const params = new URLSearchParams(window.location.search);
@@ -407,6 +414,7 @@ const FlowEditor: React.FC<{
position: viewportCenter, // Set the position to the calculated viewport center
data: {
blockType: nodeType,
blockCosts: nodeSchema.costs,
title: `${nodeType} ${nodeId}`,
description: nodeSchema.description,
categories: nodeSchema.categories,
@@ -417,6 +425,7 @@ const FlowEditor: React.FC<{
isOutputOpen: false,
block_id: blockId,
isOutputStatic: nodeSchema.staticOutput,
uiType: nodeSchema.uiType,
},
};
@@ -549,9 +558,21 @@ const FlowEditor: React.FC<{
onClick: handleRedo,
},
{
label: !isRunning ? "Run" : "Stop",
label: !savedAgent
? "Please save the agent to run"
: !isRunning
? "Run"
: "Stop",
icon: !isRunning ? <IconPlay /> : <IconSquare />,
onClick: !isRunning ? requestSaveAndRun : requestStopRun,
onClick: !isRunning
? () => runnerUIRef.current?.runOrOpenInput()
: requestStopRun,
disabled: !savedAgent,
},
{
label: "Runner Output",
icon: <LogOut size={18} strokeWidth={1.8} />,
onClick: () => runnerUIRef.current?.openRunnerOutput(),
},
];
@@ -587,12 +608,21 @@ const FlowEditor: React.FC<{
<SaveControl
agentMeta={savedAgent}
onSave={(isTemplate) => requestSave(isTemplate ?? false)}
agentDescription={agentDescription}
onDescriptionChange={setAgentDescription}
agentName={agentName}
onNameChange={setAgentName}
/>
</ControlPanel>
</ReactFlow>
</div>
<RunnerUIWrapper
ref={runnerUIRef}
nodes={nodes}
setNodes={setNodes}
isRunning={isRunning}
requestSaveAndRun={requestSaveAndRun}
/>
</FlowContext.Provider>
);
};

View File

@@ -9,9 +9,12 @@ import {
IconCircleUser,
IconMenu,
IconPackage2,
IconRefresh,
IconSquareActivity,
IconWorkFlow,
} from "@/components/ui/icons";
import AutoGPTServerAPI from "@/lib/autogpt-server-api";
import CreditButton from "@/components/CreditButton";
export async function NavBar() {
const isAvailable = Boolean(
@@ -96,6 +99,8 @@ export async function NavBar() {
</a>
</div>
<div className="flex flex-1 items-center justify-end gap-4">
{isAvailable && user && <CreditButton />}
{isAvailable && !user && (
<Link
href="/login"

View File

@@ -0,0 +1,141 @@
import React, {
useState,
useCallback,
forwardRef,
useImperativeHandle,
} from "react";
import RunnerInputUI from "./runner-ui/RunnerInputUI";
import RunnerOutputUI from "./runner-ui/RunnerOutputUI";
import { Node } from "@xyflow/react";
import { filterBlocksByType } from "@/lib/utils";
import { BlockIORootSchema } from "@/lib/autogpt-server-api/types";
interface RunnerUIWrapperProps {
nodes: Node[];
setNodes: React.Dispatch<React.SetStateAction<Node[]>>;
isRunning: boolean;
requestSaveAndRun: () => void;
}
export interface RunnerUIWrapperRef {
openRunnerInput: () => void;
openRunnerOutput: () => void;
runOrOpenInput: () => void;
}
const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
({ nodes, setNodes, isRunning, requestSaveAndRun }, ref) => {
const [isRunnerInputOpen, setIsRunnerInputOpen] = useState(false);
const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);
const getBlockInputsAndOutputs = useCallback(() => {
const inputBlocks = filterBlocksByType(
nodes,
(node) => node.data.block_id === "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
);
const outputBlocks = filterBlocksByType(
nodes,
(node) => node.data.block_id === "363ae599-353e-4804-937e-b2ee3cef3da4",
);
const inputs = inputBlocks.map((node) => ({
id: node.id,
type: "input" as const,
inputSchema: node.data.inputSchema as BlockIORootSchema,
hardcodedValues: {
name: (node.data.hardcodedValues as any).name || "",
description: (node.data.hardcodedValues as any).description || "",
value: (node.data.hardcodedValues as any).value,
placeholder_values:
(node.data.hardcodedValues as any).placeholder_values || [],
limit_to_placeholder_values:
(node.data.hardcodedValues as any).limit_to_placeholder_values ||
false,
},
}));
const outputs = outputBlocks.map((node) => ({
id: node.id,
type: "output" as const,
outputSchema: node.data.outputSchema as BlockIORootSchema,
hardcodedValues: {
name: (node.data.hardcodedValues as any).name || "Output",
description:
(node.data.hardcodedValues as any).description ||
"Output from the agent",
value: (node.data.hardcodedValues as any).value,
},
result: (node.data.executionResults as any)?.at(-1)?.data?.output,
}));
return { inputs, outputs };
}, [nodes]);
const handleInputChange = useCallback(
(nodeId: string, field: string, value: string) => {
setNodes((nds) =>
nds.map((node) => {
if (node.id === nodeId) {
return {
...node,
data: {
...node.data,
hardcodedValues: {
...(node.data.hardcodedValues as any),
[field]: value,
},
},
};
}
return node;
}),
);
},
[setNodes],
);
const openRunnerInput = () => setIsRunnerInputOpen(true);
const openRunnerOutput = () => setIsRunnerOutputOpen(true);
const runOrOpenInput = () => {
const { inputs } = getBlockInputsAndOutputs();
if (inputs.length > 0) {
openRunnerInput();
} else {
requestSaveAndRun();
}
};
useImperativeHandle(ref, () => ({
openRunnerInput,
openRunnerOutput,
runOrOpenInput,
}));
return (
<>
<RunnerInputUI
isOpen={isRunnerInputOpen}
onClose={() => setIsRunnerInputOpen(false)}
blockInputs={getBlockInputsAndOutputs().inputs}
onInputChange={handleInputChange}
onRun={() => {
setIsRunnerInputOpen(false);
requestSaveAndRun();
}}
isRunning={isRunning}
/>
<RunnerOutputUI
isOpen={isRunnerOutputOpen}
onClose={() => setIsRunnerOutputOpen(false)}
blockOutputs={getBlockInputsAndOutputs().outputs}
/>
</>
);
},
);
RunnerUIWrapper.displayName = "RunnerUIWrapper";
export default RunnerUIWrapper;

View File

@@ -9,6 +9,7 @@ import {
import FeaturedAgentsTable from "./FeaturedAgentsTable";
import { AdminAddFeaturedAgentDialog } from "./AdminAddFeaturedAgentDialog";
import { revalidatePath } from "next/cache";
import * as Sentry from "@sentry/nextjs";
export default async function AdminFeaturedAgentsControl({
className,
@@ -55,9 +56,15 @@ export default async function AdminFeaturedAgentsControl({
component: <Button>Remove</Button>,
action: async (rows) => {
"use server";
const all = rows.map((row) => removeFeaturedAgent(row.id));
await Promise.all(all);
revalidatePath("/marketplace");
return await Sentry.withServerActionInstrumentation(
"removeFeaturedAgent",
{},
async () => {
const all = rows.map((row) => removeFeaturedAgent(row.id));
await Promise.all(all);
revalidatePath("/marketplace");
},
);
},
},
]}

View File

@@ -0,0 +1,141 @@
"use server";
import MarketplaceAPI from "@/lib/marketplace-api";
import { revalidatePath } from "next/cache";
import * as Sentry from "@sentry/nextjs";
export async function approveAgent(
agentId: string,
version: number,
comment: string,
) {
return await Sentry.withServerActionInstrumentation(
"approveAgent",
{},
async () => {
const api = new MarketplaceAPI();
await api.approveAgentSubmission(agentId, version, comment);
console.debug(`Approving agent ${agentId}`);
revalidatePath("/marketplace");
},
);
}
export async function rejectAgent(
agentId: string,
version: number,
comment: string,
) {
return await Sentry.withServerActionInstrumentation(
"rejectAgent",
{},
async () => {
const api = new MarketplaceAPI();
await api.rejectAgentSubmission(agentId, version, comment);
console.debug(`Rejecting agent ${agentId}`);
revalidatePath("/marketplace");
},
);
}
export async function getReviewableAgents() {
return await Sentry.withServerActionInstrumentation(
"getReviewableAgents",
{},
async () => {
const api = new MarketplaceAPI();
return api.getAgentSubmissions();
},
);
}
export async function getFeaturedAgents(
page: number = 1,
pageSize: number = 10,
) {
return await Sentry.withServerActionInstrumentation(
"getFeaturedAgents",
{},
async () => {
const api = new MarketplaceAPI();
const featured = await api.getFeaturedAgents(page, pageSize);
console.debug(`Getting featured agents ${featured.agents.length}`);
return featured;
},
);
}
export async function getFeaturedAgent(agentId: string) {
return await Sentry.withServerActionInstrumentation(
"getFeaturedAgent",
{},
async () => {
const api = new MarketplaceAPI();
const featured = await api.getFeaturedAgent(agentId);
console.debug(`Getting featured agent ${featured.agentId}`);
return featured;
},
);
}
export async function addFeaturedAgent(
agentId: string,
categories: string[] = ["featured"],
) {
return await Sentry.withServerActionInstrumentation(
"addFeaturedAgent",
{},
async () => {
const api = new MarketplaceAPI();
await api.addFeaturedAgent(agentId, categories);
console.debug(`Adding featured agent ${agentId}`);
revalidatePath("/marketplace");
},
);
}
export async function removeFeaturedAgent(
agentId: string,
categories: string[] = ["featured"],
) {
return await Sentry.withServerActionInstrumentation(
"removeFeaturedAgent",
{},
async () => {
const api = new MarketplaceAPI();
await api.removeFeaturedAgent(agentId, categories);
console.debug(`Removing featured agent ${agentId}`);
revalidatePath("/marketplace");
},
);
}
export async function getCategories() {
return await Sentry.withServerActionInstrumentation(
"getCategories",
{},
async () => {
const api = new MarketplaceAPI();
const categories = await api.getCategories();
console.debug(
`Getting categories ${categories.unique_categories.length}`,
);
return categories;
},
);
}
export async function getNotFeaturedAgents(
page: number = 1,
pageSize: number = 100,
) {
return await Sentry.withServerActionInstrumentation(
"getNotFeaturedAgents",
{},
async () => {
const api = new MarketplaceAPI();
const agents = await api.getNotFeaturedAgents(page, pageSize);
console.debug(`Getting not featured agents ${agents.agents.length}`);
return agents;
},
);
}

View File

@@ -1,6 +1,5 @@
.custom-node {
color: #000000;
width: 500px;
box-sizing: border-box;
transition: border-color 0.3s ease-in-out;
}

View File

@@ -19,6 +19,7 @@ import React from "react";
export type Control = {
icon: React.ReactNode;
label: string;
disabled?: boolean;
onClick: () => void;
};
@@ -50,15 +51,18 @@ export const ControlPanel = ({
{controls.map((control, index) => (
<Tooltip key={index} delayDuration={500}>
<TooltipTrigger asChild>
<Button
variant="ghost"
size="icon"
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
>
{control.icon}
<span className="sr-only">{control.label}</span>
</Button>
<div>
<Button
variant="ghost"
size="icon"
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
disabled={control.disabled || false}
>
{control.icon}
<span className="sr-only">{control.label}</span>
</Button>
</div>
</TooltipTrigger>
<TooltipContent side="right">{control.label}</TooltipContent>
</Tooltip>

Some files were not shown because too many files have changed in this diff.