Compare commits

...

42 Commits

Author                SHA1        Message  Date
SwiftyOS              6c436d6137  add default keyords  2024-09-20 13:55:52 +02:00
SwiftyOS              0146a01595  fix dockerfile  2024-09-20 13:39:47 +02:00
SwiftyOS              7e3b40cef3  Update docker file formatting  2024-09-20 13:37:42 +02:00
SwiftyOS              6afd670e9d  Update docker file formatting  2024-09-20 13:36:44 +02:00
Reinier van der Leer  6ae6c711b7  fix Dockerfile.autogpt  2024-09-20 13:32:49 +02:00
Reinier van der Leer  c8f55bc518  fix .pre-commit-config.yml  2024-09-20 13:27:50 +02:00
Reinier van der Leer  df2126c1a8  fix Dockerfile.autogpt  2024-09-20 13:23:15 +02:00
Reinier van der Leer  dfcfd003df  fix original_autogpt path in classic/cli.py  2024-09-20 13:21:36 +02:00
Reinier van der Leer  4e33399d31  fix code workspace file (vol. 2)  2024-09-20 13:11:05 +02:00
Reinier van der Leer  369b1d9023  fix code workspace file  2024-09-20 13:10:00 +02:00
Reinier van der Leer  241f21ab5f  fix classic-autogpts-ci.yml  2024-09-20 13:04:36 +02:00
Reinier van der Leer  7551782cd1  fix classic-autogpts-ci.yml  2024-09-20 13:00:57 +02:00
Reinier van der Leer  430835e539  fix classic-forge-ci.yml  2024-09-20 12:59:12 +02:00
Reinier van der Leer  f5040fa3ab  fix classic-benchmark-ci.yml  2024-09-20 12:58:59 +02:00
Reinier van der Leer  6ced85d203  fix classic docker CI workflows  2024-09-20 12:53:12 +02:00
Reinier van der Leer  5e1a3d5717  fix classic-autogpts-ci.yml  2024-09-20 12:50:05 +02:00
Reinier van der Leer  d35b91cde4  delete Classic AutoGPTs Nightly Benchmark  2024-09-20 12:44:27 +02:00
Reinier van der Leer  aeab5aac67  unbreak Classic AutoGPT Docker Release workflow  2024-09-20 12:42:20 +02:00
Reinier van der Leer  31cd6dc652  move back .pre-commit-config.yaml  2024-09-20 12:39:01 +02:00
Reinier van der Leer  13b82c86f5  unbreak Classic AutoGPT CI workflows  2024-09-20 12:37:56 +02:00
SwiftyOS              ff11d00f74  isort fixes  2024-09-20 12:35:53 +02:00
SwiftyOS              9d7dfb0a6d  fix type errors  2024-09-20 12:33:39 +02:00
SwiftyOS              f1bf7f269b  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 12:25:02 +02:00
SwiftyOS              46cc8ae3ea  more linting fixes  2024-09-20 12:24:55 +02:00
Reinier van der Leer  43bf6f2349  unbreak classic/.dockerignore  2024-09-20 12:23:49 +02:00
SwiftyOS              2582eb1ee8  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 12:18:04 +02:00
SwiftyOS              10cefc149f  linitng fixes  2024-09-20 12:17:56 +02:00
Reinier van der Leer  d62fe001b8  move back .pre-commit-config.yaml  2024-09-20 12:15:51 +02:00
SwiftyOS              f583a15fd0  formatting changes  2024-09-20 12:13:33 +02:00
SwiftyOS              2cad2093eb  updaing python checks ci  2024-09-20 12:04:11 +02:00
SwiftyOS              4e569f4562  add flake8 path to python checks ci  2024-09-20 11:58:52 +02:00
SwiftyOS              7f514c10cf  Merge branch 'repo-restructure' of github.com:Significant-Gravitas/AutoGPT into repo-restructure  2024-09-20 11:54:56 +02:00
SwiftyOS              d7aba4f6c0  updaate python checks ci  2024-09-20 11:54:17 +02:00
Swifty                ba30aa2fce  Merge branch 'master' into repo-restructure  2024-09-20 11:43:19 +02:00
SwiftyOS              efeba4400e  update symbolic links  2024-09-20 11:40:38 +02:00
SwiftyOS              ba206e3bec  docs update  2024-09-20 11:32:38 +02:00
SwiftyOS              be16fd90d4  fixing repo workflow checker CI  2024-09-20 11:20:03 +02:00
SwiftyOS              d10167ceab  renamed all CI's to make it clear which subproject they are for  2024-09-20 11:14:54 +02:00
SwiftyOS              d593f76437  updating CI's  2024-09-20 11:07:14 +02:00
SwiftyOS              bda938422e  fix frontend paths  2024-09-20 11:02:23 +02:00
SwiftyOS              8397b78ec2  update frontend ci  2024-09-20 10:56:10 +02:00
SwiftyOS              0d7342826b  Restructureing Repo  2024-09-20 10:48:08 +02:00
2818 changed files with 78145 additions and 12500 deletions

.gitattributes

@@ -1,10 +1,10 @@
frontend/build/** linguist-generated
classic/frontend/build/** linguist-generated
**/poetry.lock linguist-generated
docs/_javascript/** linguist-vendored
# Exclude VCR cassettes from stats
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
* text=auto

.github/CODEOWNERS

@@ -1,7 +1,7 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
rnd/infra @Significant-Gravitas/devops
classic/forge/ @Significant-Gravitas/forge-maintainers
classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
classic/frontend/ @Significant-Gravitas/frontend-maintainers
autogpt_platform/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins


@@ -9,7 +9,7 @@
### Testing 🔍
> [!NOTE]
Only for the new autogpt platform, currently in rnd/
Only for the new autogpt platform, currently in autogpt_platform/
<!--
Please make sure your changes have been tested and are in good working condition.

.github/labeler.yml

@@ -1,18 +1,18 @@
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: autogpt/**
- any-glob-to-any-file: classic/original_autogpt/**
Forge:
- changed-files:
- any-glob-to-any-file: forge/**
- any-glob-to-any-file: classic/forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**
- any-glob-to-any-file: classic/benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**
- any-glob-to-any-file: classic/frontend/**
documentation:
- changed-files:
@@ -20,8 +20,8 @@ documentation:
Builder:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_builder/**
- any-glob-to-any-file: autogpt_platform/autogpt_builder/**
Server:
- changed-files:
- any-glob-to-any-file: rnd/autogpt_server/**
- any-glob-to-any-file: autogpt_platform/autogpt_server/**


@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}


@@ -1,25 +1,25 @@
name: AutoGPT CI
name: Classic - AutoGPT CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: autogpt
working-directory: classic/original_autogpt
jobs:
test:
@@ -86,7 +86,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -135,4 +135,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-logs
path: autogpt/logs/
path: classic/original_autogpt/logs/


@@ -1,4 +1,4 @@
name: Purge Auto-GPT Docker CI cache
name: Classic - Purge Auto-GPT Docker CI cache
on:
schedule:
@@ -25,7 +25,8 @@ jobs:
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
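
For context, the new `context:`/`file:` pair above corresponds roughly to the following local build. This is an illustrative sketch, not a command from the workflow (which uses `docker/build-push-action` with a GHA cache); the `auto-gpt` tag and the `release` build type are assumptions borrowed from the neighbouring Docker workflows in this diff.

```
# Rough local equivalent of the CI image build after the restructure (illustrative only)
docker build -f classic/Dockerfile.autogpt --build-arg BUILD_TYPE=release -t auto-gpt classic/
```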


@@ -1,24 +1,26 @@
name: AutoGPT Docker CI
name: Classic - AutoGPT Docker CI
on:
push:
branches: [ master, development ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
working-directory: autogpt
working-directory: classic/original_autogpt
env:
IMAGE_NAME: auto-gpt
@@ -47,7 +49,8 @@ jobs:
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
@@ -116,7 +119,8 @@ jobs:
name: Build image
uses: docker/build-push-action@v5
with:
file: Dockerfile.autogpt
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},


@@ -1,4 +1,4 @@
name: AutoGPT Docker Release
name: Classic - AutoGPT Docker Release
on:
release:
@@ -44,6 +44,7 @@ jobs:
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images


@@ -1,4 +1,4 @@
name: Agent smoke tests
name: Classic - Agent smoke tests
on:
workflow_dispatch:
@@ -7,32 +7,37 @@ on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
- 'setup.py'
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md'
defaults:
run:
shell: bash
working-directory: classic
jobs:
serve-agent-protocol:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
agent-name: [ original_autogpt ]
fail-fast: false
timeout-minutes: 20
env:
@@ -50,7 +55,7 @@ jobs:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./${{ matrix.agent-name }}/
working-directory: ./classic/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -


@@ -1,18 +1,18 @@
name: AGBenchmark CI
name: Classic - AGBenchmark CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- 'benchmark/**'
- .github/workflows/benchmark-ci.yml
- '!benchmark/reports/**'
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
pull_request:
branches: [ master, development, release-* ]
paths:
- 'benchmark/**'
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
concurrency:
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -39,7 +39,7 @@ jobs:
defaults:
run:
shell: bash
working-directory: benchmark
working-directory: classic/benchmark
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -58,7 +58,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -122,7 +122,7 @@ jobs:
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
working-directory: .
working-directory: classic
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
@@ -155,7 +155,7 @@ jobs:
poetry run agbenchmark --mock
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
CHANGED=$(git diff --name-only | grep -E '(agclassic/benchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"


@@ -1,4 +1,4 @@
name: Publish to PyPI
name: Classic - Publish to PyPI
on:
workflow_dispatch:
@@ -21,21 +21,21 @@ jobs:
python-version: 3.8
- name: Install Poetry
working-directory: ./benchmark/
working-directory: ./classic/benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.poetry/bin" >> $GITHUB_PATH
- name: Build project for distribution
working-directory: ./benchmark/
working-directory: ./classic/benchmark/
run: poetry build
- name: Install dependencies
working-directory: ./benchmark/
working-directory: ./classic/benchmark/
run: poetry install
- name: Check Version
working-directory: ./benchmark/
working-directory: ./classic/benchmark/
id: check-version
run: |
echo version=$(poetry version --short) >> $GITHUB_OUTPUT
@@ -43,7 +43,7 @@ jobs:
- name: Create Release
uses: ncipollo/release-action@v1
with:
artifacts: "benchmark/dist/*"
artifacts: "classic/benchmark/dist/*"
token: ${{ secrets.GITHUB_TOKEN }}
draft: false
generateReleaseNotes: false
@@ -51,5 +51,5 @@ jobs:
commit: master
- name: Build and publish
working-directory: ./benchmark/
working-directory: ./classic/benchmark/
run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}


@@ -1,18 +1,18 @@
name: Forge CI
name: Classic - Forge CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/forge-ci.yml'
- 'forge/**'
- '!forge/tests/vcr_cassettes'
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/forge-ci.yml'
- 'forge/**'
- '!forge/tests/vcr_cassettes'
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -21,7 +21,7 @@ concurrency:
defaults:
run:
shell: bash
working-directory: forge
working-directory: classic/forge
jobs:
test:
@@ -110,7 +110,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
@@ -233,4 +233,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-logs
path: forge/logs/
path: classic/forge/logs/


@@ -1,4 +1,4 @@
name: Frontend CI/CD
name: Classic - Frontend CI/CD
on:
push:
@@ -7,11 +7,11 @@ on:
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
- 'classic/frontend/**'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'frontend/**'
- 'classic/frontend/**'
- '.github/workflows/frontend-ci.yml'
jobs:
@@ -34,7 +34,7 @@ jobs:
- name: Build Flutter to Web
run: |
cd frontend
cd classic/frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
@@ -42,7 +42,7 @@ jobs:
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add frontend/build/web
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
@@ -51,7 +51,7 @@ jobs:
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: frontend/build/web
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true


@@ -1,24 +1,24 @@
name: Python checks
name: Classic - Python checks
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!forge/tests/vcr_cassettes'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- 'classic/benchmark/**'
- '**.py'
- '!forge/tests/vcr_cassettes'
- '!classic/forge/tests/vcr_cassettes'
concurrency:
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -40,18 +40,18 @@ jobs:
uses: dorny/paths-filter@v3
with:
filters: |
autogpt:
- autogpt/autogpt/**
- autogpt/tests/**
- autogpt/poetry.lock
original_autogpt:
- classic/original_autogpt/autogpt/**
- classic/original_autogpt/tests/**
- classic/original_autogpt/poetry.lock
forge:
- forge/forge/**
- forge/tests/**
- forge/poetry.lock
- classic/forge/forge/**
- classic/forge/tests/**
- classic/forge/poetry.lock
benchmark:
- benchmark/agbenchmark/**
- benchmark/tests/**
- benchmark/poetry.lock
- classic/benchmark/agbenchmark/**
- classic/benchmark/tests/**
- classic/benchmark/poetry.lock
outputs:
changed-parts: ${{ steps.changes-in.outputs.changes }}
@@ -89,23 +89,23 @@ jobs:
# Install dependencies
- name: Install Python dependencies
run: poetry -C ${{ matrix.sub-package }} install
run: poetry -C classic/${{ matrix.sub-package }} install
# Lint
- name: Lint (isort)
run: poetry run isort --check .
working-directory: ${{ matrix.sub-package }}
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Black)
if: success() || failure()
run: poetry run black --check .
working-directory: ${{ matrix.sub-package }}
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Flake8)
if: success() || failure()
run: poetry run flake8 .
working-directory: ${{ matrix.sub-package }}
working-directory: classic/${{ matrix.sub-package }}
types:
needs: get-changed-parts
@@ -141,11 +141,11 @@ jobs:
# Install dependencies
- name: Install Python dependencies
run: poetry -C ${{ matrix.sub-package }} install
run: poetry -C classic/${{ matrix.sub-package }} install
# Typecheck
- name: Typecheck
if: success() || failure()
run: poetry run pyright
working-directory: ${{ matrix.sub-package }}
working-directory: classic/${{ matrix.sub-package }}
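
The same checks can be reproduced locally under the new layout. A minimal sketch for one sub-package (`forge`), using the commands from the lint and typecheck jobs above, with `classic/forge` standing in for `classic/${{ matrix.sub-package }}`:

```
# Lint and typecheck one sub-package the way the "Classic - Python checks" jobs do
poetry -C classic/forge install
cd classic/forge
poetry run isort --check .
poetry run black --check .
poetry run flake8 .
poetry run pyright   # run by the separate "types" job in the workflow
```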


@@ -1,133 +0,0 @@
name: Hackathon
on:
workflow_dispatch:
inputs:
agents:
description: "Agents to run (comma-separated)"
required: false
default: "autogpt" # Default agents if none are specified
jobs:
matrix-setup:
runs-on: ubuntu-latest
# Service containers to run with `matrix-setup`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env-name: ${{ steps.set-matrix.outputs.env-name }}
steps:
- id: set-matrix
run: |
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::[ 'irrelevant']"
elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
echo "::set-output name=env-name::production"
echo "::set-output name=matrix::$matrix_string"
else
echo "::set-output name=env-name::testing"
echo "::set-output name=matrix::[ 'irrelevant' ]"
fi
tests:
environment:
name: "${{ needs.matrix-setup.outputs.env-name }}"
needs: matrix-setup
env:
min-python-version: "3.10"
name: "${{ matrix.agent-name }}"
runs-on: ubuntu-latest
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
# Provide the password for postgres
env:
POSTGRES_PASSWORD: postgres
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
timeout-minutes: 50
strategy:
fail-fast: false
matrix:
agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
steps:
- name: Print Environment Name
run: |
echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
- name: Check Docker Container
id: check
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: v18.15
- name: Run benchmark
run: |
link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
AGENT_NAME: ${{ matrix.agent-name }}


@@ -1,20 +1,20 @@
name: AutoGPT Builder CI
name: Platform - AutoGPT Builder CI
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'
- 'autogpt_platform/autogpt_builder/**'
pull_request:
paths:
- '.github/workflows/autogpt-builder-ci.yml'
- 'rnd/autogpt_builder/**'
- 'autogpt_platform/autogpt_builder/**'
defaults:
run:
shell: bash
working-directory: rnd/autogpt_builder
working-directory: autogpt_platform/autogpt_builder
jobs:


@@ -1,20 +1,20 @@
name: AutoGPT Builder Infra
name: Platform - AutoGPT Builder Infra
on:
push:
branches: [ master ]
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
- 'autogpt_platform/infra/**'
pull_request:
paths:
- '.github/workflows/autogpt-infra-ci.yml'
- 'rnd/infra/**'
- 'autogpt_platform/infra/**'
defaults:
run:
shell: bash
working-directory: rnd/infra
working-directory: autogpt_platform/infra
jobs:
lint:


@@ -1,16 +1,16 @@
name: AutoGPT Server CI
name: Platform - AutoGPT Server CI
on:
push:
branches: [master, development, ci-test*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
- "autogpt_platform/autogpt_server/**"
pull_request:
branches: [master, development, release-*]
paths:
- ".github/workflows/autogpt-server-ci.yml"
- "rnd/autogpt_server/**"
- "autogpt_platform/autogpt_server/**"
concurrency:
group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -19,7 +19,7 @@ concurrency:
defaults:
run:
shell: bash
working-directory: rnd/autogpt_server
working-directory: autogpt_platform/autogpt_server
jobs:
test:
@@ -90,7 +90,7 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/autogpt_server/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'


@@ -1,4 +1,4 @@
name: 'Close stale issues'
name: Repo - Close stale issues
on:
schedule:
- cron: '30 1 * * *'


@@ -1,12 +1,12 @@
name: "Pull Request auto-label"
name: Repo - Pull Request auto-label
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master, development, release-* ]
paths-ignore:
- 'forge/tests/vcr_cassettes'
- 'benchmark/reports/**'
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolve
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs


@@ -1,4 +1,4 @@
name: github-repo-stats
name: Repo - Github Stats
on:
schedule:


@@ -1,4 +1,4 @@
name: PR Status Checker
name: Repo - PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
@@ -26,6 +26,6 @@ jobs:
echo "Current directory before running Python script:"
pwd
echo "Attempting to run Python script:"
python check_actions_status.py
python .github/workflows/scripts/check_actions_status.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -1,7 +1,7 @@
## Original ignores
.github_access_token
autogpt/keys.py
autogpt/*.json
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
auto_gpt_workspace/*
*.mpeg
.env
@@ -157,7 +157,7 @@ openai/
CURRENT_BULLETIN.md
# AgBenchmark
agbenchmark/reports/
agclassic/benchmark/reports/
# Nodejs
package-lock.json
@@ -170,4 +170,4 @@ pri*
ig*
.github_access_token
LICENSE.rtf
rnd/autogpt_server/settings.py
autogpt_platform/autogpt_server/settings.py

.gitmodules

@@ -1,6 +1,6 @@
[submodule "forge/tests/vcr_cassettes"]
path = forge/tests/vcr_cassettes
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "rnd/supabase"]
path = rnd/supabase
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git
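
After a submodule path changes in `.gitmodules`, an existing checkout usually needs to be re-synced. A generic sketch using standard git commands (not part of this diff), run from the repository root:

```
git submodule sync --recursive            # propagate the new paths/URLs to .git/config
git submodule update --init --recursive   # (re)populate the relocated submodules
```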


@@ -16,22 +16,22 @@ repos:
hooks:
- id: isort-autogpt
name: Lint (isort) - AutoGPT
entry: poetry -C autogpt run isort
files: ^autogpt/
entry: poetry -C classic/original_autogpt run isort
files: ^classic/original_autogpt/
types: [file, python]
language: system
- id: isort-forge
name: Lint (isort) - Forge
entry: poetry -C forge run isort
files: ^forge/
entry: poetry -C classic/forge run isort
files: ^classic/forge/
types: [file, python]
language: system
- id: isort-benchmark
name: Lint (isort) - Benchmark
entry: poetry -C benchmark run isort
files: ^benchmark/
entry: poetry -C classic/benchmark run isort
files: ^classic/benchmark/
types: [file, python]
language: system
@@ -52,20 +52,20 @@ repos:
- id: flake8
name: Lint (Flake8) - AutoGPT
alias: flake8-autogpt
files: ^autogpt/(autogpt|scripts|tests)/
args: [--config=autogpt/.flake8]
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
args: [--config=classic/original_autogpt/.flake8]
- id: flake8
name: Lint (Flake8) - Forge
alias: flake8-forge
files: ^forge/(forge|tests)/
args: [--config=forge/.flake8]
files: ^classic/forge/(forge|tests)/
args: [--config=classic/forge/.flake8]
- id: flake8
name: Lint (Flake8) - Benchmark
alias: flake8-benchmark
files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=benchmark/.flake8]
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
- repo: local
# To have watertight type checking, we check *all* the files in an affected
@@ -74,10 +74,10 @@ repos:
- id: pyright
name: Typecheck - AutoGPT
alias: pyright-autogpt
entry: poetry -C autogpt run pyright
entry: poetry -C classic/original_autogpt run pyright
args: [-p, autogpt, autogpt]
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
@@ -85,9 +85,9 @@ repos:
- id: pyright
name: Typecheck - Forge
alias: pyright-forge
entry: poetry -C forge run pyright
entry: poetry -C classic/forge run pyright
args: [-p, forge, forge]
files: ^forge/(forge/|poetry\.lock$)
files: ^classic/forge/(classic/forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
@@ -95,9 +95,9 @@ repos:
- id: pyright
name: Typecheck - Benchmark
alias: pyright-benchmark
entry: poetry -C benchmark run pyright
entry: poetry -C classic/benchmark run pyright
args: [-p, benchmark, benchmark]
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
@@ -106,22 +106,22 @@ repos:
hooks:
- id: pytest-autogpt
name: Run tests - AutoGPT (excl. slow tests)
entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
- id: pytest-forge
name: Run tests - Forge (excl. slow tests)
entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
files: ^forge/(forge/|tests/|poetry\.lock$)
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agclassic/benchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false
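
Hooks defined this way can be exercised locally with the `pre-commit` CLI. A small sketch, assuming `pre-commit` is installed and invoked from the repository root; the hook ids and aliases are taken from the config above:

```
pre-commit run isort-forge --all-files    # hook id defined above
pre-commit run flake8-forge --all-files   # alias of the flake8 hook scoped to classic/forge
pre-commit run pytest-forge --all-files   # Forge unit-test hook (excl. slow tests)
```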


@@ -1,49 +1,49 @@
{
"folders": [
{
"name": "autogpt",
"path": "../autogpt"
"name": "autogpt_server",
"path": "../autogpt_platform/autogpt_server"
},
{
"name": "benchmark",
"path": "../benchmark"
"name": "autogpt_builder",
"path": "../autogpt_platform/autogpt_builder"
},
{
"name": "market",
"path": "../autogpt_platform/market"
},
{
"name": "lib",
"path": "../autogpt_platform/autogpt_libs"
},
{
"name": "infra",
"path": "../autogpt_platform/infra"
},
{
"name": "docs",
"path": "../docs"
},
{
"name": "forge",
"path": "../forge"
},
{
"name": "frontend",
"path": "../frontend"
},
{
"name": "autogpt_server",
"path": "../rnd/autogpt_server"
},
{
"name": "autogpt_builder",
"path": "../rnd/autogpt_builder"
},
{
"name": "market",
"path": "../rnd/market"
},
{
"name": "lib",
"path": "../rnd/autogpt_libs"
},
{
"name": "infra",
"path": "../rnd/infra"
},
{
"name": "[root]",
"path": ".."
}
},
{
"name": "classic - autogpt",
"path": "../classic/original_autogpt"
},
{
"name": "classic - benchmark",
"path": "../classic/benchmark"
},
{
"name": "classic - forge",
"path": "../classic/forge"
},
{
"name": "classic - frontend",
"path": "../classic/frontend"
},
],
"settings": {
"python.analysis.typeCheckingMode": "basic"


@@ -58,9 +58,9 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
### 🏗️ Forge
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) &ndash;
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) &ndash;
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
@@ -71,7 +71,7 @@ This guide will walk you through the process of creating your own agent and usin
<!-- TODO: insert visual demonstrating the benchmark -->
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
📦 [`agbenchmark`](https://pypi.org/project/agclassic/benchmark/) on Pypi
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark


@@ -1,3 +0,0 @@
{
"python.analysis.typeCheckingMode": "basic",
}


@@ -14,7 +14,7 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine.
2. Navigate to rnd/supabase
2. Navigate to autogpt_platform/supabase
3. Run the following command:
```
git submodule update --init --recursive
@@ -32,7 +32,7 @@ To run the AutoGPT Platform, follow these steps:
```
This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode.
7. Navigate to rnd/autogpt_builder.
7. Navigate to autogpt_platform/autogpt_builder.
8. Run the following command:
```
cp .env.example .env.local


@@ -1,19 +1,19 @@
# Base stage for both dev and prod
FROM node:21-alpine AS base
WORKDIR /app
COPY rnd/autogpt_builder/package.json rnd/autogpt_builder/yarn.lock ./
COPY autogpt_platform/autogpt_builder/package.json autogpt_platform/autogpt_builder/yarn.lock ./
RUN yarn install --frozen-lockfile
# Dev stage
FROM base AS dev
ENV NODE_ENV=development
COPY rnd/autogpt_builder/ .
COPY autogpt_platform/autogpt_builder/ .
EXPOSE 3000
CMD ["yarn", "run", "dev"]
# Build stage for prod
FROM base AS build
COPY rnd/autogpt_builder/ .
COPY autogpt_platform/autogpt_builder/ .
RUN npm run build
# Prod stage

Three binary image files changed with identical before/after sizes (29 KiB, 28 KiB, 15 KiB); presumably renamed as part of the restructure.


@@ -37,6 +37,49 @@ type FormData = {
selectedAgentId: string;
};
const keywords = [
"Automation",
"AI Workflows",
"Integration",
"Task Automation",
"Data Processing",
"Workflow Management",
"Real-time Analytics",
"Custom Triggers",
"Event-driven",
"API Integration",
"Data Transformation",
"Multi-step Workflows",
"Collaboration Tools",
"Business Process Automation",
"No-code Solutions",
"AI-Powered",
"Smart Notifications",
"Data Syncing",
"User Engagement",
"Reporting Automation",
"Lead Generation",
"Customer Support Automation",
"E-commerce Automation",
"Social Media Management",
"Email Marketing Automation",
"Document Management",
"Data Enrichment",
"Performance Tracking",
"Predictive Analytics",
"Resource Allocation",
"Chatbot",
"Virtual Assistant",
"Workflow Automation",
"Social Media Manager",
"Email Optimizer",
"Content Generator",
"Data Analyzer",
"Task Scheduler",
"Customer Service Bot",
"Personalization Engine",
];
const SubmitPage: React.FC = () => {
const router = useRouter();
const {
@@ -292,13 +335,11 @@ const SubmitPage: React.FC = () => {
</MultiSelectorTrigger>
<MultiSelectorContent>
<MultiSelectorList>
<MultiSelectorItem value="keyword1">
Keyword 1
</MultiSelectorItem>
<MultiSelectorItem value="keyword2">
Keyword 2
</MultiSelectorItem>
{/* Add more predefined keywords as needed */}
{keywords.map((keyword) => (
<MultiSelectorItem key={keyword} value={keyword}>
{keyword}
</MultiSelectorItem>
))}
</MultiSelectorList>
</MultiSelectorContent>
</MultiSelector>

Some files were not shown because too many files have changed in this diff.