Compare commits

..

4 Commits

Author SHA1 Message Date
Reinier van der Leer
7ffc374bb5 Merge branch 'master' into bringing-in-the-benchmark 2024-01-29 17:23:47 +01:00
Nicholas Tindle
6d4e7b614d fix: Fix missing categories in data_types.py
- Added GAIA_1 category to existing categories in data_types.py
- Added GAIA_2 category to existing categories in data_types.py
- Added GAIA_3 category to existing categories in data_types.py
2023-11-09 01:40:35 -06:00
Nicholas Tindle
03da45d6e6 refactor: Improve debug tooling
- Added print statements for better debugging and understanding of the code flow.
- Updated the log messages to provide more detailed information about the data and variables involved in the process of generating tests and handling challenges.
2023-11-09 01:25:13 -06:00
Nicholas Tindle
67d1e96415 feat: sync 2023-11-09 01:24:26 -06:00
1000 changed files with 75048 additions and 69049 deletions

View File

@@ -1,28 +0,0 @@
# Ignore everything by default, selectively add things to context
*
# AutoGPT
!autogpt/autogpt/
!autogpt/pyproject.toml
!autogpt/poetry.lock
!autogpt/README.md
!autogpt/tests/
# Benchmark
!benchmark/agbenchmark/
!benchmark/pyproject.toml
!benchmark/poetry.lock
!benchmark/README.md
# Forge
!forge/forge/
!forge/pyproject.toml
!forge/poetry.lock
!forge/README.md
# Frontend
!frontend/build/web/
# Explicitly re-ignore some folders
.*
**/__pycache__

4
.gitattributes vendored
View File

@@ -1,5 +1,3 @@
frontend/build/** linguist-generated
frontend/build/* linguist-generated
**/poetry.lock linguist-generated
docs/_javascript/** linguist-vendored

10
.github/CODEOWNERS vendored
View File

@@ -1,5 +1,5 @@
.github/workflows/ @Significant-Gravitas/devops
autogpt/ @Significant-Gravitas/maintainers
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
.github/workflows/ @Significant-Gravitas/maintainers
autogpts/autogpt/ @Pwuts
benchmark/ @Significant-Gravitas/benchmarkers
forge/ @Swiftyos
frontend/ @hunteraraujo

View File

@@ -90,7 +90,7 @@ body:
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
description: >
If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
If you are using AutoGPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5

19
.github/labeler.yml vendored
View File

@@ -1,19 +0,0 @@
AutoGPT Agent:
- changed-files:
- any-glob-to-any-file: autogpt/**
Forge:
- changed-files:
- any-glob-to-any-file: forge/**
Benchmark:
- changed-files:
- any-glob-to-any-file: benchmark/**
Frontend:
- changed-files:
- any-glob-to-any-file: frontend/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**

View File

@@ -1,18 +1,18 @@
name: AutoGPT CI
name: AutoGPT Python CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
- '!autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-ci.yml'
- 'autogpt/**'
- '!autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -20,50 +20,81 @@ concurrency:
defaults:
run:
shell: bash
working-directory: autogpt
working-directory: autogpts/autogpt
jobs:
lint:
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.min-python-version }}
- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
uses: actions/cache@v3
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: |
curl -sSL https://install.python-poetry.org | python3 -
poetry install
- name: Lint with flake8
run: poetry run flake8
- name: Check black formatting
run: poetry run black . --check
if: success() || failure()
- name: Check isort formatting
run: poetry run isort . --check
if: success() || failure()
# - name: Check mypy formatting
# run: poetry run mypy
# if: success() || failure()
# - name: Check for unused imports and pass statements
# run: |
# cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
# poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
test:
permissions:
contents: read
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
services:
minio:
image: minio/minio:edge-cicd
ports:
- 9000:9000
options: >
--health-interval=10s --health-timeout=5s --health-retries=3
--health-cmd="curl -f http://localhost:9000/minio/health/live"
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
@@ -105,7 +136,7 @@ jobs:
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
@@ -114,34 +145,15 @@ jobs:
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
- name: Install Python dependencies
run: poetry install
run: |
curl -sSL https://install.python-poetry.org | python3 -
poetry install
- name: Run pytest with coverage
run: |
@@ -153,15 +165,12 @@ jobs:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
S3_ENDPOINT_URL: http://localhost:9000
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}
uses: codecov/codecov-action@v3
- id: setup_git_auth
name: Set up git token authentication
@@ -169,11 +178,7 @@ jobs:
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
if [ "${{ runner.os }}" = 'macOS' ]; then
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
else
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
fi
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
git config "$config_key" \
"Authorization: Basic $base64_pat"
@@ -234,12 +239,12 @@ jobs:
echo "Adding label and comment..."
echo $TOKEN | gh auth login --with-token
gh issue edit $PR_NUMBER --add-label "behaviour change"
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: test-logs
path: autogpt/logs/
path: autogpts/autogpt/logs/

View File

@@ -16,16 +16,16 @@ jobs:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
file: Dockerfile.autogpt
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only

View File

@@ -5,14 +5,14 @@ on:
branches: [ master, development ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
- '!autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpt-docker-ci.yml'
- 'autogpt/**'
- '!autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/**'
- '!autogpts/autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
@@ -20,11 +20,11 @@ concurrency:
defaults:
run:
working-directory: autogpt
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
DEV_IMAGE_TAG: latest-dev
jobs:
@@ -35,10 +35,10 @@ jobs:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- if: runner.debug
run: |
@@ -47,12 +47,11 @@ jobs:
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
file: Dockerfile.autogpt
context: autogpts/autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
@@ -84,6 +83,7 @@ jobs:
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true
test:
@@ -100,30 +100,28 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
submodules: true
- if: github.event_name == 'push'
name: Log in to Docker hub
uses: docker/login-action@v3
- name: Log in to Docker hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
file: Dockerfile.autogpt
context: autogpts/autogpt
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev

View File

@@ -10,6 +10,10 @@ on:
type: boolean
description: 'Build from scratch, without using cached layers'
defaults:
run:
working-directory: autogpts/autogpt
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
@@ -20,16 +24,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Log in to Docker hub
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
@@ -42,9 +46,9 @@ jobs:
- id: build
name: Build image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
file: Dockerfile.autogpt
context: autogpts/autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
@@ -52,7 +56,6 @@ jobs:
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
@@ -83,4 +86,5 @@ jobs:
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
working-directory: ./
continue-on-error: true

View File

@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
jobs:
benchmark:
permissions:
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
fail-fast: false
timeout-minutes: 120
env:
min-python-version: '3.10'
REPORTS_BRANCH: data/benchmark-reports
REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python -
- name: Prepare reports folder
run: mkdir -p ${{ env.REPORTS_FOLDER }}
- run: poetry -C benchmark install
- name: Benchmark ${{ matrix.agent-name }}
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
set +e # Do not quit on non-zero exit codes
poetry run agbenchmark run -N 3 \
--test=ReadFile \
--test=BasicRetrieval --test=RevenueRetrieval2 \
--test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
--test=UrlShortener --test=TicTacToe --test=Battleship \
--test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
--test=WebArenaTask_134 --test=WebArenaTask_163
# Convert exit code 1 (some challenges failed) to exit code 0
if [ $? -eq 0 ] || [ $? -eq 1 ]; then
exit 0
else
exit $?
fi
env:
AGENT_NAME: ${{ matrix.agent-name }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
- name: Push reports to data branch
run: |
# BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
rm ${{ env.REPORTS_FOLDER }}/*.json
# Find folder with newest (untracked) report in it
report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
| xargs -I {} dirname {} \
| xargs -I {} git ls-files --others --exclude-standard {} \
| xargs -I {} dirname {} \
| sort -u)
json_report_file="$report_subfolder/report.json"
# Convert JSON report to Markdown
markdown_report_file="$report_subfolder/report.md"
poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
git config --global user.name 'GitHub Actions'
git config --global user.email 'github-actions@agpt.co'
git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
&& git checkout ${{ env.REPORTS_BRANCH }} \
|| git checkout --orphan ${{ env.REPORTS_BRANCH }}
git reset --hard
git add ${{ env.REPORTS_FOLDER }}
git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
&& git push origin ${{ env.REPORTS_BRANCH }}

View File

@@ -1,4 +1,4 @@
name: Agent smoke tests
name: AutoGPTs smoke test CI
on:
workflow_dispatch:
@@ -8,8 +8,7 @@ on:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
@@ -19,8 +18,7 @@ on:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/autogpts-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'autogpts/**'
- 'benchmark/**'
- 'run'
- 'cli.py'
@@ -28,44 +26,43 @@ on:
- '!**/*.md'
jobs:
serve-agent-protocol:
run-tests:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ autogpt ]
agent-name: [ autogpt, forge ]
fail-fast: false
timeout-minutes: 20
env:
min-python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./${{ matrix.agent-name }}/
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
cd autogpts/${{ matrix.agent-name }}
poetry run agbenchmark --mock
poetry run agbenchmark --test=WriteFile
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENT_NAME: ${{ matrix.agent-name }}
HELICONE_API_KEY: ${{ secrets.HELICONE_API_KEY }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
HELICONE_CACHE_ENABLED: false
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
REPORT_LOCATION: ${{ format('../../reports/{0}', matrix.agent-name) }}

View File

@@ -1,4 +1,4 @@
name: AGBenchmark CI
name: Benchmark CI
on:
push:
@@ -14,91 +14,62 @@ on:
- '!benchmark/reports/**'
- .github/workflows/benchmark-ci.yml
concurrency:
group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
env:
min-python-version: '3.10'
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
defaults:
run:
shell: bash
working-directory: benchmark
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
python-version: ${{ env.min-python-version }}
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
- id: get_date
name: Get date
working-directory: ./benchmark/
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
- name: Install Poetry
working-directory: ./benchmark/
run: |
curl -sSL https://install.python-poetry.org | python3 -
curl -sSL https://install.python-poetry.org | python -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
- name: Install dependencies
working-directory: ./benchmark/
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
export POETRY_VIRTUALENVS_IN_PROJECT=true
poetry install -vvv
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Lint with flake8
working-directory: ./benchmark/
run: poetry run flake8
- name: Install Python dependencies
run: poetry install
- name: Check black formatting
working-directory: ./benchmark/
run: poetry run black . --exclude test.py --check
if: success() || failure()
- name: Run pytest with coverage
- name: Check isort formatting
working-directory: ./benchmark/
run: poetry run isort . --check
if: success() || failure()
- name: Check for unused imports and pass statements
working-directory: ./benchmark/
run: |
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark"
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
if: success() || failure()
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}
self-test-with-agent:
tests-agbenchmark:
runs-on: ubuntu-latest
strategy:
matrix:
@@ -107,28 +78,29 @@ jobs:
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}
- name: Install Poetry
working-directory: ./autogpts/${{ matrix.agent-name }}/
run: |
curl -sSL https://install.python-poetry.org | python -
- name: Run regression tests
working-directory: .
run: |
./run agent start ${{ matrix.agent-name }}
cd ${{ matrix.agent-name }}
sleep 10
cd autogpts/${{ matrix.agent-name }}
set +e # Ignore non-zero exit codes and continue execution
echo "Running the following command: poetry run agbenchmark --maintain --mock"
poetry run agbenchmark --maintain --mock
EXIT_CODE=$?
set -e # Stop ignoring non-zero exit codes
@@ -148,13 +120,14 @@ jobs:
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../benchmark
cd ../../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
poetry run agbenchmark --mock
poetry run pytest -vv -s tests
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
@@ -165,5 +138,3 @@ jobs:
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

View File

@@ -10,13 +10,13 @@ jobs:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: 3.8

46
.github/workflows/build-frontend.yml vendored Normal file
View File

@@ -0,0 +1,46 @@
name: Build and Commit Frontend
on:
push:
branches:
- master
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
jobs:
build:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v2
- name: Setup Flutter
uses: subosito/flutter-action@v1
with:
flutter-version: '3.13.2'
- name: Build Flutter Web
run: |
cd frontend
flutter build web --base-href /app/
- name: Set branch name
id: vars
run: echo "::set-output name=branch::frontend_build_${GITHUB_SHA}"
- name: Commit and Push
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add frontend/build/web
git commit -m "Update frontend build" -a
git checkout -b ${{ steps.vars.outputs.branch }}
echo "Commit hash: ${GITHUB_SHA}"
git push origin ${{ steps.vars.outputs.branch }}
# - name: Create Pull Request
# uses: peter-evans/create-pull-request@v3
# with:
# title: "Update frontend build"
# body: "This PR updates the frontend build."
# branch: ${{ steps.vars.outputs.branch }}
# base: "master"

View File

@@ -11,7 +11,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
- uses: actions/stale@v8
with:
# operations-per-run: 5000
stale-issue-message: >

View File

@@ -1,129 +0,0 @@
name: Forge CI
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/forge-ci.yml'
- 'forge/**'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/forge-ci.yml'
- 'forge/**'
concurrency:
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
working-directory: forge
jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps:
# Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.'
run: |
docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4
with:
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
- name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install
- name: Run pytest with coverage
run: |
poetry run pytest -vv \
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
forge
env:
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: forge,${{ runner.os }}
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: forge/logs/

View File

@@ -1,60 +0,0 @@
name: Frontend CI/CD
on:
push:
branches:
- master
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
pull_request:
paths:
- 'frontend/**'
- '.github/workflows/frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v6
with:
add-paths: frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

View File

@@ -88,13 +88,13 @@ jobs:
run: docker ps
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}
@@ -107,7 +107,7 @@ jobs:
curl -sSL https://install.python-poetry.org | python -
- name: Install Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v1
with:
node-version: v18.15
@@ -117,7 +117,7 @@ jobs:
branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
git clone "$link" -b "$branch" "$AGENT_NAME"
cd $AGENT_NAME
cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found"
./run agent start $AGENT_NAME
cd ../benchmark
poetry install

View File

@@ -5,7 +5,7 @@ on:
push:
branches: [ master, development, release-* ]
paths-ignore:
- 'autogpt/tests/vcr_cassettes'
- 'autogpts/autogpt/tests/vcr_cassettes'
- 'benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
@@ -52,15 +52,6 @@ jobs:
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
message_if_xl:
scope:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
with:
sync-labels: true
message_if_xl: >
This PR exceeds the recommended size of 500 lines.
Please make sure you are NOT addressing multiple issues with one PR.

View File

@@ -1,151 +0,0 @@
name: Python checks
on:
push:
branches: [ master, development, ci-test* ]
paths:
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- '**.py'
- '!autogpt/tests/vcr_cassettes'
pull_request:
branches: [ master, development, release-* ]
paths:
- '.github/workflows/lint-ci.yml'
- 'autogpt/**'
- 'forge/**'
- 'benchmark/**'
- '**.py'
- '!autogpt/tests/vcr_cassettes'
concurrency:
group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
defaults:
run:
shell: bash
jobs:
get-changed-parts:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- id: changes-in
name: Determine affected subprojects
uses: dorny/paths-filter@v3
with:
filters: |
autogpt:
- autogpt/autogpt/**
- autogpt/tests/**
- autogpt/poetry.lock
forge:
- forge/forge/**
- forge/tests/**
- forge/poetry.lock
benchmark:
- benchmark/agbenchmark/**
- benchmark/tests/**
- benchmark/poetry.lock
outputs:
changed-parts: ${{ steps.changes-in.outputs.changes }}
lint:
needs: get-changed-parts
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies
run: poetry -C ${{ matrix.sub-package }} install
# Lint
- name: Lint (isort)
run: poetry run isort --check .
working-directory: ${{ matrix.sub-package }}
- name: Lint (Black)
if: success() || failure()
run: poetry run black --check .
working-directory: ${{ matrix.sub-package }}
- name: Lint (Flake8)
if: success() || failure()
run: poetry run flake8 .
working-directory: ${{ matrix.sub-package }}
types:
needs: get-changed-parts
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.min-python-version }}
- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies
run: poetry -C ${{ matrix.sub-package }} install
# Typecheck
- name: Typecheck
if: success() || failure()
run: poetry run pyright
working-directory: ${{ matrix.sub-package }}

7
.gitignore vendored
View File

@@ -6,6 +6,8 @@ auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
@@ -170,3 +172,8 @@ pri*
# ignore
ig*
.github_access_token
arena/TestAgent.json
# evo.ninja
autogpts/evo.ninja/*
!autogpts/evo.ninja/setup

4
.gitmodules vendored
View File

@@ -1,3 +1,3 @@
[submodule "autogpt/tests/vcr_cassettes"]
path = autogpt/tests/vcr_cassettes
[submodule "autogpts/autogpt/tests/vcr_cassettes"]
path = autogpts/autogpt/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes

View File

@@ -1,6 +0,0 @@
[pr_reviewer]
num_code_suggestions=0
[pr_code_suggestions]
commitable_code_suggestions=false
num_code_suggestions=0

View File

@@ -1,127 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
args: ["--maxkb=500"]
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
- repo: local
# isort needs the context of which packages are installed to function, so we
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
hooks:
- id: isort-autogpt
name: Lint (isort) - AutoGPT
entry: poetry -C autogpt run isort
files: ^autogpt/
types: [file, python]
language: system
- id: isort-forge
name: Lint (isort) - Forge
entry: poetry -C forge run isort
files: ^forge/
types: [file, python]
language: system
- id: isort-benchmark
name: Lint (isort) - Benchmark
entry: poetry -C benchmark run isort
files: ^benchmark/
types: [file, python]
language: system
- repo: https://github.com/psf/black
rev: 23.12.1
# Black has sensible defaults, doesn't need package context, and ignores
# everything in .gitignore, so it works fine without any config or arguments.
hooks:
- id: black
name: Lint (Black)
language_version: python3.10
- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
# To have flake8 load the config of the individual subprojects, we have to call
# them separately.
hooks:
- id: flake8
name: Lint (Flake8) - AutoGPT
alias: flake8-autogpt
files: ^autogpt/(autogpt|scripts|tests)/
args: [--config=autogpt/.flake8]
- id: flake8
name: Lint (Flake8) - Forge
alias: flake8-forge
files: ^forge/(forge|tests)/
args: [--config=forge/.flake8]
- id: flake8
name: Lint (Flake8) - Benchmark
alias: flake8-benchmark
files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=benchmark/.flake8]
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
hooks:
- id: pyright
name: Typecheck - AutoGPT
alias: pyright-autogpt
entry: poetry -C autogpt run pyright
args: [-p, autogpt, autogpt]
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Forge
alias: pyright-forge
entry: poetry -C forge run pyright
args: [-p, forge, forge]
files: ^forge/(forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Benchmark
alias: pyright-benchmark
entry: poetry -C benchmark run pyright
args: [-p, benchmark, benchmark]
files: ^benchmark/(agbenchmark|tests)/
types: [file]
language: system
pass_filenames: false
- repo: local
hooks:
- id: pytest-autogpt
name: Run tests - AutoGPT (excl. slow tests)
entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
- id: pytest-forge
name: Run tests - Forge (excl. slow tests)
entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
files: ^forge/(forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
- id: pytest-benchmark
name: Run tests - Benchmark
entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false

View File

@@ -74,7 +74,7 @@ Lists all the available agents.
**Output**:
```
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
🎉 New agent 'my_agent' created and switched to the new directory in autogpts folder.
```
Creates a new agent named 'my_agent'.

View File

@@ -39,6 +39,9 @@ This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux
The first command you need to use is `./run setup` This will guide you through the process of setting up your system.
Initially you will get instructions for installing flutter, chrome and setting up your github access token like the following image:
> Note: for advanced users. The github access token is only needed for the ./run arena enter command so the system can automatically create a PR
![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
@@ -74,30 +77,44 @@ After executing the above commands, running `./run setup` should work successful
#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.
You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:
You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:
![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
## Creating Your Agent
After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing.
Now setup has been completed its time to create your agent template.
Do so by running the `./run agent create YOUR_AGENT_NAME` replacing YOUR_AGENT_NAME with a name of your choice. Examples of valid names: swiftyosgpt or SwiftyosAgent or swiftyos_agent
Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose
![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
Upon creating your agent its time to officially enter the Arena!
Do so by running `./run arena enter YOUR_AGENT_NAME`
![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png)
> Note: for advanced users, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format.
```json
{
"github_repo_url": "https://github.com/Swiftyos/YourAgentName",
"timestamp": "2023-09-18T10:03:38.051498",
"commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e",
"branch_to_benchmark": "master"
}
```
- github_repo_url: the url to your fork
- timestamp: timestamp of the last update of this file
- commit_hash_to_benchmark: the commit hash of your entry. You update each time you have something ready to be officially entered into the hackathon
- branch_to_benchmark: the branch you are using to develop your agent on, default is master.
![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
## Running your Agent
Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
Your agent can started using the `./run agent start YOUR_AGENT_NAME`
This starts the agent on the URL: `http://localhost:8000/`
This start the agent on `http://localhost:8000/`
![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)

View File

@@ -18,16 +18,23 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
&ensp;|&ensp;
**🛠️ [Build your own Agent - Quickstart](QUICKSTART.md)**
## 🥇 Current Best Agent: evo.ninja
[Current Best Agent]: #-current-best-agent-evoninja
The AutoGPT Arena Hackathon saw [**evo.ninja**](https://github.com/polywrap/evo.ninja) earn the top spot on our Arena Leaderboard, proving itself as the best open-source generalist agent. Try it now at https://evo.ninja!
📈 To challenge evo.ninja, AutoGPT, and others, submit your benchmark run to the [Leaderboard](#-leaderboard), and maybe your agent will be up here next!
## 🧱 Building blocks
### 🏗️ Forge
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/autogpts/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) &ndash;
🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/forge/tutorials/001_getting_started.md) &ndash;
This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/forge) about Forge
### 🎯 Benchmark
@@ -39,11 +46,18 @@ This guide will walk you through the process of creating your own agent and usin
&ensp;|&ensp;
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
#### 🏆 [Leaderboard][leaderboard]
[leaderboard]: https://leaderboard.agpt.co
Submit your benchmark run through the UI and claim your place on the AutoGPT Arena Leaderboard! The best scoring general agent earns the title of **[Current Best Agent]**, and will be adopted into our repo so people can easily run it through the [CLI].
[![Screenshot of the AutoGPT Arena leaderboard](https://github.com/Significant-Gravitas/AutoGPT/assets/12185583/60813392-9ddb-4cca-bb44-b477dbae225d)][leaderboard]
### 💻 UI
**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.
<!-- TODO: insert screenshot of front end -->
<!-- TODO: instert screenshot of front end -->
The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
@@ -64,6 +78,7 @@ Options:
Commands:
agent Commands to create, start and stop agents
arena Commands to enter the arena
benchmark Commands to start the benchmark and list tests and categories
setup Installs dependencies needed for your system.
```
@@ -87,11 +102,7 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
---
<p align="center">
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date&theme=dark" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
</picture>
</a>
<a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
<img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
</a>
</p>

View File

@@ -1,66 +0,0 @@
# Security Policy
- [**Using AutoGPT Securely**](#using-AutoGPT-securely)
- [Restrict Workspace](#restrict-workspace)
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Untrusted environments or networks](#untrusted-environments-or-networks)
- [Multi-Tenant environments](#multi-tenant-environments)
- [**Reporting a Vulnerability**](#reporting-a-vulnerability)
## Using AutoGPT Securely
### Restrict Workspace
Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
### Untrusted inputs
When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
For maximum security when handling untrusted inputs, you may need to employ the following:
* Sandboxing: Isolate the process.
* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
* Validation: Enforce strict rules on allowed characters and data types.
* Filtering: Remove potentially malicious scripts or code fragments.
* Encoding: Convert special characters into safe representations.
* Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
### Data privacy
To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
### Untrusted environments or networks
Since AutoGPT performs network calls to the OpenAI API, it is important to always run it with trusted environments and networks. Running it on untrusted environments can expose your API KEY to attackers.
Additionally, running it on an untrusted network can expose your data to potential network attacks.
However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
### Multi-Tenant environments
If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant's identity.
- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.
## Reporting a Vulnerability
Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.
However, If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.

6
arena/480bot.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/480/AutoGPT",
"timestamp": "2023-10-22T06:49:52.536177",
"commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
"branch_to_benchmark": "master"
}

6
arena/AGENT_GORDON.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-01T17:13:24.272333",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

6
arena/AGENT_JARVIS.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
"timestamp": "2023-11-04T10:13:11.039444",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

6
arena/AI.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/QingquanBao/AutoGPT",
"timestamp": "2023-11-01T16:20:51.086235",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

7
arena/AKBAgent.json Normal file
View File

@@ -0,0 +1,7 @@
{
"github_repo_url": "https://github.com/imakb/AKBAgent",
"timestamp": "2023-10-31T00:03:23.000000",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "AKBAgent"
}

6
arena/ASSISTANT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/hongzzz/AutoGPT",
"timestamp": "2023-10-13T03:22:59.347424",
"commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
"branch_to_benchmark": "master"
}

6
arena/AUTO_ENGINEER.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT",
"timestamp": "2023-10-04T15:25:30.458687",
"commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial",
"timestamp": "2023-10-10T06:01:23.439061",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT",
"timestamp": "2023-10-17T13:16:16.327237",
"commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
"branch_to_benchmark": "master"
}

6
arena/AgGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/althaf004/AutoGPT",
"timestamp": "2023-09-26T03:40:03.658369",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}

6
arena/AgentJPark.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/againeureka/AutoGPT",
"timestamp": "2023-10-12T02:20:01.005361",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}

6
arena/AgentKD.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kitdesai/AgentKD",
"timestamp": "2023-10-14T02:35:09.979434",
"commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
"branch_to_benchmark": "master"
}

6
arena/Ahmad.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JawadAbu/AutoGPT.git",
"timestamp": "2023-11-05T12:35:35.352028",
"commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
"branch_to_benchmark": "master"
}

6
arena/Alfred.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Shadowless422/Alfred",
"timestamp": "2023-10-03T10:42:45.473477",
"commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
"branch_to_benchmark": "master"
}

6
arena/AlphaCISO.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/alphaciso/AutoGPT",
"timestamp": "2023-10-21T08:26:41.961187",
"commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
"branch_to_benchmark": "master"
}

6
arena/AndersLensway.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/4nd3rs/AutoGPT",
"timestamp": "2023-10-11T11:00:08.150159",
"commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
"branch_to_benchmark": "master"
}

1
arena/AntlerTestGPT.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"}

6
arena/AppleGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Nimit3-droid/AutoGPT",
"timestamp": "2023-10-03T11:59:15.495902",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}

1
arena/AquaAgent.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/AmahAjavon/AutoGPT",
"timestamp": "2023-10-28T20:32:15.845741",
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
"branch_to_benchmark": "master"
}

6
arena/AskOpie.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/arunqa/AutoGPT",
"timestamp": "2023-09-26T05:13:24.466017",
"commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
"branch_to_benchmark": "master"
}

6
arena/Auto.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Nikhil8652/AutoGPT",
"timestamp": "2023-10-16T09:12:17.452121",
"commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
"branch_to_benchmark": "master"
}

6
arena/AutoGPT-ariel.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/RedTachyon/AutoGPT",
"timestamp": "2023-10-21T22:31:30.871023",
"commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23",
"branch_to_benchmark": "master"
}

1
arena/AutoGPT2.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"}

6
arena/AutoGenius.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JasonDRZ/AutoGPT",
"timestamp": "2023-10-26T13:27:58.805270",
"commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae",
"branch_to_benchmark": "master"
}

6
arena/AutoTDD.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/vshneer/AutoTDD",
"timestamp": "2023-10-11T19:14:30.939747",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/cagdasbas/AutoGPT",
"timestamp": "2023-10-15T08:43:40.193080",
"commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
"branch_to_benchmark": "master"
}

6
arena/AwareAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent",
"timestamp": "2023-10-26T10:10:01.481205",
"commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7",
"branch_to_benchmark": "master"
}

6
arena/Bagi_agent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
"timestamp": "2023-10-20T09:21:48.837635",
"commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
"branch_to_benchmark": "master"
}

6
arena/BanglaSgAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT",
"timestamp": "2023-09-27T15:32:24.056105",
"commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5",
"branch_to_benchmark": "master"
}

6
arena/Baptiste.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/Baptistecaille/AutoGPT",
"timestamp": "2023-10-01T19:44:23.416591",
"commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba",
"branch_to_benchmark": "master"
}

1
arena/Bravo06.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"}

1
arena/Brillante-AI.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"}

6
arena/Bunny.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/razorhasbeen/AutoGPT",
"timestamp": "2023-10-03T11:50:56.725628",
"commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
"branch_to_benchmark": "master"
}

6
arena/CCAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/ccsnow127/AutoGPT",
"timestamp": "2023-10-21T13:57:15.131761",
"commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
"branch_to_benchmark": "master"
}

6
arena/CES-GPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/ces-sonnguyen/CES-GPT",
"timestamp": "2023-10-30T07:45:07.337258",
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
"branch_to_benchmark": "master"
}

6
arena/CISLERK.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/cislerk/AutoGPT",
"timestamp": "2023-10-10T18:40:50.718850",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}

6
arena/CONNECTBOT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/myncow/DocumentAgent.git",
"timestamp": "2023-10-31T21:21:28.951345",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "master"
}

6
arena/CYNO_AGENT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/dr1yl/AutoGPT",
"timestamp": "2023-10-09T20:01:05.041446",
"commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
"branch_to_benchmark": "master"
}

1
arena/ChadGPT.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/Ahmad-Alaziz/ChadGPT", "timestamp": "2023-10-26T09:39:35Z", "commit_hash_to_benchmark": "84dd029c011379791a6fec8b148b2982a2ef159e", "branch_to_benchmark": "master"}

6
arena/ChrisGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT",
"timestamp": "2023-10-31T17:55:41.458834",
"commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
"branch_to_benchmark": "master"
}

6
arena/CodeAutoGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/hugomastromauro/AutoGPT",
"timestamp": "2023-11-01T13:21:42.624202",
"commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/simonfunk/Auto-GPT", "timestamp": "2023-10-08T02:10:18Z", "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", "branch_to_benchmark": "master"}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/HMDCrew/AutoGPT",
"timestamp": "2023-10-06T20:41:26.293944",
"commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6",
"branch_to_benchmark": "master"
}

6
arena/DE.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/wic0144/AutoGPT",
"timestamp": "2023-10-26T09:05:21.013962",
"commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
"branch_to_benchmark": "master"
}

6
arena/DavidsAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/beisdog/AutoGPT",
"timestamp": "2023-09-29T22:06:18.846082",
"commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
"branch_to_benchmark": "master"
}

6
arena/Derpmaster.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/schumacher-m/Derpmaster",
"timestamp": "2023-10-30T21:10:27.407732",
"commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
"branch_to_benchmark": "master"
}

6
arena/DevOpsAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/rahuldotar/AutoGPT",
"timestamp": "2023-10-02T11:34:29.870077",
"commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
"branch_to_benchmark": "master"
}

6
arena/Drench.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/MohamedBasueny/AutoGPT-Drench",
"timestamp": "2023-10-27T01:28:13.869318",
"commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc",
"branch_to_benchmark": "master"
}

6
arena/Eduardo.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/MuriloEduardo/AutoGPT.git",
"timestamp": "2023-09-25T03:18:20.659056",
"commit_hash_to_benchmark": "ffa76c3a192c36827669335de4390262da5fd972",
"branch_to_benchmark": "master"
}

1
arena/EmbeddedAg.json Normal file
View File

@@ -0,0 +1 @@
{"github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", "timestamp": "2023-10-26T09:15:50Z", "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", "branch_to_benchmark": "master"}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kyannai/AutoGPT",
"timestamp": "2023-09-29T03:05:45.504690",
"commit_hash_to_benchmark": "1f367618edf903f38dff4dd064f96e611ffc5242",
"branch_to_benchmark": "master"
}

6
arena/ExampleAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/janekdijkstra/AutoGPT",
"timestamp": "2023-10-16T12:12:54.998033",
"commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
"branch_to_benchmark": "master"
}

6
arena/FLASH.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/flashdumper/AutoGPT",
"timestamp": "2023-10-30T23:02:13.653861",
"commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
"branch_to_benchmark": "master"
}

6
arena/FactoryGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/neilmartindev/FactoryGPT",
"timestamp": "2023-10-04T16:24:58.525870",
"commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
"branch_to_benchmark": "master"
}

6
arena/FcsummerGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/fbk111/FcsummerGPT",
"timestamp": "2023-10-25T09:58:39.801277",
"commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
"branch_to_benchmark": "master"
}

6
arena/FynAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/tomkat-cr/AutoGPT.git",
"timestamp": "2023-10-18T09:41:21.282992",
"commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
"branch_to_benchmark": "master"
}

6
arena/GG.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/IgorCIs/AutoGPT",
"timestamp": "2023-09-27T14:01:20.964953",
"commit_hash_to_benchmark": "a14aadd91493886663232bfd23c0412609f2a2fc",
"branch_to_benchmark": "master"
}

6
arena/GPTTest.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/h3llix/GPTTest.git",
"timestamp": "2023-11-02T10:56:53.142288",
"commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
"branch_to_benchmark": "master"
}

6
arena/GameSoundGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/mordvinov/AutoGPT",
"timestamp": "2023-10-13T14:48:02.852293",
"commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
"branch_to_benchmark": "master"
}

6
arena/GeorgeGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/norn93/GeorgeGPT",
"timestamp": "2023-10-17T14:38:41.051458",
"commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
"branch_to_benchmark": "master"
}

6
arena/Granger.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/balloch/AutoGPTProblemSolver",
"timestamp": "2023-09-29T15:11:44.876627",
"commit_hash_to_benchmark": "9fb6d5bbbd6928402a5718b8c249811c6f682a88",
"branch_to_benchmark": "master"
}

6
arena/HACKATHON.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/manuel-soria/AutoGPT",
"timestamp": "2023-10-07T16:55:38.741776",
"commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4",
"branch_to_benchmark": "master"
}

6
arena/HMD2.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/HMDCrew/AutoGPT",
"timestamp": "2023-10-09T08:46:37.457740",
"commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6",
"branch_to_benchmark": "master"
}

6
arena/Heisenberg.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/georgehaws/Heisenberg",
"timestamp": "2023-10-02T16:07:18-07:00",
"commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/hekolcu/AutoGPT",
"timestamp": "2023-09-30T17:31:20.979122",
"commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/codetitlan/AutoGPT-CDTHB",
"timestamp": "2023-10-03T15:04:54.856291",
"commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d",
"branch_to_benchmark": "master"
}

6
arena/Hypeman.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/kennyu/KenGPT",
"timestamp": "2023-09-27T19:50:31.443494",
"commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7",
"branch_to_benchmark": "master"
}

View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/mariepop13/AutoGPT",
"timestamp": "2023-10-25T18:38:32.012583",
"commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
"branch_to_benchmark": "master"
}

6
arena/JackGPT.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JackDance/AutoGPT",
"timestamp": "2023-10-09T08:26:35.181112",
"commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61",
"branch_to_benchmark": "master"
}

6
arena/Jarvis.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/elynch303/AutoGPT",
"timestamp": "2023-10-12T14:15:17.014333",
"commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
"branch_to_benchmark": "master"
}

6
arena/JarvisAgent.json Normal file
View File

@@ -0,0 +1,6 @@
{
"github_repo_url": "https://github.com/JadeCong/AutoGPT",
"timestamp": "2023-10-17T18:49:16.489653",
"commit_hash_to_benchmark": "0bd5d4420ec168194d5a93f62d890d33ab7d9940",
"branch_to_benchmark": "master"
}

Some files were not shown because too many files have changed in this diff Show More