Compare commits


4 Commits

Author SHA1 Message Date
Otto-AGPT
cdeefb8621 fix(copilot): Use correct OpenRouter reasoning API format
Addresses review comments from CodeRabbit and Sentry:

- Change reasoning format from {"enabled": True} (invalid) to
  {"max_tokens": config.thinking_budget_tokens} per OpenRouter docs
- Add missing thinking_budget_tokens config field (default: 10000)
- Extract duplicate code into _apply_thinking_config() helper function
- Update description from 'adaptive' to 'extended' thinking for clarity

References:
- OpenRouter reasoning docs: https://openrouter.ai/docs/reasoning-tokens
2026-02-11 13:54:57 +00:00
Swifty
ba6d585170 update settings 2026-02-10 16:08:21 +01:00
Swifty
90eac56525 Merge branch 'dev' into fix/enable-extended-thinking 2026-02-10 15:26:40 +01:00
Otto
75f8772f8a feat(copilot): Enable extended thinking for Claude models
Adds configuration to enable Anthropic's extended thinking feature via
OpenRouter. This keeps the model's chain-of-thought reasoning internal
rather than outputting it to users.

Configuration:
- thinking_enabled: bool (default: True)
- thinking_budget_tokens: int (default: 10000)

The thinking config is only applied to Anthropic models (detected via
model name containing 'anthropic').

Fixes the issue where the CoPilot prompt expects thinking mode but it
wasn't enabled on the API side, causing internal reasoning to leak
into user-facing responses.
2026-02-10 13:58:57 +00:00
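
The two copilot commits above describe the configuration and payload change in prose; the sketch below restates it as a minimal, runnable example. It is not the repository's code: the constants stand in for ChatConfig.thinking_enabled and ChatConfig.thinking_budget_tokens, and the model id is only illustrative.

from typing import Any

THINKING_ENABLED = True          # mirrors ChatConfig.thinking_enabled (default True)
THINKING_BUDGET_TOKENS = 10000   # mirrors ChatConfig.thinking_budget_tokens (default 10000)

def build_reasoning_extra_body(model: str) -> dict[str, Any]:
    """Build the extra_body payload for an OpenRouter chat completion request."""
    extra_body: dict[str, Any] = {}
    if THINKING_ENABLED and "anthropic" in model.lower():
        # Old format flagged in review as invalid: {"enabled": True}
        # Correct format per https://openrouter.ai/docs/reasoning-tokens:
        extra_body["reasoning"] = {"max_tokens": THINKING_BUDGET_TOKENS}
    return extra_body

print(build_reasoning_extra_body("anthropic/claude-sonnet-4"))
# {'reasoning': {'max_tokens': 10000}}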
2297 changed files with 818595 additions and 40140 deletions

View File

@@ -6,15 +6,11 @@ on:
paths: paths:
- '.github/workflows/classic-autogpt-ci.yml' - '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/direct_benchmark/**'
- 'classic/forge/**'
pull_request: pull_request:
branches: [ master, dev, release-* ] branches: [ master, dev, release-* ]
paths: paths:
- '.github/workflows/classic-autogpt-ci.yml' - '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/direct_benchmark/**'
- 'classic/forge/**'
concurrency: concurrency:
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -23,22 +19,47 @@ concurrency:
defaults: defaults:
run: run:
shell: bash shell: bash
working-directory: classic working-directory: classic/original_autogpt
jobs: jobs:
test: test:
permissions: permissions:
contents: read contents: read
timeout-minutes: 30 timeout-minutes: 30
runs-on: ubuntu-latest strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps: steps:
- name: Start MinIO service # Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.' working-directory: '.'
run: | run: |
docker pull minio/minio:edge-cicd docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
@@ -50,23 +71,41 @@ jobs:
git config --global user.name "Auto-GPT-Bot" git config --global user.name "Auto-GPT-Bot"
git config --global user.email "github-bot@agpt.co" git config --global user.email "github-bot@agpt.co"
- name: Set up Python 3.12 - name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: "3.12" python-version: ${{ matrix.python-version }}
- id: get_date - id: get_date
name: Get date name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache - name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: ~/.cache/pypoetry path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }} key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
- name: Install Poetry - name: Install Poetry (Unix)
run: curl -sSL https://install.python-poetry.org | python3 - if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies - name: Install Python dependencies
run: poetry install run: poetry install
@@ -77,12 +116,12 @@ jobs:
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \ --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \ --numprocesses=logical --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \ --junitxml=junit.xml -o junit_family=legacy \
original_autogpt/tests/unit original_autogpt/tests/integration tests/unit tests/integration
env: env:
CI: true CI: true
PLAIN_OUTPUT: True PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
S3_ENDPOINT_URL: http://127.0.0.1:9000 S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
AWS_ACCESS_KEY_ID: minioadmin AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin AWS_SECRET_ACCESS_KEY: minioadmin
@@ -96,11 +135,11 @@ jobs:
uses: codecov/codecov-action@v5 uses: codecov/codecov-action@v5
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent flags: autogpt-agent,${{ runner.os }}
- name: Upload logs to artifact - name: Upload logs to artifact
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: test-logs name: test-logs
path: classic/logs/ path: classic/original_autogpt/logs/

View File

@@ -11,6 +11,9 @@ on:
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/forge/**' - 'classic/forge/**'
- 'classic/benchmark/**' - 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md' - '!**/*.md'
pull_request: pull_request:
branches: [ master, dev, release-* ] branches: [ master, dev, release-* ]
@@ -19,6 +22,9 @@ on:
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/forge/**' - 'classic/forge/**'
- 'classic/benchmark/**' - 'classic/benchmark/**'
- 'classic/run'
- 'classic/cli.py'
- 'classic/setup.py'
- '!**/*.md' - '!**/*.md'
defaults: defaults:
@@ -29,9 +35,13 @@ defaults:
jobs: jobs:
serve-agent-protocol: serve-agent-protocol:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [ original_autogpt ]
fail-fast: false
timeout-minutes: 20 timeout-minutes: 20
env: env:
min-python-version: '3.12' min-python-version: '3.10'
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -45,22 +55,22 @@ jobs:
python-version: ${{ env.min-python-version }} python-version: ${{ env.min-python-version }}
- name: Install Poetry - name: Install Poetry
working-directory: ./classic/${{ matrix.agent-name }}/
run: | run: |
curl -sSL https://install.python-poetry.org | python - curl -sSL https://install.python-poetry.org | python -
- name: Install dependencies - name: Run regression tests
run: poetry install
- name: Run smoke tests with direct-benchmark
run: | run: |
poetry run direct-benchmark run \ ./run agent start ${{ matrix.agent-name }}
--strategies one_shot \ cd ${{ matrix.agent-name }}
--models claude \ poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
--tests ReadFile,WriteFile \ poetry run agbenchmark --test=WriteFile
--json
env: env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} AGENT_NAME: ${{ matrix.agent-name }}
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
NONINTERACTIVE_MODE: "true" HELICONE_CACHE_ENABLED: false
CI: true HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
TELEMETRY_ENVIRONMENT: autogpt-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

View File

@@ -1,21 +1,17 @@
name: Classic - Direct Benchmark CI name: Classic - AGBenchmark CI
on: on:
push: push:
branches: [ master, dev, ci-test* ] branches: [ master, dev, ci-test* ]
paths: paths:
- 'classic/direct_benchmark/**' - 'classic/benchmark/**'
- 'classic/benchmark/agbenchmark/challenges/**' - '!classic/benchmark/reports/**'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- .github/workflows/classic-benchmark-ci.yml - .github/workflows/classic-benchmark-ci.yml
pull_request: pull_request:
branches: [ master, dev, release-* ] branches: [ master, dev, release-* ]
paths: paths:
- 'classic/direct_benchmark/**' - 'classic/benchmark/**'
- 'classic/benchmark/agbenchmark/challenges/**' - '!classic/benchmark/reports/**'
- 'classic/original_autogpt/**'
- 'classic/forge/**'
- .github/workflows/classic-benchmark-ci.yml - .github/workflows/classic-benchmark-ci.yml
concurrency: concurrency:
@@ -27,16 +23,23 @@ defaults:
shell: bash shell: bash
env: env:
min-python-version: '3.12' min-python-version: '3.10'
jobs: jobs:
benchmark-tests: test:
runs-on: ubuntu-latest permissions:
contents: read
timeout-minutes: 30 timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
defaults: defaults:
run: run:
shell: bash shell: bash
working-directory: classic working-directory: classic/benchmark
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -44,88 +47,71 @@ jobs:
fetch-depth: 0 fetch-depth: 0
submodules: true submodules: true
- name: Set up Python ${{ env.min-python-version }} - name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: ${{ env.min-python-version }} python-version: ${{ matrix.python-version }}
- name: Set up Python dependency cache - name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: ~/.cache/pypoetry path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }} key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
- name: Install Poetry - name: Install Poetry (Unix)
if: runner.os != 'Windows'
run: | run: |
curl -sSL https://install.python-poetry.org | python3 - curl -sSL https://install.python-poetry.org | python3 -
- name: Install dependencies if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies
run: poetry install run: poetry install
- name: Run basic benchmark tests - name: Run pytest with coverage
run: | run: |
echo "Testing ReadFile challenge with one_shot strategy..." poetry run pytest -vv \
poetry run direct-benchmark run \ --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--fresh \ --durations=10 \
--strategies one_shot \ --junitxml=junit.xml -o junit_family=legacy \
--models claude \ tests
--tests ReadFile \
--json
echo "Testing WriteFile challenge..."
poetry run direct-benchmark run \
--fresh \
--strategies one_shot \
--models claude \
--tests WriteFile \
--json
env: env:
CI: true CI: true
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
NONINTERACTIVE_MODE: "true"
- name: Test category filtering - name: Upload test results to Codecov
run: | if: ${{ !cancelled() }} # Run even if tests fail
echo "Testing coding category..." uses: codecov/test-results-action@v1
poetry run direct-benchmark run \ with:
--fresh \ token: ${{ secrets.CODECOV_TOKEN }}
--strategies one_shot \
--models claude \
--categories coding \
--tests ReadFile,WriteFile \
--json
env:
CI: true
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
NONINTERACTIVE_MODE: "true"
- name: Test multiple strategies - name: Upload coverage reports to Codecov
run: | uses: codecov/codecov-action@v5
echo "Testing multiple strategies..." with:
poetry run direct-benchmark run \ token: ${{ secrets.CODECOV_TOKEN }}
--fresh \ flags: agbenchmark,${{ runner.os }}
--strategies one_shot,plan_execute \
--models claude \
--tests ReadFile \
--parallel 2 \
--json
env:
CI: true
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
NONINTERACTIVE_MODE: "true"
# Run regression tests on maintain challenges self-test-with-agent:
regression-tests:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 45 strategy:
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev' matrix:
defaults: agent-name: [forge]
run: fail-fast: false
shell: bash timeout-minutes: 20
working-directory: classic
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -140,23 +126,51 @@ jobs:
- name: Install Poetry - name: Install Poetry
run: | run: |
curl -sSL https://install.python-poetry.org | python3 - curl -sSL https://install.python-poetry.org | python -
- name: Install dependencies
run: poetry install
- name: Run regression tests - name: Run regression tests
working-directory: classic
run: | run: |
echo "Running regression tests (previously beaten challenges)..." ./run agent start ${{ matrix.agent-name }}
poetry run direct-benchmark run \ cd ${{ matrix.agent-name }}
--fresh \
--strategies one_shot \ set +e # Ignore non-zero exit codes and continue execution
--models claude \ echo "Running the following command: poetry run agbenchmark --maintain --mock"
--maintain \ poetry run agbenchmark --maintain --mock
--parallel 4 \ EXIT_CODE=$?
--json set -e # Stop ignoring non-zero exit codes
# Check if the exit code was 5, and if so, exit with 0 instead
if [ $EXIT_CODE -eq 5 ]; then
echo "regression_tests.json is empty."
fi
echo "Running the following command: poetry run agbenchmark --mock"
poetry run agbenchmark --mock
echo "Running the following command: poetry run agbenchmark --mock --category=data"
poetry run agbenchmark --mock --category=data
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true
# poetry run agbenchmark --mock
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ ! -z "$CHANGED" ]; then
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
env: env:
CI: true
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
NONINTERACTIVE_MODE: "true" TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

View File

@@ -6,11 +6,13 @@ on:
paths: paths:
- '.github/workflows/classic-forge-ci.yml' - '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**' - 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request: pull_request:
branches: [ master, dev, release-* ] branches: [ master, dev, release-* ]
paths: paths:
- '.github/workflows/classic-forge-ci.yml' - '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**' - 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
concurrency: concurrency:
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -19,38 +21,115 @@ concurrency:
defaults: defaults:
run: run:
shell: bash shell: bash
working-directory: classic working-directory: classic/forge
jobs: jobs:
test: test:
permissions: permissions:
contents: read contents: read
timeout-minutes: 30 timeout-minutes: 30
runs-on: ubuntu-latest strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
platform-os: [ubuntu, macos, macos-arm64, windows]
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
steps: steps:
- name: Start MinIO service # Quite slow on macOS (2~4 minutes to set up Docker)
# - name: Set up Docker (macOS)
# if: runner.os == 'macOS'
# uses: crazy-max/ghaction-setup-docker@v3
- name: Start MinIO service (Linux)
if: runner.os == 'Linux'
working-directory: '.' working-directory: '.'
run: | run: |
docker pull minio/minio:edge-cicd docker pull minio/minio:edge-cicd
docker run -d -p 9000:9000 minio/minio:edge-cicd docker run -d -p 9000:9000 minio/minio:edge-cicd
- name: Start MinIO service (macOS)
if: runner.os == 'macOS'
working-directory: ${{ runner.temp }}
run: |
brew install minio/stable/minio
mkdir data
minio server ./data &
# No MinIO on Windows:
# - Windows doesn't support running Linux Docker containers
# - It doesn't seem possible to start background processes on Windows. They are
# killed after the step returns.
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: Set up Python 3.12 - name: Checkout cassettes
if: ${{ startsWith(github.event_name, 'pull_request') }}
env:
PR_BASE: ${{ github.event.pull_request.base.ref }}
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
cassette_base_branch="${PR_BASE}"
cd tests/vcr_cassettes
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
cassette_base_branch="master"
fi
if git ls-remote --exit-code --heads origin $cassette_branch ; then
git fetch origin $cassette_branch
git fetch origin $cassette_base_branch
git checkout $cassette_branch
# Pick non-conflicting cassette updates from the base branch
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
echo "Using cassettes from mirror branch '$cassette_branch'," \
"synced to upstream branch '$cassette_base_branch'."
else
git checkout -b $cassette_branch
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
"Using cassettes from '$cassette_base_branch'."
fi
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: "3.12" python-version: ${{ matrix.python-version }}
- name: Set up Python dependency cache - name: Set up Python dependency cache
# On Windows, unpacking cached dependencies takes longer than just installing them
if: runner.os != 'Windows'
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: ~/.cache/pypoetry path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }} key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
- name: Install Poetry - name: Install Poetry (Unix)
run: curl -sSL https://install.python-poetry.org | python3 - if: runner.os != 'Windows'
run: |
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
- name: Install Poetry (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
$env:PATH += ";$env:APPDATA\Python\Scripts"
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
- name: Install Python dependencies - name: Install Python dependencies
run: poetry install run: poetry install
@@ -61,15 +140,12 @@ jobs:
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \ --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \ --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \ --junitxml=junit.xml -o junit_family=legacy \
forge/forge forge/tests forge
env: env:
CI: true CI: true
PLAIN_OUTPUT: True PLAIN_OUTPUT: True
# API keys - tests that need these will skip if not available
# Secrets are not available to fork PRs (GitHub security feature)
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
S3_ENDPOINT_URL: http://127.0.0.1:9000
AWS_ACCESS_KEY_ID: minioadmin AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin AWS_SECRET_ACCESS_KEY: minioadmin
@@ -83,11 +159,85 @@ jobs:
uses: codecov/codecov-action@v5 uses: codecov/codecov-action@v5
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
flags: forge flags: forge,${{ runner.os }}
- id: setup_git_auth
name: Set up git token authentication
# Cassettes may be pushed even when tests fail
if: success() || failure()
run: |
config_key="http.${{ github.server_url }}/.extraheader"
if [ "${{ runner.os }}" = 'macOS' ]; then
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
else
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
fi
git config "$config_key" \
"Authorization: Basic $base64_pat"
cd tests/vcr_cassettes
git config "$config_key" \
"Authorization: Basic $base64_pat"
echo "config_key=$config_key" >> $GITHUB_OUTPUT
- id: push_cassettes
name: Push updated cassettes
# For pull requests, push updated cassettes even when tests fail
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
env:
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
is_pull_request=true
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
else
cassette_branch="${{ github.ref_name }}"
fi
cd tests/vcr_cassettes
# Commit & push changes to cassettes if any
if ! git diff --quiet; then
git add .
git commit -m "Auto-update cassettes"
git push origin HEAD:$cassette_branch
if [ ! $is_pull_request ]; then
cd ../..
git add tests/vcr_cassettes
git commit -m "Update cassette submodule"
git push origin HEAD:$cassette_branch
fi
echo "updated=true" >> $GITHUB_OUTPUT
else
echo "updated=false" >> $GITHUB_OUTPUT
echo "No cassette changes to commit"
fi
- name: Post Set up git token auth
if: steps.setup_git_auth.outcome == 'success'
run: |
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- name: Apply "behaviour change" label and comment on PR
if: ${{ startsWith(github.event_name, 'pull_request') }}
run: |
PR_NUMBER="${{ github.event.pull_request.number }}"
TOKEN="${{ secrets.PAT_REVIEW }}"
REPO="${{ github.repository }}"
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
echo "Adding label and comment..."
echo $TOKEN | gh auth login --with-token
gh issue edit $PR_NUMBER --add-label "behaviour change"
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs to artifact - name: Upload logs to artifact
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: test-logs name: test-logs
path: classic/logs/ path: classic/forge/logs/

View File

@@ -0,0 +1,60 @@
name: Classic - Frontend CI/CD
on:
push:
branches:
- master
- dev
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
pull_request:
paths:
- 'classic/frontend/**'
- '.github/workflows/classic-frontend-ci.yml'
jobs:
build:
permissions:
contents: write
pull-requests: write
runs-on: ubuntu-latest
env:
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
# if: github.event_name == 'push'
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git add classic/frontend/build/web
# git checkout -B ${{ env.BUILD_BRANCH }}
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
# git push -f origin ${{ env.BUILD_BRANCH }}
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v8
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"

View File

@@ -7,9 +7,7 @@ on:
- '.github/workflows/classic-python-checks-ci.yml' - '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/forge/**' - 'classic/forge/**'
- 'classic/direct_benchmark/**' - 'classic/benchmark/**'
- 'classic/pyproject.toml'
- 'classic/poetry.lock'
- '**.py' - '**.py'
- '!classic/forge/tests/vcr_cassettes' - '!classic/forge/tests/vcr_cassettes'
pull_request: pull_request:
@@ -18,9 +16,7 @@ on:
- '.github/workflows/classic-python-checks-ci.yml' - '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**' - 'classic/original_autogpt/**'
- 'classic/forge/**' - 'classic/forge/**'
- 'classic/direct_benchmark/**' - 'classic/benchmark/**'
- 'classic/pyproject.toml'
- 'classic/poetry.lock'
- '**.py' - '**.py'
- '!classic/forge/tests/vcr_cassettes' - '!classic/forge/tests/vcr_cassettes'
@@ -31,13 +27,44 @@ concurrency:
defaults: defaults:
run: run:
shell: bash shell: bash
working-directory: classic
jobs: jobs:
get-changed-parts:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- id: changes-in
name: Determine affected subprojects
uses: dorny/paths-filter@v3
with:
filters: |
original_autogpt:
- classic/original_autogpt/autogpt/**
- classic/original_autogpt/tests/**
- classic/original_autogpt/poetry.lock
forge:
- classic/forge/forge/**
- classic/forge/tests/**
- classic/forge/poetry.lock
benchmark:
- classic/benchmark/agbenchmark/**
- classic/benchmark/tests/**
- classic/benchmark/poetry.lock
outputs:
changed-parts: ${{ steps.changes-in.outputs.changes }}
lint: lint:
needs: get-changed-parts
runs-on: ubuntu-latest runs-on: ubuntu-latest
env: env:
min-python-version: "3.12" min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps: steps:
- name: Checkout repository - name: Checkout repository
@@ -54,31 +81,42 @@ jobs:
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: ~/.cache/pypoetry path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }} key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry - name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 - run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies - name: Install Python dependencies
run: poetry install run: poetry -C classic/${{ matrix.sub-package }} install
# Lint # Lint
- name: Lint (isort) - name: Lint (isort)
run: poetry run isort --check . run: poetry run isort --check .
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Black) - name: Lint (Black)
if: success() || failure() if: success() || failure()
run: poetry run black --check . run: poetry run black --check .
working-directory: classic/${{ matrix.sub-package }}
- name: Lint (Flake8) - name: Lint (Flake8)
if: success() || failure() if: success() || failure()
run: poetry run flake8 . run: poetry run flake8 .
working-directory: classic/${{ matrix.sub-package }}
types: types:
needs: get-changed-parts
runs-on: ubuntu-latest runs-on: ubuntu-latest
env: env:
min-python-version: "3.12" min-python-version: "3.10"
strategy:
matrix:
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
fail-fast: false
steps: steps:
- name: Checkout repository - name: Checkout repository
@@ -95,16 +133,19 @@ jobs:
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: ~/.cache/pypoetry path: ~/.cache/pypoetry
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }} key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
- name: Install Poetry - name: Install Poetry
run: curl -sSL https://install.python-poetry.org | python3 - run: curl -sSL https://install.python-poetry.org | python3 -
# Install dependencies
- name: Install Python dependencies - name: Install Python dependencies
run: poetry install run: poetry -C classic/${{ matrix.sub-package }} install
# Typecheck # Typecheck
- name: Typecheck - name: Typecheck
if: success() || failure() if: success() || failure()
run: poetry run pyright run: poetry run pyright
working-directory: classic/${{ matrix.sub-package }}

View File

@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
ref: ${{ github.event.workflow_run.head_branch }} ref: ${{ github.event.workflow_run.head_branch }}
fetch-depth: 0 fetch-depth: 0

View File

@@ -30,7 +30,7 @@ jobs:
actions: read # Required for CI access actions: read # Required for CI access
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 1 fetch-depth: 1

View File

@@ -40,7 +40,7 @@ jobs:
actions: read # Required for CI access actions: read # Required for CI access
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 1 fetch-depth: 1

View File

@@ -58,7 +58,7 @@ jobs:
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL

View File

@@ -27,7 +27,7 @@ jobs:
# If you do not check out your code, Copilot will do this for you. # If you do not check out your code, Copilot will do this for you.
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
submodules: true submodules: true

View File

@@ -23,7 +23,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 1 fetch-depth: 1

View File

@@ -23,7 +23,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0

View File

@@ -28,7 +28,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 1 fetch-depth: 1

View File

@@ -25,7 +25,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
ref: ${{ github.event.inputs.git_ref || github.ref_name }} ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Trigger deploy workflow - name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v4 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.DEPLOY_TOKEN }} token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -17,7 +17,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
ref: ${{ github.ref_name || 'master' }} ref: ${{ github.ref_name || 'master' }}
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Trigger deploy workflow - name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v4 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.DEPLOY_TOKEN }} token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -68,7 +68,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
submodules: true submodules: true

View File

@@ -82,7 +82,7 @@ jobs:
- name: Dispatch Deploy Event - name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true' if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v4 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.DISPATCH_TOKEN }} token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
- name: Dispatch Undeploy Event (from comment) - name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true' if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v4 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.DISPATCH_TOKEN }} token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
github.event_name == 'pull_request' && github.event_name == 'pull_request' &&
github.event.action == 'closed' && github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true' steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v4 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.DISPATCH_TOKEN }} token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -31,7 +31,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Check for component changes - name: Check for component changes
uses: dorny/paths-filter@v3 uses: dorny/paths-filter@v3
@@ -71,7 +71,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Set up Node.js - name: Set up Node.js
uses: actions/setup-node@v6 uses: actions/setup-node@v6
@@ -107,7 +107,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -148,7 +148,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: recursive submodules: recursive
@@ -277,7 +277,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: recursive submodules: recursive

View File

@@ -29,7 +29,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Set up Node.js - name: Set up Node.js
uses: actions/setup-node@v6 uses: actions/setup-node@v6
@@ -63,7 +63,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: recursive submodules: recursive

View File

@@ -11,7 +11,7 @@ jobs:
steps: steps:
# - name: Wait some time for all actions to start # - name: Wait some time for all actions to start
# run: sleep 30 # run: sleep 30
- uses: actions/checkout@v6 - uses: actions/checkout@v4
# with: # with:
# fetch-depth: 0 # fetch-depth: 0
- name: Set up Python - name: Set up Python

.gitignore
View File

@@ -3,7 +3,6 @@
 classic/original_autogpt/keys.py
 classic/original_autogpt/*.json
 auto_gpt_workspace/*
-.autogpt/
 *.mpeg
 .env
 # Root .env files
@@ -160,10 +159,6 @@ CURRENT_BULLETIN.md

 # AgBenchmark
 classic/benchmark/agbenchmark/reports/
-classic/reports/
-classic/direct_benchmark/reports/
-classic/.benchmark_workspaces/
-classic/direct_benchmark/.benchmark_workspaces/

 # Nodejs
 package-lock.json
@@ -182,11 +177,7 @@ autogpt_platform/backend/settings.py
 *.ign.*
 .test-contents
-**/.claude/settings.local.json
 .claude/settings.local.json
 CLAUDE.local.md
 /autogpt_platform/backend/logs
-
-# Test database
-test.db
 .next

.gitmodules (new file)
View File

@@ -0,0 +1,3 @@
+[submodule "classic/forge/tests/vcr_cassettes"]
+	path = classic/forge/tests/vcr_cassettes
+	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes

View File

@@ -43,10 +43,29 @@ repos:
pass_filenames: false pass_filenames: false
- id: poetry-install - id: poetry-install
name: Check & Install dependencies - Classic name: Check & Install dependencies - Classic - AutoGPT
alias: poetry-install-classic alias: poetry-install-classic-autogpt
entry: poetry -C classic install entry: poetry -C classic/original_autogpt install
files: ^classic/poetry\.lock$ # include forge source (since it's a path dependency)
files: ^classic/(original_autogpt|forge)/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Forge
alias: poetry-install-classic-forge
entry: poetry -C classic/forge install
files: ^classic/forge/poetry\.lock$
types: [file]
language: system
pass_filenames: false
- id: poetry-install
name: Check & Install dependencies - Classic - Benchmark
alias: poetry-install-classic-benchmark
entry: poetry -C classic/benchmark install
files: ^classic/benchmark/poetry\.lock$
types: [file] types: [file]
language: system language: system
pass_filenames: false pass_filenames: false
@@ -97,10 +116,26 @@ repos:
language: system language: system
- id: isort - id: isort
name: Lint (isort) - Classic name: Lint (isort) - Classic - AutoGPT
alias: isort-classic alias: isort-classic-autogpt
entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' -- entry: poetry -P classic/original_autogpt run isort -p autogpt
files: ^classic/(original_autogpt|forge|direct_benchmark)/ files: ^classic/original_autogpt/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Forge
alias: isort-classic-forge
entry: poetry -P classic/forge run isort -p forge
files: ^classic/forge/
types: [file, python]
language: system
- id: isort
name: Lint (isort) - Classic - Benchmark
alias: isort-classic-benchmark
entry: poetry -P classic/benchmark run isort -p agbenchmark
files: ^classic/benchmark/
types: [file, python] types: [file, python]
language: system language: system
@@ -114,13 +149,26 @@ repos:
- repo: https://github.com/PyCQA/flake8 - repo: https://github.com/PyCQA/flake8
rev: 7.0.0 rev: 7.0.0
# Use consolidated flake8 config at classic/.flake8 # To have flake8 load the config of the individual subprojects, we have to call
# them separately.
hooks: hooks:
- id: flake8 - id: flake8
name: Lint (Flake8) - Classic name: Lint (Flake8) - Classic - AutoGPT
alias: flake8-classic alias: flake8-classic-autogpt
files: ^classic/(original_autogpt|forge|direct_benchmark)/ files: ^classic/original_autogpt/(autogpt|scripts|tests)/
args: [--config=classic/.flake8] args: [--config=classic/original_autogpt/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Forge
alias: flake8-classic-forge
files: ^classic/forge/(forge|tests)/
args: [--config=classic/forge/.flake8]
- id: flake8
name: Lint (Flake8) - Classic - Benchmark
alias: flake8-classic-benchmark
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
- repo: local - repo: local
hooks: hooks:
@@ -156,10 +204,29 @@ repos:
pass_filenames: false pass_filenames: false
- id: pyright - id: pyright
name: Typecheck - Classic name: Typecheck - Classic - AutoGPT
alias: pyright-classic alias: pyright-classic-autogpt
entry: poetry -C classic run pyright entry: poetry -C classic/original_autogpt run pyright
files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$ # include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Forge
alias: pyright-classic-forge
entry: poetry -C classic/forge run pyright
files: ^classic/forge/(forge/|poetry\.lock$)
types: [file]
language: system
pass_filenames: false
- id: pyright
name: Typecheck - Classic - Benchmark
alias: pyright-classic-benchmark
entry: poetry -C classic/benchmark run pyright
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
types: [file] types: [file]
language: system language: system
pass_filenames: false pass_filenames: false

View File

@@ -96,7 +96,13 @@ class ChatConfig(BaseSettings):
     # Extended thinking configuration for Claude models
     thinking_enabled: bool = Field(
         default=True,
-        description="Enable adaptive thinking for Claude models via OpenRouter",
+        description="Enable extended thinking for Claude models via OpenRouter",
+    )
+    thinking_budget_tokens: int = Field(
+        default=10000,
+        ge=1000,
+        le=100000,
+        description="Maximum tokens for extended thinking (budget_tokens for Claude)",
     )

     @field_validator("api_key", mode="before")
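
As a quick illustration of the bounds added above (ge=1000, le=100000), here is a minimal standalone pydantic model assumed to behave like the real ChatConfig fields; values outside the range are rejected at construction time.

from pydantic import BaseModel, Field, ValidationError

class ThinkingSettings(BaseModel):          # stand-in for the real ChatConfig
    thinking_enabled: bool = True
    thinking_budget_tokens: int = Field(default=10000, ge=1000, le=100000)

print(ThinkingSettings().thinking_budget_tokens)   # 10000
try:
    ThinkingSettings(thinking_budget_tokens=500)   # below the ge=1000 bound
except ValidationError as err:
    print(err.errors()[0]["type"])                 # greater_than_equal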

View File

@@ -10,8 +10,6 @@ from typing import Any

 from pydantic import BaseModel, Field

-from backend.util.json import dumps as json_dumps
-

 class ResponseType(str, Enum):
     """Types of streaming responses following AI SDK protocol."""
@@ -195,18 +193,6 @@ class StreamError(StreamBaseResponse):
         default=None, description="Additional error details"
     )

-    def to_sse(self) -> str:
-        """Convert to SSE format, only emitting fields required by AI SDK protocol.
-
-        The AI SDK uses z.strictObject({type, errorText}) which rejects
-        any extra fields like `code` or `details`.
-        """
-        data = {
-            "type": self.type.value,
-            "errorText": self.errorText,
-        }
-        return f"data: {json_dumps(data)}\n\n"
-

 class StreamHeartbeat(StreamBaseResponse):
     """Heartbeat to keep SSE connection alive during long-running operations.

View File

@@ -80,6 +80,19 @@ settings = Settings()

 client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)

+
+def _apply_thinking_config(extra_body: dict[str, Any], model: str) -> None:
+    """Apply extended thinking configuration for Anthropic models via OpenRouter.
+
+    OpenRouter's reasoning API expects either:
+    - {"max_tokens": N} for explicit token budget
+    - {"effort": "high"} for automatic budget
+
+    See: https://openrouter.ai/docs/reasoning-tokens
+    """
+    if config.thinking_enabled and "anthropic" in model.lower():
+        extra_body["reasoning"] = {"max_tokens": config.thinking_budget_tokens}
+
 langfuse = get_client()

 # Redis key prefix for tracking running long-running operations
@@ -1066,9 +1079,8 @@ async def _stream_chat_chunks(
             :128
         ]  # OpenRouter limit

-    # Enable adaptive thinking for Anthropic models via OpenRouter
-    if config.thinking_enabled and "anthropic" in model.lower():
-        extra_body["reasoning"] = {"enabled": True}
+    # Enable extended thinking for Anthropic models via OpenRouter
+    _apply_thinking_config(extra_body, model)

     api_call_start = time_module.perf_counter()
     stream = await client.chat.completions.create(
@@ -1833,9 +1845,8 @@ async def _generate_llm_continuation(
     if session_id:
         extra_body["session_id"] = session_id[:128]

-    # Enable adaptive thinking for Anthropic models via OpenRouter
-    if config.thinking_enabled and "anthropic" in config.model.lower():
-        extra_body["reasoning"] = {"enabled": True}
+    # Enable extended thinking for Anthropic models via OpenRouter
+    _apply_thinking_config(extra_body, config.model)

     retry_count = 0
     last_error: Exception | None = None
@@ -1967,9 +1978,8 @@ async def _generate_llm_continuation_with_streaming(
     if session_id:
         extra_body["session_id"] = session_id[:128]

-    # Enable adaptive thinking for Anthropic models via OpenRouter
-    if config.thinking_enabled and "anthropic" in config.model.lower():
-        extra_body["reasoning"] = {"enabled": True}
+    # Enable extended thinking for Anthropic models via OpenRouter
+    _apply_thinking_config(extra_body, config.model)

     # Make streaming LLM call (no tools - just text response)
     from typing import cast
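
A hedged sketch of how the helper above is wired into each call site: session metadata and the reasoning budget are collected in extra_body, which is then forwarded to the OpenAI-compatible OpenRouter client. The inline re-implementation exists only to make the snippet runnable on its own; the model and session values are placeholders.

from typing import Any

def apply_thinking_config(extra_body: dict[str, Any], model: str,
                          enabled: bool = True, budget_tokens: int = 10000) -> None:
    # Same shape as the added _apply_thinking_config(), with config passed as arguments.
    if enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"max_tokens": budget_tokens}

extra_body: dict[str, Any] = {"session_id": "example-session"[:128]}
apply_thinking_config(extra_body, "anthropic/claude-sonnet-4")
print(extra_body)
# {'session_id': 'example-session', 'reasoning': {'max_tokens': 10000}}
# The dict is then passed as:
#   await client.chat.completions.create(model=..., messages=..., stream=True,
#                                        extra_body=extra_body)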

View File

@@ -21,71 +21,43 @@ logger = logging.getLogger(__name__)

 class HumanInTheLoopBlock(Block):
     """
-    Pauses execution and waits for human approval or rejection of the data.
+    This block pauses execution and waits for human approval or modification of the data.

-    When executed, this block creates a pending review entry and sets the node execution
-    status to REVIEW. The execution remains paused until a human user either approves
-    or rejects the data.
+    When executed, it creates a pending review entry and sets the node execution status
+    to REVIEW. The execution will remain paused until a human user either:
+    - Approves the data (with or without modifications)
+    - Rejects the data

-    **How it works:**
-    - The input data is presented to a human reviewer
-    - The reviewer can approve or reject (and optionally modify the data if editable)
-    - On approval: the data flows out through the `approved_data` output pin
-    - On rejection: the data flows out through the `rejected_data` output pin
-
-    **Important:** The output pins yield the actual data itself, NOT status strings.
-    The approval/rejection decision determines WHICH output pin fires, not the value.
-    You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
-    downstream blocks to the appropriate output pin for each case.
-
-    **Example usage:**
-    - Connect `approved_data` → next step in your workflow (data was approved)
-    - Connect `rejected_data` → error handling or notification (data was rejected)
+    This is useful for workflows that require human validation or intervention before
+    proceeding to the next steps.
     """

     class Input(BlockSchemaInput):
-        data: Any = SchemaField(
-            description="The data to be reviewed by a human user. "
-            "This exact data will be passed through to either approved_data or "
-            "rejected_data output based on the reviewer's decision."
-        )
+        data: Any = SchemaField(description="The data to be reviewed by a human user")
         name: str = SchemaField(
-            description="A descriptive name for what this data represents. "
-            "This helps the reviewer understand what they are reviewing.",
+            description="A descriptive name for what this data represents",
         )
         editable: bool = SchemaField(
-            description="Whether the human reviewer can edit the data before "
-            "approving or rejecting it",
+            description="Whether the human reviewer can edit the data",
             default=True,
             advanced=True,
         )

     class Output(BlockSchemaOutput):
         approved_data: Any = SchemaField(
-            description="Outputs the input data when the reviewer APPROVES it. "
-            "The value is the actual data itself (not a status string like 'APPROVED'). "
-            "If the reviewer edited the data, this contains the modified version. "
-            "Connect downstream blocks here for the 'approved' workflow path."
+            description="The data when approved (may be modified by reviewer)"
         )
         rejected_data: Any = SchemaField(
-            description="Outputs the input data when the reviewer REJECTS it. "
-            "The value is the actual data itself (not a status string like 'REJECTED'). "
-            "If the reviewer edited the data, this contains the modified version. "
-            "Connect downstream blocks here for the 'rejected' workflow path."
+            description="The data when rejected (may be modified by reviewer)"
         )
         review_message: str = SchemaField(
-            description="Optional message provided by the reviewer explaining their "
-            "decision. Only outputs when the reviewer provides a message; "
-            "this pin does not fire if no message was given.",
-            default="",
+            description="Any message provided by the reviewer", default=""
         )

     def __init__(self):
         super().__init__(
             id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
-            description="Pause execution for human review. Data flows through "
-            "approved_data or rejected_data output based on the reviewer's decision. "
-            "Outputs contain the actual data, not status strings.",
+            description="Pause execution and wait for human approval or modification of data",
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
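
To make the routing semantics described in the longer (left-hand) docstring concrete: the reviewer's decision selects which output pin fires, and the value on that pin is the reviewed data itself, not a status string. The generator below is an illustrative stand-in, not the platform's real Block API.

from typing import Any, Iterator, Tuple

def route_review_result(data: Any, approved: bool, message: str = "") -> Iterator[Tuple[str, Any]]:
    if approved:
        yield "approved_data", data      # wire the approved workflow path here
    else:
        yield "rejected_data", data      # wire error handling / notification here
    if message:
        yield "review_message", message  # fires only when the reviewer left a note

print(list(route_review_result({"amount": 42}, approved=True)))
# [('approved_data', {'amount': 42})]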

View File

@@ -743,11 +743,6 @@ class GraphModel(Graph, GraphMeta):
             # For invalid blocks, we still raise immediately as this is a structural issue
             raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

-        if block.disabled:
-            raise ValueError(
-                f"Block {node.block_id} is disabled and cannot be used in graphs"
-            )
-
         node_input_mask = (
             nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
         )

View File

@@ -213,9 +213,6 @@ async def execute_node(
         block_name=node_block.name,
     )

-    if node_block.disabled:
-        raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
-
     # Sanity check: validate the execution input.
     input_data, error = validate_exec(node, data.inputs, resolve_input=False)
     if input_data is None:

View File

@@ -364,44 +364,6 @@ def _remove_orphan_tool_responses(
     return result

-
-def validate_and_remove_orphan_tool_responses(
-    messages: list[dict],
-    log_warning: bool = True,
-) -> list[dict]:
-    """
-    Validate tool_call/tool_response pairs and remove orphaned responses.
-
-    Scans messages in order, tracking all tool_call IDs. Any tool response
-    referencing an ID not seen in a preceding message is considered orphaned
-    and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
-
-    Args:
-        messages: List of messages to validate (OpenAI or Anthropic format)
-        log_warning: Whether to log a warning when orphans are found
-
-    Returns:
-        A new list with orphaned tool responses removed
-    """
-    available_ids: set[str] = set()
-    orphan_ids: set[str] = set()
-
-    for msg in messages:
-        available_ids |= _extract_tool_call_ids_from_message(msg)
-        for resp_id in _extract_tool_response_ids_from_message(msg):
-            if resp_id not in available_ids:
-                orphan_ids.add(resp_id)
-
-    if not orphan_ids:
-        return messages
-
-    if log_warning:
-        logger.warning(
-            f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
-        )
-
-    return _remove_orphan_tool_responses(messages, orphan_ids)
-

 def _ensure_tool_pairs_intact(
     recent_messages: list[dict],
     all_messages: list[dict],
@@ -761,13 +723,6 @@ async def compress_context(
     # Filter out any None values that may have been introduced
     final_msgs: list[dict] = [m for m in msgs if m is not None]

-    # ---- STEP 6: Final tool-pair validation ---------------------------------
-    # After all compression steps, verify that every tool response has a
-    # matching tool_call in a preceding assistant message. Remove orphans
-    # to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
-    final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
-
     final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
     error = None
     if final_count + reserve > target_tokens:

View File

@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"
[[package]] [[package]]
name = "aiofiles" name = "aiofiles"
version = "25.1.0" version = "24.1.0"
description = "File support for asyncio." description = "File support for asyncio."
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.8"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"}, {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
{file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"}, {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
] ]
[[package]] [[package]]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata] [metadata]
lock-version = "2.1" lock-version = "2.1"
python-versions = ">=3.10,<3.14" python-versions = ">=3.10,<3.14"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af" content-hash = "fc135114e01de39c8adf70f6132045e7d44a19473c1279aee0978de65aad1655"

View File

@@ -76,7 +76,7 @@ yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2" zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location # NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0" pytest-snapshot = "^0.9.0"
aiofiles = "^25.1.0" aiofiles = "^24.1.0"
tiktoken = "^0.12.0" tiktoken = "^0.12.0"
aioclamd = "^1.0.0" aioclamd = "^1.0.0"
setuptools = "^80.9.0" setuptools = "^80.9.0"

View File

@@ -1,11 +1,11 @@
"use client"; "use client";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { SidebarProvider } from "@/components/ui/sidebar"; import { SidebarProvider } from "@/components/ui/sidebar";
import { ChatContainer } from "./components/ChatContainer/ChatContainer"; import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar"; import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer"; import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
import { MobileHeader } from "./components/MobileHeader/MobileHeader"; import { MobileHeader } from "./components/MobileHeader/MobileHeader";
import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
import { useCopilotPage } from "./useCopilotPage"; import { useCopilotPage } from "./useCopilotPage";
export function CopilotPage() { export function CopilotPage() {
@@ -34,11 +34,7 @@ export function CopilotPage() {
} = useCopilotPage(); } = useCopilotPage();
if (isUserLoading || !isLoggedIn) { if (isUserLoading || !isLoggedIn) {
return ( return <LoadingSpinner size="large" cover />;
<div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
<ScaleLoader className="text-neutral-400" />
</div>
);
} }
return ( return (

View File

@@ -10,9 +10,8 @@ import {
MessageResponse, MessageResponse,
} from "@/components/ai-elements/message"; } from "@/components/ai-elements/message";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { toast } from "@/components/molecules/Toast/use-toast";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai"; import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useRef, useState } from "react"; import { useEffect, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent"; import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../tools/EditAgent/EditAgent"; import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents"; import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
@@ -122,7 +121,6 @@ export const ChatMessagesContainer = ({
isLoading, isLoading,
}: ChatMessagesContainerProps) => { }: ChatMessagesContainerProps) => {
const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase); const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
const lastToastTimeRef = useRef(0);
useEffect(() => { useEffect(() => {
if (status === "submitted") { if (status === "submitted") {
@@ -130,20 +128,6 @@ export const ChatMessagesContainer = ({
} }
}, [status]); }, [status]);
// Show a toast when a new error occurs, debounced to avoid spam
useEffect(() => {
if (!error) return;
const now = Date.now();
if (now - lastToastTimeRef.current < 3_000) return;
lastToastTimeRef.current = now;
toast({
variant: "destructive",
title: "Something went wrong",
description:
"The assistant encountered an error. Please try sending your message again.",
});
}, [error]);
const lastMessage = messages[messages.length - 1]; const lastMessage = messages[messages.length - 1];
const lastAssistantHasVisibleContent = const lastAssistantHasVisibleContent =
lastMessage?.role === "assistant" && lastMessage?.role === "assistant" &&
@@ -159,10 +143,10 @@ export const ChatMessagesContainer = ({
return ( return (
<Conversation className="min-h-0 flex-1"> <Conversation className="min-h-0 flex-1">
<ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6"> <ConversationContent className="gap-6 px-3 py-6">
{isLoading && messages.length === 0 && ( {isLoading && messages.length === 0 && (
<div className="flex min-h-full flex-1 items-center justify-center"> <div className="flex flex-1 items-center justify-center">
<LoadingSpinner className="text-neutral-600" /> <LoadingSpinner size="large" className="text-neutral-400" />
</div> </div>
)} )}
{messages.map((message, messageIndex) => { {messages.map((message, messageIndex) => {
@@ -279,12 +263,8 @@ export const ChatMessagesContainer = ({
</Message> </Message>
)} )}
{error && ( {error && (
<div className="rounded-lg bg-red-50 p-4 text-sm text-red-700"> <div className="rounded-lg bg-red-50 p-3 text-red-600">
<p className="font-medium">Something went wrong</p> Error: {error.message}
<p className="mt-1 text-red-600">
The assistant encountered an error. Please try sending your
message again.
</p>
</div> </div>
)} )}
</ConversationContent> </ConversationContent>

View File

@@ -121,8 +121,8 @@ export function ChatSidebar() {
className="mt-4 flex flex-col gap-1" className="mt-4 flex flex-col gap-1"
> >
{isLoadingSessions ? ( {isLoadingSessions ? (
<div className="flex min-h-[30rem] items-center justify-center py-4"> <div className="flex items-center justify-center py-4">
<LoadingSpinner size="small" className="text-neutral-600" /> <LoadingSpinner size="small" className="text-neutral-400" />
</div> </div>
) : sessions.length === 0 ? ( ) : sessions.length === 0 ? (
<p className="py-4 text-center text-sm text-neutral-500"> <p className="py-4 text-center text-sm text-neutral-500">

View File

@@ -1,35 +0,0 @@
.loader {
width: 48px;
height: 48px;
display: inline-block;
position: relative;
}
.loader::after,
.loader::before {
content: "";
box-sizing: border-box;
width: 100%;
height: 100%;
border-radius: 50%;
background: currentColor;
position: absolute;
left: 0;
top: 0;
animation: animloader 2s linear infinite;
}
.loader::after {
animation-delay: 1s;
}
@keyframes animloader {
0% {
transform: scale(0);
opacity: 1;
}
100% {
transform: scale(1);
opacity: 0;
}
}

View File

@@ -1,16 +0,0 @@
import { cn } from "@/lib/utils";
import styles from "./ScaleLoader.module.css";
interface Props {
size?: number;
className?: string;
}
export function ScaleLoader({ size = 48, className }: Props) {
return (
<div
className={cn(styles.loader, className)}
style={{ width: size, height: size }}
/>
);
}

View File

@@ -30,7 +30,7 @@ export function ContentCard({
return ( return (
<div <div
className={cn( className={cn(
"min-w-0 rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]", "rounded-lg bg-gradient-to-r from-purple-500/30 to-blue-500/30 p-[1px]",
className, className,
)} )}
> >

View File

@@ -4,6 +4,7 @@ import { WarningDiamondIcon } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai"; import type { ToolUIPart } from "ai";
import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions"; import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation"; import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
import { ProgressBar } from "../../components/ProgressBar/ProgressBar"; import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
import { import {
ContentCardDescription, ContentCardDescription,
@@ -48,7 +49,12 @@ interface Props {
part: CreateAgentToolPart; part: CreateAgentToolPart;
} }
function getAccordionMeta(output: CreateAgentToolOutput) { function getAccordionMeta(output: CreateAgentToolOutput): {
icon: React.ReactNode;
title: React.ReactNode;
titleClassName?: string;
description?: string;
} {
const icon = <AccordionIcon />; const icon = <AccordionIcon />;
if (isAgentSavedOutput(output)) { if (isAgentSavedOutput(output)) {
@@ -67,7 +73,6 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
icon, icon,
title: "Needs clarification", title: "Needs clarification",
description: `${questions.length} question${questions.length === 1 ? "" : "s"}`, description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
expanded: true,
}; };
} }
if ( if (
@@ -76,7 +81,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
isOperationInProgressOutput(output) isOperationInProgressOutput(output)
) { ) {
return { return {
icon, icon: <OrbitLoader size={32} />,
title: "Creating agent, this may take a few minutes. Sit back and relax.", title: "Creating agent, this may take a few minutes. Sit back and relax.",
}; };
} }
@@ -92,23 +97,18 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
export function CreateAgentTool({ part }: Props) { export function CreateAgentTool({ part }: Props) {
const text = getAnimationText(part); const text = getAnimationText(part);
const { onSend } = useCopilotChatActions(); const { onSend } = useCopilotChatActions();
const isStreaming = const isStreaming =
part.state === "input-streaming" || part.state === "input-available"; part.state === "input-streaming" || part.state === "input-available";
const output = getCreateAgentToolOutput(part); const output = getCreateAgentToolOutput(part);
const isError = const isError =
part.state === "output-error" || (!!output && isErrorOutput(output)); part.state === "output-error" || (!!output && isErrorOutput(output));
const isOperating = const isOperating =
!!output && !!output &&
(isOperationStartedOutput(output) || (isOperationStartedOutput(output) ||
isOperationPendingOutput(output) || isOperationPendingOutput(output) ||
isOperationInProgressOutput(output)); isOperationInProgressOutput(output));
const progress = useAsymptoticProgress(isOperating); const progress = useAsymptoticProgress(isOperating);
const hasExpandableContent = const hasExpandableContent =
part.state === "output-available" && part.state === "output-available" &&
!!output && !!output &&
@@ -149,7 +149,10 @@ export function CreateAgentTool({ part }: Props) {
</div> </div>
{hasExpandableContent && output && ( {hasExpandableContent && output && (
<ToolAccordion {...getAccordionMeta(output)}> <ToolAccordion
{...getAccordionMeta(output)}
defaultExpanded={isOperating || isClarificationNeededOutput(output)}
>
{isOperating && ( {isOperating && (
<ContentGrid> <ContentGrid>
<ProgressBar value={progress} className="max-w-[280px]" /> <ProgressBar value={progress} className="max-w-[280px]" />

View File

@@ -146,7 +146,10 @@ export function EditAgentTool({ part }: Props) {
</div> </div>
{hasExpandableContent && output && ( {hasExpandableContent && output && (
<ToolAccordion {...getAccordionMeta(output)}> <ToolAccordion
{...getAccordionMeta(output)}
defaultExpanded={isOperating || isClarificationNeededOutput(output)}
>
{isOperating && ( {isOperating && (
<ContentGrid> <ContentGrid>
<ProgressBar value={progress} className="max-w-[280px]" /> <ProgressBar value={progress} className="max-w-[280px]" />

View File

@@ -61,7 +61,14 @@ export function RunAgentTool({ part }: Props) {
</div> </div>
{hasExpandableContent && output && ( {hasExpandableContent && output && (
<ToolAccordion {...getAccordionMeta(output)}> <ToolAccordion
{...getAccordionMeta(output)}
defaultExpanded={
isRunAgentExecutionStartedOutput(output) ||
isRunAgentSetupRequirementsOutput(output) ||
isRunAgentAgentDetailsOutput(output)
}
>
{isRunAgentExecutionStartedOutput(output) && ( {isRunAgentExecutionStartedOutput(output) && (
<ExecutionStartedCard output={output} /> <ExecutionStartedCard output={output} />
)} )}

View File

@@ -10,7 +10,7 @@ import {
WarningDiamondIcon, WarningDiamondIcon,
} from "@phosphor-icons/react"; } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai"; import type { ToolUIPart } from "ai";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
export interface RunAgentInput { export interface RunAgentInput {
username_agent_slug?: string; username_agent_slug?: string;
@@ -171,7 +171,7 @@ export function ToolIcon({
); );
} }
if (isStreaming) { if (isStreaming) {
return <OrbitLoader size={24} />; return <SpinnerLoader size={40} className="text-neutral-700" />;
} }
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />; return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
} }
@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
? output.status.trim() ? output.status.trim()
: "started"; : "started";
return { return {
icon, icon: <SpinnerLoader size={28} className="text-neutral-700" />,
title: output.graph_name, title: output.graph_name,
description: `Status: ${statusText}`, description: `Status: ${statusText}`,
}; };

View File

@@ -55,7 +55,13 @@ export function RunBlockTool({ part }: Props) {
</div> </div>
{hasExpandableContent && output && ( {hasExpandableContent && output && (
<ToolAccordion {...getAccordionMeta(output)}> <ToolAccordion
{...getAccordionMeta(output)}
defaultExpanded={
isRunBlockBlockOutput(output) ||
isRunBlockSetupRequirementsOutput(output)
}
>
{isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />} {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
{isRunBlockSetupRequirementsOutput(output) && ( {isRunBlockSetupRequirementsOutput(output) && (

View File

@@ -8,7 +8,7 @@ import {
WarningDiamondIcon, WarningDiamondIcon,
} from "@phosphor-icons/react"; } from "@phosphor-icons/react";
import type { ToolUIPart } from "ai"; import type { ToolUIPart } from "ai";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader"; import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
export interface RunBlockInput { export interface RunBlockInput {
block_id?: string; block_id?: string;
@@ -120,7 +120,7 @@ export function ToolIcon({
); );
} }
if (isStreaming) { if (isStreaming) {
return <OrbitLoader size={24} />; return <SpinnerLoader size={40} className="text-neutral-700" />;
} }
return <PlayIcon size={14} weight="regular" className="text-neutral-400" />; return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
} }
@@ -149,7 +149,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
if (isRunBlockBlockOutput(output)) { if (isRunBlockBlockOutput(output)) {
const keys = Object.keys(output.outputs ?? {}); const keys = Object.keys(output.outputs ?? {});
return { return {
icon, icon: <SpinnerLoader size={32} className="text-neutral-700" />,
title: output.block_name, title: output.block_name,
description: description:
keys.length > 0 keys.length > 0

View File

@@ -3,6 +3,7 @@ import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useChat } from "@ai-sdk/react"; import { useChat } from "@ai-sdk/react";
import { DefaultChatTransport } from "ai"; import { DefaultChatTransport } from "ai";
import { useRouter } from "next/navigation";
import { useEffect, useMemo, useState } from "react"; import { useEffect, useMemo, useState } from "react";
import { useChatSession } from "./useChatSession"; import { useChatSession } from "./useChatSession";
@@ -10,6 +11,7 @@ export function useCopilotPage() {
const { isUserLoading, isLoggedIn } = useSupabase(); const { isUserLoading, isLoggedIn } = useSupabase();
const [isDrawerOpen, setIsDrawerOpen] = useState(false); const [isDrawerOpen, setIsDrawerOpen] = useState(false);
const [pendingMessage, setPendingMessage] = useState<string | null>(null); const [pendingMessage, setPendingMessage] = useState<string | null>(null);
const router = useRouter();
const { const {
sessionId, sessionId,
@@ -52,6 +54,10 @@ export function useCopilotPage() {
transport: transport ?? undefined, transport: transport ?? undefined,
}); });
useEffect(() => {
if (!isUserLoading && !isLoggedIn) router.replace("/login");
}, [isUserLoading, isLoggedIn]);
useEffect(() => { useEffect(() => {
if (!hydratedMessages || hydratedMessages.length === 0) return; if (!hydratedMessages || hydratedMessages.length === 0) return;
setMessages((prev) => { setMessages((prev) => {

View File

@@ -1,8 +1,11 @@
import { environment } from "@/services/environment"; import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers"; import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server"; import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
/**
* SSE Proxy for chat streaming.
* Supports POST with context (page content + URL) in the request body.
*/
export async function POST( export async function POST(
request: NextRequest, request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> }, { params }: { params: Promise<{ sessionId: string }> },
@@ -20,14 +23,17 @@ export async function POST(
); );
} }
// Get auth token from server-side session
const token = await getServerAuthToken(); const token = await getServerAuthToken();
// Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl(); const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL( const streamUrl = new URL(
`/api/chat/sessions/${sessionId}/stream`, `/api/chat/sessions/${sessionId}/stream`,
backendUrl, backendUrl,
); );
// Forward request to backend with auth header
const headers: Record<string, string> = { const headers: Record<string, string> = {
"Content-Type": "application/json", "Content-Type": "application/json",
Accept: "text/event-stream", Accept: "text/event-stream",
@@ -57,15 +63,14 @@ export async function POST(
}); });
} }
if (!response.body) { // Return the SSE stream directly
return new Response( return new Response(response.body, {
JSON.stringify({ error: "Empty response from chat service" }), headers: {
{ status: 502, headers: { "Content-Type": "application/json" } }, "Content-Type": "text/event-stream",
); "Cache-Control": "no-cache, no-transform",
} Connection: "keep-alive",
"X-Accel-Buffering": "no",
return new Response(normalizeSSEStream(response.body), { },
headers: SSE_HEADERS,
}); });
} catch (error) { } catch (error) {
console.error("SSE proxy error:", error); console.error("SSE proxy error:", error);
@@ -82,6 +87,13 @@ export async function POST(
} }
} }
/**
* Resume an active stream for a session.
*
* Called by the AI SDK's `useChat(resume: true)` on page load.
* Proxies to the backend which checks for an active stream and either
* replays it (200 + SSE) or returns 204 No Content.
*/
export async function GET( export async function GET(
_request: NextRequest, _request: NextRequest,
{ params }: { params: Promise<{ sessionId: string }> }, { params }: { params: Promise<{ sessionId: string }> },
@@ -112,6 +124,7 @@ export async function GET(
headers, headers,
}); });
// 204 = no active stream to resume
if (response.status === 204) { if (response.status === 204) {
return new Response(null, { status: 204 }); return new Response(null, { status: 204 });
} }
@@ -124,13 +137,12 @@ export async function GET(
}); });
} }
if (!response.body) { return new Response(response.body, {
return new Response(null, { status: 204 });
}
return new Response(normalizeSSEStream(response.body), {
headers: { headers: {
...SSE_HEADERS, "Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
"x-vercel-ai-ui-message-stream": "v1", "x-vercel-ai-ui-message-stream": "v1",
}, },
}); });

View File

@@ -1,72 +0,0 @@
export const SSE_HEADERS = {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no",
} as const;
export function normalizeSSEStream(
input: ReadableStream<Uint8Array>,
): ReadableStream<Uint8Array> {
const decoder = new TextDecoder();
const encoder = new TextEncoder();
let buffer = "";
return input.pipeThrough(
new TransformStream<Uint8Array, Uint8Array>({
transform(chunk, controller) {
buffer += decoder.decode(chunk, { stream: true });
const parts = buffer.split("\n\n");
buffer = parts.pop() ?? "";
for (const part of parts) {
const normalized = normalizeSSEEvent(part);
controller.enqueue(encoder.encode(normalized + "\n\n"));
}
},
flush(controller) {
if (buffer.trim()) {
const normalized = normalizeSSEEvent(buffer);
controller.enqueue(encoder.encode(normalized + "\n\n"));
}
},
}),
);
}
function normalizeSSEEvent(event: string): string {
const lines = event.split("\n");
const dataLines: string[] = [];
const otherLines: string[] = [];
for (const line of lines) {
if (line.startsWith("data: ")) {
dataLines.push(line.slice(6));
} else {
otherLines.push(line);
}
}
if (dataLines.length === 0) return event;
const dataStr = dataLines.join("\n");
try {
const parsed = JSON.parse(dataStr) as Record<string, unknown>;
if (parsed.type === "error") {
const normalized = {
type: "error",
errorText:
typeof parsed.errorText === "string"
? parsed.errorText
: "An unexpected error occurred",
};
const newData = `data: ${JSON.stringify(normalized)}`;
return [...otherLines.filter((l) => l.length > 0), newData].join("\n");
}
} catch {
// Not valid JSON — pass through as-is
}
return event;
}

View File

@@ -1,8 +1,20 @@
import { environment } from "@/services/environment"; import { environment } from "@/services/environment";
import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers"; import { getServerAuthToken } from "@/lib/autogpt-server-api/helpers";
import { NextRequest } from "next/server"; import { NextRequest } from "next/server";
import { normalizeSSEStream, SSE_HEADERS } from "../../../sse-helpers";
/**
* SSE Proxy for task stream reconnection.
*
* This endpoint allows clients to reconnect to an ongoing or recently completed
* background task's stream. It replays missed messages from Redis Streams and
* subscribes to live updates if the task is still running.
*
* Client contract:
* 1. When receiving an operation_started event, store the task_id
* 2. To reconnect: GET /api/chat/tasks/{taskId}/stream?last_message_id={idx}
* 3. Messages are replayed from the last_message_id position
* 4. Stream ends when "finish" event is received
*/
export async function GET( export async function GET(
request: NextRequest, request: NextRequest,
{ params }: { params: Promise<{ taskId: string }> }, { params }: { params: Promise<{ taskId: string }> },
@@ -12,12 +24,15 @@ export async function GET(
const lastMessageId = searchParams.get("last_message_id") || "0-0"; const lastMessageId = searchParams.get("last_message_id") || "0-0";
try { try {
// Get auth token from server-side session
const token = await getServerAuthToken(); const token = await getServerAuthToken();
// Build backend URL
const backendUrl = environment.getAGPTServerBaseUrl(); const backendUrl = environment.getAGPTServerBaseUrl();
const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl); const streamUrl = new URL(`/api/chat/tasks/${taskId}/stream`, backendUrl);
streamUrl.searchParams.set("last_message_id", lastMessageId); streamUrl.searchParams.set("last_message_id", lastMessageId);
// Forward request to backend with auth header
const headers: Record<string, string> = { const headers: Record<string, string> = {
Accept: "text/event-stream", Accept: "text/event-stream",
"Cache-Control": "no-cache", "Cache-Control": "no-cache",
@@ -41,12 +56,14 @@ export async function GET(
}); });
} }
if (!response.body) { // Return the SSE stream directly
return new Response(null, { status: 204 }); return new Response(response.body, {
} headers: {
"Content-Type": "text/event-stream",
return new Response(normalizeSSEStream(response.body), { "Cache-Control": "no-cache, no-transform",
headers: SSE_HEADERS, Connection: "keep-alive",
"X-Accel-Buffering": "no",
},
}); });
} catch (error) { } catch (error) {
console.error("Task stream proxy error:", error); console.error("Task stream proxy error:", error);

View File

@@ -6,7 +6,6 @@ import { SupabaseClient } from "@supabase/supabase-js";
export const PROTECTED_PAGES = [ export const PROTECTED_PAGES = [
"/auth/authorize", "/auth/authorize",
"/auth/integrations", "/auth/integrations",
"/copilot",
"/monitor", "/monitor",
"/build", "/build",
"/onboarding", "/onboarding",

View File

@@ -1,15 +1,12 @@
[flake8] [flake8]
max-line-length = 88 max-line-length = 88
extend-ignore = E203
exclude = exclude =
.tox, .tox,
__pycache__, __pycache__,
*.pyc, *.pyc,
.env, .env
venv*, venv*/*,
.venv, .venv/*,
reports, reports/*,
dist, dist/*,
data, data/*,
.benchmark_workspaces,
.autogpt,

View File

@@ -1,291 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
AutoGPT Classic is an experimental, **unsupported** project demonstrating autonomous GPT-4 operation. Dependencies will not be updated, and the codebase contains known vulnerabilities. This is preserved for educational/historical purposes.
## Repository Structure
```
classic/
├── pyproject.toml # Single consolidated Poetry project
├── poetry.lock # Single lock file
├── forge/
│ └── forge/ # Core agent framework package
├── original_autogpt/
│ └── autogpt/ # AutoGPT agent package
├── direct_benchmark/
│ └── direct_benchmark/ # Benchmark harness package
└── benchmark/ # Challenge definitions (data, not code)
```
All packages are managed by a single `pyproject.toml` at the classic/ root.
## Common Commands
### Setup & Install
```bash
# Install everything from classic/ directory
cd classic
poetry install
```
### Running Agents
```bash
# Run forge agent
poetry run python -m forge
# Run original autogpt server
poetry run serve --debug
# Run autogpt CLI
poetry run autogpt
```
Agents run on `http://localhost:8000` by default.
### Benchmarking
```bash
# Run benchmarks
poetry run direct-benchmark run
# Run specific strategies and models
poetry run direct-benchmark run \
--strategies one_shot,rewoo \
--models claude \
--parallel 4
# Run a single test
poetry run direct-benchmark run --tests ReadFile
# List available commands
poetry run direct-benchmark --help
```
### Testing
```bash
poetry run pytest # All tests
poetry run pytest forge/tests/ # Forge tests only
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
poetry run pytest -k test_name # Single test by name
poetry run pytest path/to/test.py # Specific test file
poetry run pytest --cov # With coverage
```
### Linting & Formatting
Run from the classic/ directory:
```bash
# Format everything (recommended to run together)
poetry run black . && poetry run isort .
# Check formatting (CI-style, no changes)
poetry run black --check . && poetry run isort --check-only .
# Lint
poetry run flake8 # Style linting
# Type check
poetry run pyright # Type checking (some errors are expected in infrastructure code)
```
Note: Always run linters over the entire directory, not specific files, for best results.
## Architecture
### Forge (Core Framework)
The `forge` package is the foundation that other components depend on:
- `forge/agent/` - Agent implementation and protocols
- `forge/llm/` - Multi-provider LLM integrations (OpenAI, Anthropic, Groq, LiteLLM)
- `forge/components/` - Reusable agent components
- `forge/file_storage/` - File system abstraction
- `forge/config/` - Configuration management
### Original AutoGPT
- `original_autogpt/autogpt/app/` - CLI application entry points
- `original_autogpt/autogpt/agents/` - Agent implementations
- `original_autogpt/autogpt/agent_factory/` - Agent creation logic
### Direct Benchmark
Benchmark harness for testing agent performance:
- `direct_benchmark/direct_benchmark/` - CLI and harness code
- `benchmark/agbenchmark/challenges/` - Test cases organized by category (code, retrieval, data, etc.)
- Reports generated in `direct_benchmark/reports/`
### Package Structure
All three packages are included in a single Poetry project. Imports are fully qualified:
- `from forge.agent.base import BaseAgent`
- `from autogpt.agents.agent import Agent`
- `from direct_benchmark.harness import BenchmarkHarness`
## Code Style
- Python 3.12 target
- Line length: 88 characters (Black default)
- Black for formatting, isort for imports (profile="black")
- Type hints with Pyright checking
## Testing Patterns
- Async support via pytest-asyncio
- Fixtures defined in `conftest.py` files provide: `tmp_project_root`, `storage`, `config`, `llm_provider`, `agent`
- Tests requiring API keys (OPENAI_API_KEY, ANTHROPIC_API_KEY) will skip if not set
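For illustration, a test that leans on those fixtures and the API-key skip convention might look like this (the fixture names come from the list above; the test body itself is hypothetical):

```python
# Hypothetical test shape using the documented conftest.py fixtures.
import os

import pytest

requires_openai = pytest.mark.skipif(
    not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY is not set"
)


@requires_openai
@pytest.mark.asyncio
async def test_agent_fixture_is_wired_up(agent, storage):
    # `agent` and `storage` are provided by conftest.py; the assertions are illustrative.
    assert agent is not None
    assert storage is not None
```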
## Environment Setup
Copy `.env.example` to `.env` in the relevant directory and add your API keys:
```bash
cp .env.example .env
# Edit .env with your OPENAI_API_KEY, etc.
```
## Workspaces
Agents operate within a **workspace** - a directory containing all agent data and files. The workspace root defaults to the current working directory.
### Workspace Structure
```
{workspace}/
├── .autogpt/
│ ├── autogpt.yaml # Workspace-level permissions
│ ├── ap_server.db # Agent Protocol database (server mode)
│ └── agents/
│ └── AutoGPT-{agent_id}/
│ ├── state.json # Agent profile, directives, action history
│ ├── permissions.yaml # Agent-specific permission overrides
│ └── workspace/ # Agent's sandboxed working directory
```
### Key Concepts
- **Multiple agents** can coexist in the same workspace (each gets its own subdirectory)
- **File access** is sandboxed to the agent's `workspace/` directory by default
- **State persistence** - agent state saves to `state.json` and survives across sessions
- **Storage backends** - supports local filesystem, S3, and GCS (via `FILE_STORAGE_BACKEND` env var)
### Specifying a Workspace
```bash
# Default: uses current directory
cd /path/to/my/project && poetry run autogpt
# Or specify explicitly via CLI (if supported)
poetry run autogpt --workspace /path/to/workspace
```
## Settings Location
Configuration uses a **layered system** with three levels (in order of precedence):
### 1. Environment Variables (Global)
Loaded from `.env` file in the working directory:
```bash
# Required
OPENAI_API_KEY=sk-...
# Optional LLM settings
SMART_LLM=gpt-4o # Model for complex reasoning
FAST_LLM=gpt-4o-mini # Model for simple tasks
EMBEDDING_MODEL=text-embedding-3-small
# Optional search providers (for web search component)
TAVILY_API_KEY=tvly-...
SERPER_API_KEY=...
GOOGLE_API_KEY=...
GOOGLE_CUSTOM_SEARCH_ENGINE_ID=...
# Optional infrastructure
LOG_LEVEL=DEBUG # DEBUG, INFO, WARNING, ERROR
DATABASE_STRING=sqlite:///agent.db # Agent Protocol database
PORT=8000 # Server port
FILE_STORAGE_BACKEND=local # local, s3, or gcs
```
### 2. Workspace Settings (`{workspace}/.autogpt/autogpt.yaml`)
Workspace-wide permissions that apply to **all agents** in this workspace:
```yaml
allow:
- read_file({workspace}/**)
- write_to_file({workspace}/**)
- list_folder({workspace}/**)
- web_search(*)
deny:
- read_file(**.env)
- read_file(**.env.*)
- read_file(**.key)
- read_file(**.pem)
- execute_shell(rm -rf:*)
- execute_shell(sudo:*)
```
Auto-generated with sensible defaults if missing.
### 3. Agent Settings (`{workspace}/.autogpt/agents/{id}/permissions.yaml`)
Agent-specific permission overrides:
```yaml
allow:
- execute_python(*)
- web_search(*)
deny:
- execute_shell(*)
```
## Permissions
The permission system uses **pattern matching** with a **first-match-wins** evaluation order.
### Permission Check Order
1. Agent deny list → **Block**
2. Workspace deny list → **Block**
3. Agent allow list → **Allow**
4. Workspace allow list → **Allow**
5. Session denied list → **Block** (commands denied during this session)
6. **Prompt user** → Interactive approval (if in interactive mode)
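A minimal sketch of that first-match-wins order (names and data shapes are illustrative, not the actual forge implementation):

```python
# Illustrative first-match-wins permission check; not the real forge code.
from fnmatch import fnmatch


def matches_any(command: str, patterns: list[str]) -> bool:
    # Simplified: fnmatch does not distinguish `*` from `**` the way the real syntax does.
    return any(fnmatch(command, pattern) for pattern in patterns)


def check_permission(command, agent_cfg, workspace_cfg, session_denied, prompt_user):
    if matches_any(command, agent_cfg.get("deny", [])):
        return False  # 1. agent deny
    if matches_any(command, workspace_cfg.get("deny", [])):
        return False  # 2. workspace deny
    if matches_any(command, agent_cfg.get("allow", [])):
        return True   # 3. agent allow
    if matches_any(command, workspace_cfg.get("allow", [])):
        return True   # 4. workspace allow
    if command in session_denied:
        return False  # 5. denied earlier in this session
    return prompt_user(command)  # 6. fall back to interactive approval
```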
### Pattern Syntax
Format: `command_name(glob_pattern)`
| Pattern | Description |
|---------|-------------|
| `read_file({workspace}/**)` | Read any file in workspace (recursive) |
| `write_to_file({workspace}/*.txt)` | Write only .txt files in workspace root |
| `execute_shell(python:**)` | Execute Python commands only |
| `execute_shell(git:*)` | Execute any git command |
| `web_search(*)` | Allow all web searches |
Special tokens:
- `{workspace}` - Replaced with actual workspace path
- `**` - Matches any path including `/`
- `*` - Matches any characters except `/`
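As a rough sketch, a `command_name(glob_pattern)` rule could be matched against a concrete call like this (simplified; the real `**` vs `*` handling is richer than `fnmatch`):

```python
# Rough illustration of matching a "command(pattern)" rule; not the real implementation.
import re
from fnmatch import fnmatch


def rule_matches(rule: str, command: str, argument: str, workspace: str) -> bool:
    parsed = re.fullmatch(r"(\w+)\((.*)\)", rule)
    if not parsed:
        return False
    name, pattern = parsed.group(1), parsed.group(2).replace("{workspace}", workspace)
    return name == command and fnmatch(argument, pattern)


# e.g. rule_matches("read_file({workspace}/**)", "read_file", "/ws/notes.txt", "/ws") -> True
```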
### Interactive Approval Scopes
When prompted for permission, users can choose:
| Scope | Effect |
|-------|--------|
| **Once** | Allow this one time only (not saved) |
| **Agent** | Always allow for this agent (saves to agent `permissions.yaml`) |
| **Workspace** | Always allow for all agents (saves to `autogpt.yaml`) |
| **Deny** | Deny this command (saves to appropriate deny list) |
### Default Security
Out of the box, the following are **denied by default**:
- Reading sensitive files (`.env`, `.key`, `.pem`)
- Destructive shell commands (`rm -rf`, `sudo`)
- Operations outside the workspace directory

classic/CLI-USAGE.md Executable file
View File

@@ -0,0 +1,182 @@
## CLI Documentation
This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agent stop` command will terminate any process running on port 8000.
### 1. Entry Point for the CLI
Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
```sh
./run
```
**Output**:
```
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
agent Commands to create, start and stop agents
benchmark Commands to start the benchmark and list tests and categories
setup Installs dependencies needed for your system.
```
If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
```sh
./run COMMAND --help
```
This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
### 2. Setup Command
```sh
./run setup
```
**Output**:
```
Setup initiated
Installation has been completed.
```
This command initializes the setup of the project.
### 3. Agents Commands
**a. List All Agents**
```sh
./run agent list
```
**Output**:
```
Available agents: 🤖
🐙 forge
🐙 autogpt
```
Lists all the available agents.
**b. Create a New Agent**
```sh
./run agent create my_agent
```
**Output**:
```
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
```
Creates a new agent named 'my_agent'.
**c. Start an Agent**
```sh
./run agent start my_agent
```
**Output**:
```
... (ASCII Art representing the agent startup)
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
```
Starts the 'my_agent' and displays startup ASCII art and logs.
**d. Stop an Agent**
```sh
./run agent stop
```
**Output**:
```
Agent stopped
```
Stops the running agent.
### 4. Benchmark Commands
**a. List Benchmark Categories**
```sh
./run benchmark categories list
```
**Output**:
```
Available categories: 📚
📖 code
📖 safety
📖 memory
... (and so on)
```
Lists all available benchmark categories.
**b. List Benchmark Tests**
```sh
./run benchmark tests list
```
**Output**:
```
Available tests: 📚
📖 interface
🔬 Search - TestSearch
🔬 Write File - TestWriteFile
... (and so on)
```
Lists all available benchmark tests.
**c. Show Details of a Benchmark Test**
```sh
./run benchmark tests details TestWriteFile
```
**Output**:
```
TestWriteFile
-------------
Category: interface
Task: Write the word 'Washington' to a .txt file
... (and other details)
```
Displays the details of the 'TestWriteFile' benchmark test.
**d. Start Benchmark for the Agent**
```sh
./run benchmark start my_agent
```
**Output**:
```
(more details about the testing process are shown while the tests are running)
============= 13 failed, 1 passed in 0.97s ============...
```
Displays the results of the benchmark tests on 'my_agent'.

View File

@@ -2,7 +2,7 @@
ARG BUILD_TYPE=dev ARG BUILD_TYPE=dev
# Use an official Python base image from the Docker Hub # Use an official Python base image from the Docker Hub
FROM python:3.12-slim AS autogpt-base FROM python:3.10-slim AS autogpt-base
# Install browsers # Install browsers
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
@@ -34,6 +34,9 @@ COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
# Include forge so it can be used as a path dependency # Include forge so it can be used as a path dependency
COPY forge/ ../forge COPY forge/ ../forge
# Include frontend
COPY frontend/ ../frontend
# Set the entrypoint # Set the entrypoint
ENTRYPOINT ["poetry", "run", "autogpt"] ENTRYPOINT ["poetry", "run", "autogpt"]
CMD [] CMD []

classic/FORGE-QUICKSTART.md Normal file
View File

@@ -0,0 +1,173 @@
# Quickstart Guide
> For the complete getting started guide, see the full [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
## System Requirements
This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
## Getting Setup
1. **Fork the Repository**
To fork the repository, follow these steps:
- Navigate to the main page of the repository.
![Repository](../docs/content/imgs/quickstart/001_repo.png)
- In the top-right corner of the page, click Fork.
![Create Fork UI](../docs/content/imgs/quickstart/002_fork.png)
- On the next page, select your GitHub account to create the fork.
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
2. **Clone the Repository**
To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
- Open your terminal.
- Navigate to the directory where you want to clone the repository.
- Run the git clone command for the fork you just created
![Clone the Repository](../docs/content/imgs/quickstart/003_clone.png)
- Then open the project in your IDE
![Open the Project in your IDE](../docs/content/imgs/quickstart/004_ide.png)
3. **Setup the Project**
Next, we need to set up the required dependencies. We have a tool to help you perform common tasks in the repo.
It can be accessed by typing `./run` in the terminal.
The first command you need to use is `./run setup`. This will guide you through setting up your system.
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token, as shown in the following image:
![Setup the Project](../docs/content/imgs/quickstart/005_setup.png)
### For Windows Users
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
#### Update WSL
Run the following command in PowerShell or Command Prompt. It will:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
```shell
wsl --install
```
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
#### Resolve FileNotFoundError or "No such file or directory" Errors
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here's how to install and run `dos2unix` on the script:
```shell
sudo apt update
sudo apt install dos2unix
dos2unix ./run
```
After executing the above commands, running `./run setup` should work successfully.
#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
You can re-run the command at any time to check how far along your setup is.
When setup has been completed, the command will return an output like this:
![Setup Complete](../docs/content/imgs/quickstart/006_setup_complete.png)
## Creating Your Agent
After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
Tips for naming your agent:
* Give it its own unique name, or name it after yourself
* Include an important aspect of your agent in the name, such as its purpose
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
![Create an Agent](../docs/content/imgs/quickstart/007_create_agent.png)
## Running your Agent
Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
This starts the agent on the URL: `http://localhost:8000/`
![Start the Agent](../docs/content/imgs/quickstart/009_start_agent.png)
The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
![Login](../docs/content/imgs/quickstart/010_login.png)
Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
![Login](../docs/content/imgs/quickstart/011_home.png)
When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.
If you are having issues and want to ensure the agent has been stopped, use the `./run agent stop` command, which kills the process using port 8000 (which should be the agent).
## Benchmarking your Agent
The benchmarking system can also be accessed using the CLI:
```bash
agpt % ./run benchmark
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
Commands to start the benchmark and list tests and categories
Options:
--help Show this message and exit.
Commands:
categories Benchmark categories group command
start Starts the benchmark command
tests Benchmark tests group command
agpt % ./run benchmark categories
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
Benchmark categories group command
Options:
--help Show this message and exit.
Commands:
list List benchmark categories command
agpt % ./run benchmark tests
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
Benchmark tests group command
Options:
--help Show this message and exit.
Commands:
details Benchmark test details command
list List benchmark tests command
```
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
```bash
./run benchmark categories list
# And what tests are available with
./run benchmark tests list
```
![Login](../docs/content/imgs/quickstart/012_tests.png)
Finally, you can run the benchmark with
```bash
./run benchmark start YOUR_AGENT_NAME
```
>

View File

@@ -4,7 +4,7 @@ AutoGPT Classic was an experimental project to demonstrate autonomous GPT-4 oper
## Project Status ## Project Status
**This project is unsupported, and dependencies will not be updated.** It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform). ⚠️ **This project is unsupported, and dependencies will not be updated. It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform)**
For those interested in autonomous AI agents, we recommend exploring more actively maintained alternatives or referring to this codebase for educational purposes only. For those interested in autonomous AI agents, we recommend exploring more actively maintained alternatives or referring to this codebase for educational purposes only.
@@ -16,171 +16,37 @@ AutoGPT Classic was one of the first implementations of autonomous AI agents - A
- Learn from the results and adjust its approach - Learn from the results and adjust its approach
- Chain multiple actions together to achieve an objective - Chain multiple actions together to achieve an objective
## Key Features
- 🔄 Autonomous task chaining
- 🛠 Tool and API integration capabilities
- 💾 Memory management for context retention
- 🔍 Web browsing and information gathering
- 📝 File operations and content creation
- 🔄 Self-prompting and task breakdown
## Structure ## Structure
``` The project is organized into several key components:
classic/ - `/benchmark` - Performance testing tools
├── pyproject.toml # Single consolidated Poetry project - `/forge` - Core autonomous agent framework
├── poetry.lock # Single lock file - `/frontend` - User interface components
├── forge/ # Core autonomous agent framework - `/original_autogpt` - Original implementation
├── original_autogpt/ # Original implementation
├── direct_benchmark/ # Benchmark harness
└── benchmark/ # Challenge definitions (data)
```
## Getting Started ## Getting Started
### Prerequisites While this project is no longer actively maintained, you can still explore the codebase:
- Python 3.12+
- [Poetry](https://python-poetry.org/docs/#installation)
### Installation
1. Clone the repository:
```bash ```bash
# Clone the repository
git clone https://github.com/Significant-Gravitas/AutoGPT.git git clone https://github.com/Significant-Gravitas/AutoGPT.git
cd classic cd classic
# Install everything
poetry install
``` ```
### Configuration 2. Review the documentation:
- For reference, see the [documentation](https://docs.agpt.co). You can browse at the same point in time as this commit so the docs don't change.
Configuration uses a layered system: - Check `CLI-USAGE.md` for command-line interface details
- Refer to `TROUBLESHOOTING.md` for common issues
1. **Environment variables** (`.env` file)
2. **Workspace settings** (`.autogpt/autogpt.yaml`)
3. **Agent settings** (`.autogpt/agents/{id}/permissions.yaml`)
Copy the example environment file and add your API keys:
```bash
cp .env.example .env
```
Key environment variables:
```bash
# Required
OPENAI_API_KEY=sk-...
# Optional LLM settings
SMART_LLM=gpt-4o # Model for complex reasoning
FAST_LLM=gpt-4o-mini # Model for simple tasks
# Optional search providers
TAVILY_API_KEY=tvly-...
SERPER_API_KEY=...
# Optional infrastructure
LOG_LEVEL=DEBUG
PORT=8000
FILE_STORAGE_BACKEND=local # local, s3, or gcs
```
### Running
All commands run from the `classic/` directory:
```bash
# Run forge agent
poetry run python -m forge
# Run original autogpt server
poetry run serve --debug
# Run autogpt CLI
poetry run autogpt
```
Agents run on `http://localhost:8000` by default.
### Benchmarking
```bash
poetry run direct-benchmark run
```
### Testing
```bash
poetry run pytest # All tests
poetry run pytest forge/tests/ # Forge tests only
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
```
## Workspaces
Agents operate within a **workspace** directory that contains all agent data and files:
```
{workspace}/
├── .autogpt/
│ ├── autogpt.yaml # Workspace-level permissions
│ ├── ap_server.db # Agent Protocol database (server mode)
│ └── agents/
│ └── AutoGPT-{agent_id}/
│ ├── state.json # Agent profile, directives, history
│ ├── permissions.yaml # Agent-specific permissions
│ └── workspace/ # Agent's sandboxed working directory
```
- The workspace defaults to the current working directory
- Multiple agents can coexist in the same workspace
- Agent file access is sandboxed to their `workspace/` subdirectory
- State persists across sessions via `state.json`
## Permissions
AutoGPT uses a **layered permission system** with pattern matching:
### Permission Files
| File | Scope | Location |
|------|-------|----------|
| `autogpt.yaml` | All agents in workspace | `.autogpt/autogpt.yaml` |
| `permissions.yaml` | Single agent | `.autogpt/agents/{id}/permissions.yaml` |
### Permission Format
```yaml
allow:
- read_file({workspace}/**) # Read any file in workspace
- write_to_file({workspace}/**) # Write any file in workspace
- web_search(*) # All web searches
deny:
- read_file(**.env) # Block .env files
- execute_shell(sudo:*) # Block sudo commands
```
### Check Order (First Match Wins)
1. Agent deny → Block
2. Workspace deny → Block
3. Agent allow → Allow
4. Workspace allow → Allow
5. Prompt user → Interactive approval
### Interactive Approval
When prompted, users can approve commands with different scopes:
- **Once** - Allow this one time only
- **Agent** - Always allow for this agent
- **Workspace** - Always allow for all agents
- **Deny** - Block this command
### Default Security
Denied by default:
- Sensitive files (`.env`, `.key`, `.pem`)
- Destructive commands (`rm -rf`, `sudo`)
- Operations outside the workspace
## Security Notice
This codebase has **known vulnerabilities** and issues with its dependencies. It will not be updated to new dependencies. Use for educational purposes only.
## License ## License
@@ -189,3 +55,27 @@ This project segment is licensed under the MIT License - see the [LICENSE](LICEN
## Documentation ## Documentation
Please refer to the [documentation](https://docs.agpt.co) for more detailed information about the project's architecture and concepts. Please refer to the [documentation](https://docs.agpt.co) for more detailed information about the project's architecture and concepts.
You can browse at the same point in time as this commit so the docs don't change.
## Historical Impact
AutoGPT Classic played a significant role in advancing the field of autonomous AI agents:
- Demonstrated practical implementation of AI autonomy
- Inspired numerous derivative projects and research
- Contributed to the development of AI agent architectures
- Helped identify key challenges in AI autonomy
## Security Notice
If you're studying this codebase, please understand this has KNOWN vulnerabilities and issues with its dependencies. It will not be updated to new dependencies.
## Community & Support
While active development has concluded:
- The codebase remains available for study and reference
- Historical discussions can be found in project issues
- Related research and developments continue in the broader AI agent community
## Acknowledgments
Thanks to all contributors who participated in this experimental project and helped advance the field of autonomous AI agents.

View File

@@ -0,0 +1,4 @@
AGENT_NAME=mini-agi
REPORTS_FOLDER="reports/mini-agi"
OPENAI_API_KEY="sk-" # for LLM eval
BUILD_SKILL_TREE=false # set to true to build the skill tree.

classic/benchmark/.flake8 Normal file
View File

@@ -0,0 +1,12 @@
[flake8]
max-line-length = 88
# Ignore rules that conflict with Black code style
extend-ignore = E203, W503
exclude =
__pycache__/,
*.pyc,
.pytest_cache/,
venv*/,
.venv/,
reports/,
agbenchmark/reports/,

classic/benchmark/.gitignore vendored Normal file
View File

@@ -0,0 +1,174 @@
agbenchmark_config/workspace/
backend/backend_stdout.txt
reports/df*.pkl
reports/raw*
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.DS_Store
secrets.json
agbenchmark_config/challenges_already_beaten.json
agbenchmark_config/challenges/pri_*
agbenchmark_config/updates.json
agbenchmark_config/reports/*
agbenchmark_config/reports/success_rate.json
agbenchmark_config/reports/regression_tests.json

21
classic/benchmark/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 AutoGPT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,25 @@
# Auto-GPT Benchmarks
Built for the purpose of benchmarking the performance of agents regardless of how they work.
Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.
Save time and money while doing it through smart dependencies. The best part? It's all automated.
## Scores:
<img width="733" alt="Screenshot 2023-07-25 at 10 35 01 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/98963e0b-18b9-4b17-9a6a-4d3e4418af70">
## Ranking overall:
1. [Beebot](https://github.com/AutoPackAI/beebot)
2. [mini-agi](https://github.com/muellerberndt/mini-agi)
3. [Auto-GPT](https://github.com/Significant-Gravitas/AutoGPT)
## Detailed results:
<img width="733" alt="Screenshot 2023-07-25 at 10 42 15 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/39be464c-c842-4437-b28a-07d878542a83">
[Click here to see the results and the raw data!](https://docs.google.com/spreadsheets/d/1WXm16P2AHNbKpkOI0LYBpcsGG0O7D8HYTG5Uj0PaJjA/edit#gid=203558751)
More agents coming soon!

View File

@@ -0,0 +1,69 @@
## As a user
1. `pip install auto-gpt-benchmarks`
2. Add boilerplate code to run and kill your agent (a minimal sketch is shown after this list)
3. `agbenchmark`
- `--category challenge_category` to run tests in a specific category
- `--mock` to only run mock tests, if they exist for each test
- `--noreg` to skip any tests that have passed in the past. When you run without this flag and a previously passing challenge fails, it will no longer be counted as a regression test
4. We call the boilerplate code for your agent
5. Show the pass rate of tests, logs, and any other metrics
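The boilerplate from step 2 can be as small as a wrapper that starts your agent, invokes `agbenchmark` with the flags above, and shuts the agent down when the run finishes. A minimal sketch follows; the agent start command and the chosen category are placeholders, not something this repo provides:

```python
# Hypothetical run-and-kill boilerplate around agbenchmark (step 2 above).
# AGENT_START_CMD is a placeholder; point it at whatever starts your agent's API.
import subprocess

AGENT_START_CMD = ["python", "-m", "my_agent", "serve"]  # placeholder command


def run_benchmark_against_agent() -> int:
    agent = subprocess.Popen(AGENT_START_CMD)  # run the agent
    try:
        # Flags from step 3: one category, mock tests only, skip previously passed tests
        result = subprocess.run(
            ["agbenchmark", "--category", "retrieval", "--mock", "--noreg"],
            check=False,
        )
        return result.returncode
    finally:
        agent.terminate()  # kill the agent
        agent.wait(timeout=30)


if __name__ == "__main__":
    raise SystemExit(run_benchmark_against_agent())
```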
## Contributing
##### Diagrams: https://whimsical.com/agbenchmark-5n4hXBq1ZGzBwRsK4TVY7x
### To run the existing mocks
1. clone the repo `auto-gpt-benchmarks`
2. `pip install poetry`
3. `poetry shell`
4. `poetry install`
5. `cp .env_example .env`
6. `git submodule update --init --remote --recursive`
7. `uvicorn server:app --reload`
8. `agbenchmark --mock`
Keep config the same and watch the logs :)
### To run with mini-agi
1. Navigate to `auto-gpt-benchmarks/agent/mini-agi`
2. `pip install -r requirements.txt`
3. `cp .env_example .env`, set `PROMPT_USER=false` and add your `OPENAI_API_KEY=`. Set `MODEL="gpt-3.5-turbo"` if you don't have access to `gpt-4` yet. Also make sure you have Python 3.10 or higher installed
4. Set `AGENT_NAME=mini-agi` in the `.env` file, along with the `REPORTS_FOLDER` you want reports written to
5. Make sure to follow the commands above, then run `agbenchmark` without the `--mock` flag
- To add requirements `poetry add requirement`.
Feel free to create PRs to merge with `main` at will (but also feel free to ask for review); if you don't have access, send a message in the R&D chat.
If you push at any point and break things (it happens to everyone), fix it ASAP. Step 1 is to revert `master` to the last working commit.
Let people know what your beautiful code does, and document everything well.
Share your progress :)
#### Dataset
Manually created challenges, existing challenges within Auto-GPT, and https://osu-nlp-group.github.io/Mind2Web/
## How do I add new agents to agbenchmark?
Example with smol developer.
1. Create a GitHub branch with your agent, following the same pattern as this example:
https://github.com/smol-ai/developer/pull/114/files
2. Create the submodule and the GitHub workflow by following the same pattern as this example:
https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/pull/48/files
## How do I run my agent in different environments?
**To just use as the benchmark for your agent**: `pip install` the package and run `agbenchmark`.
**For internal Auto-GPT CI runs**: specify the `AGENT_NAME` you want to use and set the `HOME_ENV`.
Ex. `AGENT_NAME=mini-agi`
**To develop your agent alongside the benchmark**: specify the `AGENT_NAME` you want to use and add it as a submodule to the repo.
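How these environment variables might be consumed is sketched below; the variable names come from this section (and the `.env` example at the top of this diff), but the branching logic is illustrative rather than agbenchmark's actual behaviour:

```python
# Illustrative only: read the env vars named above (AGENT_NAME, HOME_ENV, REPORTS_FOLDER).
# python-dotenv is already used by the benchmark itself to load .env files.
import os

from dotenv import load_dotenv

load_dotenv()

agent_name = os.getenv("AGENT_NAME", "mini-agi")
home_env = os.getenv("HOME_ENV", "local")  # e.g. "ci" for internal Auto-GPT runs
reports_folder = os.getenv("REPORTS_FOLDER", f"reports/{agent_name}")

print(f"Benchmarking {agent_name} ({home_env}); reports will be written to {reports_folder}")
```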

View File

@@ -0,0 +1,352 @@
import logging
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
import click
from click_default_group import DefaultGroup
from dotenv import load_dotenv
from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.logging import configure_logging
load_dotenv()
# try:
# if os.getenv("HELICONE_API_KEY"):
# import helicone # noqa
# helicone_enabled = True
# else:
# helicone_enabled = False
# except ImportError:
# helicone_enabled = False
class InvalidInvocationError(ValueError):
pass
logger = logging.getLogger(__name__)
BENCHMARK_START_TIME_DT = datetime.now(timezone.utc)
BENCHMARK_START_TIME = BENCHMARK_START_TIME_DT.strftime("%Y-%m-%dT%H:%M:%S+00:00")
# if helicone_enabled:
# from helicone.lock import HeliconeLockManager
# HeliconeLockManager.write_custom_property(
# "benchmark_start_time", BENCHMARK_START_TIME
# )
@click.group(cls=DefaultGroup, default_if_no_args=True)
@click.option("--debug", is_flag=True, help="Enable debug output")
def cli(
debug: bool,
) -> Any:
configure_logging(logging.DEBUG if debug else logging.INFO)
@cli.command(hidden=True)
def start():
raise DeprecationWarning(
"`agbenchmark start` is deprecated. Use `agbenchmark run` instead."
)
@cli.command(default=True)
@click.option(
"-N", "--attempts", default=1, help="Number of times to run each challenge."
)
@click.option(
"-c",
"--category",
multiple=True,
help="(+) Select a category to run.",
)
@click.option(
"-s",
"--skip-category",
multiple=True,
help="(+) Exclude a category from running.",
)
@click.option("--test", multiple=True, help="(+) Select a test to run.")
@click.option("--maintain", is_flag=True, help="Run only regression tests.")
@click.option("--improve", is_flag=True, help="Run only non-regression tests.")
@click.option(
"--explore",
is_flag=True,
help="Run only challenges that have never been beaten.",
)
@click.option(
"--no-dep",
is_flag=True,
help="Run all (selected) challenges, regardless of dependency success/failure.",
)
@click.option("--cutoff", type=int, help="Override the challenge time limit (seconds).")
@click.option("--nc", is_flag=True, help="Disable the challenge time limit.")
@click.option("--mock", is_flag=True, help="Run with mock")
@click.option("--keep-answers", is_flag=True, help="Keep answers")
@click.option(
"--backend",
is_flag=True,
help="Write log output to a file instead of the terminal.",
)
# @click.argument(
# "agent_path",
# type=click.Path(exists=True, file_okay=False, path_type=Path),
# required=False,
# )
def run(
maintain: bool,
improve: bool,
explore: bool,
mock: bool,
no_dep: bool,
nc: bool,
keep_answers: bool,
test: tuple[str],
category: tuple[str],
skip_category: tuple[str],
attempts: int,
cutoff: Optional[int] = None,
backend: Optional[bool] = False,
# agent_path: Optional[Path] = None,
) -> None:
"""
Run the benchmark on the agent in the current directory.
Options marked with (+) can be specified multiple times, to select multiple items.
"""
from agbenchmark.main import run_benchmark, validate_args
agbenchmark_config = AgentBenchmarkConfig.load()
logger.debug(f"agbenchmark_config: {agbenchmark_config.agbenchmark_config_dir}")
try:
validate_args(
maintain=maintain,
improve=improve,
explore=explore,
tests=test,
categories=category,
skip_categories=skip_category,
no_cutoff=nc,
cutoff=cutoff,
)
except InvalidInvocationError as e:
logger.error("Error: " + "\n".join(e.args))
sys.exit(1)
original_stdout = sys.stdout # Save the original standard output
exit_code = None
if backend:
with open("backend/backend_stdout.txt", "w") as f:
sys.stdout = f
exit_code = run_benchmark(
config=agbenchmark_config,
maintain=maintain,
improve=improve,
explore=explore,
mock=mock,
no_dep=no_dep,
no_cutoff=nc,
keep_answers=keep_answers,
tests=test,
categories=category,
skip_categories=skip_category,
attempts_per_challenge=attempts,
cutoff=cutoff,
)
sys.stdout = original_stdout
else:
exit_code = run_benchmark(
config=agbenchmark_config,
maintain=maintain,
improve=improve,
explore=explore,
mock=mock,
no_dep=no_dep,
no_cutoff=nc,
keep_answers=keep_answers,
tests=test,
categories=category,
skip_categories=skip_category,
attempts_per_challenge=attempts,
cutoff=cutoff,
)
sys.exit(exit_code)
@cli.command()
@click.option("--port", type=int, help="Port to run the API on.")
def serve(port: Optional[int] = None):
"""Serve the benchmark frontend and API on port 8080."""
import uvicorn
from agbenchmark.app import setup_fastapi_app
config = AgentBenchmarkConfig.load()
app = setup_fastapi_app(config)
# Run the FastAPI application using uvicorn
port = port or int(os.getenv("PORT", 8080))
uvicorn.run(app, host="0.0.0.0", port=port)
@cli.command()
def config():
"""Displays info regarding the present AGBenchmark config."""
from .utils.utils import pretty_print_model
try:
config = AgentBenchmarkConfig.load()
except FileNotFoundError as e:
click.echo(e, err=True)
return 1
pretty_print_model(config, include_header=False)
@cli.group()
def challenge():
logging.getLogger().setLevel(logging.WARNING)
@challenge.command("list")
@click.option(
"--all", "include_unavailable", is_flag=True, help="Include unavailable challenges."
)
@click.option(
"--names", "only_names", is_flag=True, help="List only the challenge names."
)
@click.option("--json", "output_json", is_flag=True)
def list_challenges(include_unavailable: bool, only_names: bool, output_json: bool):
"""Lists [available|all] challenges."""
import json
from tabulate import tabulate
from .challenges.builtin import load_builtin_challenges
from .challenges.webarena import load_webarena_challenges
from .utils.data_types import Category, DifficultyLevel
from .utils.utils import sorted_by_enum_index
DIFFICULTY_COLORS = {
difficulty: color
for difficulty, color in zip(
DifficultyLevel,
["black", "blue", "cyan", "green", "yellow", "red", "magenta", "white"],
)
}
CATEGORY_COLORS = {
category: f"bright_{color}"
for category, color in zip(
Category,
["blue", "cyan", "green", "yellow", "magenta", "red", "white", "black"],
)
}
# Load challenges
challenges = filter(
lambda c: c.info.available or include_unavailable,
[
*load_builtin_challenges(),
*load_webarena_challenges(skip_unavailable=False),
],
)
challenges = sorted_by_enum_index(
challenges, DifficultyLevel, key=lambda c: c.info.difficulty
)
if only_names:
if output_json:
click.echo(json.dumps([c.info.name for c in challenges]))
return
for c in challenges:
click.echo(
click.style(c.info.name, fg=None if c.info.available else "black")
)
return
if output_json:
click.echo(
json.dumps([json.loads(c.info.model_dump_json()) for c in challenges])
)
return
headers = tuple(
click.style(h, bold=True) for h in ("Name", "Difficulty", "Categories")
)
table = [
tuple(
v if challenge.info.available else click.style(v, fg="black")
for v in (
challenge.info.name,
(
click.style(
challenge.info.difficulty.value,
fg=DIFFICULTY_COLORS[challenge.info.difficulty],
)
if challenge.info.difficulty
else click.style("-", fg="black")
),
" ".join(
click.style(cat.value, fg=CATEGORY_COLORS[cat])
for cat in sorted_by_enum_index(challenge.info.category, Category)
),
)
)
for challenge in challenges
]
click.echo(tabulate(table, headers=headers))
@challenge.command()
@click.option("--json", is_flag=True)
@click.argument("name")
def info(name: str, json: bool):
from itertools import chain
from .challenges.builtin import load_builtin_challenges
from .challenges.webarena import load_webarena_challenges
from .utils.utils import pretty_print_model
for challenge in chain(
load_builtin_challenges(),
load_webarena_challenges(skip_unavailable=False),
):
if challenge.info.name != name:
continue
if json:
click.echo(challenge.info.model_dump_json())
break
pretty_print_model(challenge.info)
break
else:
click.echo(click.style(f"Unknown challenge '{name}'", fg="red"), err=True)
@cli.command()
def version():
"""Print version info for the AGBenchmark application."""
import toml
package_root = Path(__file__).resolve().parent.parent
pyproject = toml.load(package_root / "pyproject.toml")
version = pyproject["tool"]["poetry"]["version"]
click.echo(f"AGBenchmark version {version}")
if __name__ == "__main__":
cli()

View File

@@ -0,0 +1,111 @@
import logging
import time
from pathlib import Path
from typing import AsyncIterator, Optional
from agent_protocol_client import (
AgentApi,
ApiClient,
Configuration,
Step,
TaskRequestBody,
)
from agbenchmark.agent_interface import get_list_of_file_paths
from agbenchmark.config import AgentBenchmarkConfig
logger = logging.getLogger(__name__)
async def run_api_agent(
task: str,
config: AgentBenchmarkConfig,
timeout: int,
artifacts_location: Optional[Path] = None,
*,
mock: bool = False,
) -> AsyncIterator[Step]:
configuration = Configuration(host=config.host)
async with ApiClient(configuration) as api_client:
api_instance = AgentApi(api_client)
task_request_body = TaskRequestBody(input=task, additional_input=None)
start_time = time.time()
response = await api_instance.create_agent_task(
task_request_body=task_request_body
)
task_id = response.task_id
if artifacts_location:
logger.debug("Uploading task input artifacts to agent...")
await upload_artifacts(
api_instance, artifacts_location, task_id, "artifacts_in"
)
logger.debug("Running agent until finished or timeout...")
while True:
step = await api_instance.execute_agent_task_step(task_id=task_id)
yield step
if time.time() - start_time > timeout:
raise TimeoutError("Time limit exceeded")
if step and mock:
step.is_last = True
if not step or step.is_last:
break
if artifacts_location:
# In "mock" mode, we cheat by giving the correct artifacts to pass the test
if mock:
logger.debug("Uploading mock artifacts to agent...")
await upload_artifacts(
api_instance, artifacts_location, task_id, "artifacts_out"
)
logger.debug("Downloading agent artifacts...")
await download_agent_artifacts_into_folder(
api_instance, task_id, config.temp_folder
)
async def download_agent_artifacts_into_folder(
api_instance: AgentApi, task_id: str, folder: Path
):
artifacts = await api_instance.list_agent_task_artifacts(task_id=task_id)
    for artifact in artifacts.artifacts:
        # Resolve this artifact's destination without mutating `folder`, so the
        # next artifact is still placed relative to the original download root.
        dest_folder = folder
        if artifact.relative_path:
            path: str = (
                artifact.relative_path
                if not artifact.relative_path.startswith("/")
                else artifact.relative_path[1:]
            )
            dest_folder = (folder / path).parent
        if not dest_folder.exists():
            dest_folder.mkdir(parents=True)

        file_path = dest_folder / artifact.file_name
        logger.debug(f"Downloading agent artifact {artifact.file_name} to {dest_folder}")
        with open(file_path, "wb") as f:
            content = await api_instance.download_agent_task_artifact(
                task_id=task_id, artifact_id=artifact.artifact_id
            )
            f.write(content)
async def upload_artifacts(
api_instance: AgentApi, artifacts_location: Path, task_id: str, type: str
) -> None:
for file_path in get_list_of_file_paths(artifacts_location, type):
relative_path: Optional[str] = "/".join(
str(file_path).split(f"{type}/", 1)[-1].split("/")[:-1]
)
if not relative_path:
relative_path = None
await api_instance.upload_agent_task_artifacts(
task_id=task_id, file=str(file_path), relative_path=relative_path
)

View File

@@ -0,0 +1,27 @@
import os
import shutil
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
HELICONE_GRAPHQL_LOGS = os.getenv("HELICONE_GRAPHQL_LOGS", "").lower() == "true"
def get_list_of_file_paths(
challenge_dir_path: str | Path, artifact_folder_name: str
) -> list[Path]:
source_dir = Path(challenge_dir_path) / artifact_folder_name
if not source_dir.exists():
return []
return list(source_dir.iterdir())
def copy_challenge_artifacts_into_workspace(
challenge_dir_path: str | Path, artifact_folder_name: str, workspace: str | Path
) -> None:
file_paths = get_list_of_file_paths(challenge_dir_path, artifact_folder_name)
for file_path in file_paths:
if file_path.is_file():
shutil.copy(file_path, workspace)

View File

@@ -0,0 +1,339 @@
import datetime
import glob
import json
import logging
import sys
import time
import uuid
from collections import deque
from multiprocessing import Process
from pathlib import Path
from typing import Optional
import httpx
import psutil
from agent_protocol_client import AgentApi, ApiClient, ApiException, Configuration
from agent_protocol_client.models import Task, TaskRequestBody
from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, ConfigDict, ValidationError
from agbenchmark.challenges import ChallengeInfo
from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.processing.report_types_v2 import (
BenchmarkRun,
Metrics,
RepositoryInfo,
RunDetails,
TaskInfo,
)
from agbenchmark.schema import TaskEvalRequestBody
from agbenchmark.utils.utils import write_pretty_json
sys.path.append(str(Path(__file__).parent.parent))
logger = logging.getLogger(__name__)
CHALLENGES: dict[str, ChallengeInfo] = {}
challenges_path = Path(__file__).parent / "challenges"
challenge_spec_files = deque(
glob.glob(
f"{challenges_path}/**/data.json",
recursive=True,
)
)
logger.debug("Loading challenges...")
while challenge_spec_files:
challenge_spec_file = Path(challenge_spec_files.popleft())
challenge_relpath = challenge_spec_file.relative_to(challenges_path.parent)
if challenge_relpath.is_relative_to("challenges/deprecated"):
continue
logger.debug(f"Loading {challenge_relpath}...")
try:
challenge_info = ChallengeInfo.model_validate_json(
challenge_spec_file.read_text()
)
except ValidationError as e:
if logging.getLogger().level == logging.DEBUG:
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
continue
if not challenge_info.eval_id:
challenge_info.eval_id = str(uuid.uuid4())
# this will sort all the keys of the JSON systematically
# so that the order is always the same
write_pretty_json(challenge_info.model_dump(), challenge_spec_file)
CHALLENGES[challenge_info.eval_id] = challenge_info
class BenchmarkTaskInfo(BaseModel):
task_id: str
start_time: datetime.datetime
challenge_info: ChallengeInfo
task_informations: dict[str, BenchmarkTaskInfo] = {}
def find_agbenchmark_without_uvicorn():
pids = []
for process in psutil.process_iter(
attrs=[
"pid",
"cmdline",
"name",
"username",
"status",
"cpu_percent",
"memory_info",
"create_time",
"cwd",
"connections",
]
):
try:
# Convert the process.info dictionary values to strings and concatenate them
full_info = " ".join([str(v) for k, v in process.as_dict().items()])
if "agbenchmark" in full_info and "uvicorn" not in full_info:
pids.append(process.pid)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return pids
class CreateReportRequest(BaseModel):
test: str
test_run_id: str
# category: Optional[str] = []
mock: Optional[bool] = False
model_config = ConfigDict(extra="forbid")
updates_list = []
origins = [
"http://localhost:8000",
"http://localhost:8080",
"http://127.0.0.1:5000",
"http://localhost:5000",
]
def stream_output(pipe):
for line in pipe:
print(line, end="")
def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
from agbenchmark.agent_api_interface import upload_artifacts
from agbenchmark.challenges import get_challenge_from_source_uri
from agbenchmark.main import run_benchmark
configuration = Configuration(
host=agbenchmark_config.host or "http://localhost:8000"
)
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
router = APIRouter()
@router.post("/reports")
def run_single_test(body: CreateReportRequest) -> dict:
pids = find_agbenchmark_without_uvicorn()
logger.info(f"pids already running with agbenchmark: {pids}")
logger.debug(f"Request to /reports: {body.model_dump()}")
# Start the benchmark in a separate thread
benchmark_process = Process(
target=lambda: run_benchmark(
config=agbenchmark_config,
tests=(body.test,),
mock=body.mock or False,
)
)
benchmark_process.start()
# Wait for the benchmark to finish, with a timeout of 200 seconds
timeout = 200
start_time = time.time()
while benchmark_process.is_alive():
if time.time() - start_time > timeout:
logger.warning(f"Benchmark run timed out after {timeout} seconds")
benchmark_process.terminate()
break
time.sleep(1)
else:
logger.debug(f"Benchmark finished running in {time.time() - start_time} s")
# List all folders in the current working directory
reports_folder = agbenchmark_config.reports_folder
folders = [folder for folder in reports_folder.iterdir() if folder.is_dir()]
# Sort the folders based on their names
sorted_folders = sorted(folders, key=lambda x: x.name)
# Get the last folder
latest_folder = sorted_folders[-1] if sorted_folders else None
# Read report.json from this folder
if latest_folder:
report_path = latest_folder / "report.json"
logger.debug(f"Getting latest report from {report_path}")
if report_path.exists():
with report_path.open() as file:
data = json.load(file)
logger.debug(f"Report data: {data}")
else:
raise HTTPException(
502,
"Could not get result after running benchmark: "
f"'report.json' does not exist in '{latest_folder}'",
)
else:
raise HTTPException(
504, "Could not get result after running benchmark: no reports found"
)
return data
@router.post("/agent/tasks", tags=["agent"])
async def create_agent_task(task_eval_request: TaskEvalRequestBody) -> Task:
"""
Creates a new task using the provided TaskEvalRequestBody and returns a Task.
Args:
task_eval_request: `TaskRequestBody` including an eval_id.
Returns:
Task: A new task with task_id, input, additional_input,
and empty lists for artifacts and steps.
Example:
Request (TaskEvalRequestBody defined in schema.py):
{
...,
"eval_id": "50da533e-3904-4401-8a07-c49adf88b5eb"
}
Response (Task defined in `agent_protocol_client.models`):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"artifacts": []
}
"""
try:
challenge_info = CHALLENGES[task_eval_request.eval_id]
async with ApiClient(configuration) as api_client:
api_instance = AgentApi(api_client)
task_input = challenge_info.task
task_request_body = TaskRequestBody(
input=task_input, additional_input=None
)
task_response = await api_instance.create_agent_task(
task_request_body=task_request_body
)
task_info = BenchmarkTaskInfo(
task_id=task_response.task_id,
start_time=datetime.datetime.now(datetime.timezone.utc),
challenge_info=challenge_info,
)
task_informations[task_info.task_id] = task_info
if input_artifacts_dir := challenge_info.task_artifacts_dir:
await upload_artifacts(
api_instance,
input_artifacts_dir,
task_response.task_id,
"artifacts_in",
)
return task_response
except ApiException as e:
logger.error(f"Error whilst trying to create a task:\n{e}")
logger.error(
"The above error was caused while processing request: "
f"{task_eval_request}"
)
raise HTTPException(500)
@router.post("/agent/tasks/{task_id}/steps")
async def proxy(request: Request, task_id: str):
timeout = httpx.Timeout(300.0, read=300.0) # 5 minutes
async with httpx.AsyncClient(timeout=timeout) as client:
# Construct the new URL
new_url = f"{configuration.host}/ap/v1/agent/tasks/{task_id}/steps"
# Forward the request
response = await client.post(
new_url,
content=await request.body(),
headers=dict(request.headers),
)
# Return the response from the forwarded request
return Response(content=response.content, status_code=response.status_code)
@router.post("/agent/tasks/{task_id}/evaluations")
async def create_evaluation(task_id: str) -> BenchmarkRun:
task_info = task_informations[task_id]
challenge = get_challenge_from_source_uri(task_info.challenge_info.source_uri)
try:
async with ApiClient(configuration) as api_client:
api_instance = AgentApi(api_client)
eval_results = await challenge.evaluate_task_state(
api_instance, task_id
)
eval_info = BenchmarkRun(
repository_info=RepositoryInfo(),
run_details=RunDetails(
command=f"agbenchmark --test={challenge.info.name}",
benchmark_start_time=(
task_info.start_time.strftime("%Y-%m-%dT%H:%M:%S+00:00")
),
test_name=challenge.info.name,
),
task_info=TaskInfo(
data_path=challenge.info.source_uri,
is_regression=None,
category=[c.value for c in challenge.info.category],
task=challenge.info.task,
answer=challenge.info.reference_answer or "",
description=challenge.info.description or "",
),
metrics=Metrics(
success=all(e.passed for e in eval_results),
success_percentage=(
100 * sum(e.score for e in eval_results) / len(eval_results)
if eval_results # avoid division by 0
else 0
),
attempted=True,
),
config={},
)
logger.debug(
f"Returning evaluation data:\n{eval_info.model_dump_json(indent=4)}"
)
return eval_info
except ApiException as e:
logger.error(f"Error {e} whilst trying to evaluate task: {task_id}")
raise HTTPException(500)
app.include_router(router, prefix="/ap/v1")
return app

View File

@@ -1,98 +1,19 @@
import logging
from abc import ABC, abstractmethod
from pathlib import Path
-from typing import Any, AsyncIterator, Awaitable, ClassVar, Optional
+from typing import AsyncIterator, Awaitable, ClassVar, Optional

import pytest
-from agbenchmark.config import AgentBenchmarkConfig
-from agbenchmark.utils.data_types import Category, DifficultyLevel, EvalResult
from agent_protocol_client import AgentApi, Step
from colorama import Fore, Style
from pydantic import BaseModel, Field
+
+from agbenchmark.config import AgentBenchmarkConfig
+from agbenchmark.utils.data_types import Category, DifficultyLevel, EvalResult

logger = logging.getLogger(__name__)

-
-def format_step_output(step: Step, step_num: int, challenge_name: str) -> str:
-    """Format a step for concise, informative console output.
-
-    Format: [Challenge] step N: tool_name(args) result [$cost]
-    """
-    parts = [f"[{challenge_name}]", f"step {step_num}:"]
-
-    # Get additional_output data
-    ao: dict[str, Any] = step.additional_output or {}
-
-    # Get the tool being used in this step
-    use_tool = ao.get("use_tool", {})
-    tool_name = use_tool.get("name", "")
-    tool_args = use_tool.get("arguments", {})
-
-    if tool_name:
-        # Format tool call with abbreviated arguments
-        args_str = _format_tool_args(tool_name, tool_args)
-        parts.append(f"{Fore.CYAN}{tool_name}{Fore.RESET}({args_str})")
-    else:
-        parts.append(f"{Fore.YELLOW}(no tool){Fore.RESET}")
-
-    # Get result from last action (this step's tool will be executed next iteration)
-    last_action = ao.get("last_action", {})
-    if last_action:
-        result = last_action.get("result", {})
-        if isinstance(result, dict):
-            if result.get("error"):
-                parts.append(f"{Fore.RED}error{Fore.RESET}")
-            elif result.get("status") == "success":
-                parts.append(f"{Fore.GREEN}{Fore.RESET}")
-
-    # Add cost if available
-    cost = ao.get("task_cumulative_cost", 0)
-    if cost > 0:
-        parts.append(f"{Fore.BLUE}${cost:.3f}{Fore.RESET}")
-
-    return " ".join(parts)
-
-
-def _format_tool_args(tool_name: str, args: dict) -> str:
-    """Format tool arguments for display, keeping it concise."""
-    if not args:
-        return ""
-
-    # For common tools, show the most relevant argument
-    key_args = {
-        "read_file": ["filename"],
-        "write_file": ["filename"],
-        "open_file": ["filename", "file_path"],
-        "execute_python": ["filename"],
-        "execute_shell": ["command_line"],
-        "web_search": ["query"],
-        "read_webpage": ["url"],
-        "finish": ["reason"],
-        "ask_user": ["question"],
-        "todo_write": [],  # Skip args for todo_write (too verbose)
-    }
-
-    if tool_name in key_args:
-        keys = key_args[tool_name]
-        if not keys:
-            return "..."
-        values = [str(args.get(k, ""))[:40] for k in keys if k in args]
-        if values:
-            return ", ".join(
-                f'"{v}"' if " " not in v else f'"{v[:20]}..."' for v in values
-            )
-
-    # Default: show first arg value, abbreviated
-    if args:
-        first_key = next(iter(args))
-        first_val = str(args[first_key])[:30]
-        return f'{first_key}="{first_val}"' + (
-            "..." if len(str(args[first_key])) > 30 else ""
-        )
-
-    return ""
-
-
class ChallengeInfo(BaseModel):
    eval_id: str = ""
    name: str
@@ -174,7 +95,7 @@ class BaseChallenge(ABC):
            cls.info.task, config, timeout, cls.info.task_artifacts_dir, mock=mock
        ):
            i += 1
-            print(format_step_output(step, i, cls.info.name))
+            print(f"[{cls.info.name}] - step {step.name} ({i}. request)")
            yield step
        logger.debug(f"Finished {cls.info.name} challenge run")
@@ -182,4 +103,5 @@ class BaseChallenge(ABC):
    @abstractmethod
    async def evaluate_task_state(
        cls, agent: AgentApi, task_id: str
-    ) -> list[EvalResult]: ...
+    ) -> list[EvalResult]:
+        ...

View File

@@ -10,16 +10,6 @@ from pathlib import Path
from typing import Annotated, Any, ClassVar, Iterator, Literal, Optional

import pytest
-from agbenchmark.agent_api_interface import download_agent_artifacts_into_folder
-from agbenchmark.agent_interface import copy_challenge_artifacts_into_workspace
-from agbenchmark.config import AgentBenchmarkConfig
-from agbenchmark.utils.data_types import Category, DifficultyLevel, EvalResult
-from agbenchmark.utils.prompts import (
-    END_PROMPT,
-    FEW_SHOT_EXAMPLES,
-    PROMPT_MAP,
-    SCORING_MAP,
-)
from agent_protocol_client import AgentApi, ApiClient
from agent_protocol_client import Configuration as ClientConfig
from agent_protocol_client import Step
@@ -33,6 +23,17 @@ from pydantic import (
    field_validator,
)

+from agbenchmark.agent_api_interface import download_agent_artifacts_into_folder
+from agbenchmark.agent_interface import copy_challenge_artifacts_into_workspace
+from agbenchmark.config import AgentBenchmarkConfig
+from agbenchmark.utils.data_types import Category, DifficultyLevel, EvalResult
+from agbenchmark.utils.prompts import (
+    END_PROMPT,
+    FEW_SHOT_EXAMPLES,
+    PROMPT_MAP,
+    SCORING_MAP,
+)
+
from .base import BaseChallenge, ChallengeInfo

logger = logging.getLogger(__name__)
@@ -68,9 +69,9 @@ class BuiltinChallengeSpec(BaseModel):
    class Eval(BaseModel):
        type: str
        scoring: Optional[Literal["percentage", "scale", "binary"]] = None
-        template: Optional[Literal["rubric", "reference", "question", "custom"]] = (
-            None
-        )
+        template: Optional[
+            Literal["rubric", "reference", "question", "custom"]
+        ] = None
        examples: Optional[str] = None

        @field_validator("scoring", "template")
@@ -227,11 +228,9 @@ class BuiltinChallenge(BaseChallenge):
        request.node.user_properties.append(
            (
                "answers",
-                (
-                    [r.result for r in eval_results]
-                    if request.config.getoption("--keep-answers")
-                    else None
-                ),
+                [r.result for r in eval_results]
+                if request.config.getoption("--keep-answers")
+                else None,
            )
        )
        request.node.user_properties.append(("scores", [r.score for r in eval_results]))

Some files were not shown because too many files have changed in this diff.