mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-30 09:28:19 -05:00
Compare commits
71 Commits
feat/sub-a
...
make-old-w
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d8d87f2853 | ||
|
|
791e1d8982 | ||
|
|
0040636948 | ||
|
|
c671af851f | ||
|
|
7dd181f4b0 | ||
|
|
114856cef1 | ||
|
|
68b9bd0c51 | ||
|
|
ff076b1f15 | ||
|
|
57fbab500b | ||
|
|
6faabef24d | ||
|
|
a67d475a69 | ||
|
|
326554d89a | ||
|
|
5e22a1888a | ||
|
|
a4d7b0142f | ||
|
|
7d6375f59c | ||
|
|
aeec0ce509 | ||
|
|
b32bfcaac5 | ||
|
|
5373a6eb6e | ||
|
|
98cde46ccb | ||
|
|
bd10da10d9 | ||
|
|
60fdee1345 | ||
|
|
6f2783468c | ||
|
|
c1031b286d | ||
|
|
b849eafb7f | ||
|
|
572c3f5e0d | ||
|
|
89003a585d | ||
|
|
0e65785228 | ||
|
|
f07dff1cdd | ||
|
|
00e02a4696 | ||
|
|
634bff8277 | ||
|
|
d591f36c7b | ||
|
|
a347bed0b1 | ||
|
|
4eeb6ee2b0 | ||
|
|
7db962b9f9 | ||
|
|
9108b21541 | ||
|
|
ffe9325296 | ||
|
|
0a616d9267 | ||
|
|
ab95077e5b | ||
|
|
e477150979 | ||
|
|
804430e243 | ||
|
|
acb320d32d | ||
|
|
32f68d5999 | ||
|
|
49f56b4e8d | ||
|
|
bead811e73 | ||
|
|
013f728ebf | ||
|
|
cda9572acd | ||
|
|
e0784f8f6b | ||
|
|
3040f39136 | ||
|
|
515504c604 | ||
|
|
18edeaeaf4 | ||
|
|
44182aff9c | ||
|
|
864c5a7846 | ||
|
|
699fffb1a8 | ||
|
|
f0641c2d26 | ||
|
|
94b6f74c95 | ||
|
|
46aabab3ea | ||
|
|
0a65df5102 | ||
|
|
6fbd208fe3 | ||
|
|
8fc174ca87 | ||
|
|
cacc89790f | ||
|
|
b9113bee02 | ||
|
|
3f65da03e7 | ||
|
|
9e96d11b2d | ||
|
|
4c264b7ae9 | ||
|
|
0adbc0bd05 | ||
|
|
8f3291bc92 | ||
|
|
7a20de880d | ||
|
|
ef8a6d2528 | ||
|
|
fd66be2aaa | ||
|
|
ae2cc97dc4 | ||
|
|
ea521eed26 |
73
.github/workflows/classic-autogpt-ci.yml
vendored
73
.github/workflows/classic-autogpt-ci.yml
vendored
@@ -6,11 +6,15 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/forge/**'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/forge/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
@@ -19,47 +23,22 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/original_autogpt
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
- name: Start MinIO service
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -71,41 +50,23 @@ jobs:
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: "3.12"
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
@@ -116,12 +77,12 @@ jobs:
|
||||
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--numprocesses=logical --durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
tests/unit tests/integration
|
||||
original_autogpt/tests/unit original_autogpt/tests/integration
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
S3_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
@@ -135,11 +96,11 @@ jobs:
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: autogpt-agent,${{ runner.os }}
|
||||
flags: autogpt-agent
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: classic/original_autogpt/logs/
|
||||
path: classic/logs/
|
||||
|
||||
36
.github/workflows/classic-autogpts-ci.yml
vendored
36
.github/workflows/classic-autogpts-ci.yml
vendored
@@ -11,9 +11,6 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
@@ -22,9 +19,6 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
|
||||
defaults:
|
||||
@@ -35,13 +29,9 @@ defaults:
|
||||
jobs:
|
||||
serve-agent-protocol:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [ original_autogpt ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
min-python-version: '3.12'
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -55,22 +45,22 @@ jobs:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Install Poetry
|
||||
working-directory: ./classic/${{ matrix.agent-name }}/
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Run regression tests
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run smoke tests with direct-benchmark
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
poetry run direct-benchmark run \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests ReadFile,WriteFile \
|
||||
--json
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
AGENT_NAME: ${{ matrix.agent-name }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
|
||||
HELICONE_CACHE_ENABLED: false
|
||||
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
|
||||
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
CI: true
|
||||
|
||||
194
.github/workflows/classic-benchmark-ci.yml
vendored
194
.github/workflows/classic-benchmark-ci.yml
vendored
@@ -1,17 +1,21 @@
|
||||
name: Classic - AGBenchmark CI
|
||||
name: Classic - Direct Benchmark CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, dev, ci-test* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/benchmark/agbenchmark/challenges/**'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- .github/workflows/classic-benchmark-ci.yml
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/benchmark/agbenchmark/challenges/**'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- .github/workflows/classic-benchmark-ci.yml
|
||||
|
||||
concurrency:
|
||||
@@ -23,23 +27,16 @@ defaults:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
min-python-version: '3.12'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
benchmark-tests:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/benchmark
|
||||
working-directory: classic
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -47,71 +44,88 @@ jobs:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run pytest with coverage
|
||||
- name: Run basic benchmark tests
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
tests
|
||||
echo "Testing ReadFile challenge with one_shot strategy..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests ReadFile \
|
||||
--json
|
||||
|
||||
echo "Testing WriteFile challenge..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests WriteFile \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
- name: Upload test results to Codecov
|
||||
if: ${{ !cancelled() }} # Run even if tests fail
|
||||
uses: codecov/test-results-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
- name: Test category filtering
|
||||
run: |
|
||||
echo "Testing coding category..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--categories coding \
|
||||
--tests ReadFile,WriteFile \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: agbenchmark,${{ runner.os }}
|
||||
- name: Test multiple strategies
|
||||
run: |
|
||||
echo "Testing multiple strategies..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot,plan_execute \
|
||||
--models claude \
|
||||
--tests ReadFile \
|
||||
--parallel 2 \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
self-test-with-agent:
|
||||
# Run regression tests on maintain challenges
|
||||
regression-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [forge]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 45
|
||||
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -126,51 +140,23 @@ jobs:
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run regression tests
|
||||
working-directory: classic
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
|
||||
set +e # Ignore non-zero exit codes and continue execution
|
||||
echo "Running the following command: poetry run agbenchmark --maintain --mock"
|
||||
poetry run agbenchmark --maintain --mock
|
||||
EXIT_CODE=$?
|
||||
set -e # Stop ignoring non-zero exit codes
|
||||
# Check if the exit code was 5, and if so, exit with 0 instead
|
||||
if [ $EXIT_CODE -eq 5 ]; then
|
||||
echo "regression_tests.json is empty."
|
||||
fi
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock"
|
||||
poetry run agbenchmark --mock
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=data"
|
||||
poetry run agbenchmark --mock --category=data
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
|
||||
poetry run agbenchmark --mock --category=coding
|
||||
|
||||
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
|
||||
# poetry run agbenchmark --test=WriteFile
|
||||
cd ../benchmark
|
||||
poetry install
|
||||
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
|
||||
export BUILD_SKILL_TREE=true
|
||||
|
||||
# poetry run agbenchmark --mock
|
||||
|
||||
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
|
||||
# if [ ! -z "$CHANGED" ]; then
|
||||
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
|
||||
# echo "$CHANGED"
|
||||
# exit 1
|
||||
# else
|
||||
# echo "No unstaged changes."
|
||||
# fi
|
||||
echo "Running regression tests (previously beaten challenges)..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--maintain \
|
||||
--parallel 4 \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
182
.github/workflows/classic-forge-ci.yml
vendored
182
.github/workflows/classic-forge-ci.yml
vendored
@@ -6,13 +6,11 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
@@ -21,115 +19,38 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/forge
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
- name: Start MinIO service
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Checkout cassettes
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
env:
|
||||
PR_BASE: ${{ github.event.pull_request.base.ref }}
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
cassette_base_branch="${PR_BASE}"
|
||||
cd tests/vcr_cassettes
|
||||
|
||||
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
|
||||
cassette_base_branch="master"
|
||||
fi
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin $cassette_base_branch
|
||||
|
||||
git checkout $cassette_branch
|
||||
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '$cassette_base_branch'."
|
||||
else
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '$cassette_base_branch'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
@@ -140,12 +61,15 @@ jobs:
|
||||
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
forge
|
||||
forge/forge forge/tests
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
# API keys - tests that need these will skip if not available
|
||||
# Secrets are not available to fork PRs (GitHub security feature)
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
S3_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
@@ -159,85 +83,11 @@ jobs:
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: forge,${{ runner.os }}
|
||||
|
||||
- id: setup_git_auth
|
||||
name: Set up git token authentication
|
||||
# Cassettes may be pushed even when tests fail
|
||||
if: success() || failure()
|
||||
run: |
|
||||
config_key="http.${{ github.server_url }}/.extraheader"
|
||||
if [ "${{ runner.os }}" = 'macOS' ]; then
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
|
||||
else
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
|
||||
fi
|
||||
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
echo "config_key=$config_key" >> $GITHUB_OUTPUT
|
||||
|
||||
- id: push_cassettes
|
||||
name: Push updated cassettes
|
||||
# For pull requests, push updated cassettes even when tests fail
|
||||
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
|
||||
env:
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
|
||||
is_pull_request=true
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
else
|
||||
cassette_branch="${{ github.ref_name }}"
|
||||
fi
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
# Commit & push changes to cassettes if any
|
||||
if ! git diff --quiet; then
|
||||
git add .
|
||||
git commit -m "Auto-update cassettes"
|
||||
git push origin HEAD:$cassette_branch
|
||||
if [ ! $is_pull_request ]; then
|
||||
cd ../..
|
||||
git add tests/vcr_cassettes
|
||||
git commit -m "Update cassette submodule"
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
echo "updated=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "updated=false" >> $GITHUB_OUTPUT
|
||||
echo "No cassette changes to commit"
|
||||
fi
|
||||
|
||||
- name: Post Set up git token auth
|
||||
if: steps.setup_git_auth.outcome == 'success'
|
||||
run: |
|
||||
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
|
||||
- name: Apply "behaviour change" label and comment on PR
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
PR_NUMBER="${{ github.event.pull_request.number }}"
|
||||
TOKEN="${{ secrets.PAT_REVIEW }}"
|
||||
REPO="${{ github.repository }}"
|
||||
|
||||
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
|
||||
echo "Adding label and comment..."
|
||||
echo $TOKEN | gh auth login --with-token
|
||||
gh issue edit $PR_NUMBER --add-label "behaviour change"
|
||||
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
|
||||
fi
|
||||
flags: forge
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: classic/forge/logs/
|
||||
path: classic/logs/
|
||||
|
||||
60
.github/workflows/classic-frontend-ci.yml
vendored
60
.github/workflows/classic-frontend-ci.yml
vendored
@@ -1,60 +0,0 @@
|
||||
name: Classic - Frontend CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- dev
|
||||
- 'ci-test*' # This will match any branch that starts with "ci-test"
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
|
||||
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd classic/frontend
|
||||
flutter build web --base-href /app/
|
||||
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add classic/frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
add-paths: classic/frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
67
.github/workflows/classic-python-checks.yml
vendored
67
.github/workflows/classic-python-checks.yml
vendored
@@ -7,7 +7,9 @@ on:
|
||||
- '.github/workflows/classic-python-checks-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/pyproject.toml'
|
||||
- 'classic/poetry.lock'
|
||||
- '**.py'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
@@ -16,7 +18,9 @@ on:
|
||||
- '.github/workflows/classic-python-checks-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/pyproject.toml'
|
||||
- 'classic/poetry.lock'
|
||||
- '**.py'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
|
||||
@@ -27,44 +31,13 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
get-changed-parts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- id: changes-in
|
||||
name: Determine affected subprojects
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
original_autogpt:
|
||||
- classic/original_autogpt/autogpt/**
|
||||
- classic/original_autogpt/tests/**
|
||||
- classic/original_autogpt/poetry.lock
|
||||
forge:
|
||||
- classic/forge/forge/**
|
||||
- classic/forge/tests/**
|
||||
- classic/forge/poetry.lock
|
||||
benchmark:
|
||||
- classic/benchmark/agbenchmark/**
|
||||
- classic/benchmark/tests/**
|
||||
- classic/benchmark/poetry.lock
|
||||
outputs:
|
||||
changed-parts: ${{ steps.changes-in.outputs.changes }}
|
||||
|
||||
lint:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
min-python-version: "3.12"
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -81,42 +54,31 @@ jobs:
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C classic/${{ matrix.sub-package }} install
|
||||
run: poetry install
|
||||
|
||||
# Lint
|
||||
|
||||
- name: Lint (isort)
|
||||
run: poetry run isort --check .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Black)
|
||||
if: success() || failure()
|
||||
run: poetry run black --check .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Flake8)
|
||||
if: success() || failure()
|
||||
run: poetry run flake8 .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
types:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
min-python-version: "3.12"
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -133,19 +95,16 @@ jobs:
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C classic/${{ matrix.sub-package }} install
|
||||
run: poetry install
|
||||
|
||||
# Typecheck
|
||||
|
||||
- name: Typecheck
|
||||
if: success() || failure()
|
||||
run: poetry run pyright
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -3,6 +3,7 @@
|
||||
classic/original_autogpt/keys.py
|
||||
classic/original_autogpt/*.json
|
||||
auto_gpt_workspace/*
|
||||
.autogpt/
|
||||
*.mpeg
|
||||
.env
|
||||
# Root .env files
|
||||
@@ -159,6 +160,10 @@ CURRENT_BULLETIN.md
|
||||
|
||||
# AgBenchmark
|
||||
classic/benchmark/agbenchmark/reports/
|
||||
classic/reports/
|
||||
classic/direct_benchmark/reports/
|
||||
classic/.benchmark_workspaces/
|
||||
classic/direct_benchmark/.benchmark_workspaces/
|
||||
|
||||
# Nodejs
|
||||
package-lock.json
|
||||
@@ -177,6 +182,10 @@ autogpt_platform/backend/settings.py
|
||||
|
||||
*.ign.*
|
||||
.test-contents
|
||||
**/.claude/settings.local.json
|
||||
.claude/settings.local.json
|
||||
CLAUDE.local.md
|
||||
/autogpt_platform/backend/logs
|
||||
|
||||
# Test database
|
||||
test.db
|
||||
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +0,0 @@
|
||||
[submodule "classic/forge/tests/vcr_cassettes"]
|
||||
path = classic/forge/tests/vcr_cassettes
|
||||
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
|
||||
@@ -43,29 +43,10 @@ repos:
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - AutoGPT
|
||||
alias: poetry-install-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt install
|
||||
# include forge source (since it's a path dependency)
|
||||
files: ^classic/(original_autogpt|forge)/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Forge
|
||||
alias: poetry-install-classic-forge
|
||||
entry: poetry -C classic/forge install
|
||||
files: ^classic/forge/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Benchmark
|
||||
alias: poetry-install-classic-benchmark
|
||||
entry: poetry -C classic/benchmark install
|
||||
files: ^classic/benchmark/poetry\.lock$
|
||||
name: Check & Install dependencies - Classic
|
||||
alias: poetry-install-classic
|
||||
entry: poetry -C classic install
|
||||
files: ^classic/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
@@ -116,26 +97,10 @@ repos:
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - AutoGPT
|
||||
alias: isort-classic-autogpt
|
||||
entry: poetry -P classic/original_autogpt run isort -p autogpt
|
||||
files: ^classic/original_autogpt/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Forge
|
||||
alias: isort-classic-forge
|
||||
entry: poetry -P classic/forge run isort -p forge
|
||||
files: ^classic/forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Benchmark
|
||||
alias: isort-classic-benchmark
|
||||
entry: poetry -P classic/benchmark run isort -p agbenchmark
|
||||
files: ^classic/benchmark/
|
||||
name: Lint (isort) - Classic
|
||||
alias: isort-classic
|
||||
entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' --
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
@@ -149,26 +114,13 @@ repos:
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
# To have flake8 load the config of the individual subprojects, we have to call
|
||||
# them separately.
|
||||
# Use consolidated flake8 config at classic/.flake8
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - AutoGPT
|
||||
alias: flake8-classic-autogpt
|
||||
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=classic/original_autogpt/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Forge
|
||||
alias: flake8-classic-forge
|
||||
files: ^classic/forge/(forge|tests)/
|
||||
args: [--config=classic/forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Benchmark
|
||||
alias: flake8-classic-benchmark
|
||||
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=classic/benchmark/.flake8]
|
||||
name: Lint (Flake8) - Classic
|
||||
alias: flake8-classic
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/
|
||||
args: [--config=classic/.flake8]
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
@@ -204,29 +156,10 @@ repos:
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - AutoGPT
|
||||
alias: pyright-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt run pyright
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Forge
|
||||
alias: pyright-classic-forge
|
||||
entry: poetry -C classic/forge run pyright
|
||||
files: ^classic/forge/(forge/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Benchmark
|
||||
alias: pyright-classic-benchmark
|
||||
entry: poetry -C classic/benchmark run pyright
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
name: Typecheck - Classic
|
||||
alias: pyright-classic
|
||||
entry: poetry -C classic run pyright
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
@@ -1834,11 +1834,6 @@ async def _execute_long_running_tool(
|
||||
tool_call_id=tool_call_id,
|
||||
result=error_response.model_dump_json(),
|
||||
)
|
||||
# Generate LLM continuation so user sees explanation even for errors
|
||||
try:
|
||||
await _generate_llm_continuation(session_id=session_id, user_id=user_id)
|
||||
except Exception as llm_err:
|
||||
logger.warning(f"Failed to generate LLM continuation for error: {llm_err}")
|
||||
finally:
|
||||
await _mark_operation_completed(tool_call_id)
|
||||
|
||||
|
||||
@@ -2,52 +2,30 @@
|
||||
|
||||
from .core import (
|
||||
AgentGeneratorNotConfiguredError,
|
||||
AgentSummary,
|
||||
DecompositionResult,
|
||||
DecompositionStep,
|
||||
LibraryAgentSummary,
|
||||
MarketplaceAgentSummary,
|
||||
decompose_goal,
|
||||
enrich_library_agents_from_steps,
|
||||
extract_search_terms_from_steps,
|
||||
extract_uuids_from_text,
|
||||
generate_agent,
|
||||
generate_agent_patch,
|
||||
get_agent_as_json,
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_library_agent_by_graph_id,
|
||||
get_library_agent_by_id,
|
||||
get_library_agents_for_generation,
|
||||
json_to_graph,
|
||||
save_agent_to_library,
|
||||
search_marketplace_agents_for_generation,
|
||||
)
|
||||
from .errors import get_user_message_for_error
|
||||
from .service import health_check as check_external_service_health
|
||||
from .service import is_external_service_configured
|
||||
|
||||
__all__ = [
|
||||
"AgentGeneratorNotConfiguredError",
|
||||
"AgentSummary",
|
||||
"DecompositionResult",
|
||||
"DecompositionStep",
|
||||
"LibraryAgentSummary",
|
||||
"MarketplaceAgentSummary",
|
||||
"check_external_service_health",
|
||||
# Core functions
|
||||
"decompose_goal",
|
||||
"enrich_library_agents_from_steps",
|
||||
"extract_search_terms_from_steps",
|
||||
"extract_uuids_from_text",
|
||||
"generate_agent",
|
||||
"generate_agent_patch",
|
||||
"get_agent_as_json",
|
||||
"get_all_relevant_agents_for_generation",
|
||||
"get_library_agent_by_graph_id",
|
||||
"get_library_agent_by_id",
|
||||
"get_library_agents_for_generation",
|
||||
"get_user_message_for_error",
|
||||
"is_external_service_configured",
|
||||
"json_to_graph",
|
||||
"save_agent_to_library",
|
||||
"search_marketplace_agents_for_generation",
|
||||
"get_agent_as_json",
|
||||
"json_to_graph",
|
||||
# Exceptions
|
||||
"AgentGeneratorNotConfiguredError",
|
||||
# Service
|
||||
"is_external_service_configured",
|
||||
"check_external_service_health",
|
||||
# Error handling
|
||||
"get_user_message_for_error",
|
||||
]
|
||||
|
||||
@@ -1,21 +1,11 @@
|
||||
"""Core agent generation functions."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
import uuid
|
||||
from typing import Any, TypedDict
|
||||
from typing import Any
|
||||
|
||||
from backend.api.features.library import db as library_db
|
||||
from backend.api.features.store import db as store_db
|
||||
from backend.data.graph import (
|
||||
Graph,
|
||||
Link,
|
||||
Node,
|
||||
create_graph,
|
||||
get_graph,
|
||||
get_graph_all_versions,
|
||||
)
|
||||
from backend.util.exceptions import DatabaseError, NotFoundError
|
||||
from backend.data.graph import Graph, Link, Node, create_graph
|
||||
|
||||
from .service import (
|
||||
decompose_goal_external,
|
||||
@@ -27,60 +17,6 @@ from .service import (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LibraryAgentSummary(TypedDict):
|
||||
"""Summary of a library agent for sub-agent composition."""
|
||||
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
name: str
|
||||
description: str
|
||||
input_schema: dict[str, Any]
|
||||
output_schema: dict[str, Any]
|
||||
|
||||
|
||||
class MarketplaceAgentSummary(TypedDict):
|
||||
"""Summary of a marketplace agent for sub-agent composition."""
|
||||
|
||||
name: str
|
||||
description: str
|
||||
sub_heading: str
|
||||
creator: str
|
||||
is_marketplace_agent: bool
|
||||
|
||||
|
||||
class DecompositionStep(TypedDict, total=False):
|
||||
"""A single step in decomposed instructions."""
|
||||
|
||||
description: str
|
||||
action: str
|
||||
block_name: str
|
||||
tool: str
|
||||
name: str
|
||||
|
||||
|
||||
class DecompositionResult(TypedDict, total=False):
|
||||
"""Result from decompose_goal - can be instructions, questions, or error."""
|
||||
|
||||
type: str # "instructions", "clarifying_questions", "error", etc.
|
||||
steps: list[DecompositionStep]
|
||||
questions: list[dict[str, Any]]
|
||||
error: str
|
||||
error_type: str
|
||||
|
||||
|
||||
# Type alias for agent summaries (can be either library or marketplace)
|
||||
AgentSummary = LibraryAgentSummary | MarketplaceAgentSummary | dict[str, Any]
|
||||
|
||||
|
||||
def _to_dict_list(
|
||||
agents: list[AgentSummary] | list[dict[str, Any]] | None,
|
||||
) -> list[dict[str, Any]] | None:
|
||||
"""Convert typed agent summaries to plain dicts for external service calls."""
|
||||
if agents is None:
|
||||
return None
|
||||
return [dict(a) for a in agents]
|
||||
|
||||
|
||||
class AgentGeneratorNotConfiguredError(Exception):
|
||||
"""Raised when the external Agent Generator service is not configured."""
|
||||
|
||||
@@ -100,394 +36,15 @@ def _check_service_configured() -> None:
|
||||
)
|
||||
|
||||
|
||||
_UUID_PATTERN = re.compile(
|
||||
r"[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def extract_uuids_from_text(text: str) -> list[str]:
|
||||
"""Extract all UUID v4 strings from text.
|
||||
|
||||
Args:
|
||||
text: Text that may contain UUIDs (e.g., user's goal description)
|
||||
|
||||
Returns:
|
||||
List of unique UUIDs found in the text (lowercase)
|
||||
"""
|
||||
matches = _UUID_PATTERN.findall(text)
|
||||
return list({m.lower() for m in matches})
|
||||
|
||||
|
||||
async def get_library_agent_by_id(
|
||||
user_id: str, agent_id: str
|
||||
) -> LibraryAgentSummary | None:
|
||||
"""Fetch a specific library agent by its ID (library agent ID or graph_id).
|
||||
|
||||
This function tries multiple lookup strategies:
|
||||
1. First tries to find by graph_id (AgentGraph primary key)
|
||||
2. If not found, tries to find by library agent ID (LibraryAgent primary key)
|
||||
|
||||
This handles both cases:
|
||||
- User provides graph_id (e.g., from AgentExecutorBlock)
|
||||
- User provides library agent ID (e.g., from library URL)
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
agent_id: The ID to look up (can be graph_id or library agent ID)
|
||||
|
||||
Returns:
|
||||
LibraryAgentSummary if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by graph_id: {agent.name}")
|
||||
return LibraryAgentSummary(
|
||||
graph_id=agent.graph_id,
|
||||
graph_version=agent.graph_version,
|
||||
name=agent.name,
|
||||
description=agent.description,
|
||||
input_schema=agent.input_schema,
|
||||
output_schema=agent.output_schema,
|
||||
)
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.debug(f"Could not fetch library agent by graph_id {agent_id}: {e}")
|
||||
|
||||
try:
|
||||
agent = await library_db.get_library_agent(agent_id, user_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by library_id: {agent.name}")
|
||||
return LibraryAgentSummary(
|
||||
graph_id=agent.graph_id,
|
||||
graph_version=agent.graph_version,
|
||||
name=agent.name,
|
||||
description=agent.description,
|
||||
input_schema=agent.input_schema,
|
||||
output_schema=agent.output_schema,
|
||||
)
|
||||
except NotFoundError:
|
||||
logger.debug(f"Library agent not found by library_id: {agent_id}")
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by library_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
# Alias for backward compatibility
|
||||
get_library_agent_by_graph_id = get_library_agent_by_id
|
||||
|
||||
|
||||
async def get_library_agents_for_generation(
|
||||
user_id: str,
|
||||
search_query: str | None = None,
|
||||
exclude_graph_id: str | None = None,
|
||||
max_results: int = 15,
|
||||
) -> list[LibraryAgentSummary]:
|
||||
"""Fetch user's library agents formatted for Agent Generator.
|
||||
|
||||
Uses search-based fetching to return relevant agents instead of all agents.
|
||||
This is more scalable for users with large libraries.
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
search_query: Optional search term to find relevant agents (user's goal/description)
|
||||
exclude_graph_id: Optional graph ID to exclude (prevents circular references)
|
||||
max_results: Maximum number of agents to return (default 15)
|
||||
|
||||
Returns:
|
||||
List of LibraryAgentSummary with schemas for sub-agent composition
|
||||
|
||||
Note:
|
||||
Future enhancement: Add quality filtering based on execution success rate
|
||||
or correctness_score from AgentGraphExecution stats. The current
|
||||
LibraryAgentStatus.ERROR is too aggressive (1 failed run = ERROR).
|
||||
Better approach: filter by success rate (e.g., >50% successful runs)
|
||||
or require at least 1 successful execution.
|
||||
"""
|
||||
try:
|
||||
response = await library_db.list_library_agents(
|
||||
user_id=user_id,
|
||||
search_term=search_query,
|
||||
page=1,
|
||||
page_size=max_results,
|
||||
)
|
||||
|
||||
results: list[LibraryAgentSummary] = []
|
||||
for agent in response.agents:
|
||||
if exclude_graph_id is not None and agent.graph_id == exclude_graph_id:
|
||||
continue
|
||||
|
||||
results.append(
|
||||
LibraryAgentSummary(
|
||||
graph_id=agent.graph_id,
|
||||
graph_version=agent.graph_version,
|
||||
name=agent.name,
|
||||
description=agent.description,
|
||||
input_schema=agent.input_schema,
|
||||
output_schema=agent.output_schema,
|
||||
)
|
||||
)
|
||||
return results
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch library agents: {e}")
|
||||
return []
|
||||
|
||||
|
||||
async def search_marketplace_agents_for_generation(
|
||||
search_query: str,
|
||||
max_results: int = 10,
|
||||
) -> list[MarketplaceAgentSummary]:
|
||||
"""Search marketplace agents formatted for Agent Generator.
|
||||
|
||||
Note: This returns basic agent info. Full input/output schemas would require
|
||||
additional graph fetches and is a potential future enhancement.
|
||||
|
||||
Args:
|
||||
search_query: Search term to find relevant public agents
|
||||
max_results: Maximum number of agents to return (default 10)
|
||||
|
||||
Returns:
|
||||
List of MarketplaceAgentSummary (without detailed schemas for now)
|
||||
"""
|
||||
try:
|
||||
response = await store_db.get_store_agents(
|
||||
search_query=search_query,
|
||||
page=1,
|
||||
page_size=max_results,
|
||||
)
|
||||
|
||||
results: list[MarketplaceAgentSummary] = []
|
||||
for agent in response.agents:
|
||||
results.append(
|
||||
MarketplaceAgentSummary(
|
||||
name=agent.agent_name,
|
||||
description=agent.description,
|
||||
sub_heading=agent.sub_heading,
|
||||
creator=agent.creator,
|
||||
is_marketplace_agent=True,
|
||||
)
|
||||
)
|
||||
return results
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to search marketplace agents: {e}")
|
||||
return []
|
||||
|
||||
|
||||
async def get_all_relevant_agents_for_generation(
|
||||
user_id: str,
|
||||
search_query: str | None = None,
|
||||
exclude_graph_id: str | None = None,
|
||||
include_library: bool = True,
|
||||
include_marketplace: bool = True,
|
||||
max_library_results: int = 15,
|
||||
max_marketplace_results: int = 10,
|
||||
) -> list[AgentSummary]:
|
||||
"""Fetch relevant agents from library and/or marketplace.
|
||||
|
||||
Searches both user's library and marketplace by default.
|
||||
Explicitly mentioned UUIDs in the search query are always looked up.
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
search_query: Search term to find relevant agents (user's goal/description)
|
||||
exclude_graph_id: Optional graph ID to exclude (prevents circular references)
|
||||
include_library: Whether to search user's library (default True)
|
||||
include_marketplace: Whether to also search marketplace (default True)
|
||||
max_library_results: Max library agents to return (default 15)
|
||||
max_marketplace_results: Max marketplace agents to return (default 10)
|
||||
|
||||
Returns:
|
||||
List of AgentSummary, library agents first (with full schemas),
|
||||
then marketplace agents (basic info only)
|
||||
"""
|
||||
agents: list[AgentSummary] = []
|
||||
seen_graph_ids: set[str] = set()
|
||||
|
||||
if search_query:
|
||||
mentioned_uuids = extract_uuids_from_text(search_query)
|
||||
for graph_id in mentioned_uuids:
|
||||
if graph_id == exclude_graph_id:
|
||||
continue
|
||||
agent = await get_library_agent_by_graph_id(user_id, graph_id)
|
||||
if agent and agent["graph_id"] not in seen_graph_ids:
|
||||
agents.append(agent)
|
||||
seen_graph_ids.add(agent["graph_id"])
|
||||
logger.debug(f"Found explicitly mentioned agent: {agent['name']}")
|
||||
|
||||
if include_library:
|
||||
library_agents = await get_library_agents_for_generation(
|
||||
user_id=user_id,
|
||||
search_query=search_query,
|
||||
exclude_graph_id=exclude_graph_id,
|
||||
max_results=max_library_results,
|
||||
)
|
||||
for agent in library_agents:
|
||||
if agent["graph_id"] not in seen_graph_ids:
|
||||
agents.append(agent)
|
||||
seen_graph_ids.add(agent["graph_id"])
|
||||
|
||||
if include_marketplace and search_query:
|
||||
marketplace_agents = await search_marketplace_agents_for_generation(
|
||||
search_query=search_query,
|
||||
max_results=max_marketplace_results,
|
||||
)
|
||||
library_names = {a["name"].lower() for a in agents if a.get("name")}
|
||||
for agent in marketplace_agents:
|
||||
agent_name = agent.get("name")
|
||||
if agent_name and agent_name.lower() not in library_names:
|
||||
agents.append(agent)
|
||||
|
||||
return agents
|
||||
|
||||
|
||||
def extract_search_terms_from_steps(
|
||||
decomposition_result: DecompositionResult | dict[str, Any],
|
||||
) -> list[str]:
|
||||
"""Extract search terms from decomposed instruction steps.
|
||||
|
||||
Analyzes the decomposition result to extract relevant keywords
|
||||
for additional library agent searches.
|
||||
|
||||
Args:
|
||||
decomposition_result: Result from decompose_goal containing steps
|
||||
|
||||
Returns:
|
||||
List of unique search terms extracted from steps
|
||||
"""
|
||||
search_terms: list[str] = []
|
||||
|
||||
if decomposition_result.get("type") != "instructions":
|
||||
return search_terms
|
||||
|
||||
steps = decomposition_result.get("steps", [])
|
||||
if not steps:
|
||||
return search_terms
|
||||
|
||||
step_keys: list[str] = ["description", "action", "block_name", "tool", "name"]
|
||||
|
||||
for step in steps:
|
||||
for key in step_keys:
|
||||
value = step.get(key) # type: ignore[union-attr]
|
||||
if isinstance(value, str) and len(value) > 3:
|
||||
search_terms.append(value)
|
||||
|
||||
seen: set[str] = set()
|
||||
unique_terms: list[str] = []
|
||||
for term in search_terms:
|
||||
term_lower = term.lower()
|
||||
if term_lower not in seen:
|
||||
seen.add(term_lower)
|
||||
unique_terms.append(term)
|
||||
|
||||
return unique_terms
|
||||
|
||||
|
||||
async def enrich_library_agents_from_steps(
|
||||
user_id: str,
|
||||
decomposition_result: DecompositionResult | dict[str, Any],
|
||||
existing_agents: list[AgentSummary] | list[dict[str, Any]],
|
||||
exclude_graph_id: str | None = None,
|
||||
include_marketplace: bool = True,
|
||||
max_additional_results: int = 10,
|
||||
) -> list[AgentSummary] | list[dict[str, Any]]:
|
||||
"""Enrich library agents list with additional searches based on decomposed steps.
|
||||
|
||||
This implements two-phase search: after decomposition, we search for additional
|
||||
relevant agents based on the specific steps identified.
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
decomposition_result: Result from decompose_goal containing steps
|
||||
existing_agents: Already fetched library agents from initial search
|
||||
exclude_graph_id: Optional graph ID to exclude
|
||||
include_marketplace: Whether to also search marketplace
|
||||
max_additional_results: Max additional agents per search term (default 10)
|
||||
|
||||
Returns:
|
||||
Combined list of library agents (existing + newly discovered)
|
||||
"""
|
||||
search_terms = extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
if not search_terms:
|
||||
return existing_agents
|
||||
|
||||
existing_ids: set[str] = set()
|
||||
existing_names: set[str] = set()
|
||||
|
||||
for agent in existing_agents:
|
||||
agent_name = agent.get("name", "")
|
||||
if agent_name:
|
||||
existing_names.add(agent_name.lower())
|
||||
graph_id = agent.get("graph_id") # type: ignore[call-overload]
|
||||
if graph_id:
|
||||
existing_ids.add(graph_id)
|
||||
|
||||
all_agents: list[AgentSummary] | list[dict[str, Any]] = list(existing_agents)
|
||||
|
||||
for term in search_terms[:3]:
|
||||
try:
|
||||
additional_agents = await get_all_relevant_agents_for_generation(
|
||||
user_id=user_id,
|
||||
search_query=term,
|
||||
exclude_graph_id=exclude_graph_id,
|
||||
include_marketplace=include_marketplace,
|
||||
max_library_results=max_additional_results,
|
||||
max_marketplace_results=5,
|
||||
)
|
||||
|
||||
for agent in additional_agents:
|
||||
agent_name = agent.get("name", "")
|
||||
if not agent_name:
|
||||
continue
|
||||
agent_name_lower = agent_name.lower()
|
||||
|
||||
if agent_name_lower in existing_names:
|
||||
continue
|
||||
|
||||
graph_id = agent.get("graph_id") # type: ignore[call-overload]
|
||||
if graph_id and graph_id in existing_ids:
|
||||
continue
|
||||
|
||||
all_agents.append(agent)
|
||||
existing_names.add(agent_name_lower)
|
||||
if graph_id:
|
||||
existing_ids.add(graph_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to search for additional agents with term '{term}': {e}"
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"Enriched library agents: {len(existing_agents)} initial + "
|
||||
f"{len(all_agents) - len(existing_agents)} additional = {len(all_agents)} total"
|
||||
)
|
||||
|
||||
return all_agents
|
||||
|
||||
|
||||
async def decompose_goal(
|
||||
description: str,
|
||||
context: str = "",
|
||||
library_agents: list[AgentSummary] | None = None,
|
||||
) -> DecompositionResult | None:
|
||||
async def decompose_goal(description: str, context: str = "") -> dict[str, Any] | None:
|
||||
"""Break down a goal into steps or return clarifying questions.
|
||||
|
||||
Args:
|
||||
description: Natural language goal description
|
||||
context: Additional context (e.g., answers to previous questions)
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
DecompositionResult with either:
|
||||
Dict with either:
|
||||
- {"type": "clarifying_questions", "questions": [...]}
|
||||
- {"type": "instructions", "steps": [...]}
|
||||
Or None on error
|
||||
@@ -497,23 +54,14 @@ async def decompose_goal(
|
||||
"""
|
||||
_check_service_configured()
|
||||
logger.info("Calling external Agent Generator service for decompose_goal")
|
||||
# Convert typed dicts to plain dicts for external service
|
||||
result = await decompose_goal_external(
|
||||
description, context, _to_dict_list(library_agents)
|
||||
)
|
||||
# Cast the result to DecompositionResult (external service returns dict)
|
||||
return result # type: ignore[return-value]
|
||||
return await decompose_goal_external(description, context)
|
||||
|
||||
|
||||
async def generate_agent(
|
||||
instructions: DecompositionResult | dict[str, Any],
|
||||
library_agents: list[AgentSummary] | list[dict[str, Any]] | None = None,
|
||||
) -> dict[str, Any] | None:
|
||||
async def generate_agent(instructions: dict[str, Any]) -> dict[str, Any] | None:
|
||||
"""Generate agent JSON from instructions.
|
||||
|
||||
Args:
|
||||
instructions: Structured instructions from decompose_goal
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
Agent JSON dict, error dict {"type": "error", ...}, or None on error
|
||||
@@ -523,10 +71,7 @@ async def generate_agent(
|
||||
"""
|
||||
_check_service_configured()
|
||||
logger.info("Calling external Agent Generator service for generate_agent")
|
||||
# Convert typed dicts to plain dicts for external service
|
||||
result = await generate_agent_external(
|
||||
dict(instructions), _to_dict_list(library_agents)
|
||||
)
|
||||
result = await generate_agent_external(instructions)
|
||||
if result:
|
||||
# Check if it's an error response - pass through as-is
|
||||
if isinstance(result, dict) and result.get("type") == "error":
|
||||
@@ -617,6 +162,8 @@ async def save_agent_to_library(
|
||||
Returns:
|
||||
Tuple of (created Graph, LibraryAgent)
|
||||
"""
|
||||
from backend.data.graph import get_graph_all_versions
|
||||
|
||||
graph = json_to_graph(agent_json)
|
||||
|
||||
if is_update:
|
||||
@@ -653,31 +200,25 @@ async def save_agent_to_library(
|
||||
|
||||
|
||||
async def get_agent_as_json(
|
||||
agent_id: str, user_id: str | None
|
||||
graph_id: str, user_id: str | None
|
||||
) -> dict[str, Any] | None:
|
||||
"""Fetch an agent and convert to JSON format for editing.
|
||||
|
||||
Args:
|
||||
agent_id: Graph ID or library agent ID
|
||||
graph_id: Graph ID or library agent ID
|
||||
user_id: User ID
|
||||
|
||||
Returns:
|
||||
Agent as JSON dict or None if not found
|
||||
"""
|
||||
graph = await get_graph(agent_id, version=None, user_id=user_id)
|
||||
|
||||
if not graph and user_id:
|
||||
try:
|
||||
library_agent = await library_db.get_library_agent(agent_id, user_id)
|
||||
graph = await get_graph(
|
||||
library_agent.graph_id, version=None, user_id=user_id
|
||||
)
|
||||
except NotFoundError:
|
||||
pass
|
||||
from backend.data.graph import get_graph
|
||||
|
||||
# Try to get the graph (version=None gets the active version)
|
||||
graph = await get_graph(graph_id, version=None, user_id=user_id)
|
||||
if not graph:
|
||||
return None
|
||||
|
||||
# Convert to JSON format
|
||||
nodes = []
|
||||
for node in graph.nodes:
|
||||
nodes.append(
|
||||
@@ -715,9 +256,7 @@ async def get_agent_as_json(
|
||||
|
||||
|
||||
async def generate_agent_patch(
|
||||
update_request: str,
|
||||
current_agent: dict[str, Any],
|
||||
library_agents: list[AgentSummary] | None = None,
|
||||
update_request: str, current_agent: dict[str, Any]
|
||||
) -> dict[str, Any] | None:
|
||||
"""Update an existing agent using natural language.
|
||||
|
||||
@@ -729,7 +268,6 @@ async def generate_agent_patch(
|
||||
Args:
|
||||
update_request: Natural language description of changes
|
||||
current_agent: Current agent JSON
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
Updated agent JSON, clarifying questions dict {"type": "clarifying_questions", ...},
|
||||
@@ -740,7 +278,4 @@ async def generate_agent_patch(
|
||||
"""
|
||||
_check_service_configured()
|
||||
logger.info("Calling external Agent Generator service for generate_agent_patch")
|
||||
# Convert typed dicts to plain dicts for external service
|
||||
return await generate_agent_patch_external(
|
||||
update_request, current_agent, _to_dict_list(library_agents)
|
||||
)
|
||||
return await generate_agent_patch_external(update_request, current_agent)
|
||||
|
||||
@@ -1,49 +1,11 @@
|
||||
"""Error handling utilities for agent generator."""
|
||||
|
||||
import re
|
||||
|
||||
|
||||
def _sanitize_error_details(details: str) -> str:
|
||||
"""Sanitize error details to remove sensitive information.
|
||||
|
||||
Strips common patterns that could expose internal system info:
|
||||
- File paths (Unix and Windows)
|
||||
- Database connection strings
|
||||
- URLs with credentials
|
||||
- Stack trace internals
|
||||
|
||||
Args:
|
||||
details: Raw error details string
|
||||
|
||||
Returns:
|
||||
Sanitized error details safe for user display
|
||||
"""
|
||||
# Remove file paths (Unix-style)
|
||||
sanitized = re.sub(
|
||||
r"/[a-zA-Z0-9_./\-]+\.(py|js|ts|json|yaml|yml)", "[path]", details
|
||||
)
|
||||
# Remove file paths (Windows-style)
|
||||
sanitized = re.sub(r"[A-Z]:\\[a-zA-Z0-9_\\.\\-]+", "[path]", sanitized)
|
||||
# Remove database URLs
|
||||
sanitized = re.sub(
|
||||
r"(postgres|mysql|mongodb|redis)://[^\s]+", "[database_url]", sanitized
|
||||
)
|
||||
# Remove URLs with credentials
|
||||
sanitized = re.sub(r"https?://[^:]+:[^@]+@[^\s]+", "[url]", sanitized)
|
||||
# Remove line numbers from stack traces
|
||||
sanitized = re.sub(r", line \d+", "", sanitized)
|
||||
# Remove "File" references from stack traces
|
||||
sanitized = re.sub(r'File "[^"]+",?', "", sanitized)
|
||||
|
||||
return sanitized.strip()
|
||||
|
||||
|
||||
def get_user_message_for_error(
|
||||
error_type: str,
|
||||
operation: str = "process the request",
|
||||
llm_parse_message: str | None = None,
|
||||
validation_message: str | None = None,
|
||||
error_details: str | None = None,
|
||||
) -> str:
|
||||
"""Get a user-friendly error message based on error type.
|
||||
|
||||
@@ -57,48 +19,25 @@ def get_user_message_for_error(
|
||||
message (e.g., "analyze the goal", "generate the agent")
|
||||
llm_parse_message: Custom message for llm_parse_error type
|
||||
validation_message: Custom message for validation_error type
|
||||
error_details: Optional additional details about the error
|
||||
|
||||
Returns:
|
||||
User-friendly error message suitable for display to the user
|
||||
"""
|
||||
base_message = ""
|
||||
|
||||
if error_type == "llm_parse_error":
|
||||
base_message = (
|
||||
return (
|
||||
llm_parse_message
|
||||
or "The AI had trouble processing this request. Please try again."
|
||||
)
|
||||
elif error_type == "validation_error":
|
||||
base_message = (
|
||||
return (
|
||||
validation_message
|
||||
or "The generated agent failed validation. "
|
||||
"This usually happens when the agent structure doesn't match "
|
||||
"what the platform expects. Please try simplifying your goal "
|
||||
"or breaking it into smaller parts."
|
||||
or "The request failed validation. Please try rephrasing."
|
||||
)
|
||||
elif error_type == "patch_error":
|
||||
base_message = (
|
||||
"Failed to apply the changes. The modification couldn't be "
|
||||
"validated. Please try a different approach or simplify the change."
|
||||
)
|
||||
return "Failed to apply the changes. Please try a different approach."
|
||||
elif error_type in ("timeout", "llm_timeout"):
|
||||
base_message = (
|
||||
"The request took too long to process. This can happen with "
|
||||
"complex agents. Please try again or simplify your goal."
|
||||
)
|
||||
return "The request took too long. Please try again."
|
||||
elif error_type in ("rate_limit", "llm_rate_limit"):
|
||||
base_message = "The service is currently busy. Please try again in a moment."
|
||||
return "The service is currently busy. Please try again in a moment."
|
||||
else:
|
||||
base_message = f"Failed to {operation}. Please try again."
|
||||
|
||||
# Add error details if provided (sanitized and truncated)
|
||||
if error_details:
|
||||
# Sanitize to remove sensitive information
|
||||
details = _sanitize_error_details(error_details)
|
||||
# Truncate long error details
|
||||
if len(details) > 200:
|
||||
details = details[:200] + "..."
|
||||
base_message += f"\n\nTechnical details: {details}"
|
||||
|
||||
return base_message
|
||||
return f"Failed to {operation}. Please try again."
|
||||
|
||||
@@ -117,16 +117,13 @@ def _get_client() -> httpx.AsyncClient:
|
||||
|
||||
|
||||
async def decompose_goal_external(
|
||||
description: str,
|
||||
context: str = "",
|
||||
library_agents: list[dict[str, Any]] | None = None,
|
||||
description: str, context: str = ""
|
||||
) -> dict[str, Any] | None:
|
||||
"""Call the external service to decompose a goal.
|
||||
|
||||
Args:
|
||||
description: Natural language goal description
|
||||
context: Additional context (e.g., answers to previous questions)
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
Dict with either:
|
||||
@@ -144,8 +141,6 @@ async def decompose_goal_external(
|
||||
if context:
|
||||
# The external service uses user_instruction for additional context
|
||||
payload["user_instruction"] = context
|
||||
if library_agents:
|
||||
payload["library_agents"] = library_agents
|
||||
|
||||
try:
|
||||
response = await client.post("/api/decompose-description", json=payload)
|
||||
@@ -212,25 +207,21 @@ async def decompose_goal_external(
|
||||
|
||||
async def generate_agent_external(
|
||||
instructions: dict[str, Any],
|
||||
library_agents: list[dict[str, Any]] | None = None,
|
||||
) -> dict[str, Any] | None:
|
||||
"""Call the external service to generate an agent from instructions.
|
||||
|
||||
Args:
|
||||
instructions: Structured instructions from decompose_goal
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
Agent JSON dict on success, or error dict {"type": "error", ...} on error
|
||||
"""
|
||||
client = _get_client()
|
||||
|
||||
payload: dict[str, Any] = {"instructions": instructions}
|
||||
if library_agents:
|
||||
payload["library_agents"] = library_agents
|
||||
|
||||
try:
|
||||
response = await client.post("/api/generate-agent", json=payload)
|
||||
response = await client.post(
|
||||
"/api/generate-agent", json={"instructions": instructions}
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
@@ -238,7 +229,8 @@ async def generate_agent_external(
|
||||
error_msg = data.get("error", "Unknown error from Agent Generator")
|
||||
error_type = data.get("error_type", "unknown")
|
||||
logger.error(
|
||||
f"Agent Generator generation failed: {error_msg} (type: {error_type})"
|
||||
f"Agent Generator generation failed: {error_msg} "
|
||||
f"(type: {error_type})"
|
||||
)
|
||||
return _create_error_response(error_msg, error_type)
|
||||
|
||||
@@ -259,31 +251,27 @@ async def generate_agent_external(
|
||||
|
||||
|
||||
async def generate_agent_patch_external(
|
||||
update_request: str,
|
||||
current_agent: dict[str, Any],
|
||||
library_agents: list[dict[str, Any]] | None = None,
|
||||
update_request: str, current_agent: dict[str, Any]
|
||||
) -> dict[str, Any] | None:
|
||||
"""Call the external service to generate a patch for an existing agent.
|
||||
|
||||
Args:
|
||||
update_request: Natural language description of changes
|
||||
current_agent: Current agent JSON
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
|
||||
Returns:
|
||||
Updated agent JSON, clarifying questions dict, or error dict on error
|
||||
"""
|
||||
client = _get_client()
|
||||
|
||||
payload: dict[str, Any] = {
|
||||
"update_request": update_request,
|
||||
"current_agent_json": current_agent,
|
||||
}
|
||||
if library_agents:
|
||||
payload["library_agents"] = library_agents
|
||||
|
||||
try:
|
||||
response = await client.post("/api/update-agent", json=payload)
|
||||
response = await client.post(
|
||||
"/api/update-agent",
|
||||
json={
|
||||
"update_request": update_request,
|
||||
"current_agent_json": current_agent,
|
||||
},
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Shared agent search functionality for find_agent and find_library_agent tools."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import Literal
|
||||
|
||||
from backend.api.features.library import db as library_db
|
||||
@@ -20,86 +19,6 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
SearchSource = Literal["marketplace", "library"]
|
||||
|
||||
# UUID v4 pattern for direct agent ID lookup
|
||||
_UUID_PATTERN = re.compile(
|
||||
r"^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def _is_uuid(text: str) -> bool:
|
||||
"""Check if text is a valid UUID v4."""
|
||||
return bool(_UUID_PATTERN.match(text.strip()))
|
||||
|
||||
|
||||
async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
|
||||
"""Fetch a library agent by ID (library agent ID or graph_id).
|
||||
|
||||
Tries multiple lookup strategies:
|
||||
1. First by graph_id (AgentGraph primary key)
|
||||
2. Then by library agent ID (LibraryAgent primary key)
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
agent_id: The ID to look up (can be graph_id or library agent ID)
|
||||
|
||||
Returns:
|
||||
AgentInfo if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
agent = await library_db.get_library_agent_by_graph_id(user_id, agent_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by graph_id: {agent.name}")
|
||||
return AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by graph_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
try:
|
||||
agent = await library_db.get_library_agent(agent_id, user_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by library_id: {agent.name}")
|
||||
return AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
except NotFoundError:
|
||||
logger.debug(f"Library agent not found by library_id: {agent_id}")
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by library_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
async def search_agents(
|
||||
query: str,
|
||||
@@ -151,38 +70,28 @@ async def search_agents(
|
||||
)
|
||||
)
|
||||
else: # library
|
||||
# If query looks like a UUID, try direct lookup first
|
||||
if _is_uuid(query):
|
||||
logger.info(f"Query looks like UUID, trying direct lookup: {query}")
|
||||
agent = await _get_library_agent_by_id(user_id, query) # type: ignore[arg-type]
|
||||
if agent:
|
||||
agents.append(agent)
|
||||
logger.info(f"Found agent by direct ID lookup: {agent.name}")
|
||||
|
||||
# If no results from UUID lookup, do text search
|
||||
if not agents:
|
||||
logger.info(f"Searching user library for: {query}")
|
||||
results = await library_db.list_library_agents(
|
||||
user_id=user_id, # type: ignore[arg-type]
|
||||
search_term=query,
|
||||
page_size=10,
|
||||
)
|
||||
for agent in results.agents:
|
||||
agents.append(
|
||||
AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
logger.info(f"Searching user library for: {query}")
|
||||
results = await library_db.list_library_agents(
|
||||
user_id=user_id, # type: ignore[arg-type]
|
||||
search_term=query,
|
||||
page_size=10,
|
||||
)
|
||||
for agent in results.agents:
|
||||
agents.append(
|
||||
AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
)
|
||||
logger.info(f"Found {len(agents)} agents in {source}")
|
||||
except NotFoundError:
|
||||
pass
|
||||
|
||||
@@ -8,9 +8,7 @@ from backend.api.features.chat.model import ChatSession
|
||||
from .agent_generator import (
|
||||
AgentGeneratorNotConfiguredError,
|
||||
decompose_goal,
|
||||
enrich_library_agents_from_steps,
|
||||
generate_agent,
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_user_message_for_error,
|
||||
save_agent_to_library,
|
||||
)
|
||||
@@ -105,27 +103,9 @@ class CreateAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Fetch relevant library and marketplace agents for sub-agent composition
|
||||
library_agents = None
|
||||
if user_id:
|
||||
try:
|
||||
library_agents = await get_all_relevant_agents_for_generation(
|
||||
user_id=user_id,
|
||||
search_query=description, # Use goal as search term
|
||||
include_marketplace=True,
|
||||
)
|
||||
logger.debug(
|
||||
f"Found {len(library_agents)} relevant agents for sub-agent composition"
|
||||
)
|
||||
except Exception as e:
|
||||
# Log but don't fail - agent generation can work without sub-agents
|
||||
logger.warning(f"Failed to fetch library agents: {e}")
|
||||
|
||||
# Step 1: Decompose goal into steps
|
||||
try:
|
||||
decomposition_result = await decompose_goal(
|
||||
description, context, library_agents
|
||||
)
|
||||
decomposition_result = await decompose_goal(description, context)
|
||||
except AgentGeneratorNotConfiguredError:
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
@@ -210,26 +190,9 @@ class CreateAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Step 1.5: Enrich library agents with step-based search (two-phase search)
|
||||
# After decomposition, search for additional relevant agents based on the steps
|
||||
if user_id and library_agents is not None:
|
||||
try:
|
||||
library_agents = await enrich_library_agents_from_steps(
|
||||
user_id=user_id,
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=library_agents,
|
||||
include_marketplace=True,
|
||||
)
|
||||
logger.debug(
|
||||
f"After enrichment: {len(library_agents)} total agents for sub-agent composition"
|
||||
)
|
||||
except Exception as e:
|
||||
# Log but don't fail - continue with existing agents
|
||||
logger.warning(f"Failed to enrich library agents from steps: {e}")
|
||||
|
||||
# Step 2: Generate agent JSON (external service handles fixing and validation)
|
||||
try:
|
||||
agent_json = await generate_agent(decomposition_result, library_agents)
|
||||
agent_json = await generate_agent(decomposition_result)
|
||||
except AgentGeneratorNotConfiguredError:
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
@@ -256,12 +219,7 @@ class CreateAgentTool(BaseTool):
|
||||
error_type,
|
||||
operation="generate the agent",
|
||||
llm_parse_message="The AI had trouble generating the agent. Please try again or simplify your goal.",
|
||||
validation_message=(
|
||||
"I wasn't able to create a valid agent for this request. "
|
||||
"The generated workflow had some structural issues. "
|
||||
"Please try simplifying your goal or breaking it into smaller steps."
|
||||
),
|
||||
error_details=error_msg if error_type == "validation_error" else None,
|
||||
validation_message="The generated agent failed validation. Please try rephrasing your goal.",
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=user_message,
|
||||
@@ -312,7 +270,7 @@ class CreateAgentTool(BaseTool):
|
||||
agent_id=created_graph.id,
|
||||
agent_name=created_graph.name,
|
||||
library_agent_id=library_agent.id,
|
||||
library_agent_link=f"/library/agents/{library_agent.id}",
|
||||
library_agent_link=f"/library/{library_agent.id}",
|
||||
agent_page_link=f"/build?flowID={created_graph.id}",
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
@@ -9,7 +9,6 @@ from .agent_generator import (
|
||||
AgentGeneratorNotConfiguredError,
|
||||
generate_agent_patch,
|
||||
get_agent_as_json,
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_user_message_for_error,
|
||||
save_agent_to_library,
|
||||
)
|
||||
@@ -128,22 +127,6 @@ class EditAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
library_agents = None
|
||||
if user_id:
|
||||
try:
|
||||
exclude_id = current_agent.get("id") or agent_id
|
||||
library_agents = await get_all_relevant_agents_for_generation(
|
||||
user_id=user_id,
|
||||
search_query=changes,
|
||||
exclude_graph_id=exclude_id,
|
||||
include_marketplace=True,
|
||||
)
|
||||
logger.debug(
|
||||
f"Found {len(library_agents)} relevant agents for sub-agent composition"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch library agents: {e}")
|
||||
|
||||
# Build the update request with context
|
||||
update_request = changes
|
||||
if context:
|
||||
@@ -151,9 +134,7 @@ class EditAgentTool(BaseTool):
|
||||
|
||||
# Step 2: Generate updated agent (external service handles fixing and validation)
|
||||
try:
|
||||
result = await generate_agent_patch(
|
||||
update_request, current_agent, library_agents
|
||||
)
|
||||
result = await generate_agent_patch(update_request, current_agent)
|
||||
except AgentGeneratorNotConfiguredError:
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
@@ -255,7 +236,7 @@ class EditAgentTool(BaseTool):
|
||||
agent_id=created_graph.id,
|
||||
agent_name=created_graph.name,
|
||||
library_agent_id=library_agent.id,
|
||||
library_agent_link=f"/library/agents/{library_agent.id}",
|
||||
library_agent_link=f"/library/{library_agent.id}",
|
||||
agent_page_link=f"/build?flowID={created_graph.id}",
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
@@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model
|
||||
from backend.api.features.store import db as store_db
|
||||
from backend.data import graph as graph_db
|
||||
from backend.data.graph import GraphModel
|
||||
from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput
|
||||
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
@@ -266,14 +266,13 @@ async def match_user_credentials_to_graph(
|
||||
credential_requirements,
|
||||
_node_fields,
|
||||
) in aggregated_creds.items():
|
||||
# Find first matching credential by provider, type, and scopes
|
||||
# Find first matching credential by provider and type
|
||||
matching_cred = next(
|
||||
(
|
||||
cred
|
||||
for cred in available_creds
|
||||
if cred.provider in credential_requirements.provider
|
||||
and cred.type in credential_requirements.supported_types
|
||||
and _credential_has_required_scopes(cred, credential_requirements)
|
||||
),
|
||||
None,
|
||||
)
|
||||
@@ -297,17 +296,10 @@ async def match_user_credentials_to_graph(
|
||||
f"{credential_field_name} (validation failed: {e})"
|
||||
)
|
||||
else:
|
||||
# Build a helpful error message including scope requirements
|
||||
error_parts = [
|
||||
f"provider in {list(credential_requirements.provider)}",
|
||||
f"type in {list(credential_requirements.supported_types)}",
|
||||
]
|
||||
if credential_requirements.required_scopes:
|
||||
error_parts.append(
|
||||
f"scopes including {list(credential_requirements.required_scopes)}"
|
||||
)
|
||||
missing_creds.append(
|
||||
f"{credential_field_name} (requires {', '.join(error_parts)})"
|
||||
f"{credential_field_name} "
|
||||
f"(requires provider in {list(credential_requirements.provider)}, "
|
||||
f"type in {list(credential_requirements.supported_types)})"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
@@ -317,28 +309,6 @@ async def match_user_credentials_to_graph(
|
||||
return graph_credentials_inputs, missing_creds
|
||||
|
||||
|
||||
def _credential_has_required_scopes(
|
||||
credential: Credentials,
|
||||
requirements: CredentialsFieldInfo,
|
||||
) -> bool:
|
||||
"""
|
||||
Check if a credential has all the scopes required by the block.
|
||||
|
||||
For OAuth2 credentials, verifies that the credential's scopes are a superset
|
||||
of the required scopes. For other credential types, returns True (no scope check).
|
||||
"""
|
||||
# Only OAuth2 credentials have scopes to check
|
||||
if credential.type != "oauth2":
|
||||
return True
|
||||
|
||||
# If no scopes are required, any credential matches
|
||||
if not requirements.required_scopes:
|
||||
return True
|
||||
|
||||
# Check that credential scopes are a superset of required scopes
|
||||
return set(credential.scopes).issuperset(requirements.required_scopes)
|
||||
|
||||
|
||||
async def check_user_has_required_credentials(
|
||||
user_id: str,
|
||||
required_credentials: list[CredentialsMetaInput],
|
||||
|
||||
@@ -77,32 +77,21 @@ async def list_library_agents(
|
||||
}
|
||||
|
||||
# Build search filter if applicable
|
||||
# Split into words and match ANY word in name or description
|
||||
if search_term:
|
||||
words = [w.strip() for w in search_term.split() if len(w.strip()) >= 3]
|
||||
if words:
|
||||
or_conditions: list[prisma.types.LibraryAgentWhereInput] = []
|
||||
for word in words:
|
||||
or_conditions.append(
|
||||
{
|
||||
"AgentGraph": {
|
||||
"is": {"name": {"contains": word, "mode": "insensitive"}}
|
||||
}
|
||||
where_clause["OR"] = [
|
||||
{
|
||||
"AgentGraph": {
|
||||
"is": {"name": {"contains": search_term, "mode": "insensitive"}}
|
||||
}
|
||||
},
|
||||
{
|
||||
"AgentGraph": {
|
||||
"is": {
|
||||
"description": {"contains": search_term, "mode": "insensitive"}
|
||||
}
|
||||
)
|
||||
or_conditions.append(
|
||||
{
|
||||
"AgentGraph": {
|
||||
"is": {
|
||||
"description": {
|
||||
"contains": word,
|
||||
"mode": "insensitive",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
where_clause["OR"] = or_conditions
|
||||
}
|
||||
},
|
||||
]
|
||||
|
||||
# Determine sorting
|
||||
order_by: prisma.types.LibraryAgentOrderByInput | None = None
|
||||
|
||||
@@ -115,6 +115,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
# AI/ML API models
|
||||
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
|
||||
@@ -279,6 +280,9 @@ MODEL_METADATA = {
|
||||
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
|
||||
), # claude-haiku-4-5-20251001
|
||||
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2
|
||||
), # claude-3-7-sonnet-20250219
|
||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
|
||||
), # claude-3-haiku-20240307
|
||||
|
||||
@@ -83,7 +83,7 @@ class StagehandRecommendedLlmModel(str, Enum):
|
||||
GPT41_MINI = "gpt-4.1-mini-2025-04-14"
|
||||
|
||||
# Anthropic
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
|
||||
@property
|
||||
def provider_name(self) -> str:
|
||||
@@ -137,7 +137,7 @@ class StagehandObserveBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET,
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
@@ -230,7 +230,7 @@ class StagehandActBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET,
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
@@ -330,7 +330,7 @@ class StagehandExtractBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_4_5_SONNET,
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
|
||||
@@ -81,6 +81,7 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.CLAUDE_4_5_HAIKU: 4,
|
||||
LlmModel.CLAUDE_4_5_OPUS: 14,
|
||||
LlmModel.CLAUDE_4_5_SONNET: 9,
|
||||
LlmModel.CLAUDE_3_7_SONNET: 5,
|
||||
LlmModel.CLAUDE_3_HAIKU: 1,
|
||||
LlmModel.AIML_API_QWEN2_5_72B: 1,
|
||||
LlmModel.AIML_API_LLAMA3_1_70B: 1,
|
||||
|
||||
@@ -666,16 +666,10 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
||||
if not (self.discriminator and self.discriminator_mapping):
|
||||
return self
|
||||
|
||||
try:
|
||||
provider = self.discriminator_mapping[discriminator_value]
|
||||
except KeyError:
|
||||
raise ValueError(
|
||||
f"Model '{discriminator_value}' is not supported. "
|
||||
"It may have been deprecated. Please update your agent configuration."
|
||||
)
|
||||
|
||||
return CredentialsFieldInfo(
|
||||
credentials_provider=frozenset([provider]),
|
||||
credentials_provider=frozenset(
|
||||
[self.discriminator_mapping[discriminator_value]]
|
||||
),
|
||||
credentials_types=self.supported_types,
|
||||
credentials_scopes=self.required_scopes,
|
||||
discriminator=self.discriminator,
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
-- Migrate Claude 3.7 Sonnet to Claude 4.5 Sonnet
|
||||
-- This updates all AgentNode blocks that use the deprecated Claude 3.7 Sonnet model
|
||||
-- Anthropic is retiring claude-3-7-sonnet-20250219 on February 19, 2026
|
||||
|
||||
-- Update AgentNode constant inputs
|
||||
UPDATE "AgentNode"
|
||||
SET "constantInput" = JSONB_SET(
|
||||
"constantInput"::jsonb,
|
||||
'{model}',
|
||||
'"claude-sonnet-4-5-20250929"'::jsonb
|
||||
)
|
||||
WHERE "constantInput"::jsonb->>'model' = 'claude-3-7-sonnet-20250219';
|
||||
|
||||
-- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput)
|
||||
UPDATE "AgentNodeExecutionInputOutput"
|
||||
SET "data" = JSONB_SET(
|
||||
"data"::jsonb,
|
||||
'{model}',
|
||||
'"claude-sonnet-4-5-20250929"'::jsonb
|
||||
)
|
||||
WHERE "agentPresetId" IS NOT NULL
|
||||
AND "data"::jsonb->>'model' = 'claude-3-7-sonnet-20250219';
|
||||
@@ -57,8 +57,7 @@ class TestDecomposeGoal:
|
||||
|
||||
result = await core.decompose_goal("Build a chatbot")
|
||||
|
||||
# library_agents defaults to None
|
||||
mock_external.assert_called_once_with("Build a chatbot", "", None)
|
||||
mock_external.assert_called_once_with("Build a chatbot", "")
|
||||
assert result == expected_result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -75,8 +74,7 @@ class TestDecomposeGoal:
|
||||
|
||||
await core.decompose_goal("Build a chatbot", "Use Python")
|
||||
|
||||
# library_agents defaults to None
|
||||
mock_external.assert_called_once_with("Build a chatbot", "Use Python", None)
|
||||
mock_external.assert_called_once_with("Build a chatbot", "Use Python")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_none_on_service_failure(self):
|
||||
@@ -111,8 +109,7 @@ class TestGenerateAgent:
|
||||
instructions = {"type": "instructions", "steps": ["Step 1"]}
|
||||
result = await core.generate_agent(instructions)
|
||||
|
||||
# library_agents defaults to None
|
||||
mock_external.assert_called_once_with(instructions, None)
|
||||
mock_external.assert_called_once_with(instructions)
|
||||
# Result should have id, version, is_active added if not present
|
||||
assert result is not None
|
||||
assert result["name"] == "Test Agent"
|
||||
@@ -177,8 +174,7 @@ class TestGenerateAgentPatch:
|
||||
current_agent = {"nodes": [], "links": []}
|
||||
result = await core.generate_agent_patch("Add a node", current_agent)
|
||||
|
||||
# library_agents defaults to None
|
||||
mock_external.assert_called_once_with("Add a node", current_agent, None)
|
||||
mock_external.assert_called_once_with("Add a node", current_agent)
|
||||
assert result == expected_result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||
@@ -1,838 +0,0 @@
|
||||
"""
|
||||
Tests for library agent fetching functionality in agent generator.
|
||||
|
||||
This test suite verifies the search-based library agent fetching,
|
||||
including the combination of library and marketplace agents.
|
||||
"""
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.api.features.chat.tools.agent_generator import core
|
||||
|
||||
|
||||
class TestGetLibraryAgentsForGeneration:
|
||||
"""Test get_library_agents_for_generation function."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_fetches_agents_with_search_term(self):
|
||||
"""Test that search_term is passed to the library db."""
|
||||
# Create a mock agent with proper attribute values
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.graph_id = "agent-123"
|
||||
mock_agent.graph_version = 1
|
||||
mock_agent.name = "Email Agent"
|
||||
mock_agent.description = "Sends emails"
|
||||
mock_agent.input_schema = {"properties": {}}
|
||||
mock_agent.output_schema = {"properties": {}}
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.agents = [mock_agent]
|
||||
|
||||
with patch.object(
|
||||
core.library_db,
|
||||
"list_library_agents",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
) as mock_list:
|
||||
result = await core.get_library_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query="send email",
|
||||
)
|
||||
|
||||
# Verify search_term was passed
|
||||
mock_list.assert_called_once_with(
|
||||
user_id="user-123",
|
||||
search_term="send email",
|
||||
page=1,
|
||||
page_size=15,
|
||||
)
|
||||
|
||||
# Verify result format
|
||||
assert len(result) == 1
|
||||
assert result[0]["graph_id"] == "agent-123"
|
||||
assert result[0]["name"] == "Email Agent"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_excludes_specified_graph_id(self):
|
||||
"""Test that agents with excluded graph_id are filtered out."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.agents = [
|
||||
MagicMock(
|
||||
graph_id="agent-123",
|
||||
graph_version=1,
|
||||
name="Agent 1",
|
||||
description="First agent",
|
||||
input_schema={},
|
||||
output_schema={},
|
||||
),
|
||||
MagicMock(
|
||||
graph_id="agent-456",
|
||||
graph_version=1,
|
||||
name="Agent 2",
|
||||
description="Second agent",
|
||||
input_schema={},
|
||||
output_schema={},
|
||||
),
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
core.library_db,
|
||||
"list_library_agents",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
):
|
||||
result = await core.get_library_agents_for_generation(
|
||||
user_id="user-123",
|
||||
exclude_graph_id="agent-123",
|
||||
)
|
||||
|
||||
# Verify the excluded agent is not in results
|
||||
assert len(result) == 1
|
||||
assert result[0]["graph_id"] == "agent-456"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_respects_max_results(self):
|
||||
"""Test that max_results parameter limits the page_size."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.agents = []
|
||||
|
||||
with patch.object(
|
||||
core.library_db,
|
||||
"list_library_agents",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
) as mock_list:
|
||||
await core.get_library_agents_for_generation(
|
||||
user_id="user-123",
|
||||
max_results=5,
|
||||
)
|
||||
|
||||
# Verify page_size was set to max_results
|
||||
mock_list.assert_called_once_with(
|
||||
user_id="user-123",
|
||||
search_term=None,
|
||||
page=1,
|
||||
page_size=5,
|
||||
)
|
||||
|
||||
|
||||
class TestSearchMarketplaceAgentsForGeneration:
|
||||
"""Test search_marketplace_agents_for_generation function."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_searches_marketplace_with_query(self):
|
||||
"""Test that marketplace is searched with the query."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.agents = [
|
||||
MagicMock(
|
||||
agent_name="Public Agent",
|
||||
description="A public agent",
|
||||
sub_heading="Does something useful",
|
||||
creator="creator-1",
|
||||
)
|
||||
]
|
||||
|
||||
# The store_db is dynamically imported, so patch the import path
|
||||
with patch(
|
||||
"backend.api.features.store.db.get_store_agents",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
) as mock_search:
|
||||
result = await core.search_marketplace_agents_for_generation(
|
||||
search_query="automation",
|
||||
max_results=10,
|
||||
)
|
||||
|
||||
mock_search.assert_called_once_with(
|
||||
search_query="automation",
|
||||
page=1,
|
||||
page_size=10,
|
||||
)
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0]["name"] == "Public Agent"
|
||||
assert result[0]["is_marketplace_agent"] is True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handles_marketplace_error_gracefully(self):
|
||||
"""Test that marketplace errors don't crash the function."""
|
||||
with patch(
|
||||
"backend.api.features.store.db.get_store_agents",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Marketplace unavailable"),
|
||||
):
|
||||
result = await core.search_marketplace_agents_for_generation(
|
||||
search_query="test"
|
||||
)
|
||||
|
||||
# Should return empty list, not raise exception
|
||||
assert result == []
|
||||
|
||||
|
||||
class TestGetAllRelevantAgentsForGeneration:
|
||||
"""Test get_all_relevant_agents_for_generation function."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_combines_library_and_marketplace_agents(self):
|
||||
"""Test that agents from both sources are combined."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "lib-123",
|
||||
"graph_version": 1,
|
||||
"name": "Library Agent",
|
||||
"description": "From library",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
marketplace_agents = [
|
||||
{
|
||||
"name": "Market Agent",
|
||||
"description": "From marketplace",
|
||||
"sub_heading": "Sub heading",
|
||||
"creator": "creator-1",
|
||||
"is_marketplace_agent": True,
|
||||
}
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_library_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=library_agents,
|
||||
):
|
||||
with patch.object(
|
||||
core,
|
||||
"search_marketplace_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=marketplace_agents,
|
||||
):
|
||||
result = await core.get_all_relevant_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query="test query",
|
||||
include_marketplace=True,
|
||||
)
|
||||
|
||||
# Library agents should come first
|
||||
assert len(result) == 2
|
||||
assert result[0]["name"] == "Library Agent"
|
||||
assert result[1]["name"] == "Market Agent"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_deduplicates_by_name(self):
|
||||
"""Test that marketplace agents with same name as library are excluded."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "lib-123",
|
||||
"graph_version": 1,
|
||||
"name": "Shared Agent",
|
||||
"description": "From library",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
marketplace_agents = [
|
||||
{
|
||||
"name": "Shared Agent", # Same name, should be deduplicated
|
||||
"description": "From marketplace",
|
||||
"sub_heading": "Sub heading",
|
||||
"creator": "creator-1",
|
||||
"is_marketplace_agent": True,
|
||||
},
|
||||
{
|
||||
"name": "Unique Agent",
|
||||
"description": "Only in marketplace",
|
||||
"sub_heading": "Sub heading",
|
||||
"creator": "creator-2",
|
||||
"is_marketplace_agent": True,
|
||||
},
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_library_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=library_agents,
|
||||
):
|
||||
with patch.object(
|
||||
core,
|
||||
"search_marketplace_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=marketplace_agents,
|
||||
):
|
||||
result = await core.get_all_relevant_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query="test",
|
||||
include_marketplace=True,
|
||||
)
|
||||
|
||||
# Shared Agent from marketplace should be excluded
|
||||
assert len(result) == 2
|
||||
names = [a["name"] for a in result]
|
||||
assert "Shared Agent" in names
|
||||
assert "Unique Agent" in names
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_skips_marketplace_when_disabled(self):
|
||||
"""Test that marketplace is not searched when include_marketplace=False."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "lib-123",
|
||||
"graph_version": 1,
|
||||
"name": "Library Agent",
|
||||
"description": "From library",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_library_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=library_agents,
|
||||
):
|
||||
with patch.object(
|
||||
core,
|
||||
"search_marketplace_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_marketplace:
|
||||
result = await core.get_all_relevant_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query="test",
|
||||
include_marketplace=False,
|
||||
)
|
||||
|
||||
# Marketplace should not be called
|
||||
mock_marketplace.assert_not_called()
|
||||
assert len(result) == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_skips_marketplace_when_no_search_query(self):
|
||||
"""Test that marketplace is not searched without a search query."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "lib-123",
|
||||
"graph_version": 1,
|
||||
"name": "Library Agent",
|
||||
"description": "From library",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_library_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=library_agents,
|
||||
):
|
||||
with patch.object(
|
||||
core,
|
||||
"search_marketplace_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_marketplace:
|
||||
result = await core.get_all_relevant_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query=None, # No search query
|
||||
include_marketplace=True,
|
||||
)
|
||||
|
||||
# Marketplace should not be called without search query
|
||||
mock_marketplace.assert_not_called()
|
||||
assert len(result) == 1
|
||||
|
||||
|
||||
class TestExtractSearchTermsFromSteps:
|
||||
"""Test extract_search_terms_from_steps function."""
|
||||
|
||||
def test_extracts_terms_from_instructions_type(self):
|
||||
"""Test extraction from valid instructions decomposition result."""
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [
|
||||
{
|
||||
"description": "Send an email notification",
|
||||
"block_name": "GmailSendBlock",
|
||||
},
|
||||
{"description": "Fetch weather data", "action": "Get weather API"},
|
||||
],
|
||||
}
|
||||
|
||||
result = core.extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
assert "Send an email notification" in result
|
||||
assert "GmailSendBlock" in result
|
||||
assert "Fetch weather data" in result
|
||||
assert "Get weather API" in result
|
||||
|
||||
def test_returns_empty_for_non_instructions_type(self):
|
||||
"""Test that non-instructions types return empty list."""
|
||||
decomposition_result = {
|
||||
"type": "clarifying_questions",
|
||||
"questions": [{"question": "What email?"}],
|
||||
}
|
||||
|
||||
result = core.extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
assert result == []
|
||||
|
||||
def test_deduplicates_terms_case_insensitively(self):
|
||||
"""Test that duplicate terms are removed (case-insensitive)."""
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [
|
||||
{"description": "Send Email", "name": "send email"},
|
||||
{"description": "Other task"},
|
||||
],
|
||||
}
|
||||
|
||||
result = core.extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
# Should only have one "send email" variant
|
||||
email_terms = [t for t in result if "email" in t.lower()]
|
||||
assert len(email_terms) == 1
|
||||
|
||||
def test_filters_short_terms(self):
|
||||
"""Test that terms with 3 or fewer characters are filtered out."""
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [
|
||||
{"description": "ab", "action": "xyz"}, # Both too short
|
||||
{"description": "Valid term here"},
|
||||
],
|
||||
}
|
||||
|
||||
result = core.extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
assert "ab" not in result
|
||||
assert "xyz" not in result
|
||||
assert "Valid term here" in result
|
||||
|
||||
def test_handles_empty_steps(self):
|
||||
"""Test handling of empty steps list."""
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [],
|
||||
}
|
||||
|
||||
result = core.extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
assert result == []
|
||||
|
||||
|
||||
class TestEnrichLibraryAgentsFromSteps:
|
||||
"""Test enrich_library_agents_from_steps function."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_enriches_with_additional_agents(self):
|
||||
"""Test that additional agents are found based on steps."""
|
||||
existing_agents = [
|
||||
{
|
||||
"graph_id": "existing-123",
|
||||
"graph_version": 1,
|
||||
"name": "Existing Agent",
|
||||
"description": "Already fetched",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
additional_agents = [
|
||||
{
|
||||
"graph_id": "new-456",
|
||||
"graph_version": 1,
|
||||
"name": "Email Agent",
|
||||
"description": "For sending emails",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [
|
||||
{"description": "Send email notification"},
|
||||
],
|
||||
}
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=additional_agents,
|
||||
):
|
||||
result = await core.enrich_library_agents_from_steps(
|
||||
user_id="user-123",
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=existing_agents,
|
||||
)
|
||||
|
||||
# Should have both existing and new agents
|
||||
assert len(result) == 2
|
||||
names = [a["name"] for a in result]
|
||||
assert "Existing Agent" in names
|
||||
assert "Email Agent" in names
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_deduplicates_by_graph_id(self):
|
||||
"""Test that agents with same graph_id are not duplicated."""
|
||||
existing_agents = [
|
||||
{
|
||||
"graph_id": "agent-123",
|
||||
"graph_version": 1,
|
||||
"name": "Existing Agent",
|
||||
"description": "Already fetched",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
# Additional search returns same agent
|
||||
additional_agents = [
|
||||
{
|
||||
"graph_id": "agent-123", # Same ID
|
||||
"graph_version": 1,
|
||||
"name": "Existing Agent Copy",
|
||||
"description": "Same agent different name",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [{"description": "Some action"}],
|
||||
}
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=additional_agents,
|
||||
):
|
||||
result = await core.enrich_library_agents_from_steps(
|
||||
user_id="user-123",
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=existing_agents,
|
||||
)
|
||||
|
||||
# Should not duplicate
|
||||
assert len(result) == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_deduplicates_by_name(self):
|
||||
"""Test that agents with same name are not duplicated."""
|
||||
existing_agents = [
|
||||
{
|
||||
"graph_id": "agent-123",
|
||||
"graph_version": 1,
|
||||
"name": "Email Agent",
|
||||
"description": "Already fetched",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
# Additional search returns agent with same name but different ID
|
||||
additional_agents = [
|
||||
{
|
||||
"graph_id": "agent-456", # Different ID
|
||||
"graph_version": 1,
|
||||
"name": "Email Agent", # Same name
|
||||
"description": "Different agent same name",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [{"description": "Send email"}],
|
||||
}
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=additional_agents,
|
||||
):
|
||||
result = await core.enrich_library_agents_from_steps(
|
||||
user_id="user-123",
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=existing_agents,
|
||||
)
|
||||
|
||||
# Should not duplicate by name
|
||||
assert len(result) == 1
|
||||
assert result[0].get("graph_id") == "agent-123" # Original kept
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_existing_when_no_steps(self):
|
||||
"""Test that existing agents are returned when no search terms extracted."""
|
||||
existing_agents = [
|
||||
{
|
||||
"graph_id": "existing-123",
|
||||
"graph_version": 1,
|
||||
"name": "Existing Agent",
|
||||
"description": "Already fetched",
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
}
|
||||
]
|
||||
|
||||
decomposition_result = {
|
||||
"type": "clarifying_questions", # Not instructions type
|
||||
"questions": [],
|
||||
}
|
||||
|
||||
result = await core.enrich_library_agents_from_steps(
|
||||
user_id="user-123",
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=existing_agents,
|
||||
)
|
||||
|
||||
# Should return existing unchanged
|
||||
assert result == existing_agents
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_limits_search_terms_to_three(self):
|
||||
"""Test that only first 3 search terms are used."""
|
||||
existing_agents = []
|
||||
|
||||
decomposition_result = {
|
||||
"type": "instructions",
|
||||
"steps": [
|
||||
{"description": "First action"},
|
||||
{"description": "Second action"},
|
||||
{"description": "Third action"},
|
||||
{"description": "Fourth action"},
|
||||
{"description": "Fifth action"},
|
||||
],
|
||||
}
|
||||
|
||||
call_count = 0
|
||||
|
||||
async def mock_get_agents(*args, **kwargs):
|
||||
nonlocal call_count
|
||||
call_count += 1
|
||||
return []
|
||||
|
||||
with patch.object(
|
||||
core,
|
||||
"get_all_relevant_agents_for_generation",
|
||||
side_effect=mock_get_agents,
|
||||
):
|
||||
await core.enrich_library_agents_from_steps(
|
||||
user_id="user-123",
|
||||
decomposition_result=decomposition_result,
|
||||
existing_agents=existing_agents,
|
||||
)
|
||||
|
||||
# Should only make 3 calls (limited to first 3 terms)
|
||||
assert call_count == 3
|
||||
|
||||
|
||||
class TestExtractUuidsFromText:
|
||||
"""Test extract_uuids_from_text function."""
|
||||
|
||||
def test_extracts_single_uuid(self):
|
||||
"""Test extraction of a single UUID from text."""
|
||||
text = "Use my agent 46631191-e8a8-486f-ad90-84f89738321d for this task"
|
||||
result = core.extract_uuids_from_text(text)
|
||||
assert len(result) == 1
|
||||
assert "46631191-e8a8-486f-ad90-84f89738321d" in result
|
||||
|
||||
def test_extracts_multiple_uuids(self):
|
||||
"""Test extraction of multiple UUIDs from text."""
|
||||
text = (
|
||||
"Combine agents 11111111-1111-4111-8111-111111111111 "
|
||||
"and 22222222-2222-4222-9222-222222222222"
|
||||
)
|
||||
result = core.extract_uuids_from_text(text)
|
||||
assert len(result) == 2
|
||||
assert "11111111-1111-4111-8111-111111111111" in result
|
||||
assert "22222222-2222-4222-9222-222222222222" in result
|
||||
|
||||
def test_deduplicates_uuids(self):
|
||||
"""Test that duplicate UUIDs are deduplicated."""
|
||||
text = (
|
||||
"Use 46631191-e8a8-486f-ad90-84f89738321d twice: "
|
||||
"46631191-e8a8-486f-ad90-84f89738321d"
|
||||
)
|
||||
result = core.extract_uuids_from_text(text)
|
||||
assert len(result) == 1
|
||||
|
||||
def test_normalizes_to_lowercase(self):
|
||||
"""Test that UUIDs are normalized to lowercase."""
|
||||
text = "Use 46631191-E8A8-486F-AD90-84F89738321D"
|
||||
result = core.extract_uuids_from_text(text)
|
||||
assert result[0] == "46631191-e8a8-486f-ad90-84f89738321d"
|
||||
|
||||
def test_returns_empty_for_no_uuids(self):
|
||||
"""Test that empty list is returned when no UUIDs found."""
|
||||
text = "Create an email agent that sends notifications"
|
||||
result = core.extract_uuids_from_text(text)
|
||||
assert result == []
|
||||
|
||||
def test_ignores_invalid_uuids(self):
|
||||
"""Test that invalid UUID-like strings are ignored."""
|
||||
text = "Not a valid UUID: 12345678-1234-1234-1234-123456789abc"
|
||||
result = core.extract_uuids_from_text(text)
|
||||
# UUID v4 requires specific patterns (4 in third group, 8/9/a/b in fourth)
|
||||
assert len(result) == 0
|
||||
|
||||
|
||||
class TestGetLibraryAgentById:
|
||||
"""Test get_library_agent_by_id function (and its alias get_library_agent_by_graph_id)."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_agent_when_found_by_graph_id(self):
|
||||
"""Test that agent is returned when found by graph_id."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.graph_id = "agent-123"
|
||||
mock_agent.graph_version = 1
|
||||
mock_agent.name = "Test Agent"
|
||||
mock_agent.description = "Test description"
|
||||
mock_agent.input_schema = {"properties": {}}
|
||||
mock_agent.output_schema = {"properties": {}}
|
||||
|
||||
with patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent_by_graph_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_agent,
|
||||
):
|
||||
result = await core.get_library_agent_by_id("user-123", "agent-123")
|
||||
|
||||
assert result is not None
|
||||
assert result["graph_id"] == "agent-123"
|
||||
assert result["name"] == "Test Agent"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_falls_back_to_library_agent_id(self):
|
||||
"""Test that lookup falls back to library agent ID when graph_id not found."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.graph_id = "graph-456" # Different from the lookup ID
|
||||
mock_agent.graph_version = 1
|
||||
mock_agent.name = "Library Agent"
|
||||
mock_agent.description = "Found by library ID"
|
||||
mock_agent.input_schema = {"properties": {}}
|
||||
mock_agent.output_schema = {"properties": {}}
|
||||
|
||||
with (
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent_by_graph_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None, # Not found by graph_id
|
||||
),
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_agent, # Found by library ID
|
||||
),
|
||||
):
|
||||
result = await core.get_library_agent_by_id("user-123", "library-id-123")
|
||||
|
||||
assert result is not None
|
||||
assert result["graph_id"] == "graph-456"
|
||||
assert result["name"] == "Library Agent"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_none_when_not_found_by_either_method(self):
|
||||
"""Test that None is returned when agent not found by either method."""
|
||||
with (
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent_by_graph_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None,
|
||||
),
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=core.NotFoundError("Not found"),
|
||||
),
|
||||
):
|
||||
result = await core.get_library_agent_by_id("user-123", "nonexistent")
|
||||
|
||||
assert result is None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_none_on_exception(self):
|
||||
"""Test that None is returned when exception occurs in both lookups."""
|
||||
with (
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent_by_graph_id",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Database error"),
|
||||
),
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Database error"),
|
||||
),
|
||||
):
|
||||
result = await core.get_library_agent_by_id("user-123", "agent-123")
|
||||
|
||||
assert result is None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_alias_works(self):
|
||||
"""Test that get_library_agent_by_graph_id is an alias for get_library_agent_by_id."""
|
||||
assert core.get_library_agent_by_graph_id is core.get_library_agent_by_id
|
||||
|
||||
|
||||
class TestGetAllRelevantAgentsWithUuids:
|
||||
"""Test UUID extraction in get_all_relevant_agents_for_generation."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_fetches_explicitly_mentioned_agents(self):
|
||||
"""Test that agents mentioned by UUID are fetched directly."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.graph_id = "46631191-e8a8-486f-ad90-84f89738321d"
|
||||
mock_agent.graph_version = 1
|
||||
mock_agent.name = "Mentioned Agent"
|
||||
mock_agent.description = "Explicitly mentioned"
|
||||
mock_agent.input_schema = {}
|
||||
mock_agent.output_schema = {}
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.agents = []
|
||||
|
||||
with (
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"get_library_agent_by_graph_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_agent,
|
||||
),
|
||||
patch.object(
|
||||
core.library_db,
|
||||
"list_library_agents",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
),
|
||||
):
|
||||
result = await core.get_all_relevant_agents_for_generation(
|
||||
user_id="user-123",
|
||||
search_query="Use agent 46631191-e8a8-486f-ad90-84f89738321d",
|
||||
include_marketplace=False,
|
||||
)
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].get("graph_id") == "46631191-e8a8-486f-ad90-84f89738321d"
|
||||
assert result[0].get("name") == "Mentioned Agent"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
@@ -433,139 +433,5 @@ class TestGetBlocksExternal:
|
||||
assert result is None
|
||||
|
||||
|
||||
class TestLibraryAgentsPassthrough:
|
||||
"""Test that library_agents are passed correctly in all requests."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Reset client singleton before each test."""
|
||||
service._settings = None
|
||||
service._client = None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_decompose_goal_passes_library_agents(self):
|
||||
"""Test that library_agents are included in decompose goal payload."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "agent-123",
|
||||
"graph_version": 1,
|
||||
"name": "Email Sender",
|
||||
"description": "Sends emails",
|
||||
"input_schema": {"properties": {"to": {"type": "string"}}},
|
||||
"output_schema": {"properties": {"sent": {"type": "boolean"}}},
|
||||
},
|
||||
]
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"success": True,
|
||||
"type": "instructions",
|
||||
"steps": ["Step 1"],
|
||||
}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.return_value = mock_response
|
||||
|
||||
with patch.object(service, "_get_client", return_value=mock_client):
|
||||
await service.decompose_goal_external(
|
||||
"Send an email",
|
||||
library_agents=library_agents,
|
||||
)
|
||||
|
||||
# Verify library_agents was passed in the payload
|
||||
call_args = mock_client.post.call_args
|
||||
assert call_args[1]["json"]["library_agents"] == library_agents
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_agent_passes_library_agents(self):
|
||||
"""Test that library_agents are included in generate agent payload."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "agent-456",
|
||||
"graph_version": 2,
|
||||
"name": "Data Fetcher",
|
||||
"description": "Fetches data from API",
|
||||
"input_schema": {"properties": {"url": {"type": "string"}}},
|
||||
"output_schema": {"properties": {"data": {"type": "object"}}},
|
||||
},
|
||||
]
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"success": True,
|
||||
"agent_json": {"name": "Test Agent", "nodes": []},
|
||||
}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.return_value = mock_response
|
||||
|
||||
with patch.object(service, "_get_client", return_value=mock_client):
|
||||
await service.generate_agent_external(
|
||||
{"steps": ["Step 1"]},
|
||||
library_agents=library_agents,
|
||||
)
|
||||
|
||||
# Verify library_agents was passed in the payload
|
||||
call_args = mock_client.post.call_args
|
||||
assert call_args[1]["json"]["library_agents"] == library_agents
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_agent_patch_passes_library_agents(self):
|
||||
"""Test that library_agents are included in patch generation payload."""
|
||||
library_agents = [
|
||||
{
|
||||
"graph_id": "agent-789",
|
||||
"graph_version": 1,
|
||||
"name": "Slack Notifier",
|
||||
"description": "Sends Slack messages",
|
||||
"input_schema": {"properties": {"message": {"type": "string"}}},
|
||||
"output_schema": {"properties": {"success": {"type": "boolean"}}},
|
||||
},
|
||||
]
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"success": True,
|
||||
"agent_json": {"name": "Updated Agent", "nodes": []},
|
||||
}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.return_value = mock_response
|
||||
|
||||
with patch.object(service, "_get_client", return_value=mock_client):
|
||||
await service.generate_agent_patch_external(
|
||||
"Add error handling",
|
||||
{"name": "Original Agent", "nodes": []},
|
||||
library_agents=library_agents,
|
||||
)
|
||||
|
||||
# Verify library_agents was passed in the payload
|
||||
call_args = mock_client.post.call_args
|
||||
assert call_args[1]["json"]["library_agents"] == library_agents
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_decompose_goal_without_library_agents(self):
|
||||
"""Test that decompose goal works without library_agents."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"success": True,
|
||||
"type": "instructions",
|
||||
"steps": ["Step 1"],
|
||||
}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.return_value = mock_response
|
||||
|
||||
with patch.object(service, "_get_client", return_value=mock_client):
|
||||
await service.decompose_goal_external("Build a workflow")
|
||||
|
||||
# Verify library_agents was NOT passed when not provided
|
||||
call_args = mock_client.post.call_args
|
||||
assert "library_agents" not in call_args[1]["json"]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
|
||||
@@ -43,24 +43,19 @@ faker = Faker()
|
||||
# Constants for data generation limits (reduced for E2E tests)
|
||||
NUM_USERS = 15
|
||||
NUM_AGENT_BLOCKS = 30
|
||||
MIN_GRAPHS_PER_USER = 25
|
||||
MAX_GRAPHS_PER_USER = 25
|
||||
MIN_GRAPHS_PER_USER = 15
|
||||
MAX_GRAPHS_PER_USER = 15
|
||||
MIN_NODES_PER_GRAPH = 3
|
||||
MAX_NODES_PER_GRAPH = 6
|
||||
MIN_PRESETS_PER_USER = 2
|
||||
MAX_PRESETS_PER_USER = 3
|
||||
MIN_AGENTS_PER_USER = 25
|
||||
MAX_AGENTS_PER_USER = 25
|
||||
MIN_AGENTS_PER_USER = 15
|
||||
MAX_AGENTS_PER_USER = 15
|
||||
MIN_EXECUTIONS_PER_GRAPH = 2
|
||||
MAX_EXECUTIONS_PER_GRAPH = 8
|
||||
MIN_REVIEWS_PER_VERSION = 2
|
||||
MAX_REVIEWS_PER_VERSION = 5
|
||||
|
||||
# Guaranteed minimums for marketplace tests (deterministic)
|
||||
GUARANTEED_FEATURED_AGENTS = 8
|
||||
GUARANTEED_FEATURED_CREATORS = 5
|
||||
GUARANTEED_TOP_AGENTS = 10
|
||||
|
||||
|
||||
def get_image():
|
||||
"""Generate a consistent image URL using picsum.photos service."""
|
||||
@@ -390,7 +385,7 @@ class TestDataCreator:
|
||||
|
||||
library_agents = []
|
||||
for user in self.users:
|
||||
num_agents = random.randint(MIN_AGENTS_PER_USER, MAX_AGENTS_PER_USER)
|
||||
num_agents = 10 # Create exactly 10 agents per user
|
||||
|
||||
# Get available graphs for this user
|
||||
user_graphs = [
|
||||
@@ -512,17 +507,14 @@ class TestDataCreator:
|
||||
existing_profiles, min(num_creators, len(existing_profiles))
|
||||
)
|
||||
|
||||
# Guarantee at least GUARANTEED_FEATURED_CREATORS featured creators
|
||||
num_featured = max(GUARANTEED_FEATURED_CREATORS, int(num_creators * 0.5))
|
||||
# Mark about 50% of creators as featured (more for testing)
|
||||
num_featured = max(2, int(num_creators * 0.5))
|
||||
num_featured = min(
|
||||
num_featured, len(selected_profiles)
|
||||
) # Don't exceed available profiles
|
||||
featured_profile_ids = set(
|
||||
random.sample([p.id for p in selected_profiles], num_featured)
|
||||
)
|
||||
print(
|
||||
f"🎯 Creating {num_featured} featured creators (min: {GUARANTEED_FEATURED_CREATORS})"
|
||||
)
|
||||
|
||||
for profile in selected_profiles:
|
||||
try:
|
||||
@@ -553,25 +545,21 @@ class TestDataCreator:
|
||||
return profiles
|
||||
|
||||
async def create_test_store_submissions(self) -> List[Dict[str, Any]]:
|
||||
"""Create test store submissions using the API function.
|
||||
|
||||
DETERMINISTIC: Guarantees minimum featured agents for E2E tests.
|
||||
"""
|
||||
"""Create test store submissions using the API function."""
|
||||
print("Creating test store submissions...")
|
||||
|
||||
submissions = []
|
||||
approved_submissions = []
|
||||
featured_count = 0
|
||||
submission_counter = 0
|
||||
|
||||
# Create a special test submission for test123@gmail.com (ALWAYS approved + featured)
|
||||
# Create a special test submission for test123@gmail.com
|
||||
test_user = next(
|
||||
(user for user in self.users if user["email"] == "test123@gmail.com"), None
|
||||
)
|
||||
if test_user and self.agent_graphs:
|
||||
if test_user:
|
||||
# Special test data for consistent testing
|
||||
test_submission_data = {
|
||||
"user_id": test_user["id"],
|
||||
"agent_id": self.agent_graphs[0]["id"],
|
||||
"agent_id": self.agent_graphs[0]["id"], # Use first available graph
|
||||
"agent_version": 1,
|
||||
"slug": "test-agent-submission",
|
||||
"name": "Test Agent Submission",
|
||||
@@ -592,24 +580,37 @@ class TestDataCreator:
|
||||
submissions.append(test_submission.model_dump())
|
||||
print("✅ Created special test store submission for test123@gmail.com")
|
||||
|
||||
# ALWAYS approve and feature the test submission
|
||||
# Randomly approve, reject, or leave pending the test submission
|
||||
if test_submission.store_listing_version_id:
|
||||
approved_submission = await review_store_submission(
|
||||
store_listing_version_id=test_submission.store_listing_version_id,
|
||||
is_approved=True,
|
||||
external_comments="Test submission approved",
|
||||
internal_comments="Auto-approved test submission",
|
||||
reviewer_id=test_user["id"],
|
||||
)
|
||||
approved_submissions.append(approved_submission.model_dump())
|
||||
print("✅ Approved test store submission")
|
||||
random_value = random.random()
|
||||
if random_value < 0.4: # 40% chance to approve
|
||||
approved_submission = await review_store_submission(
|
||||
store_listing_version_id=test_submission.store_listing_version_id,
|
||||
is_approved=True,
|
||||
external_comments="Test submission approved",
|
||||
internal_comments="Auto-approved test submission",
|
||||
reviewer_id=test_user["id"],
|
||||
)
|
||||
approved_submissions.append(approved_submission.model_dump())
|
||||
print("✅ Approved test store submission")
|
||||
|
||||
await prisma.storelistingversion.update(
|
||||
where={"id": test_submission.store_listing_version_id},
|
||||
data={"isFeatured": True},
|
||||
)
|
||||
featured_count += 1
|
||||
print("🌟 Marked test agent as FEATURED")
|
||||
# Mark approved submission as featured
|
||||
await prisma.storelistingversion.update(
|
||||
where={"id": test_submission.store_listing_version_id},
|
||||
data={"isFeatured": True},
|
||||
)
|
||||
print("🌟 Marked test agent as FEATURED")
|
||||
elif random_value < 0.7: # 30% chance to reject (40% to 70%)
|
||||
await review_store_submission(
|
||||
store_listing_version_id=test_submission.store_listing_version_id,
|
||||
is_approved=False,
|
||||
external_comments="Test submission rejected - needs improvements",
|
||||
internal_comments="Auto-rejected test submission for E2E testing",
|
||||
reviewer_id=test_user["id"],
|
||||
)
|
||||
print("❌ Rejected test store submission")
|
||||
else: # 30% chance to leave pending (70% to 100%)
|
||||
print("⏳ Left test submission pending for review")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error creating test store submission: {e}")
|
||||
@@ -619,6 +620,7 @@ class TestDataCreator:
|
||||
|
||||
# Create regular submissions for all users
|
||||
for user in self.users:
|
||||
# Get available graphs for this specific user
|
||||
user_graphs = [
|
||||
g for g in self.agent_graphs if g.get("userId") == user["id"]
|
||||
]
|
||||
@@ -629,17 +631,18 @@ class TestDataCreator:
|
||||
)
|
||||
continue
|
||||
|
||||
# Create exactly 4 store submissions per user
|
||||
for submission_index in range(4):
|
||||
graph = random.choice(user_graphs)
|
||||
submission_counter += 1
|
||||
|
||||
try:
|
||||
print(
|
||||
f"Creating store submission for user {user['id']} with graph {graph['id']}"
|
||||
f"Creating store submission for user {user['id']} with graph {graph['id']} (owner: {graph.get('userId')})"
|
||||
)
|
||||
|
||||
# Use the API function to create store submission with correct parameters
|
||||
submission = await create_store_submission(
|
||||
user_id=user["id"],
|
||||
user_id=user["id"], # Must match graph's userId
|
||||
agent_id=graph["id"],
|
||||
agent_version=graph.get("version", 1),
|
||||
slug=faker.slug(),
|
||||
@@ -648,24 +651,22 @@ class TestDataCreator:
|
||||
video_url=get_video_url() if random.random() < 0.3 else None,
|
||||
image_urls=[get_image() for _ in range(3)],
|
||||
description=faker.text(),
|
||||
categories=[get_category()],
|
||||
categories=[
|
||||
get_category()
|
||||
], # Single category from predefined list
|
||||
changes_summary="Initial E2E test submission",
|
||||
)
|
||||
submissions.append(submission.model_dump())
|
||||
print(f"✅ Created store submission: {submission.name}")
|
||||
|
||||
# Randomly approve, reject, or leave pending the submission
|
||||
if submission.store_listing_version_id:
|
||||
# DETERMINISTIC: First N submissions are always approved
|
||||
# First GUARANTEED_FEATURED_AGENTS of those are always featured
|
||||
should_approve = (
|
||||
submission_counter <= GUARANTEED_TOP_AGENTS
|
||||
or random.random() < 0.4
|
||||
)
|
||||
should_feature = featured_count < GUARANTEED_FEATURED_AGENTS
|
||||
|
||||
if should_approve:
|
||||
random_value = random.random()
|
||||
if random_value < 0.4: # 40% chance to approve
|
||||
try:
|
||||
# Pick a random user as the reviewer (admin)
|
||||
reviewer_id = random.choice(self.users)["id"]
|
||||
|
||||
approved_submission = await review_store_submission(
|
||||
store_listing_version_id=submission.store_listing_version_id,
|
||||
is_approved=True,
|
||||
@@ -680,7 +681,16 @@ class TestDataCreator:
|
||||
f"✅ Approved store submission: {submission.name}"
|
||||
)
|
||||
|
||||
if should_feature:
|
||||
# Mark some agents as featured during creation (30% chance)
|
||||
# More likely for creators and first submissions
|
||||
is_creator = user["id"] in [
|
||||
p.get("userId") for p in self.profiles
|
||||
]
|
||||
feature_chance = (
|
||||
0.5 if is_creator else 0.2
|
||||
) # 50% for creators, 20% for others
|
||||
|
||||
if random.random() < feature_chance:
|
||||
try:
|
||||
await prisma.storelistingversion.update(
|
||||
where={
|
||||
@@ -688,25 +698,8 @@ class TestDataCreator:
|
||||
},
|
||||
data={"isFeatured": True},
|
||||
)
|
||||
featured_count += 1
|
||||
print(
|
||||
f"🌟 Marked agent as FEATURED ({featured_count}/{GUARANTEED_FEATURED_AGENTS}): {submission.name}"
|
||||
)
|
||||
except Exception as e:
|
||||
print(
|
||||
f"Warning: Could not mark submission as featured: {e}"
|
||||
)
|
||||
elif random.random() < 0.2:
|
||||
try:
|
||||
await prisma.storelistingversion.update(
|
||||
where={
|
||||
"id": submission.store_listing_version_id
|
||||
},
|
||||
data={"isFeatured": True},
|
||||
)
|
||||
featured_count += 1
|
||||
print(
|
||||
f"🌟 Marked agent as FEATURED (bonus): {submission.name}"
|
||||
f"🌟 Marked agent as FEATURED: {submission.name}"
|
||||
)
|
||||
except Exception as e:
|
||||
print(
|
||||
@@ -717,9 +710,11 @@ class TestDataCreator:
|
||||
print(
|
||||
f"Warning: Could not approve submission {submission.name}: {e}"
|
||||
)
|
||||
elif random.random() < 0.5:
|
||||
elif random_value < 0.7: # 30% chance to reject (40% to 70%)
|
||||
try:
|
||||
# Pick a random user as the reviewer (admin)
|
||||
reviewer_id = random.choice(self.users)["id"]
|
||||
|
||||
await review_store_submission(
|
||||
store_listing_version_id=submission.store_listing_version_id,
|
||||
is_approved=False,
|
||||
@@ -734,7 +729,7 @@ class TestDataCreator:
|
||||
print(
|
||||
f"Warning: Could not reject submission {submission.name}: {e}"
|
||||
)
|
||||
else:
|
||||
else: # 30% chance to leave pending (70% to 100%)
|
||||
print(
|
||||
f"⏳ Left submission pending for review: {submission.name}"
|
||||
)
|
||||
@@ -748,13 +743,9 @@ class TestDataCreator:
|
||||
traceback.print_exc()
|
||||
continue
|
||||
|
||||
print("\n📊 Store Submissions Summary:")
|
||||
print(f" Created: {len(submissions)}")
|
||||
print(f" Approved: {len(approved_submissions)}")
|
||||
print(
|
||||
f" Featured: {featured_count} (guaranteed min: {GUARANTEED_FEATURED_AGENTS})"
|
||||
f"Created {len(submissions)} store submissions, approved {len(approved_submissions)}"
|
||||
)
|
||||
|
||||
self.store_submissions = submissions
|
||||
return submissions
|
||||
|
||||
@@ -834,15 +825,12 @@ class TestDataCreator:
|
||||
print(f"✅ Agent blocks available: {len(self.agent_blocks)}")
|
||||
print(f"✅ Agent graphs created: {len(self.agent_graphs)}")
|
||||
print(f"✅ Library agents created: {len(self.library_agents)}")
|
||||
print(f"✅ Creator profiles updated: {len(self.profiles)}")
|
||||
print(f"✅ Store submissions created: {len(self.store_submissions)}")
|
||||
print(f"✅ Creator profiles updated: {len(self.profiles)} (some featured)")
|
||||
print(
|
||||
f"✅ Store submissions created: {len(self.store_submissions)} (some marked as featured during creation)"
|
||||
)
|
||||
print(f"✅ API keys created: {len(self.api_keys)}")
|
||||
print(f"✅ Presets created: {len(self.presets)}")
|
||||
print("\n🎯 Deterministic Guarantees:")
|
||||
print(f" • Featured agents: >= {GUARANTEED_FEATURED_AGENTS}")
|
||||
print(f" • Featured creators: >= {GUARANTEED_FEATURED_CREATORS}")
|
||||
print(f" • Top agents (approved): >= {GUARANTEED_TOP_AGENTS}")
|
||||
print(f" • Library agents per user: >= {MIN_AGENTS_PER_USER}")
|
||||
print("\n🚀 Your E2E test database is ready to use!")
|
||||
|
||||
|
||||
|
||||
@@ -57,7 +57,6 @@ export function ChatInput({
|
||||
isStreaming,
|
||||
value,
|
||||
baseHandleKeyDown,
|
||||
inputId,
|
||||
});
|
||||
|
||||
return (
|
||||
|
||||
@@ -15,7 +15,6 @@ interface Args {
|
||||
isStreaming?: boolean;
|
||||
value: string;
|
||||
baseHandleKeyDown: (event: KeyboardEvent<HTMLTextAreaElement>) => void;
|
||||
inputId?: string;
|
||||
}
|
||||
|
||||
export function useVoiceRecording({
|
||||
@@ -24,7 +23,6 @@ export function useVoiceRecording({
|
||||
isStreaming = false,
|
||||
value,
|
||||
baseHandleKeyDown,
|
||||
inputId,
|
||||
}: Args) {
|
||||
const [isRecording, setIsRecording] = useState(false);
|
||||
const [isTranscribing, setIsTranscribing] = useState(false);
|
||||
@@ -105,7 +103,7 @@ export function useVoiceRecording({
|
||||
setIsTranscribing(false);
|
||||
}
|
||||
},
|
||||
[handleTranscription, inputId],
|
||||
[handleTranscription],
|
||||
);
|
||||
|
||||
const stopRecording = useCallback(() => {
|
||||
@@ -203,15 +201,6 @@ export function useVoiceRecording({
|
||||
}
|
||||
}, [error, toast]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isTranscribing && inputId) {
|
||||
const inputElement = document.getElementById(inputId);
|
||||
if (inputElement) {
|
||||
inputElement.focus();
|
||||
}
|
||||
}
|
||||
}, [isTranscribing, inputId]);
|
||||
|
||||
const handleKeyDown = useCallback(
|
||||
(event: KeyboardEvent<HTMLTextAreaElement>) => {
|
||||
if (event.key === " " && !value.trim() && !isTranscribing) {
|
||||
|
||||
@@ -30,9 +30,9 @@ export function getErrorMessage(result: unknown): string {
|
||||
}
|
||||
if (typeof result === "object" && result !== null) {
|
||||
const response = result as Record<string, unknown>;
|
||||
if (response.error) return stripInternalReasoning(String(response.error));
|
||||
if (response.message)
|
||||
return stripInternalReasoning(String(response.message));
|
||||
if (response.error) return stripInternalReasoning(String(response.error));
|
||||
}
|
||||
return "An error occurred";
|
||||
}
|
||||
@@ -363,8 +363,8 @@ export function formatToolResponse(result: unknown, toolName: string): string {
|
||||
|
||||
case "error":
|
||||
const errorMsg =
|
||||
(response.message as string) || response.error || "An error occurred";
|
||||
return stripInternalReasoning(String(errorMsg));
|
||||
(response.error as string) || response.message || "An error occurred";
|
||||
return `Error: ${errorMsg}`;
|
||||
|
||||
case "no_results":
|
||||
const suggestions = (response.suggestions as string[]) || [];
|
||||
|
||||
@@ -59,13 +59,12 @@ test.describe("Library", () => {
|
||||
});
|
||||
|
||||
test("pagination works correctly", async ({ page }, testInfo) => {
|
||||
test.setTimeout(testInfo.timeout * 3);
|
||||
test.setTimeout(testInfo.timeout * 3); // Increase timeout for pagination operations
|
||||
await page.goto("/library");
|
||||
|
||||
const PAGE_SIZE = 20;
|
||||
const paginationResult = await libraryPage.testPagination();
|
||||
|
||||
if (paginationResult.initialCount >= PAGE_SIZE) {
|
||||
if (paginationResult.initialCount >= 10) {
|
||||
expect(paginationResult.finalCount).toBeGreaterThanOrEqual(
|
||||
paginationResult.initialCount,
|
||||
);
|
||||
@@ -134,10 +133,7 @@ test.describe("Library", () => {
|
||||
test.expect(clearedSearchValue).toBe("");
|
||||
});
|
||||
|
||||
test("pagination while searching works correctly", async ({
|
||||
page,
|
||||
}, testInfo) => {
|
||||
test.setTimeout(testInfo.timeout * 3);
|
||||
test("pagination while searching works correctly", async ({ page }) => {
|
||||
await page.goto("/library");
|
||||
|
||||
const allAgents = await libraryPage.getAgents();
|
||||
@@ -156,10 +152,9 @@ test.describe("Library", () => {
|
||||
);
|
||||
expect(matchingResults.length).toEqual(initialSearchResults.length);
|
||||
|
||||
const PAGE_SIZE = 20;
|
||||
const searchPaginationResult = await libraryPage.testPagination();
|
||||
|
||||
if (searchPaginationResult.initialCount >= PAGE_SIZE) {
|
||||
if (searchPaginationResult.initialCount >= 10) {
|
||||
expect(searchPaginationResult.finalCount).toBeGreaterThanOrEqual(
|
||||
searchPaginationResult.initialCount,
|
||||
);
|
||||
|
||||
@@ -69,12 +69,9 @@ test.describe("Marketplace Creator Page – Basic Functionality", () => {
|
||||
await marketplacePage.getFirstCreatorProfile(page);
|
||||
await firstCreatorProfile.click();
|
||||
await page.waitForURL("**/marketplace/creator/**");
|
||||
await page.waitForLoadState("networkidle").catch(() => {});
|
||||
|
||||
const firstAgent = page
|
||||
.locator('[data-testid="store-card"]:visible')
|
||||
.first();
|
||||
await firstAgent.waitFor({ state: "visible", timeout: 30000 });
|
||||
|
||||
await firstAgent.click();
|
||||
await page.waitForURL("**/marketplace/agent/**");
|
||||
|
||||
@@ -77,6 +77,7 @@ test.describe("Marketplace – Basic Functionality", () => {
|
||||
|
||||
const firstFeaturedAgent =
|
||||
await marketplacePage.getFirstFeaturedAgent(page);
|
||||
await firstFeaturedAgent.waitFor({ state: "visible" });
|
||||
await firstFeaturedAgent.click();
|
||||
await page.waitForURL("**/marketplace/agent/**");
|
||||
await matchesUrl(page, /\/marketplace\/agent\/.+/);
|
||||
@@ -115,15 +116,7 @@ test.describe("Marketplace – Basic Functionality", () => {
|
||||
const searchTerm = page.getByText("DummyInput").first();
|
||||
await isVisible(searchTerm);
|
||||
|
||||
await page.waitForLoadState("networkidle").catch(() => {});
|
||||
|
||||
await page
|
||||
.waitForFunction(
|
||||
() =>
|
||||
document.querySelectorAll('[data-testid="store-card"]').length > 0,
|
||||
{ timeout: 15000 },
|
||||
)
|
||||
.catch(() => console.log("No search results appeared within timeout"));
|
||||
await page.waitForTimeout(10000);
|
||||
|
||||
const results = await marketplacePage.getSearchResultsCount(page);
|
||||
expect(results).toBeGreaterThan(0);
|
||||
|
||||
@@ -300,27 +300,21 @@ export class LibraryPage extends BasePage {
|
||||
async scrollToLoadMore(): Promise<void> {
|
||||
console.log(`scrolling to load more agents`);
|
||||
|
||||
const initialCount = await this.getAgentCountByListLength();
|
||||
console.log(`Initial agent count (DOM cards): ${initialCount}`);
|
||||
// Get initial agent count
|
||||
const initialCount = await this.getAgentCount();
|
||||
console.log(`Initial agent count: ${initialCount}`);
|
||||
|
||||
// Scroll down to trigger pagination
|
||||
await this.scrollToBottom();
|
||||
|
||||
await this.page
|
||||
.waitForLoadState("networkidle", { timeout: 10000 })
|
||||
.catch(() => console.log("Network idle timeout, continuing..."));
|
||||
// Wait for potential new agents to load
|
||||
await this.page.waitForTimeout(2000);
|
||||
|
||||
await this.page
|
||||
.waitForFunction(
|
||||
(prevCount) =>
|
||||
document.querySelectorAll('[data-testid="library-agent-card"]')
|
||||
.length > prevCount,
|
||||
initialCount,
|
||||
{ timeout: 5000 },
|
||||
)
|
||||
.catch(() => {});
|
||||
// Check if more agents loaded
|
||||
const newCount = await this.getAgentCount();
|
||||
console.log(`New agent count after scroll: ${newCount}`);
|
||||
|
||||
const newCount = await this.getAgentCountByListLength();
|
||||
console.log(`New agent count after scroll (DOM cards): ${newCount}`);
|
||||
return;
|
||||
}
|
||||
|
||||
async testPagination(): Promise<{
|
||||
|
||||
@@ -9,7 +9,6 @@ export class MarketplacePage extends BasePage {
|
||||
|
||||
async goto(page: Page) {
|
||||
await page.goto("/marketplace");
|
||||
await page.waitForLoadState("networkidle").catch(() => {});
|
||||
}
|
||||
|
||||
async getMarketplaceTitle(page: Page) {
|
||||
@@ -110,24 +109,16 @@ export class MarketplacePage extends BasePage {
|
||||
|
||||
async getFirstFeaturedAgent(page: Page) {
|
||||
const { getId } = getSelectors(page);
|
||||
const card = getId("featured-store-card").first();
|
||||
await card.waitFor({ state: "visible", timeout: 30000 });
|
||||
return card;
|
||||
return getId("featured-store-card").first();
|
||||
}
|
||||
|
||||
async getFirstTopAgent() {
|
||||
const card = this.page
|
||||
.locator('[data-testid="store-card"]:visible')
|
||||
.first();
|
||||
await card.waitFor({ state: "visible", timeout: 30000 });
|
||||
return card;
|
||||
return this.page.locator('[data-testid="store-card"]:visible').first();
|
||||
}
|
||||
|
||||
async getFirstCreatorProfile(page: Page) {
|
||||
const { getId } = getSelectors(page);
|
||||
const card = getId("creator-card").first();
|
||||
await card.waitFor({ state: "visible", timeout: 30000 });
|
||||
return card;
|
||||
return getId("creator-card").first();
|
||||
}
|
||||
|
||||
async getSearchResultsCount(page: Page) {
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
extend-ignore = E203
|
||||
exclude =
|
||||
.tox,
|
||||
__pycache__,
|
||||
*.pyc,
|
||||
.env
|
||||
venv*/*,
|
||||
.venv/*,
|
||||
reports/*,
|
||||
dist/*,
|
||||
data/*,
|
||||
.env,
|
||||
venv*,
|
||||
.venv,
|
||||
reports,
|
||||
dist,
|
||||
data,
|
||||
.benchmark_workspaces,
|
||||
.autogpt,
|
||||
|
||||
291
classic/CLAUDE.md
Normal file
291
classic/CLAUDE.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
AutoGPT Classic is an experimental, **unsupported** project demonstrating autonomous GPT-4 operation. Dependencies will not be updated, and the codebase contains known vulnerabilities. This is preserved for educational/historical purposes.
|
||||
|
||||
## Repository Structure
|
||||
|
||||
```
|
||||
classic/
|
||||
├── pyproject.toml # Single consolidated Poetry project
|
||||
├── poetry.lock # Single lock file
|
||||
├── forge/
|
||||
│ └── forge/ # Core agent framework package
|
||||
├── original_autogpt/
|
||||
│ └── autogpt/ # AutoGPT agent package
|
||||
├── direct_benchmark/
|
||||
│ └── direct_benchmark/ # Benchmark harness package
|
||||
└── benchmark/ # Challenge definitions (data, not code)
|
||||
```
|
||||
|
||||
All packages are managed by a single `pyproject.toml` at the classic/ root.
|
||||
|
||||
## Common Commands
|
||||
|
||||
### Setup & Install
|
||||
```bash
|
||||
# Install everything from classic/ directory
|
||||
cd classic
|
||||
poetry install
|
||||
```
|
||||
|
||||
### Running Agents
|
||||
```bash
|
||||
# Run forge agent
|
||||
poetry run python -m forge
|
||||
|
||||
# Run original autogpt server
|
||||
poetry run serve --debug
|
||||
|
||||
# Run autogpt CLI
|
||||
poetry run autogpt
|
||||
```
|
||||
|
||||
Agents run on `http://localhost:8000` by default.
|
||||
|
||||
### Benchmarking
|
||||
```bash
|
||||
# Run benchmarks
|
||||
poetry run direct-benchmark run
|
||||
|
||||
# Run specific strategies and models
|
||||
poetry run direct-benchmark run \
|
||||
--strategies one_shot,rewoo \
|
||||
--models claude \
|
||||
--parallel 4
|
||||
|
||||
# Run a single test
|
||||
poetry run direct-benchmark run --tests ReadFile
|
||||
|
||||
# List available commands
|
||||
poetry run direct-benchmark --help
|
||||
```
|
||||
|
||||
### Testing
|
||||
```bash
|
||||
poetry run pytest # All tests
|
||||
poetry run pytest forge/tests/ # Forge tests only
|
||||
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
|
||||
poetry run pytest -k test_name # Single test by name
|
||||
poetry run pytest path/to/test.py # Specific test file
|
||||
poetry run pytest --cov # With coverage
|
||||
```
|
||||
|
||||
### Linting & Formatting
|
||||
|
||||
Run from the classic/ directory:
|
||||
|
||||
```bash
|
||||
# Format everything (recommended to run together)
|
||||
poetry run black . && poetry run isort .
|
||||
|
||||
# Check formatting (CI-style, no changes)
|
||||
poetry run black --check . && poetry run isort --check-only .
|
||||
|
||||
# Lint
|
||||
poetry run flake8 # Style linting
|
||||
|
||||
# Type check
|
||||
poetry run pyright # Type checking (some errors are expected in infrastructure code)
|
||||
```
|
||||
|
||||
Note: Always run linters over the entire directory, not specific files, for best results.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Forge (Core Framework)
|
||||
The `forge` package is the foundation that other components depend on:
|
||||
- `forge/agent/` - Agent implementation and protocols
|
||||
- `forge/llm/` - Multi-provider LLM integrations (OpenAI, Anthropic, Groq, LiteLLM)
|
||||
- `forge/components/` - Reusable agent components
|
||||
- `forge/file_storage/` - File system abstraction
|
||||
- `forge/config/` - Configuration management
|
||||
|
||||
### Original AutoGPT
|
||||
- `original_autogpt/autogpt/app/` - CLI application entry points
|
||||
- `original_autogpt/autogpt/agents/` - Agent implementations
|
||||
- `original_autogpt/autogpt/agent_factory/` - Agent creation logic
|
||||
|
||||
### Direct Benchmark
|
||||
Benchmark harness for testing agent performance:
|
||||
- `direct_benchmark/direct_benchmark/` - CLI and harness code
|
||||
- `benchmark/agbenchmark/challenges/` - Test cases organized by category (code, retrieval, data, etc.)
|
||||
- Reports generated in `direct_benchmark/reports/`
|
||||
|
||||
### Package Structure
|
||||
All three packages are included in a single Poetry project. Imports are fully qualified:
|
||||
- `from forge.agent.base import BaseAgent`
|
||||
- `from autogpt.agents.agent import Agent`
|
||||
- `from direct_benchmark.harness import BenchmarkHarness`
|
||||
|
||||
## Code Style
|
||||
|
||||
- Python 3.12 target
|
||||
- Line length: 88 characters (Black default)
|
||||
- Black for formatting, isort for imports (profile="black")
|
||||
- Type hints with Pyright checking
|
||||
|
||||
## Testing Patterns
|
||||
|
||||
- Async support via pytest-asyncio
|
||||
- Fixtures defined in `conftest.py` files provide: `tmp_project_root`, `storage`, `config`, `llm_provider`, `agent`
|
||||
- Tests requiring API keys (OPENAI_API_KEY, ANTHROPIC_API_KEY) will skip if not set
|
||||
|
||||
## Environment Setup
|
||||
|
||||
Copy `.env.example` to `.env` in the relevant directory and add your API keys:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Edit .env with your OPENAI_API_KEY, etc.
|
||||
```
|
||||
|
||||
## Workspaces
|
||||
|
||||
Agents operate within a **workspace** - a directory containing all agent data and files. The workspace root defaults to the current working directory.
|
||||
|
||||
### Workspace Structure
|
||||
|
||||
```
|
||||
{workspace}/
|
||||
├── .autogpt/
|
||||
│ ├── autogpt.yaml # Workspace-level permissions
|
||||
│ ├── ap_server.db # Agent Protocol database (server mode)
|
||||
│ └── agents/
|
||||
│ └── AutoGPT-{agent_id}/
|
||||
│ ├── state.json # Agent profile, directives, action history
|
||||
│ ├── permissions.yaml # Agent-specific permission overrides
|
||||
│ └── workspace/ # Agent's sandboxed working directory
|
||||
```
|
||||
|
||||
### Key Concepts
|
||||
|
||||
- **Multiple agents** can coexist in the same workspace (each gets its own subdirectory)
|
||||
- **File access** is sandboxed to the agent's `workspace/` directory by default
|
||||
- **State persistence** - agent state saves to `state.json` and survives across sessions
|
||||
- **Storage backends** - supports local filesystem, S3, and GCS (via `FILE_STORAGE_BACKEND` env var)
|
||||
|
||||
### Specifying a Workspace
|
||||
|
||||
```bash
|
||||
# Default: uses current directory
|
||||
cd /path/to/my/project && poetry run autogpt
|
||||
|
||||
# Or specify explicitly via CLI (if supported)
|
||||
poetry run autogpt --workspace /path/to/workspace
|
||||
```
|
||||
|
||||
## Settings Location
|
||||
|
||||
Configuration uses a **layered system** with three levels (in order of precedence):
|
||||
|
||||
### 1. Environment Variables (Global)
|
||||
|
||||
Loaded from `.env` file in the working directory:
|
||||
|
||||
```bash
|
||||
# Required
|
||||
OPENAI_API_KEY=sk-...
|
||||
|
||||
# Optional LLM settings
|
||||
SMART_LLM=gpt-4o # Model for complex reasoning
|
||||
FAST_LLM=gpt-4o-mini # Model for simple tasks
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
|
||||
# Optional search providers (for web search component)
|
||||
TAVILY_API_KEY=tvly-...
|
||||
SERPER_API_KEY=...
|
||||
GOOGLE_API_KEY=...
|
||||
GOOGLE_CUSTOM_SEARCH_ENGINE_ID=...
|
||||
|
||||
# Optional infrastructure
|
||||
LOG_LEVEL=DEBUG # DEBUG, INFO, WARNING, ERROR
|
||||
DATABASE_STRING=sqlite:///agent.db # Agent Protocol database
|
||||
PORT=8000 # Server port
|
||||
FILE_STORAGE_BACKEND=local # local, s3, or gcs
|
||||
```
|
||||
|
||||
### 2. Workspace Settings (`{workspace}/.autogpt/autogpt.yaml`)
|
||||
|
||||
Workspace-wide permissions that apply to **all agents** in this workspace:
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- read_file({workspace}/**)
|
||||
- write_to_file({workspace}/**)
|
||||
- list_folder({workspace}/**)
|
||||
- web_search(*)
|
||||
|
||||
deny:
|
||||
- read_file(**.env)
|
||||
- read_file(**.env.*)
|
||||
- read_file(**.key)
|
||||
- read_file(**.pem)
|
||||
- execute_shell(rm -rf:*)
|
||||
- execute_shell(sudo:*)
|
||||
```
|
||||
|
||||
Auto-generated with sensible defaults if missing.
|
||||
|
||||
### 3. Agent Settings (`{workspace}/.autogpt/agents/{id}/permissions.yaml`)
|
||||
|
||||
Agent-specific permission overrides:
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- execute_python(*)
|
||||
- web_search(*)
|
||||
|
||||
deny:
|
||||
- execute_shell(*)
|
||||
```
|
||||
|
||||
## Permissions
|
||||
|
||||
The permission system uses **pattern matching** with a **first-match-wins** evaluation order.
|
||||
|
||||
### Permission Check Order
|
||||
|
||||
1. Agent deny list → **Block**
|
||||
2. Workspace deny list → **Block**
|
||||
3. Agent allow list → **Allow**
|
||||
4. Workspace allow list → **Allow**
|
||||
5. Session denied list → **Block** (commands denied during this session)
|
||||
6. **Prompt user** → Interactive approval (if in interactive mode)
|
||||
|
||||
### Pattern Syntax
|
||||
|
||||
Format: `command_name(glob_pattern)`
|
||||
|
||||
| Pattern | Description |
|
||||
|---------|-------------|
|
||||
| `read_file({workspace}/**)` | Read any file in workspace (recursive) |
|
||||
| `write_to_file({workspace}/*.txt)` | Write only .txt files in workspace root |
|
||||
| `execute_shell(python:**)` | Execute Python commands only |
|
||||
| `execute_shell(git:*)` | Execute any git command |
|
||||
| `web_search(*)` | Allow all web searches |
|
||||
|
||||
Special tokens:
|
||||
- `{workspace}` - Replaced with actual workspace path
|
||||
- `**` - Matches any path including `/`
|
||||
- `*` - Matches any characters except `/`
|
||||
|
||||
### Interactive Approval Scopes
|
||||
|
||||
When prompted for permission, users can choose:
|
||||
|
||||
| Scope | Effect |
|
||||
|-------|--------|
|
||||
| **Once** | Allow this one time only (not saved) |
|
||||
| **Agent** | Always allow for this agent (saves to agent `permissions.yaml`) |
|
||||
| **Workspace** | Always allow for all agents (saves to `autogpt.yaml`) |
|
||||
| **Deny** | Deny this command (saves to appropriate deny list) |
|
||||
|
||||
### Default Security
|
||||
|
||||
Out of the box, the following are **denied by default**:
|
||||
- Reading sensitive files (`.env`, `.key`, `.pem`)
|
||||
- Destructive shell commands (`rm -rf`, `sudo`)
|
||||
- Operations outside the workspace directory
|
||||
@@ -1,182 +0,0 @@
|
||||
## CLI Documentation
|
||||
|
||||
This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000.
|
||||
|
||||
### 1. Entry Point for the CLI
|
||||
|
||||
Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
|
||||
|
||||
```sh
|
||||
./run
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
agent Commands to create, start and stop agents
|
||||
benchmark Commands to start the benchmark and list tests and categories
|
||||
setup Installs dependencies needed for your system.
|
||||
```
|
||||
|
||||
If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
|
||||
|
||||
```sh
|
||||
./run COMMAND --help
|
||||
```
|
||||
|
||||
This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
|
||||
|
||||
### 2. Setup Command
|
||||
|
||||
```sh
|
||||
./run setup
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Setup initiated
|
||||
Installation has been completed.
|
||||
```
|
||||
|
||||
This command initializes the setup of the project.
|
||||
|
||||
### 3. Agents Commands
|
||||
|
||||
**a. List All Agents**
|
||||
|
||||
```sh
|
||||
./run agent list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available agents: 🤖
|
||||
🐙 forge
|
||||
🐙 autogpt
|
||||
```
|
||||
|
||||
Lists all the available agents.
|
||||
|
||||
**b. Create a New Agent**
|
||||
|
||||
```sh
|
||||
./run agent create my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
|
||||
```
|
||||
|
||||
Creates a new agent named 'my_agent'.
|
||||
|
||||
**c. Start an Agent**
|
||||
|
||||
```sh
|
||||
./run agent start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
... (ASCII Art representing the agent startup)
|
||||
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
|
||||
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
|
||||
```
|
||||
|
||||
Starts the 'my_agent' and displays startup ASCII art and logs.
|
||||
|
||||
**d. Stop an Agent**
|
||||
|
||||
```sh
|
||||
./run agent stop
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Agent stopped
|
||||
```
|
||||
|
||||
Stops the running agent.
|
||||
|
||||
### 4. Benchmark Commands
|
||||
|
||||
**a. List Benchmark Categories**
|
||||
|
||||
```sh
|
||||
./run benchmark categories list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available categories: 📚
|
||||
📖 code
|
||||
📖 safety
|
||||
📖 memory
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark categories.
|
||||
|
||||
**b. List Benchmark Tests**
|
||||
|
||||
```sh
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available tests: 📚
|
||||
📖 interface
|
||||
🔬 Search - TestSearch
|
||||
🔬 Write File - TestWriteFile
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark tests.
|
||||
|
||||
**c. Show Details of a Benchmark Test**
|
||||
|
||||
```sh
|
||||
./run benchmark tests details TestWriteFile
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
TestWriteFile
|
||||
-------------
|
||||
|
||||
Category: interface
|
||||
Task: Write the word 'Washington' to a .txt file
|
||||
... (and other details)
|
||||
```
|
||||
|
||||
Displays the details of the 'TestWriteFile' benchmark test.
|
||||
|
||||
**d. Start Benchmark for the Agent**
|
||||
|
||||
```sh
|
||||
./run benchmark start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
(more details about the testing process shown whilst the test are running)
|
||||
============= 13 failed, 1 passed in 0.97s ============...
|
||||
```
|
||||
|
||||
Displays the results of the benchmark tests on 'my_agent'.
|
||||
@@ -2,7 +2,7 @@
|
||||
ARG BUILD_TYPE=dev
|
||||
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.10-slim AS autogpt-base
|
||||
FROM python:3.12-slim AS autogpt-base
|
||||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
@@ -34,9 +34,6 @@ COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
|
||||
# Include forge so it can be used as a path dependency
|
||||
COPY forge/ ../forge
|
||||
|
||||
# Include frontend
|
||||
COPY frontend/ ../frontend
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["poetry", "run", "autogpt"]
|
||||
CMD []
|
||||
|
||||
@@ -1,173 +0,0 @@
|
||||
# Quickstart Guide
|
||||
|
||||
> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here
|
||||
|
||||
Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
|
||||
|
||||
## System Requirements
|
||||
|
||||
This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
|
||||
|
||||
|
||||
## Getting Setup
|
||||
1. **Fork the Repository**
|
||||
To fork the repository, follow these steps:
|
||||
- Navigate to the main page of the repository.
|
||||
|
||||

|
||||
- In the top-right corner of the page, click Fork.
|
||||
|
||||

|
||||
- On the next page, select your GitHub account to create the fork.
|
||||
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
|
||||
|
||||
2. **Clone the Repository**
|
||||
To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
|
||||
- Open your terminal.
|
||||
- Navigate to the directory where you want to clone the repository.
|
||||
- Run the git clone command for the fork you just created
|
||||
|
||||

|
||||
|
||||
- Then open your project in your ide
|
||||
|
||||

|
||||
|
||||
4. **Setup the Project**
|
||||
Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
|
||||
It can be accessed by running the `run` command by typing `./run` in the terminal.
|
||||
|
||||
The first command you need to use is `./run setup.` This will guide you through setting up your system.
|
||||
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
|
||||
|
||||

|
||||
|
||||
### For Windows Users
|
||||
|
||||
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
|
||||
|
||||
#### Update WSL
|
||||
Run the following command in Powershell or Command Prompt:
|
||||
1. Enable the optional WSL and Virtual Machine Platform components.
|
||||
2. Download and install the latest Linux kernel.
|
||||
3. Set WSL 2 as the default.
|
||||
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
|
||||
|
||||
```shell
|
||||
wsl --install
|
||||
```
|
||||
|
||||
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
|
||||
|
||||
#### Resolve FileNotFoundError or "No such file or directory" Errors
|
||||
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
|
||||
|
||||
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script:
|
||||
|
||||
```shell
|
||||
sudo apt update
|
||||
sudo apt install dos2unix
|
||||
dos2unix ./run
|
||||
```
|
||||
|
||||
After executing the above commands, running `./run setup` should work successfully.
|
||||
|
||||
#### Store Project Files within the WSL File System
|
||||
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
|
||||
|
||||
You can keep running the command to get feedback on where you are up to with your setup.
|
||||
When setup has been completed, the command will return an output like this:
|
||||
|
||||

|
||||
|
||||
## Creating Your Agent
|
||||
|
||||
After completing the setup, the next step is to create your agent template.
|
||||
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
|
||||
|
||||
Tips for naming your agent:
|
||||
* Give it its own unique name, or name it after yourself
|
||||
* Include an important aspect of your agent in the name, such as its purpose
|
||||
|
||||
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
|
||||
|
||||

|
||||
|
||||
## Running your Agent
|
||||
|
||||
Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
|
||||
|
||||
This starts the agent on the URL: `http://localhost:8000/`
|
||||
|
||||

|
||||
|
||||
The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
|
||||
|
||||

|
||||
|
||||
Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
|
||||
|
||||

|
||||
|
||||
When you have finished with your agent or just need to restart it, use Ctl-C to end the session. Then, you can re-run the start command.
|
||||
|
||||
If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
|
||||
|
||||
## Benchmarking your Agent
|
||||
|
||||
The benchmarking system can also be accessed using the CLI too:
|
||||
|
||||
```bash
|
||||
agpt % ./run benchmark
|
||||
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Commands to start the benchmark and list tests and categories
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
categories Benchmark categories group command
|
||||
start Starts the benchmark command
|
||||
tests Benchmark tests group command
|
||||
agpt % ./run benchmark categories
|
||||
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark categories group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
list List benchmark categories command
|
||||
agpt % ./run benchmark tests
|
||||
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark tests group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
details Benchmark test details command
|
||||
list List benchmark tests command
|
||||
```
|
||||
|
||||
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
|
||||
```bash
|
||||
./run benchmark categories list
|
||||
# And what tests are available with
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||

|
||||
|
||||
|
||||
Finally, you can run the benchmark with
|
||||
|
||||
```bash
|
||||
./run benchmark start YOUR_AGENT_NAME
|
||||
|
||||
```
|
||||
|
||||
>
|
||||
@@ -4,7 +4,7 @@ AutoGPT Classic was an experimental project to demonstrate autonomous GPT-4 oper
|
||||
|
||||
## Project Status
|
||||
|
||||
⚠️ **This project is unsupported, and dependencies will not be updated. It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform)**
|
||||
**This project is unsupported, and dependencies will not be updated.** It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform).
|
||||
|
||||
For those interested in autonomous AI agents, we recommend exploring more actively maintained alternatives or referring to this codebase for educational purposes only.
|
||||
|
||||
@@ -16,37 +16,171 @@ AutoGPT Classic was one of the first implementations of autonomous AI agents - A
|
||||
- Learn from the results and adjust its approach
|
||||
- Chain multiple actions together to achieve an objective
|
||||
|
||||
## Key Features
|
||||
|
||||
- 🔄 Autonomous task chaining
|
||||
- 🛠 Tool and API integration capabilities
|
||||
- 💾 Memory management for context retention
|
||||
- 🔍 Web browsing and information gathering
|
||||
- 📝 File operations and content creation
|
||||
- 🔄 Self-prompting and task breakdown
|
||||
|
||||
## Structure
|
||||
|
||||
The project is organized into several key components:
|
||||
- `/benchmark` - Performance testing tools
|
||||
- `/forge` - Core autonomous agent framework
|
||||
- `/frontend` - User interface components
|
||||
- `/original_autogpt` - Original implementation
|
||||
```
|
||||
classic/
|
||||
├── pyproject.toml # Single consolidated Poetry project
|
||||
├── poetry.lock # Single lock file
|
||||
├── forge/ # Core autonomous agent framework
|
||||
├── original_autogpt/ # Original implementation
|
||||
├── direct_benchmark/ # Benchmark harness
|
||||
└── benchmark/ # Challenge definitions (data)
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
While this project is no longer actively maintained, you can still explore the codebase:
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.12+
|
||||
- [Poetry](https://python-poetry.org/docs/#installation)
|
||||
|
||||
### Installation
|
||||
|
||||
1. Clone the repository:
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/Significant-Gravitas/AutoGPT.git
|
||||
cd classic
|
||||
|
||||
# Install everything
|
||||
poetry install
|
||||
```
|
||||
|
||||
2. Review the documentation:
|
||||
- For reference, see the [documentation](https://docs.agpt.co). You can browse at the same point in time as this commit so the docs don't change.
|
||||
- Check `CLI-USAGE.md` for command-line interface details
|
||||
- Refer to `TROUBLESHOOTING.md` for common issues
|
||||
### Configuration
|
||||
|
||||
Configuration uses a layered system:
|
||||
|
||||
1. **Environment variables** (`.env` file)
|
||||
2. **Workspace settings** (`.autogpt/autogpt.yaml`)
|
||||
3. **Agent settings** (`.autogpt/agents/{id}/permissions.yaml`)
|
||||
|
||||
Copy the example environment file and add your API keys:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
Key environment variables:
|
||||
```bash
|
||||
# Required
|
||||
OPENAI_API_KEY=sk-...
|
||||
|
||||
# Optional LLM settings
|
||||
SMART_LLM=gpt-4o # Model for complex reasoning
|
||||
FAST_LLM=gpt-4o-mini # Model for simple tasks
|
||||
|
||||
# Optional search providers
|
||||
TAVILY_API_KEY=tvly-...
|
||||
SERPER_API_KEY=...
|
||||
|
||||
# Optional infrastructure
|
||||
LOG_LEVEL=DEBUG
|
||||
PORT=8000
|
||||
FILE_STORAGE_BACKEND=local # local, s3, or gcs
|
||||
```
|
||||
|
||||
### Running
|
||||
|
||||
All commands run from the `classic/` directory:
|
||||
|
||||
```bash
|
||||
# Run forge agent
|
||||
poetry run python -m forge
|
||||
|
||||
# Run original autogpt server
|
||||
poetry run serve --debug
|
||||
|
||||
# Run autogpt CLI
|
||||
poetry run autogpt
|
||||
```
|
||||
|
||||
Agents run on `http://localhost:8000` by default.
|
||||
|
||||
### Benchmarking
|
||||
|
||||
```bash
|
||||
poetry run direct-benchmark run
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
poetry run pytest # All tests
|
||||
poetry run pytest forge/tests/ # Forge tests only
|
||||
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
|
||||
```
|
||||
|
||||
## Workspaces
|
||||
|
||||
Agents operate within a **workspace** directory that contains all agent data and files:
|
||||
|
||||
```
|
||||
{workspace}/
|
||||
├── .autogpt/
|
||||
│ ├── autogpt.yaml # Workspace-level permissions
|
||||
│ ├── ap_server.db # Agent Protocol database (server mode)
|
||||
│ └── agents/
|
||||
│ └── AutoGPT-{agent_id}/
|
||||
│ ├── state.json # Agent profile, directives, history
|
||||
│ ├── permissions.yaml # Agent-specific permissions
|
||||
│ └── workspace/ # Agent's sandboxed working directory
|
||||
```
|
||||
|
||||
- The workspace defaults to the current working directory
|
||||
- Multiple agents can coexist in the same workspace
|
||||
- Agent file access is sandboxed to their `workspace/` subdirectory
|
||||
- State persists across sessions via `state.json`
|
||||
|
||||
## Permissions
|
||||
|
||||
AutoGPT uses a **layered permission system** with pattern matching:
|
||||
|
||||
### Permission Files
|
||||
|
||||
| File | Scope | Location |
|
||||
|------|-------|----------|
|
||||
| `autogpt.yaml` | All agents in workspace | `.autogpt/autogpt.yaml` |
|
||||
| `permissions.yaml` | Single agent | `.autogpt/agents/{id}/permissions.yaml` |
|
||||
|
||||
### Permission Format
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- read_file({workspace}/**) # Read any file in workspace
|
||||
- write_to_file({workspace}/**) # Write any file in workspace
|
||||
- web_search(*) # All web searches
|
||||
|
||||
deny:
|
||||
- read_file(**.env) # Block .env files
|
||||
- execute_shell(sudo:*) # Block sudo commands
|
||||
```
|
||||
|
||||
### Check Order (First Match Wins)
|
||||
|
||||
1. Agent deny → Block
|
||||
2. Workspace deny → Block
|
||||
3. Agent allow → Allow
|
||||
4. Workspace allow → Allow
|
||||
5. Prompt user → Interactive approval
|
||||
|
||||
### Interactive Approval
|
||||
|
||||
When prompted, users can approve commands with different scopes:
|
||||
- **Once** - Allow this one time only
|
||||
- **Agent** - Always allow for this agent
|
||||
- **Workspace** - Always allow for all agents
|
||||
- **Deny** - Block this command
|
||||
|
||||
### Default Security
|
||||
|
||||
Denied by default:
|
||||
- Sensitive files (`.env`, `.key`, `.pem`)
|
||||
- Destructive commands (`rm -rf`, `sudo`)
|
||||
- Operations outside the workspace
|
||||
|
||||
## Security Notice
|
||||
|
||||
This codebase has **known vulnerabilities** and issues with its dependencies. It will not be updated to new dependencies. Use for educational purposes only.
|
||||
|
||||
## License
|
||||
|
||||
@@ -55,27 +189,3 @@ This project segment is licensed under the MIT License - see the [LICENSE](LICEN
|
||||
## Documentation
|
||||
|
||||
Please refer to the [documentation](https://docs.agpt.co) for more detailed information about the project's architecture and concepts.
|
||||
You can browse at the same point in time as this commit so the docs don't change.
|
||||
|
||||
## Historical Impact
|
||||
|
||||
AutoGPT Classic played a significant role in advancing the field of autonomous AI agents:
|
||||
- Demonstrated practical implementation of AI autonomy
|
||||
- Inspired numerous derivative projects and research
|
||||
- Contributed to the development of AI agent architectures
|
||||
- Helped identify key challenges in AI autonomy
|
||||
|
||||
## Security Notice
|
||||
|
||||
If you're studying this codebase, please understand this has KNOWN vulnerabilities and issues with its dependencies. It will not be updated to new dependencies.
|
||||
|
||||
## Community & Support
|
||||
|
||||
While active development has concluded:
|
||||
- The codebase remains available for study and reference
|
||||
- Historical discussions can be found in project issues
|
||||
- Related research and developments continue in the broader AI agent community
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
Thanks to all contributors who participated in this experimental project and helped advance the field of autonomous AI agents.
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
AGENT_NAME=mini-agi
|
||||
REPORTS_FOLDER="reports/mini-agi"
|
||||
OPENAI_API_KEY="sk-" # for LLM eval
|
||||
BUILD_SKILL_TREE=false # set to true to build the skill tree.
|
||||
@@ -1,12 +0,0 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
# Ignore rules that conflict with Black code style
|
||||
extend-ignore = E203, W503
|
||||
exclude =
|
||||
__pycache__/,
|
||||
*.pyc,
|
||||
.pytest_cache/,
|
||||
venv*/,
|
||||
.venv/,
|
||||
reports/,
|
||||
agbenchmark/reports/,
|
||||
174
classic/benchmark/.gitignore
vendored
174
classic/benchmark/.gitignore
vendored
@@ -1,174 +0,0 @@
|
||||
agbenchmark_config/workspace/
|
||||
backend/backend_stdout.txt
|
||||
reports/df*.pkl
|
||||
reports/raw*
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
.idea/
|
||||
.DS_Store
|
||||
```
|
||||
secrets.json
|
||||
agbenchmark_config/challenges_already_beaten.json
|
||||
agbenchmark_config/challenges/pri_*
|
||||
agbenchmark_config/updates.json
|
||||
agbenchmark_config/reports/*
|
||||
agbenchmark_config/reports/success_rate.json
|
||||
agbenchmark_config/reports/regression_tests.json
|
||||
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 AutoGPT
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,25 +0,0 @@
|
||||
# Auto-GPT Benchmarks
|
||||
|
||||
Built for the purpose of benchmarking the performance of agents regardless of how they work.
|
||||
|
||||
Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.
|
||||
|
||||
Save time and money while doing it through smart dependencies. The best part? It's all automated.
|
||||
|
||||
## Scores:
|
||||
|
||||
<img width="733" alt="Screenshot 2023-07-25 at 10 35 01 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/98963e0b-18b9-4b17-9a6a-4d3e4418af70">
|
||||
|
||||
## Ranking overall:
|
||||
|
||||
- 1- [Beebot](https://github.com/AutoPackAI/beebot)
|
||||
- 2- [mini-agi](https://github.com/muellerberndt/mini-agi)
|
||||
- 3- [Auto-GPT](https://github.com/Significant-Gravitas/AutoGPT)
|
||||
|
||||
## Detailed results:
|
||||
|
||||
<img width="733" alt="Screenshot 2023-07-25 at 10 42 15 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/39be464c-c842-4437-b28a-07d878542a83">
|
||||
|
||||
[Click here to see the results and the raw data!](https://docs.google.com/spreadsheets/d/1WXm16P2AHNbKpkOI0LYBpcsGG0O7D8HYTG5Uj0PaJjA/edit#gid=203558751)!
|
||||
|
||||
More agents coming soon !
|
||||
@@ -1,69 +0,0 @@
|
||||
## As a user
|
||||
|
||||
1. `pip install auto-gpt-benchmarks`
|
||||
2. Add boilerplate code to run and kill agent
|
||||
3. `agbenchmark`
|
||||
- `--category challenge_category` to run tests in a specific category
|
||||
- `--mock` to only run mock tests if they exists for each test
|
||||
- `--noreg` to skip any tests that have passed in the past. When you run without this flag and a previous challenge that passed fails, it will now not be regression tests
|
||||
4. We call boilerplate code for your agent
|
||||
5. Show pass rate of tests, logs, and any other metrics
|
||||
|
||||
## Contributing
|
||||
|
||||
##### Diagrams: https://whimsical.com/agbenchmark-5n4hXBq1ZGzBwRsK4TVY7x
|
||||
|
||||
### To run the existing mocks
|
||||
|
||||
1. clone the repo `auto-gpt-benchmarks`
|
||||
2. `pip install poetry`
|
||||
3. `poetry shell`
|
||||
4. `poetry install`
|
||||
5. `cp .env_example .env`
|
||||
6. `git submodule update --init --remote --recursive`
|
||||
7. `uvicorn server:app --reload`
|
||||
8. `agbenchmark --mock`
|
||||
Keep config the same and watch the logs :)
|
||||
|
||||
### To run with mini-agi
|
||||
|
||||
1. Navigate to `auto-gpt-benchmarks/agent/mini-agi`
|
||||
2. `pip install -r requirements.txt`
|
||||
3. `cp .env_example .env`, set `PROMPT_USER=false` and add your `OPENAI_API_KEY=`. Sset `MODEL="gpt-3.5-turbo"` if you don't have access to `gpt-4` yet. Also make sure you have Python 3.10^ installed
|
||||
4. set `AGENT_NAME=mini-agi` in `.env` file and where you want your `REPORTS_FOLDER` to be
|
||||
5. Make sure to follow the commands above, and remove mock flag `agbenchmark`
|
||||
|
||||
- To add requirements `poetry add requirement`.
|
||||
|
||||
Feel free to create prs to merge with `main` at will (but also feel free to ask for review) - if you can't send msg in R&D chat for access.
|
||||
|
||||
If you push at any point and break things - it'll happen to everyone - fix it asap. Step 1 is to revert `master` to last working commit
|
||||
|
||||
Let people know what beautiful code you write does, document everything well
|
||||
|
||||
Share your progress :)
|
||||
|
||||
#### Dataset
|
||||
|
||||
Manually created, existing challenges within Auto-Gpt, https://osu-nlp-group.github.io/Mind2Web/
|
||||
|
||||
## How do I add new agents to agbenchmark ?
|
||||
|
||||
Example with smol developer.
|
||||
|
||||
1- Create a github branch with your agent following the same pattern as this example:
|
||||
|
||||
https://github.com/smol-ai/developer/pull/114/files
|
||||
|
||||
2- Create the submodule and the github workflow by following the same pattern as this example:
|
||||
|
||||
https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/pull/48/files
|
||||
|
||||
## How do I run agent in different environments?
|
||||
|
||||
**To just use as the benchmark for your agent**. `pip install` the package and run `agbenchmark`
|
||||
|
||||
**For internal Auto-GPT ci runs**, specify the `AGENT_NAME` you want you use and set the `HOME_ENV`.
|
||||
Ex. `AGENT_NAME=mini-agi`
|
||||
|
||||
**To develop agent alongside benchmark**, you can specify the `AGENT_NAME` you want you use and add as a submodule to the repo
|
||||
@@ -1,352 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
import click
|
||||
from click_default_group import DefaultGroup
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.utils.logging import configure_logging
|
||||
|
||||
load_dotenv()
|
||||
|
||||
# try:
|
||||
# if os.getenv("HELICONE_API_KEY"):
|
||||
# import helicone # noqa
|
||||
|
||||
# helicone_enabled = True
|
||||
# else:
|
||||
# helicone_enabled = False
|
||||
# except ImportError:
|
||||
# helicone_enabled = False
|
||||
|
||||
|
||||
class InvalidInvocationError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BENCHMARK_START_TIME_DT = datetime.now(timezone.utc)
|
||||
BENCHMARK_START_TIME = BENCHMARK_START_TIME_DT.strftime("%Y-%m-%dT%H:%M:%S+00:00")
|
||||
|
||||
|
||||
# if helicone_enabled:
|
||||
# from helicone.lock import HeliconeLockManager
|
||||
|
||||
# HeliconeLockManager.write_custom_property(
|
||||
# "benchmark_start_time", BENCHMARK_START_TIME
|
||||
# )
|
||||
|
||||
|
||||
@click.group(cls=DefaultGroup, default_if_no_args=True)
|
||||
@click.option("--debug", is_flag=True, help="Enable debug output")
|
||||
def cli(
|
||||
debug: bool,
|
||||
) -> Any:
|
||||
configure_logging(logging.DEBUG if debug else logging.INFO)
|
||||
|
||||
|
||||
@cli.command(hidden=True)
|
||||
def start():
|
||||
raise DeprecationWarning(
|
||||
"`agbenchmark start` is deprecated. Use `agbenchmark run` instead."
|
||||
)
|
||||
|
||||
|
||||
@cli.command(default=True)
|
||||
@click.option(
|
||||
"-N", "--attempts", default=1, help="Number of times to run each challenge."
|
||||
)
|
||||
@click.option(
|
||||
"-c",
|
||||
"--category",
|
||||
multiple=True,
|
||||
help="(+) Select a category to run.",
|
||||
)
|
||||
@click.option(
|
||||
"-s",
|
||||
"--skip-category",
|
||||
multiple=True,
|
||||
help="(+) Exclude a category from running.",
|
||||
)
|
||||
@click.option("--test", multiple=True, help="(+) Select a test to run.")
|
||||
@click.option("--maintain", is_flag=True, help="Run only regression tests.")
|
||||
@click.option("--improve", is_flag=True, help="Run only non-regression tests.")
|
||||
@click.option(
|
||||
"--explore",
|
||||
is_flag=True,
|
||||
help="Run only challenges that have never been beaten.",
|
||||
)
|
||||
@click.option(
|
||||
"--no-dep",
|
||||
is_flag=True,
|
||||
help="Run all (selected) challenges, regardless of dependency success/failure.",
|
||||
)
|
||||
@click.option("--cutoff", type=int, help="Override the challenge time limit (seconds).")
|
||||
@click.option("--nc", is_flag=True, help="Disable the challenge time limit.")
|
||||
@click.option("--mock", is_flag=True, help="Run with mock")
|
||||
@click.option("--keep-answers", is_flag=True, help="Keep answers")
|
||||
@click.option(
|
||||
"--backend",
|
||||
is_flag=True,
|
||||
help="Write log output to a file instead of the terminal.",
|
||||
)
|
||||
# @click.argument(
|
||||
# "agent_path",
|
||||
# type=click.Path(exists=True, file_okay=False, path_type=Path),
|
||||
# required=False,
|
||||
# )
|
||||
def run(
|
||||
maintain: bool,
|
||||
improve: bool,
|
||||
explore: bool,
|
||||
mock: bool,
|
||||
no_dep: bool,
|
||||
nc: bool,
|
||||
keep_answers: bool,
|
||||
test: tuple[str],
|
||||
category: tuple[str],
|
||||
skip_category: tuple[str],
|
||||
attempts: int,
|
||||
cutoff: Optional[int] = None,
|
||||
backend: Optional[bool] = False,
|
||||
# agent_path: Optional[Path] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Run the benchmark on the agent in the current directory.
|
||||
|
||||
Options marked with (+) can be specified multiple times, to select multiple items.
|
||||
"""
|
||||
from agbenchmark.main import run_benchmark, validate_args
|
||||
|
||||
agbenchmark_config = AgentBenchmarkConfig.load()
|
||||
logger.debug(f"agbenchmark_config: {agbenchmark_config.agbenchmark_config_dir}")
|
||||
try:
|
||||
validate_args(
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
no_cutoff=nc,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
except InvalidInvocationError as e:
|
||||
logger.error("Error: " + "\n".join(e.args))
|
||||
sys.exit(1)
|
||||
|
||||
original_stdout = sys.stdout # Save the original standard output
|
||||
exit_code = None
|
||||
|
||||
if backend:
|
||||
with open("backend/backend_stdout.txt", "w") as f:
|
||||
sys.stdout = f
|
||||
exit_code = run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
mock=mock,
|
||||
no_dep=no_dep,
|
||||
no_cutoff=nc,
|
||||
keep_answers=keep_answers,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
attempts_per_challenge=attempts,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
|
||||
sys.stdout = original_stdout
|
||||
|
||||
else:
|
||||
exit_code = run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
mock=mock,
|
||||
no_dep=no_dep,
|
||||
no_cutoff=nc,
|
||||
keep_answers=keep_answers,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
attempts_per_challenge=attempts,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--port", type=int, help="Port to run the API on.")
|
||||
def serve(port: Optional[int] = None):
|
||||
"""Serve the benchmark frontend and API on port 8080."""
|
||||
import uvicorn
|
||||
|
||||
from agbenchmark.app import setup_fastapi_app
|
||||
|
||||
config = AgentBenchmarkConfig.load()
|
||||
app = setup_fastapi_app(config)
|
||||
|
||||
# Run the FastAPI application using uvicorn
|
||||
port = port or int(os.getenv("PORT", 8080))
|
||||
uvicorn.run(app, host="0.0.0.0", port=port)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def config():
|
||||
"""Displays info regarding the present AGBenchmark config."""
|
||||
from .utils.utils import pretty_print_model
|
||||
|
||||
try:
|
||||
config = AgentBenchmarkConfig.load()
|
||||
except FileNotFoundError as e:
|
||||
click.echo(e, err=True)
|
||||
return 1
|
||||
|
||||
pretty_print_model(config, include_header=False)
|
||||
|
||||
|
||||
@cli.group()
|
||||
def challenge():
|
||||
logging.getLogger().setLevel(logging.WARNING)
|
||||
|
||||
|
||||
@challenge.command("list")
|
||||
@click.option(
|
||||
"--all", "include_unavailable", is_flag=True, help="Include unavailable challenges."
|
||||
)
|
||||
@click.option(
|
||||
"--names", "only_names", is_flag=True, help="List only the challenge names."
|
||||
)
|
||||
@click.option("--json", "output_json", is_flag=True)
|
||||
def list_challenges(include_unavailable: bool, only_names: bool, output_json: bool):
|
||||
"""Lists [available|all] challenges."""
|
||||
import json
|
||||
|
||||
from tabulate import tabulate
|
||||
|
||||
from .challenges.builtin import load_builtin_challenges
|
||||
from .challenges.webarena import load_webarena_challenges
|
||||
from .utils.data_types import Category, DifficultyLevel
|
||||
from .utils.utils import sorted_by_enum_index
|
||||
|
||||
DIFFICULTY_COLORS = {
|
||||
difficulty: color
|
||||
for difficulty, color in zip(
|
||||
DifficultyLevel,
|
||||
["black", "blue", "cyan", "green", "yellow", "red", "magenta", "white"],
|
||||
)
|
||||
}
|
||||
CATEGORY_COLORS = {
|
||||
category: f"bright_{color}"
|
||||
for category, color in zip(
|
||||
Category,
|
||||
["blue", "cyan", "green", "yellow", "magenta", "red", "white", "black"],
|
||||
)
|
||||
}
|
||||
|
||||
# Load challenges
|
||||
challenges = filter(
|
||||
lambda c: c.info.available or include_unavailable,
|
||||
[
|
||||
*load_builtin_challenges(),
|
||||
*load_webarena_challenges(skip_unavailable=False),
|
||||
],
|
||||
)
|
||||
challenges = sorted_by_enum_index(
|
||||
challenges, DifficultyLevel, key=lambda c: c.info.difficulty
|
||||
)
|
||||
|
||||
if only_names:
|
||||
if output_json:
|
||||
click.echo(json.dumps([c.info.name for c in challenges]))
|
||||
return
|
||||
|
||||
for c in challenges:
|
||||
click.echo(
|
||||
click.style(c.info.name, fg=None if c.info.available else "black")
|
||||
)
|
||||
return
|
||||
|
||||
if output_json:
|
||||
click.echo(
|
||||
json.dumps([json.loads(c.info.model_dump_json()) for c in challenges])
|
||||
)
|
||||
return
|
||||
|
||||
headers = tuple(
|
||||
click.style(h, bold=True) for h in ("Name", "Difficulty", "Categories")
|
||||
)
|
||||
table = [
|
||||
tuple(
|
||||
v if challenge.info.available else click.style(v, fg="black")
|
||||
for v in (
|
||||
challenge.info.name,
|
||||
(
|
||||
click.style(
|
||||
challenge.info.difficulty.value,
|
||||
fg=DIFFICULTY_COLORS[challenge.info.difficulty],
|
||||
)
|
||||
if challenge.info.difficulty
|
||||
else click.style("-", fg="black")
|
||||
),
|
||||
" ".join(
|
||||
click.style(cat.value, fg=CATEGORY_COLORS[cat])
|
||||
for cat in sorted_by_enum_index(challenge.info.category, Category)
|
||||
),
|
||||
)
|
||||
)
|
||||
for challenge in challenges
|
||||
]
|
||||
click.echo(tabulate(table, headers=headers))
|
||||
|
||||
|
||||
@challenge.command()
|
||||
@click.option("--json", is_flag=True)
|
||||
@click.argument("name")
|
||||
def info(name: str, json: bool):
|
||||
from itertools import chain
|
||||
|
||||
from .challenges.builtin import load_builtin_challenges
|
||||
from .challenges.webarena import load_webarena_challenges
|
||||
from .utils.utils import pretty_print_model
|
||||
|
||||
for challenge in chain(
|
||||
load_builtin_challenges(),
|
||||
load_webarena_challenges(skip_unavailable=False),
|
||||
):
|
||||
if challenge.info.name != name:
|
||||
continue
|
||||
|
||||
if json:
|
||||
click.echo(challenge.info.model_dump_json())
|
||||
break
|
||||
|
||||
pretty_print_model(challenge.info)
|
||||
break
|
||||
else:
|
||||
click.echo(click.style(f"Unknown challenge '{name}'", fg="red"), err=True)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def version():
|
||||
"""Print version info for the AGBenchmark application."""
|
||||
import toml
|
||||
|
||||
package_root = Path(__file__).resolve().parent.parent
|
||||
pyproject = toml.load(package_root / "pyproject.toml")
|
||||
version = pyproject["tool"]["poetry"]["version"]
|
||||
click.echo(f"AGBenchmark version {version}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
@@ -1,111 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import AsyncIterator, Optional
|
||||
|
||||
from agent_protocol_client import (
|
||||
AgentApi,
|
||||
ApiClient,
|
||||
Configuration,
|
||||
Step,
|
||||
TaskRequestBody,
|
||||
)
|
||||
|
||||
from agbenchmark.agent_interface import get_list_of_file_paths
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def run_api_agent(
|
||||
task: str,
|
||||
config: AgentBenchmarkConfig,
|
||||
timeout: int,
|
||||
artifacts_location: Optional[Path] = None,
|
||||
*,
|
||||
mock: bool = False,
|
||||
) -> AsyncIterator[Step]:
|
||||
configuration = Configuration(host=config.host)
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
task_request_body = TaskRequestBody(input=task, additional_input=None)
|
||||
|
||||
start_time = time.time()
|
||||
response = await api_instance.create_agent_task(
|
||||
task_request_body=task_request_body
|
||||
)
|
||||
task_id = response.task_id
|
||||
|
||||
if artifacts_location:
|
||||
logger.debug("Uploading task input artifacts to agent...")
|
||||
await upload_artifacts(
|
||||
api_instance, artifacts_location, task_id, "artifacts_in"
|
||||
)
|
||||
|
||||
logger.debug("Running agent until finished or timeout...")
|
||||
while True:
|
||||
step = await api_instance.execute_agent_task_step(task_id=task_id)
|
||||
yield step
|
||||
|
||||
if time.time() - start_time > timeout:
|
||||
raise TimeoutError("Time limit exceeded")
|
||||
if step and mock:
|
||||
step.is_last = True
|
||||
if not step or step.is_last:
|
||||
break
|
||||
|
||||
if artifacts_location:
|
||||
# In "mock" mode, we cheat by giving the correct artifacts to pass the test
|
||||
if mock:
|
||||
logger.debug("Uploading mock artifacts to agent...")
|
||||
await upload_artifacts(
|
||||
api_instance, artifacts_location, task_id, "artifacts_out"
|
||||
)
|
||||
|
||||
logger.debug("Downloading agent artifacts...")
|
||||
await download_agent_artifacts_into_folder(
|
||||
api_instance, task_id, config.temp_folder
|
||||
)
|
||||
|
||||
|
||||
async def download_agent_artifacts_into_folder(
|
||||
api_instance: AgentApi, task_id: str, folder: Path
|
||||
):
|
||||
artifacts = await api_instance.list_agent_task_artifacts(task_id=task_id)
|
||||
|
||||
for artifact in artifacts.artifacts:
|
||||
# current absolute path of the directory of the file
|
||||
if artifact.relative_path:
|
||||
path: str = (
|
||||
artifact.relative_path
|
||||
if not artifact.relative_path.startswith("/")
|
||||
else artifact.relative_path[1:]
|
||||
)
|
||||
folder = (folder / path).parent
|
||||
|
||||
if not folder.exists():
|
||||
folder.mkdir(parents=True)
|
||||
|
||||
file_path = folder / artifact.file_name
|
||||
logger.debug(f"Downloading agent artifact {artifact.file_name} to {folder}")
|
||||
with open(file_path, "wb") as f:
|
||||
content = await api_instance.download_agent_task_artifact(
|
||||
task_id=task_id, artifact_id=artifact.artifact_id
|
||||
)
|
||||
|
||||
f.write(content)
|
||||
|
||||
|
||||
async def upload_artifacts(
|
||||
api_instance: AgentApi, artifacts_location: Path, task_id: str, type: str
|
||||
) -> None:
|
||||
for file_path in get_list_of_file_paths(artifacts_location, type):
|
||||
relative_path: Optional[str] = "/".join(
|
||||
str(file_path).split(f"{type}/", 1)[-1].split("/")[:-1]
|
||||
)
|
||||
if not relative_path:
|
||||
relative_path = None
|
||||
|
||||
await api_instance.upload_agent_task_artifacts(
|
||||
task_id=task_id, file=str(file_path), relative_path=relative_path
|
||||
)
|
||||
@@ -1,27 +0,0 @@
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
HELICONE_GRAPHQL_LOGS = os.getenv("HELICONE_GRAPHQL_LOGS", "").lower() == "true"
|
||||
|
||||
|
||||
def get_list_of_file_paths(
|
||||
challenge_dir_path: str | Path, artifact_folder_name: str
|
||||
) -> list[Path]:
|
||||
source_dir = Path(challenge_dir_path) / artifact_folder_name
|
||||
if not source_dir.exists():
|
||||
return []
|
||||
return list(source_dir.iterdir())
|
||||
|
||||
|
||||
def copy_challenge_artifacts_into_workspace(
|
||||
challenge_dir_path: str | Path, artifact_folder_name: str, workspace: str | Path
|
||||
) -> None:
|
||||
file_paths = get_list_of_file_paths(challenge_dir_path, artifact_folder_name)
|
||||
for file_path in file_paths:
|
||||
if file_path.is_file():
|
||||
shutil.copy(file_path, workspace)
|
||||
@@ -1,339 +0,0 @@
|
||||
import datetime
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from collections import deque
|
||||
from multiprocessing import Process
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
import psutil
|
||||
from agent_protocol_client import AgentApi, ApiClient, ApiException, Configuration
|
||||
from agent_protocol_client.models import Task, TaskRequestBody
|
||||
from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel, ConfigDict, ValidationError
|
||||
|
||||
from agbenchmark.challenges import ChallengeInfo
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.report_types_v2 import (
|
||||
BenchmarkRun,
|
||||
Metrics,
|
||||
RepositoryInfo,
|
||||
RunDetails,
|
||||
TaskInfo,
|
||||
)
|
||||
from agbenchmark.schema import TaskEvalRequestBody
|
||||
from agbenchmark.utils.utils import write_pretty_json
|
||||
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CHALLENGES: dict[str, ChallengeInfo] = {}
|
||||
challenges_path = Path(__file__).parent / "challenges"
|
||||
challenge_spec_files = deque(
|
||||
glob.glob(
|
||||
f"{challenges_path}/**/data.json",
|
||||
recursive=True,
|
||||
)
|
||||
)
|
||||
|
||||
logger.debug("Loading challenges...")
|
||||
while challenge_spec_files:
|
||||
challenge_spec_file = Path(challenge_spec_files.popleft())
|
||||
challenge_relpath = challenge_spec_file.relative_to(challenges_path.parent)
|
||||
if challenge_relpath.is_relative_to("challenges/deprecated"):
|
||||
continue
|
||||
|
||||
logger.debug(f"Loading {challenge_relpath}...")
|
||||
try:
|
||||
challenge_info = ChallengeInfo.model_validate_json(
|
||||
challenge_spec_file.read_text()
|
||||
)
|
||||
except ValidationError as e:
|
||||
if logging.getLogger().level == logging.DEBUG:
|
||||
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
|
||||
logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
|
||||
continue
|
||||
|
||||
if not challenge_info.eval_id:
|
||||
challenge_info.eval_id = str(uuid.uuid4())
|
||||
# this will sort all the keys of the JSON systematically
|
||||
# so that the order is always the same
|
||||
write_pretty_json(challenge_info.model_dump(), challenge_spec_file)
|
||||
|
||||
CHALLENGES[challenge_info.eval_id] = challenge_info
|
||||
|
||||
|
||||
class BenchmarkTaskInfo(BaseModel):
|
||||
task_id: str
|
||||
start_time: datetime.datetime
|
||||
challenge_info: ChallengeInfo
|
||||
|
||||
|
||||
task_informations: dict[str, BenchmarkTaskInfo] = {}
|
||||
|
||||
|
||||
def find_agbenchmark_without_uvicorn():
|
||||
pids = []
|
||||
for process in psutil.process_iter(
|
||||
attrs=[
|
||||
"pid",
|
||||
"cmdline",
|
||||
"name",
|
||||
"username",
|
||||
"status",
|
||||
"cpu_percent",
|
||||
"memory_info",
|
||||
"create_time",
|
||||
"cwd",
|
||||
"connections",
|
||||
]
|
||||
):
|
||||
try:
|
||||
# Convert the process.info dictionary values to strings and concatenate them
|
||||
full_info = " ".join([str(v) for k, v in process.as_dict().items()])
|
||||
|
||||
if "agbenchmark" in full_info and "uvicorn" not in full_info:
|
||||
pids.append(process.pid)
|
||||
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
|
||||
pass
|
||||
return pids
|
||||
|
||||
|
||||
class CreateReportRequest(BaseModel):
|
||||
test: str
|
||||
test_run_id: str
|
||||
# category: Optional[str] = []
|
||||
mock: Optional[bool] = False
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
|
||||
updates_list = []
|
||||
|
||||
origins = [
|
||||
"http://localhost:8000",
|
||||
"http://localhost:8080",
|
||||
"http://127.0.0.1:5000",
|
||||
"http://localhost:5000",
|
||||
]
|
||||
|
||||
|
||||
def stream_output(pipe):
|
||||
for line in pipe:
|
||||
print(line, end="")
|
||||
|
||||
|
||||
def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
|
||||
from agbenchmark.agent_api_interface import upload_artifacts
|
||||
from agbenchmark.challenges import get_challenge_from_source_uri
|
||||
from agbenchmark.main import run_benchmark
|
||||
|
||||
configuration = Configuration(
|
||||
host=agbenchmark_config.host or "http://localhost:8000"
|
||||
)
|
||||
app = FastAPI()
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=origins,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
router = APIRouter()
|
||||
|
||||
@router.post("/reports")
|
||||
def run_single_test(body: CreateReportRequest) -> dict:
|
||||
pids = find_agbenchmark_without_uvicorn()
|
||||
logger.info(f"pids already running with agbenchmark: {pids}")
|
||||
|
||||
logger.debug(f"Request to /reports: {body.model_dump()}")
|
||||
|
||||
# Start the benchmark in a separate thread
|
||||
benchmark_process = Process(
|
||||
target=lambda: run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
tests=(body.test,),
|
||||
mock=body.mock or False,
|
||||
)
|
||||
)
|
||||
benchmark_process.start()
|
||||
|
||||
# Wait for the benchmark to finish, with a timeout of 200 seconds
|
||||
timeout = 200
|
||||
start_time = time.time()
|
||||
while benchmark_process.is_alive():
|
||||
if time.time() - start_time > timeout:
|
||||
logger.warning(f"Benchmark run timed out after {timeout} seconds")
|
||||
benchmark_process.terminate()
|
||||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
logger.debug(f"Benchmark finished running in {time.time() - start_time} s")
|
||||
|
||||
# List all folders in the current working directory
|
||||
reports_folder = agbenchmark_config.reports_folder
|
||||
folders = [folder for folder in reports_folder.iterdir() if folder.is_dir()]
|
||||
|
||||
# Sort the folders based on their names
|
||||
sorted_folders = sorted(folders, key=lambda x: x.name)
|
||||
|
||||
# Get the last folder
|
||||
latest_folder = sorted_folders[-1] if sorted_folders else None
|
||||
|
||||
# Read report.json from this folder
|
||||
if latest_folder:
|
||||
report_path = latest_folder / "report.json"
|
||||
logger.debug(f"Getting latest report from {report_path}")
|
||||
if report_path.exists():
|
||||
with report_path.open() as file:
|
||||
data = json.load(file)
|
||||
logger.debug(f"Report data: {data}")
|
||||
else:
|
||||
raise HTTPException(
|
||||
502,
|
||||
"Could not get result after running benchmark: "
|
||||
f"'report.json' does not exist in '{latest_folder}'",
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
504, "Could not get result after running benchmark: no reports found"
|
||||
)
|
||||
|
||||
return data
|
||||
|
||||
@router.post("/agent/tasks", tags=["agent"])
|
||||
async def create_agent_task(task_eval_request: TaskEvalRequestBody) -> Task:
|
||||
"""
|
||||
Creates a new task using the provided TaskEvalRequestBody and returns a Task.
|
||||
|
||||
Args:
|
||||
task_eval_request: `TaskRequestBody` including an eval_id.
|
||||
|
||||
Returns:
|
||||
Task: A new task with task_id, input, additional_input,
|
||||
and empty lists for artifacts and steps.
|
||||
|
||||
Example:
|
||||
Request (TaskEvalRequestBody defined in schema.py):
|
||||
{
|
||||
...,
|
||||
"eval_id": "50da533e-3904-4401-8a07-c49adf88b5eb"
|
||||
}
|
||||
|
||||
Response (Task defined in `agent_protocol_client.models`):
|
||||
{
|
||||
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
|
||||
"input": "Write the word 'Washington' to a .txt file",
|
||||
"artifacts": []
|
||||
}
|
||||
"""
|
||||
try:
|
||||
challenge_info = CHALLENGES[task_eval_request.eval_id]
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
task_input = challenge_info.task
|
||||
|
||||
task_request_body = TaskRequestBody(
|
||||
input=task_input, additional_input=None
|
||||
)
|
||||
task_response = await api_instance.create_agent_task(
|
||||
task_request_body=task_request_body
|
||||
)
|
||||
task_info = BenchmarkTaskInfo(
|
||||
task_id=task_response.task_id,
|
||||
start_time=datetime.datetime.now(datetime.timezone.utc),
|
||||
challenge_info=challenge_info,
|
||||
)
|
||||
task_informations[task_info.task_id] = task_info
|
||||
|
||||
if input_artifacts_dir := challenge_info.task_artifacts_dir:
|
||||
await upload_artifacts(
|
||||
api_instance,
|
||||
input_artifacts_dir,
|
||||
task_response.task_id,
|
||||
"artifacts_in",
|
||||
)
|
||||
return task_response
|
||||
except ApiException as e:
|
||||
logger.error(f"Error whilst trying to create a task:\n{e}")
|
||||
logger.error(
|
||||
"The above error was caused while processing request: "
|
||||
f"{task_eval_request}"
|
||||
)
|
||||
raise HTTPException(500)
|
||||
|
||||
@router.post("/agent/tasks/{task_id}/steps")
|
||||
async def proxy(request: Request, task_id: str):
|
||||
timeout = httpx.Timeout(300.0, read=300.0) # 5 minutes
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
# Construct the new URL
|
||||
new_url = f"{configuration.host}/ap/v1/agent/tasks/{task_id}/steps"
|
||||
|
||||
# Forward the request
|
||||
response = await client.post(
|
||||
new_url,
|
||||
content=await request.body(),
|
||||
headers=dict(request.headers),
|
||||
)
|
||||
|
||||
# Return the response from the forwarded request
|
||||
return Response(content=response.content, status_code=response.status_code)
|
||||
|
||||
@router.post("/agent/tasks/{task_id}/evaluations")
|
||||
async def create_evaluation(task_id: str) -> BenchmarkRun:
|
||||
task_info = task_informations[task_id]
|
||||
challenge = get_challenge_from_source_uri(task_info.challenge_info.source_uri)
|
||||
try:
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
eval_results = await challenge.evaluate_task_state(
|
||||
api_instance, task_id
|
||||
)
|
||||
|
||||
eval_info = BenchmarkRun(
|
||||
repository_info=RepositoryInfo(),
|
||||
run_details=RunDetails(
|
||||
command=f"agbenchmark --test={challenge.info.name}",
|
||||
benchmark_start_time=(
|
||||
task_info.start_time.strftime("%Y-%m-%dT%H:%M:%S+00:00")
|
||||
),
|
||||
test_name=challenge.info.name,
|
||||
),
|
||||
task_info=TaskInfo(
|
||||
data_path=challenge.info.source_uri,
|
||||
is_regression=None,
|
||||
category=[c.value for c in challenge.info.category],
|
||||
task=challenge.info.task,
|
||||
answer=challenge.info.reference_answer or "",
|
||||
description=challenge.info.description or "",
|
||||
),
|
||||
metrics=Metrics(
|
||||
success=all(e.passed for e in eval_results),
|
||||
success_percentage=(
|
||||
100 * sum(e.score for e in eval_results) / len(eval_results)
|
||||
if eval_results # avoid division by 0
|
||||
else 0
|
||||
),
|
||||
attempted=True,
|
||||
),
|
||||
config={},
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"Returning evaluation data:\n{eval_info.model_dump_json(indent=4)}"
|
||||
)
|
||||
return eval_info
|
||||
except ApiException as e:
|
||||
logger.error(f"Error {e} whilst trying to evaluate task: {task_id}")
|
||||
raise HTTPException(500)
|
||||
|
||||
app.include_router(router, prefix="/ap/v1")
|
||||
|
||||
return app
|
||||
@@ -1,128 +0,0 @@
|
||||
import json
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import Field, ValidationInfo, field_validator
|
||||
from pydantic_settings import BaseSettings
|
||||
|
||||
|
||||
def _calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
|
||||
"""
|
||||
Calculates the path to the directory where the test report will be saved.
|
||||
"""
|
||||
# Ensure the reports path exists
|
||||
base_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Get current UTC date-time stamp
|
||||
date_stamp = benchmark_start_time.strftime("%Y%m%dT%H%M%S")
|
||||
|
||||
# Default run name
|
||||
run_name = "full_run"
|
||||
|
||||
# Map command-line arguments to their respective labels
|
||||
arg_labels = {
|
||||
"--test": None,
|
||||
"--category": None,
|
||||
"--maintain": "maintain",
|
||||
"--improve": "improve",
|
||||
"--explore": "explore",
|
||||
}
|
||||
|
||||
# Identify the relevant command-line argument
|
||||
for arg, label in arg_labels.items():
|
||||
if arg in sys.argv:
|
||||
test_arg = sys.argv[sys.argv.index(arg) + 1] if label is None else None
|
||||
run_name = arg.strip("--")
|
||||
if test_arg:
|
||||
run_name = f"{run_name}_{test_arg}"
|
||||
break
|
||||
|
||||
# Create the full new directory path with ISO standard UTC date-time stamp
|
||||
report_path = base_path / f"{date_stamp}_{run_name}"
|
||||
|
||||
# Ensure the new directory is created
|
||||
# FIXME: this is not a desirable side-effect of loading the config
|
||||
report_path.mkdir(exist_ok=True)
|
||||
|
||||
return report_path
|
||||
|
||||
|
||||
class AgentBenchmarkConfig(BaseSettings, extra="allow"):
|
||||
"""
|
||||
Configuration model and loader for the AGBenchmark.
|
||||
|
||||
Projects that want to use AGBenchmark should contain an agbenchmark_config folder
|
||||
with a config.json file that - at minimum - specifies the `host` at which the
|
||||
subject application exposes an Agent Protocol compliant API.
|
||||
"""
|
||||
|
||||
agbenchmark_config_dir: Path = Field(exclude=True)
|
||||
"""Path to the agbenchmark_config folder of the subject agent application."""
|
||||
|
||||
categories: list[str] | None = None
|
||||
"""Categories to benchmark the agent for. If omitted, all categories are assumed."""
|
||||
|
||||
host: str
|
||||
"""Host (scheme://address:port) of the subject agent application."""
|
||||
|
||||
reports_folder: Path = Field(None)
|
||||
"""
|
||||
Path to the folder where new reports should be stored.
|
||||
Defaults to {agbenchmark_config_dir}/reports.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def load(cls, config_dir: Optional[Path] = None) -> "AgentBenchmarkConfig":
|
||||
config_dir = config_dir or cls.find_config_folder()
|
||||
with (config_dir / "config.json").open("r") as f:
|
||||
return cls(
|
||||
agbenchmark_config_dir=config_dir,
|
||||
**json.load(f),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def find_config_folder(for_dir: Path = Path.cwd()) -> Path:
|
||||
"""
|
||||
Find the closest ancestor folder containing an agbenchmark_config folder,
|
||||
and returns the path of that agbenchmark_config folder.
|
||||
"""
|
||||
current_directory = for_dir
|
||||
while current_directory != Path("/"):
|
||||
if (path := current_directory / "agbenchmark_config").exists():
|
||||
if (path / "config.json").is_file():
|
||||
return path
|
||||
current_directory = current_directory.parent
|
||||
raise FileNotFoundError(
|
||||
"No 'agbenchmark_config' directory found in the path hierarchy."
|
||||
)
|
||||
|
||||
@property
|
||||
def config_file(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "config.json"
|
||||
|
||||
@field_validator("reports_folder", mode="before")
|
||||
def set_reports_folder(cls, value: Path, info: ValidationInfo):
|
||||
if not value:
|
||||
return info.data["agbenchmark_config_dir"] / "reports"
|
||||
return value
|
||||
|
||||
def get_report_dir(self, benchmark_start_time: datetime) -> Path:
|
||||
return _calculate_info_test_path(self.reports_folder, benchmark_start_time)
|
||||
|
||||
@property
|
||||
def regression_tests_file(self) -> Path:
|
||||
return self.reports_folder / "regression_tests.json"
|
||||
|
||||
@property
|
||||
def success_rate_file(self) -> Path:
|
||||
return self.reports_folder / "success_rate.json"
|
||||
|
||||
@property
|
||||
def challenges_already_beaten_file(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "challenges_already_beaten.json"
|
||||
|
||||
@property
|
||||
def temp_folder(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "temp_folder"
|
||||
@@ -1,339 +0,0 @@
|
||||
import contextlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
|
||||
from agbenchmark.challenges import OPTIONAL_CATEGORIES, BaseChallenge
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.report_types import Test
|
||||
from agbenchmark.reports.ReportManager import RegressionTestsTracker
|
||||
from agbenchmark.reports.reports import (
|
||||
add_test_result_to_report,
|
||||
make_empty_test_report,
|
||||
session_finish,
|
||||
)
|
||||
from agbenchmark.utils.data_types import Category
|
||||
|
||||
GLOBAL_TIMEOUT = (
|
||||
1500 # The tests will stop after 25 minutes so we can send the reports.
|
||||
)
|
||||
|
||||
agbenchmark_config = AgentBenchmarkConfig.load()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
pytest_plugins = ["agbenchmark.utils.dependencies"]
|
||||
collect_ignore = ["challenges"]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def config() -> AgentBenchmarkConfig:
|
||||
return agbenchmark_config
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def temp_folder() -> Generator[Path, None, None]:
|
||||
"""
|
||||
Pytest fixture that sets up and tears down the temporary folder for each test.
|
||||
It is automatically used in every test due to the 'autouse=True' parameter.
|
||||
"""
|
||||
|
||||
# create output directory if it doesn't exist
|
||||
if not os.path.exists(agbenchmark_config.temp_folder):
|
||||
os.makedirs(agbenchmark_config.temp_folder, exist_ok=True)
|
||||
|
||||
yield agbenchmark_config.temp_folder
|
||||
# teardown after test function completes
|
||||
if not os.getenv("KEEP_TEMP_FOLDER_FILES"):
|
||||
for filename in os.listdir(agbenchmark_config.temp_folder):
|
||||
file_path = os.path.join(agbenchmark_config.temp_folder, filename)
|
||||
try:
|
||||
if os.path.isfile(file_path) or os.path.islink(file_path):
|
||||
os.unlink(file_path)
|
||||
elif os.path.isdir(file_path):
|
||||
shutil.rmtree(file_path)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to delete {file_path}. Reason: {e}")
|
||||
|
||||
|
||||
def pytest_addoption(parser: pytest.Parser) -> None:
|
||||
"""
|
||||
Pytest hook that adds command-line options to the `pytest` command.
|
||||
The added options are specific to agbenchmark and control its behavior:
|
||||
* `--mock` is used to run the tests in mock mode.
|
||||
* `--host` is used to specify the host for the tests.
|
||||
* `--category` is used to run only tests of a specific category.
|
||||
* `--nc` is used to run the tests without caching.
|
||||
* `--cutoff` is used to specify a cutoff time for the tests.
|
||||
* `--improve` is used to run only the tests that are marked for improvement.
|
||||
* `--maintain` is used to run only the tests that are marked for maintenance.
|
||||
* `--explore` is used to run the tests in exploration mode.
|
||||
* `--test` is used to run a specific test.
|
||||
* `--no-dep` is used to run the tests without dependencies.
|
||||
* `--keep-answers` is used to keep the answers of the tests.
|
||||
|
||||
Args:
|
||||
parser: The Pytest CLI parser to which the command-line options are added.
|
||||
"""
|
||||
parser.addoption("-N", "--attempts", action="store")
|
||||
parser.addoption("--no-dep", action="store_true")
|
||||
parser.addoption("--mock", action="store_true")
|
||||
parser.addoption("--host", default=None)
|
||||
parser.addoption("--nc", action="store_true")
|
||||
parser.addoption("--cutoff", action="store")
|
||||
parser.addoption("--category", action="append")
|
||||
parser.addoption("--test", action="append")
|
||||
parser.addoption("--improve", action="store_true")
|
||||
parser.addoption("--maintain", action="store_true")
|
||||
parser.addoption("--explore", action="store_true")
|
||||
parser.addoption("--keep-answers", action="store_true")
|
||||
|
||||
|
||||
def pytest_configure(config: pytest.Config) -> None:
|
||||
# Register category markers to prevent "unknown marker" warnings
|
||||
for category in Category:
|
||||
config.addinivalue_line("markers", f"{category.value}: {category}")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def check_regression(request: pytest.FixtureRequest) -> None:
    """
    Auto-use fixture that decides, per test, whether the test should be skipped
    based on its regression status and the `--improve` / `--maintain` options.

    The test name is taken from the parent class of the requesting test node;
    regression status is looked up in the regression tests file configured in
    the benchmark config.

    Effect:
        * `--improve` and the test IS a regression test      -> skip it.
        * `--maintain` and the test is NOT a regression test -> skip it.

    Args:
        request: Request object providing the test node and pytest config.
    """
    # If the regression tests file is missing, silently do nothing.
    with contextlib.suppress(FileNotFoundError):
        rt_tracker = RegressionTestsTracker(agbenchmark_config.regression_tests_file)

        assert isinstance(request.node, pytest.Function)
        assert isinstance(request.node.parent, pytest.Class)
        test_name = request.node.parent.name
        challenge_location = getattr(request.node.cls, "CHALLENGE_LOCATION", "")
        skip_string = f"Skipping {test_name} at {challenge_location}"

        # Is this test recorded as a regression test?
        is_regression_test = rt_tracker.has_regression_test(test_name)
        improve_mode = request.config.getoption("--improve")
        maintain_mode = request.config.getoption("--maintain")
        if improve_mode and is_regression_test:
            pytest.skip(f"{skip_string} because it's a regression test")
        elif maintain_mode and not is_regression_test:
            pytest.skip(f"{skip_string} because it's not a regression test")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True, scope="session")
def mock(request: pytest.FixtureRequest) -> bool:
    """
    Session-scoped fixture exposing the value of the `--mock` CLI option,
    which runs the tests in mock mode.

    Args:
        request: The fixture request from which the option value is read.

    Returns:
        bool: Whether `--mock` is set for this session.
    """
    is_mock = request.config.getoption("--mock")
    assert isinstance(is_mock, bool)
    return is_mock
|
||||
|
||||
|
||||
# Session-wide accumulator of per-challenge reports, keyed by challenge eval_id.
test_reports: dict[str, Test] = {}
|
||||
|
||||
|
||||
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo) -> None:
    """
    Pytest hook invoked while a test report is being generated.

    Maintains the per-challenge entries of the module-level `test_reports`
    mapping: records the test name during "setup" and folds the test outcome
    into the report during "call".

    Args:
        item: The test item for which the report is being generated.
        call: The call object carrying the phase and its result.
    """
    challenge: type[BaseChallenge] = item.cls  # type: ignore
    challenge_id = challenge.info.eval_id

    if challenge_id not in test_reports:
        test_reports[challenge_id] = make_empty_test_report(challenge.info)

    if call.when == "setup":
        # The node id part after "::" is the test's display name
        test_name = item.nodeid.split("::")[1]
        item.user_properties.append(("test_name", test_name))
    elif call.when == "call":
        add_test_result_to_report(
            test_reports[challenge_id], item, call, agbenchmark_config
        )
|
||||
|
||||
|
||||
def timeout_monitor(start_time: int) -> None:
    """
    Watchdog that limits the total execution time of the test suite.

    Meant to run in a separate (daemon) thread: polls once per second and calls
    `pytest.exit` once `GLOBAL_TIMEOUT` seconds have elapsed since `start_time`.

    Args:
        start_time (int): The start time of the test suite (epoch seconds).
    """
    while time.time() - start_time < GLOBAL_TIMEOUT:
        time.sleep(1)  # poll once per second

    pytest.exit("Test suite exceeded the global timeout", returncode=1)
|
||||
|
||||
|
||||
def pytest_sessionstart(session: pytest.Session) -> None:
    """
    Pytest hook called at the start of a test session.

    Launches the global `timeout_monitor` watchdog in a daemon thread so the
    whole suite is bounded by `GLOBAL_TIMEOUT`.
    """
    monitor = threading.Thread(
        target=timeout_monitor,
        args=(time.time(),),
    )
    monitor.daemon = True  # daemon threads are abruptly stopped at shutdown
    monitor.start()
|
||||
|
||||
|
||||
def pytest_sessionfinish(session: pytest.Session) -> None:
    """
    Pytest hook called at the end of a test session.

    Delegates to `session_finish` to finalize and persist the test reports.
    """
    session_finish(agbenchmark_config)
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc: pytest.Metafunc):
    """
    Pytest hook that parametrizes every test with an attempt index `i_attempt`.

    When `-N`/`--attempts` is given on the CLI (pytest stores the value as a
    string), each test is generated that many times; otherwise a single attempt
    (index 0) is generated.
    """
    n = metafunc.config.getoption("-N")
    # The option value is a string when supplied and None otherwise; checking
    # for None is more robust than the original `type(n) is str` probe.
    metafunc.parametrize("i_attempt", range(int(n)) if n is not None else [0])
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(
    items: list[pytest.Function], config: pytest.Config
) -> None:
    """
    Pytest hook called after initial test collection has been performed.

    Filters the collected items according to the benchmark CLI options
    (--test, --category, --maintain, --improve, --explore), attaches category
    markers, and adds the dependency marker used by the DependencyManager.

    Args:
        items: The collected test items, modified in place.
        config: The active pytest configuration.
    """
    rt_tracker = RegressionTestsTracker(agbenchmark_config.regression_tests_file)

    try:
        challenges_beaten_in_the_past = json.loads(
            agbenchmark_config.challenges_already_beaten_file.read_bytes()
        )
    except FileNotFoundError:
        challenges_beaten_in_the_past = {}

    selected_tests: tuple[str] = config.getoption("--test")  # type: ignore
    selected_categories: tuple[str] = config.getoption("--category")  # type: ignore

    # Can't use a for-loop to remove items in-place
    i = 0
    while i < len(items):
        item = items[i]
        challenge = item.cls

        # Guard first: warn about (and keep) any non-challenge item instead of
        # crashing. NOTE(review): the original asserted
        # `issubclass(item.cls, BaseChallenge)` *before* this check, which made
        # this warning branch unreachable; the assert has been dropped so the
        # branch can actually fire.
        if not (challenge and issubclass(challenge, BaseChallenge)):
            item.warn(
                pytest.PytestCollectionWarning(
                    f"Non-challenge item collected: {challenge}"
                )
            )
            i += 1
            continue

        challenge_name = challenge.info.name

        # --test: remove the test from the set if it's not specifically selected
        if selected_tests and challenge.info.name not in selected_tests:
            items.remove(item)
            continue

        # Filter challenges for --maintain, --improve, and --explore:
        # --maintain -> only challenges expected to be passed (= regression tests)
        # --improve -> only challenges that so far are not passed (reliably)
        # --explore -> only challenges that have never been passed
        is_regression_test = rt_tracker.has_regression_test(challenge.info.name)
        has_been_passed = challenges_beaten_in_the_past.get(challenge.info.name, False)
        if (
            (config.getoption("--maintain") and not is_regression_test)
            or (config.getoption("--improve") and is_regression_test)
            or (config.getoption("--explore") and has_been_passed)
        ):
            items.remove(item)
            continue

        dependencies = challenge.info.dependencies
        if (
            config.getoption("--test")
            or config.getoption("--no-dep")
            or config.getoption("--maintain")
        ):
            # Ignore dependencies:
            # --test -> user selected specific tests to run, don't care about deps
            # --no-dep -> ignore dependency relations regardless of test selection
            # --maintain -> all "regression" tests must pass, so run all of them
            dependencies = []
        elif config.getoption("--improve"):
            # Filter dependencies, keep only deps that are not "regression" tests
            dependencies = [
                d for d in dependencies if not rt_tracker.has_regression_test(d)
            ]

        # Set category markers
        challenge_categories = set(c.value for c in challenge.info.category)
        for category in challenge_categories:
            item.add_marker(category)

        # Enforce category selection
        if selected_categories:
            if not challenge_categories.intersection(set(selected_categories)):
                items.remove(item)
                continue

        # Skip items in optional categories that are not selected for the subject agent
        challenge_optional_categories = challenge_categories & set(OPTIONAL_CATEGORIES)
        if challenge_optional_categories and not (
            agbenchmark_config.categories
            and challenge_optional_categories.issubset(
                set(agbenchmark_config.categories)
            )
        ):
            logger.debug(
                f"Skipping {challenge_name}: "
                f"category {' and '.join(challenge_optional_categories)} is optional, "
                "and not explicitly selected in the benchmark config."
            )
            items.remove(item)
            continue

        # Add marker for the DependencyManager
        item.add_marker(pytest.mark.depends(on=dependencies, name=challenge_name))

        i += 1
|
||||
@@ -1,26 +0,0 @@
|
||||
"""
|
||||
AGBenchmark's test discovery endpoint for Pytest.
|
||||
|
||||
This module is picked up by Pytest's *_test.py file matching pattern, and all challenge
|
||||
classes in the module that conform to the `Test*` pattern are collected.
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
from itertools import chain
|
||||
|
||||
from agbenchmark.challenges.builtin import load_builtin_challenges
|
||||
from agbenchmark.challenges.webarena import load_webarena_challenges
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DATA_CATEGORY = {}
|
||||
|
||||
# Load challenges and attach them to this module
|
||||
for challenge in chain(load_builtin_challenges(), load_webarena_challenges()):
|
||||
# Attach the Challenge class to this module so it can be discovered by pytest
|
||||
module = importlib.import_module(__name__)
|
||||
setattr(module, challenge.__name__, challenge)
|
||||
|
||||
# Build a map of challenge names and their primary category
|
||||
DATA_CATEGORY[challenge.info.name] = challenge.info.category[0].value
|
||||
@@ -1,158 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional, Sequence
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from agbenchmark.challenges import get_unique_categories
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def run_benchmark(
    config: AgentBenchmarkConfig,
    maintain: bool = False,
    improve: bool = False,
    explore: bool = False,
    tests: tuple[str, ...] = tuple(),
    categories: tuple[str, ...] = tuple(),
    skip_categories: tuple[str, ...] = tuple(),
    attempts_per_challenge: int = 1,
    mock: bool = False,
    no_dep: bool = False,
    no_cutoff: bool = False,
    cutoff: Optional[int] = None,
    keep_answers: bool = False,
    server: bool = False,
) -> int:
    """
    Start the benchmark: validate the option combination, translate it into
    Pytest CLI arguments, and run Pytest over the generated challenge tests.
    If a category flag is provided, only challenges with the corresponding mark
    will be run.

    Args:
        config: Benchmark configuration of the subject agent.
        (the remaining parameters mirror the CLI flags of the same names)

    Returns:
        int: The Pytest exit code.

    Raises:
        InvalidInvocationError: If the given options are mutually inconsistent.
    """
    import pytest

    from agbenchmark.reports.ReportManager import SingletonReportManager

    validate_args(
        maintain=maintain,
        improve=improve,
        explore=explore,
        tests=tests,
        categories=categories,
        skip_categories=skip_categories,
        no_cutoff=no_cutoff,
        cutoff=cutoff,
    )

    SingletonReportManager()

    for key, value in vars(config).items():
        logger.debug(f"config.{key} = {repr(value)}")

    pytest_args = ["-vs"]

    if tests:
        logger.info(f"Running specific test(s): {' '.join(tests)}")
        pytest_args += [f"--test={t}" for t in tests]
    else:
        all_categories = get_unique_categories()

        if categories or skip_categories:
            categories_to_run = set(categories) or all_categories
            if skip_categories:
                categories_to_run = categories_to_run.difference(set(skip_categories))
            assert categories_to_run, "Error: You can't skip all categories"
            pytest_args += [f"--category={c}" for c in categories_to_run]
            logger.info(f"Running tests of category: {categories_to_run}")
        else:
            logger.info("Running all categories")

        if maintain:
            logger.info("Running only regression tests")
        elif improve:
            logger.info("Running only non-regression tests")
        elif explore:
            logger.info("Only attempt challenges that have never been beaten")

    if mock:
        # TODO: unhack
        os.environ[
            "IS_MOCK"
        ] = "True"  # ugly hack to make the mock work when calling from API

    # Pass through flags
    for flag, active in {
        "--maintain": maintain,
        "--improve": improve,
        "--explore": explore,
        "--no-dep": no_dep,
        "--mock": mock,
        "--nc": no_cutoff,
        "--keep-answers": keep_answers,
    }.items():
        if active:
            pytest_args.append(flag)

    if attempts_per_challenge > 1:
        pytest_args.append(f"--attempts={attempts_per_challenge}")

    if cutoff:
        pytest_args.append(f"--cutoff={cutoff}")
        # typo fix: message previously read "cuttoff"
        logger.debug(f"Setting cutoff override to {cutoff} seconds.")

    current_dir = Path(__file__).resolve().parent
    pytest_args.append(str(current_dir / "generate_test.py"))

    pytest_args.append("--cache-clear")
    logger.debug(f"Running Pytest with args: {pytest_args}")
    exit_code = pytest.main(pytest_args)

    SingletonReportManager.clear_instance()
    return exit_code
|
||||
|
||||
|
||||
class InvalidInvocationError(ValueError):
    """Raised when the benchmark is invoked with an invalid combination of options."""
|
||||
|
||||
|
||||
def validate_args(
    maintain: bool,
    improve: bool,
    explore: bool,
    tests: Sequence[str],
    categories: Sequence[str],
    skip_categories: Sequence[str],
    no_cutoff: bool,
    cutoff: Optional[int],
) -> None:
    """
    Validate a combination of benchmark CLI options.

    Raises:
        InvalidInvocationError: If an unknown category is specified, if more
            than one of --maintain/--improve/--explore is set, if --test is
            combined with any other selection option, or if both --nc and
            --cutoff are given.
    """
    if categories:
        all_categories = get_unique_categories()
        invalid_categories = set(categories) - all_categories
        if invalid_categories:
            raise InvalidInvocationError(
                "One or more invalid categories were specified: "
                f"{', '.join(invalid_categories)}.\n"
                f"Valid categories are: {', '.join(all_categories)}."
            )

    # --maintain / --improve / --explore are mutually exclusive
    if sum([maintain, improve, explore]) > 1:
        raise InvalidInvocationError(
            "You can't use --maintain, --improve or --explore at the same time. "
            "Please choose one."
        )

    other_selection_active = bool(
        categories or skip_categories or maintain or improve or explore
    )
    if tests and other_selection_active:
        raise InvalidInvocationError(
            "If you're running a specific test make sure no other options are "
            "selected. Please just pass the --test."
        )

    if no_cutoff and cutoff:
        raise InvalidInvocationError(
            "You can't use both --nc and --cutoff at the same time. "
            "Please choose one."
        )
|
||||
@@ -1,217 +0,0 @@
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.graphs import save_single_radar_chart
|
||||
from agbenchmark.reports.processing.process_report import (
|
||||
get_highest_achieved_difficulty_per_category,
|
||||
)
|
||||
from agbenchmark.reports.processing.report_types import MetricsOverall, Report, Test
|
||||
from agbenchmark.utils.utils import get_highest_success_difficulty
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SingletonReportManager:
    """Process-wide singleton owning the session's report managers and trackers."""

    instance = None

    # Populated as *class* attributes when the singleton is first created
    INFO_MANAGER: "SessionReportManager"
    REGRESSION_MANAGER: "RegressionTestsTracker"
    SUCCESS_RATE_TRACKER: "SuccessRatesTracker"

    def __new__(cls):
        if cls.instance:
            return cls.instance

        cls.instance = super(SingletonReportManager, cls).__new__(cls)

        agent_benchmark_config = AgentBenchmarkConfig.load()
        # Timestamp identifying this benchmark run's report directory
        benchmark_start_time_dt = datetime.now(timezone.utc)

        cls.INFO_MANAGER = SessionReportManager(
            agent_benchmark_config.get_report_dir(benchmark_start_time_dt)
            / "report.json",
            benchmark_start_time_dt,
        )
        cls.REGRESSION_MANAGER = RegressionTestsTracker(
            agent_benchmark_config.regression_tests_file
        )
        cls.SUCCESS_RATE_TRACKER = SuccessRatesTracker(
            agent_benchmark_config.success_rate_file
        )

        return cls.instance

    @classmethod
    def clear_instance(cls):
        """Drop the singleton and its managers so a fresh run can recreate them."""
        cls.instance = None
        del cls.INFO_MANAGER
        del cls.REGRESSION_MANAGER
        del cls.SUCCESS_RATE_TRACKER
|
||||
|
||||
|
||||
class BaseReportManager:
    """Base class for report managers: loads, saves and mutates a JSON report file."""

    # Mapping of test name -> report data; kept sorted by key on load.
    tests: dict[str, Any]

    def __init__(self, report_file: Path):
        """
        Args:
            report_file: Path of the JSON file backing this manager.
        """
        self.report_file = report_file

        self.load()

    def load(self) -> None:
        """Load `self.tests` from the report file; missing/corrupt files yield {}."""
        if not self.report_file.exists():
            # parents=True so a freshly configured (nested) reports dir works too;
            # the original only created the last path component.
            self.report_file.parent.mkdir(parents=True, exist_ok=True)

        try:
            with self.report_file.open("r") as f:
                data = json.load(f)
                self.tests = {k: data[k] for k in sorted(data)}
        except FileNotFoundError:
            self.tests = {}
        except json.decoder.JSONDecodeError as e:
            logger.warning(f"Could not parse {self.report_file}: {e}")
            self.tests = {}

    def save(self) -> None:
        """Write `self.tests` to the report file as indented JSON."""
        with self.report_file.open("w") as f:
            json.dump(self.tests, f, indent=4)

    def remove_test(self, test_name: str) -> None:
        """Remove `test_name` from the report (no-op if absent) and persist."""
        if test_name in self.tests:
            del self.tests[test_name]
            self.save()

    def reset(self) -> None:
        """Clear all entries and persist the empty report."""
        self.tests = {}
        self.save()
|
||||
|
||||
|
||||
class SessionReportManager(BaseReportManager):
    """Manages the report file of a single benchmark session."""

    tests: dict[str, Test]
    # Set once the session report has been finalized; None while tests run.
    report: Report | None = None

    def __init__(self, report_file: Path, benchmark_start_time: datetime):
        """
        Args:
            report_file: Path of the session's report.json.
            benchmark_start_time: Timestamp identifying this benchmark run.
        """
        super().__init__(report_file)

        self.start_time = time.time()
        self.benchmark_start_time = benchmark_start_time

    def save(self) -> None:
        """Persist either the finalized Report or the in-progress test mapping."""
        with self.report_file.open("w") as f:
            if self.report:
                f.write(self.report.model_dump_json(indent=4))
            else:
                json.dump(
                    {k: v.model_dump() for k, v in self.tests.items()}, f, indent=4
                )

    def load(self) -> None:
        super().load()

        # A finalized report file has a top-level "tests" key; otherwise the
        # file maps test names directly to Test entries.
        if "tests" in self.tests:
            self.report = Report.model_validate(self.tests)
        else:
            self.tests = {n: Test.model_validate(d) for n, d in self.tests.items()}

    def add_test_report(self, test_name: str, test_report: Test) -> None:
        """Add a single test's report to the session and persist.

        Raises:
            RuntimeError: If the session report was already finalized.
        """
        if self.report:
            raise RuntimeError("Session report already finalized")

        if test_name.startswith("Test"):
            test_name = test_name[4:]
        self.tests[test_name] = test_report

        self.save()

    def finalize_session_report(self, config: AgentBenchmarkConfig) -> None:
        """Build the final `Report`, optionally render a radar chart, persist.

        Raises:
            RuntimeError: If the session report was already finalized.
        """
        command = " ".join(sys.argv)

        if self.report:
            raise RuntimeError("Session report already finalized")

        self.report = Report(
            command=command.split(os.sep)[-1],
            benchmark_git_commit_sha="---",
            agent_git_commit_sha="---",
            completion_time=datetime.now(timezone.utc).strftime(
                "%Y-%m-%dT%H:%M:%S+00:00"
            ),
            benchmark_start_time=self.benchmark_start_time.strftime(
                "%Y-%m-%dT%H:%M:%S+00:00"
            ),
            metrics=MetricsOverall(
                run_time=str(round(time.time() - self.start_time, 2)) + " seconds",
                highest_difficulty=get_highest_success_difficulty(self.tests),
                total_cost=self.get_total_costs(),
            ),
            tests=copy.copy(self.tests),
            config=config.model_dump(exclude={"reports_folder"}, exclude_none=True),
        )

        agent_categories = get_highest_achieved_difficulty_per_category(self.report)
        if len(agent_categories) > 1:
            save_single_radar_chart(
                agent_categories,
                config.get_report_dir(self.benchmark_start_time) / "radar_chart.png",
            )

        self.save()

    def get_total_costs(self):
        """Sum the recorded costs over all test results.

        Returns:
            The total cost, or None if no result carried a cost.
            NOTE(review): the original computed `cost = sum(r.cost or 0 ...)`
            per test and then compared that (never-None) sum against None, so it
            could never report None once any test had results; this version
            tracks whether any real cost entry was seen.
        """
        tests = self.report.tests if self.report else self.tests

        total_cost = 0
        any_cost_recorded = False
        for test_data in tests.values():
            for result in test_data.results:
                if result.cost is not None:
                    any_cost_recorded = True
                    total_cost += result.cost

        return total_cost if any_cost_recorded else None
|
||||
|
||||
|
||||
class RegressionTestsTracker(BaseReportManager):
    """Tracks which tests count as regression tests, backed by a JSON file."""

    tests: dict[str, dict]

    def add_test(self, test_name: str, test_details: dict) -> None:
        """Record `test_details` under `test_name` ("Test" prefix removed) and persist."""
        self.tests[test_name.removeprefix("Test")] = test_details
        self.save()

    def has_regression_test(self, test_name: str) -> bool:
        """Whether a (non-None) entry is recorded for `test_name`."""
        return self.tests.get(test_name) is not None
|
||||
|
||||
|
||||
class SuccessRatesTracker(BaseReportManager):
    """Tracks the success history of every test, backed by a JSON file."""

    tests: dict[str, list[bool | None]]

    def update(self, test_name: str, success_history: list[bool | None]) -> None:
        """Store the success history for `test_name` ("Test" prefix removed) and persist."""
        self.tests[test_name.removeprefix("Test")] = success_history
        self.save()
|
||||
@@ -1,45 +0,0 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from agbenchmark.reports.processing.graphs import (
|
||||
save_combined_bar_chart,
|
||||
save_combined_radar_chart,
|
||||
)
|
||||
from agbenchmark.reports.processing.process_report import (
|
||||
all_agent_categories,
|
||||
get_reports_data,
|
||||
)
|
||||
|
||||
|
||||
def generate_combined_chart() -> None:
    """
    Generate combined radar and bar charts over the latest report of every
    agent under the top-level `reports` directory, writing them to a fresh
    `combined_charts/run<N>` folder together with a `run_info.json` manifest.
    """
    all_agents_path = Path(__file__).parent.parent.parent.parent / "reports"

    combined_charts_folder = all_agents_path / "combined_charts"
    # Robustness fix: on a first run the folder may not exist yet, in which
    # case iterating it to count previous runs raises FileNotFoundError.
    os.makedirs(combined_charts_folder, exist_ok=True)

    reports_data = get_reports_data(str(all_agents_path))

    categories = all_agent_categories(reports_data)

    # Number this run after the existing run directories
    num_dirs = len([f for f in combined_charts_folder.iterdir() if f.is_dir()])

    run_charts_folder = combined_charts_folder / f"run{num_dirs + 1}"

    if not os.path.exists(run_charts_folder):
        os.makedirs(run_charts_folder)

    info_data = {
        report_name: data.benchmark_start_time
        for report_name, data in reports_data.items()
        if report_name in categories
    }
    with open(Path(run_charts_folder) / "run_info.json", "w") as f:
        json.dump(info_data, f)

    save_combined_radar_chart(categories, Path(run_charts_folder) / "radar_chart.png")
    save_combined_bar_chart(categories, Path(run_charts_folder) / "bar_chart.png")


if __name__ == "__main__":
    generate_combined_chart()
|
||||
@@ -1,34 +0,0 @@
|
||||
import os
|
||||
|
||||
|
||||
def get_last_subdirectory(directory_path: str) -> str | None:
|
||||
# Get all subdirectories in the directory
|
||||
subdirs = [
|
||||
os.path.join(directory_path, name)
|
||||
for name in os.listdir(directory_path)
|
||||
if os.path.isdir(os.path.join(directory_path, name))
|
||||
]
|
||||
|
||||
# Sort the subdirectories by creation time
|
||||
subdirs.sort(key=os.path.getctime)
|
||||
|
||||
# Return the last subdirectory in the list
|
||||
return subdirs[-1] if subdirs else None
|
||||
|
||||
|
||||
def get_latest_report_from_agent_directories(
|
||||
directory_path: str,
|
||||
) -> list[tuple[os.DirEntry[str], str]]:
|
||||
latest_reports = []
|
||||
|
||||
for subdir in os.scandir(directory_path):
|
||||
if subdir.is_dir():
|
||||
# Get the most recently created subdirectory within this agent's directory
|
||||
latest_subdir = get_last_subdirectory(subdir.path)
|
||||
if latest_subdir is not None:
|
||||
# Look for 'report.json' in the subdirectory
|
||||
report_file = os.path.join(latest_subdir, "report.json")
|
||||
if os.path.isfile(report_file):
|
||||
latest_reports.append((subdir, report_file))
|
||||
|
||||
return latest_reports
|
||||
@@ -1,199 +0,0 @@
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import matplotlib.patches as mpatches
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def save_combined_radar_chart(
    categories: dict[str, Any], save_path: str | Path
) -> None:
    """
    Render a multi-series radar chart (one polygon per agent) of per-category
    scores and save it as a PNG.

    Args:
        categories: Mapping of agent name -> {category: score}; agents with
            empty data are dropped up front.
        save_path: Destination PNG path.

    Raises:
        Exception: If there is no data left to plot.
    """
    categories = {k: v for k, v in categories.items() if v}
    if not all(categories.values()):
        raise Exception("No data to plot")
    labels = np.array(
        list(next(iter(categories.values())).keys())
    )  # We use the first category to get the keys
    num_vars = len(labels)
    angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()
    angles += angles[
        :1
    ]  # Add the first angle to the end of the list to ensure the polygon is closed

    # Create radar chart
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.set_theta_offset(np.pi / 2)  # type: ignore
    ax.set_theta_direction(-1)  # type: ignore
    ax.spines["polar"].set_visible(False)  # Remove border

    # Compatibility fix: `plt.cm.get_cmap` was removed in Matplotlib 3.9;
    # `plt.get_cmap` accepts the same (name, lut) arguments.
    cmap = plt.get_cmap("nipy_spectral", len(categories))  # type: ignore

    colors = [cmap(i) for i in range(len(categories))]

    for i, (cat_name, cat_values) in enumerate(
        categories.items()
    ):  # Iterating through each category (series)
        values = np.array(list(cat_values.values()))
        values = np.concatenate((values, values[:1]))  # Ensure the polygon is closed

        ax.fill(angles, values, color=colors[i], alpha=0.25)  # Draw the filled polygon
        ax.plot(angles, values, color=colors[i], linewidth=2)  # Draw polygon
        ax.plot(
            angles,
            values,
            "o",
            color="white",
            markersize=7,
            markeredgecolor=colors[i],
            markeredgewidth=2,
        )  # Draw points

    # Draw legend
    ax.legend(
        handles=[
            mpatches.Patch(color=color, label=cat_name, alpha=0.25)
            for cat_name, color in zip(categories.keys(), colors)
        ],
        loc="upper left",
        bbox_to_anchor=(0.7, 1.3),
    )

    # Adjust layout to make room for the legend
    plt.tight_layout()

    lines, labels = plt.thetagrids(
        np.degrees(angles[:-1]), (list(next(iter(categories.values())).keys()))
    )  # We use the first category to get the keys

    highest_score = 7

    # Set y-axis limit to 7
    ax.set_ylim(top=highest_score)

    # Move labels away from the plot
    for label in labels:
        label.set_position(
            (label.get_position()[0], label.get_position()[1] + -0.05)
        )  # adjust 0.1 as needed

    # Move radial labels away from the plot
    ax.set_rlabel_position(180)  # type: ignore

    ax.set_yticks([])  # Remove default yticks

    # Manually create gridlines
    for y in np.arange(0, highest_score + 1, 1):
        if y != highest_score:
            ax.plot(
                angles, [y] * len(angles), color="gray", linewidth=0.5, linestyle=":"
            )
            # Add labels for manually created gridlines
            ax.text(
                angles[0],
                y + 0.2,
                str(int(y)),
                color="black",
                size=9,
                horizontalalignment="center",
                verticalalignment="center",
            )

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory
|
||||
|
||||
|
||||
def save_single_radar_chart(
    category_dict: dict[str, int], save_path: str | Path
) -> None:
    """
    Render a single-series radar chart of per-category scores and save as PNG.

    Args:
        category_dict: Mapping of category name -> achieved difficulty (0-7).
        save_path: Destination PNG path.
    """
    labels = np.array(list(category_dict.keys()))
    values = np.array(list(category_dict.values()))

    # Robustness fix: bail out before creating a figure. The original only
    # checked `values.size == 0` after plotting, leaking the open figure.
    if values.size == 0:
        return

    num_vars = len(labels)

    angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()

    angles += angles[:1]  # close the polygon
    values = np.concatenate((values, values[:1]))

    colors = ["#1f77b4"]

    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.set_theta_offset(np.pi / 2)  # type: ignore
    ax.set_theta_direction(-1)  # type: ignore

    ax.spines["polar"].set_visible(False)

    lines, labels = plt.thetagrids(
        np.degrees(angles[:-1]), (list(category_dict.keys()))
    )

    highest_score = 7

    # Set y-axis limit to 7
    ax.set_ylim(top=highest_score)

    # Nudge the category labels away from the plot
    for label in labels:
        label.set_position((label.get_position()[0], label.get_position()[1] + -0.05))

    ax.fill(angles, values, color=colors[0], alpha=0.25)
    ax.plot(angles, values, color=colors[0], linewidth=2)

    # Annotate each vertex with its value (unused enumerate index dropped)
    for angle, value in zip(angles, values):
        ha = "left"
        if angle in {0, np.pi}:
            ha = "center"
        elif np.pi < angle < 2 * np.pi:
            ha = "right"
        ax.text(
            angle,
            value - 0.5,
            f"{value}",
            size=10,
            horizontalalignment=ha,
            verticalalignment="center",
            color="black",
        )

    ax.set_yticklabels([])

    ax.set_yticks([])

    # Manually draw concentric gridlines
    for y in np.arange(0, highest_score, 1):
        ax.plot(angles, [y] * len(angles), color="gray", linewidth=0.5, linestyle=":")

    # Draw a point marker at every vertex
    for angle, value in zip(angles, values):
        ax.plot(
            angle,
            value,
            "o",
            color="white",
            markersize=7,
            markeredgecolor=colors[0],
            markeredgewidth=2,
        )

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory
|
||||
|
||||
|
||||
def save_combined_bar_chart(categories: dict[str, Any], save_path: str | Path) -> None:
    """
    Render a grouped bar chart of per-category performance for every agent
    and save it as a PNG.

    Args:
        categories: Mapping of agent name -> {category: score}.
        save_path: Destination PNG path.

    Raises:
        Exception: If any agent has no category data.
    """
    if not all(categories.values()):
        raise Exception("No data to plot")

    # One column per agent, one row per category
    frame = pd.DataFrame(categories)
    frame.plot(kind="bar", figsize=(10, 7))

    plt.title("Performance by Category for Each Agent")
    plt.xlabel("Category")
    plt.ylabel("Performance")

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory
|
||||
@@ -1,67 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from agbenchmark.reports.processing.get_files import (
|
||||
get_latest_report_from_agent_directories,
|
||||
)
|
||||
from agbenchmark.reports.processing.report_types import Report
|
||||
from agbenchmark.utils.data_types import STRING_DIFFICULTY_MAP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_reports_data(report_path: str) -> dict[str, Any]:
|
||||
latest_files = get_latest_report_from_agent_directories(report_path)
|
||||
|
||||
reports_data = {}
|
||||
|
||||
if latest_files is None:
|
||||
raise Exception("No files found in the reports directory")
|
||||
|
||||
# This will print the latest file in each s
|
||||
# ubdirectory and add to the files_data dictionary
|
||||
for subdir, file in latest_files:
|
||||
subdir_name = os.path.basename(os.path.normpath(subdir))
|
||||
with open(Path(subdir) / file, "r") as f:
|
||||
# Load the JSON data from the file
|
||||
json_data = json.load(f)
|
||||
converted_data = Report.model_validate(json_data)
|
||||
# get the last directory name in the path as key
|
||||
reports_data[subdir_name] = converted_data
|
||||
|
||||
return reports_data
|
||||
|
||||
|
||||
def get_highest_achieved_difficulty_per_category(report: Report) -> dict[str, Any]:
|
||||
categories: dict[str, Any] = {}
|
||||
|
||||
for _, test_data in report.tests.items():
|
||||
for category in test_data.category:
|
||||
if category in ("interface", "iterate", "product_advisor"):
|
||||
continue
|
||||
categories.setdefault(category, 0)
|
||||
if (
|
||||
test_data.results
|
||||
and all(r.success for r in test_data.results)
|
||||
and test_data.difficulty
|
||||
):
|
||||
num_dif = STRING_DIFFICULTY_MAP[test_data.difficulty]
|
||||
if num_dif > categories[category]:
|
||||
categories[category] = num_dif
|
||||
|
||||
return categories
|
||||
|
||||
|
||||
def all_agent_categories(reports_data: dict[str, Any]) -> dict[str, Any]:
|
||||
all_categories: dict[str, Any] = {}
|
||||
|
||||
for name, report in reports_data.items():
|
||||
categories = get_highest_achieved_difficulty_per_category(report)
|
||||
if categories: # only add to all_categories if categories is not empty
|
||||
logger.debug(f"Adding {name}: {categories}")
|
||||
all_categories[name] = categories
|
||||
|
||||
return all_categories
|
||||
@@ -1,106 +0,0 @@
|
||||
"""
|
||||
Model definitions used internally and for reports generated during command-line runs.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Annotated, Any, Dict, List
|
||||
|
||||
from agent_protocol_client import Step
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
Field,
|
||||
StringConstraints,
|
||||
ValidationInfo,
|
||||
field_validator,
|
||||
)
|
||||
|
||||
datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestResult(BaseModel):
|
||||
"""Result details for a single run of a test/challenge."""
|
||||
|
||||
success: bool | None = None
|
||||
"""Whether the run was successful"""
|
||||
run_time: str | None = None
|
||||
"""The (formatted) duration of the run"""
|
||||
fail_reason: str | None = None
|
||||
"""If applicable, the reason why the run was not successful"""
|
||||
reached_cutoff: bool | None = None # None if in progress
|
||||
"""Whether the run had to be stopped due to reaching the timeout"""
|
||||
n_steps: int | None = None
|
||||
"""The number of steps executed by the agent"""
|
||||
steps: list[Step] = []
|
||||
"""The steps generated by the agent"""
|
||||
cost: float | None = None
|
||||
"""The (known) cost incurred by the run, e.g. from using paid LLM APIs"""
|
||||
|
||||
@field_validator("fail_reason")
|
||||
def success_xor_fail_reason(cls, value, info: ValidationInfo):
|
||||
if bool(value) == bool(info.data["success"]):
|
||||
logger.error(
|
||||
"Error validating `success ^ fail_reason` on TestResult: "
|
||||
f"success = {repr(info.data['success'])}; "
|
||||
f"fail_reason = {repr(value)}"
|
||||
)
|
||||
if value:
|
||||
success = info.data["success"]
|
||||
assert not success, "fail_reason must only be specified if success=False"
|
||||
else:
|
||||
assert info.data["success"], "fail_reason is required if success=False"
|
||||
return value
|
||||
|
||||
|
||||
class TestMetrics(BaseModel):
|
||||
"""
|
||||
Result metrics for a set of runs for a test/challenge. Should be an aggregate of all
|
||||
results for the same test/challenge within a benchmarking session.
|
||||
"""
|
||||
|
||||
attempted: bool
|
||||
"""Whether the challenge was attempted during this session"""
|
||||
is_regression: bool
|
||||
"""Whether the challenge was considered a regression test at the time of running"""
|
||||
success_percentage: float | None = Field(default=None, alias="success_%")
|
||||
"""Success rate (0-100) for this challenge within the session"""
|
||||
|
||||
|
||||
class MetricsOverall(BaseModel):
|
||||
"""Global metrics concerning a benchmarking session"""
|
||||
|
||||
run_time: str
|
||||
"""Duration from beginning to end of the session"""
|
||||
highest_difficulty: str
|
||||
"""
|
||||
Difficulty of the most difficult challenge that succeeded at least once this session
|
||||
"""
|
||||
total_cost: float | None = None
|
||||
"""Total known cost of the session"""
|
||||
|
||||
|
||||
class Test(BaseModel):
|
||||
category: List[str]
|
||||
difficulty: str | None
|
||||
data_path: str
|
||||
description: str
|
||||
task: str
|
||||
answer: str
|
||||
metrics: TestMetrics
|
||||
results: list[TestResult]
|
||||
metadata: dict[str, Any] | None = Field(default_factory=dict)
|
||||
|
||||
|
||||
class ReportBase(BaseModel):
|
||||
command: str
|
||||
completion_time: str | None = None
|
||||
benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
|
||||
metrics: MetricsOverall
|
||||
config: Dict[str, str | dict[str, str]]
|
||||
agent_git_commit_sha: str | None = None
|
||||
benchmark_git_commit_sha: str | None = None
|
||||
repo_url: str | None = None
|
||||
|
||||
|
||||
class Report(ReportBase):
|
||||
tests: Dict[str, Test]
|
||||
@@ -1,49 +0,0 @@
|
||||
"""Model definitions for use in the API"""
|
||||
from typing import Annotated
|
||||
|
||||
from pydantic import BaseModel, StringConstraints
|
||||
|
||||
datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
|
||||
|
||||
|
||||
class TaskInfo(BaseModel):
|
||||
data_path: str
|
||||
is_regression: bool | None
|
||||
answer: str
|
||||
description: str
|
||||
category: list[str]
|
||||
task: str
|
||||
|
||||
|
||||
class RepositoryInfo(BaseModel):
|
||||
repo_url: str | None = None
|
||||
team_name: str | None = None
|
||||
agent_git_commit_sha: str | None = None
|
||||
benchmark_git_commit_sha: str | None = None
|
||||
|
||||
|
||||
class Metrics(BaseModel):
|
||||
cost: float | None = None
|
||||
success: bool
|
||||
attempted: bool
|
||||
difficulty: str | None = None
|
||||
run_time: str | None = None
|
||||
fail_reason: str | None = None
|
||||
success_percentage: float | None = None
|
||||
|
||||
|
||||
class RunDetails(BaseModel):
|
||||
test_name: str
|
||||
run_id: str | None = None
|
||||
command: str
|
||||
completion_time: str | None = None
|
||||
benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
|
||||
|
||||
|
||||
class BenchmarkRun(BaseModel):
|
||||
repository_info: RepositoryInfo
|
||||
run_details: RunDetails
|
||||
task_info: TaskInfo
|
||||
metrics: Metrics
|
||||
reached_cutoff: bool | None = None
|
||||
config: dict[str, str | dict[str, str]]
|
||||
@@ -1,157 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from pydantic import ValidationError
|
||||
|
||||
from agbenchmark.challenges import ChallengeInfo
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.report_types import Test, TestMetrics, TestResult
|
||||
from agbenchmark.reports.ReportManager import SingletonReportManager
|
||||
from agbenchmark.utils.data_types import DifficultyLevel
|
||||
|
||||
# from agbenchmark.utils.get_data_from_helicone import get_data_from_helicone
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_and_update_success_history(
|
||||
test_name: str, success: bool | None
|
||||
) -> list[bool | None]:
|
||||
mock = os.getenv("IS_MOCK") # Check if --mock is in sys.argv
|
||||
|
||||
prev_test_results = SingletonReportManager().SUCCESS_RATE_TRACKER.tests.get(
|
||||
test_name, []
|
||||
)
|
||||
|
||||
if not mock:
|
||||
# only add if it's an actual test
|
||||
prev_test_results.append(success)
|
||||
SingletonReportManager().SUCCESS_RATE_TRACKER.update(
|
||||
test_name, prev_test_results
|
||||
)
|
||||
|
||||
return prev_test_results
|
||||
|
||||
|
||||
def update_regression_tests(
|
||||
prev_test_results: list[bool | None],
|
||||
test_report: Test,
|
||||
test_name: str,
|
||||
) -> None:
|
||||
if len(prev_test_results) >= 3 and prev_test_results[-3:] == [True, True, True]:
|
||||
# if the last 3 tests were successful, add to the regression tests
|
||||
test_report.metrics.is_regression = True
|
||||
SingletonReportManager().REGRESSION_MANAGER.add_test(
|
||||
test_name, test_report.model_dump(include={"difficulty", "data_path"})
|
||||
)
|
||||
|
||||
|
||||
def make_empty_test_report(
|
||||
challenge_info: ChallengeInfo,
|
||||
) -> Test:
|
||||
difficulty = challenge_info.difficulty
|
||||
if isinstance(difficulty, DifficultyLevel):
|
||||
difficulty = difficulty.value
|
||||
|
||||
return Test(
|
||||
category=[c.value for c in challenge_info.category],
|
||||
difficulty=difficulty,
|
||||
data_path=challenge_info.source_uri,
|
||||
description=challenge_info.description or "",
|
||||
task=challenge_info.task,
|
||||
answer=challenge_info.reference_answer or "",
|
||||
metrics=TestMetrics(attempted=False, is_regression=False),
|
||||
results=[],
|
||||
)
|
||||
|
||||
|
||||
def add_test_result_to_report(
|
||||
test_report: Test,
|
||||
item: pytest.Item,
|
||||
call: pytest.CallInfo,
|
||||
config: AgentBenchmarkConfig,
|
||||
) -> None:
|
||||
user_properties: dict = dict(item.user_properties)
|
||||
test_name: str = user_properties.get("test_name", "")
|
||||
|
||||
mock = os.getenv("IS_MOCK") # Check if --mock is in sys.argv
|
||||
|
||||
if call.excinfo:
|
||||
if not mock:
|
||||
SingletonReportManager().REGRESSION_MANAGER.remove_test(test_name)
|
||||
|
||||
test_report.metrics.attempted = call.excinfo.typename != "Skipped"
|
||||
else:
|
||||
test_report.metrics.attempted = True
|
||||
|
||||
try:
|
||||
test_report.results.append(
|
||||
TestResult(
|
||||
success=call.excinfo is None,
|
||||
run_time=f"{str(round(call.duration, 3))} seconds",
|
||||
fail_reason=(
|
||||
str(call.excinfo.value) if call.excinfo is not None else None
|
||||
),
|
||||
reached_cutoff=user_properties.get("timed_out", False),
|
||||
n_steps=user_properties.get("n_steps"),
|
||||
steps=user_properties.get("steps", []),
|
||||
cost=user_properties.get("agent_task_cost"),
|
||||
)
|
||||
)
|
||||
test_report.metrics.success_percentage = (
|
||||
sum(r.success or False for r in test_report.results)
|
||||
/ len(test_report.results)
|
||||
* 100
|
||||
)
|
||||
except ValidationError:
|
||||
if call.excinfo:
|
||||
logger.error(
|
||||
"Validation failed on TestResult; "
|
||||
f"call.excinfo = {repr(call.excinfo)};\n{call.excinfo.getrepr()})"
|
||||
)
|
||||
raise
|
||||
|
||||
prev_test_results: list[bool | None] = get_and_update_success_history(
|
||||
test_name, test_report.results[-1].success
|
||||
)
|
||||
|
||||
update_regression_tests(prev_test_results, test_report, test_name)
|
||||
|
||||
if test_report and test_name:
|
||||
# if "--mock" not in sys.argv and os.environ.get("HELICONE_API_KEY"):
|
||||
# logger.debug("Getting cost from Helicone")
|
||||
# test_report.metrics.cost = get_data_from_helicone(test_name)
|
||||
# logger.debug(f"Cost: {cost}")
|
||||
|
||||
if not mock:
|
||||
update_challenges_already_beaten(
|
||||
config.challenges_already_beaten_file, test_report, test_name
|
||||
)
|
||||
|
||||
SingletonReportManager().INFO_MANAGER.add_test_report(test_name, test_report)
|
||||
|
||||
|
||||
def update_challenges_already_beaten(
|
||||
challenges_already_beaten_file: Path, test_report: Test, test_name: str
|
||||
) -> None:
|
||||
current_run_successful = any(r.success for r in test_report.results)
|
||||
try:
|
||||
with open(challenges_already_beaten_file, "r") as f:
|
||||
challenges_beaten_before = json.load(f)
|
||||
except FileNotFoundError:
|
||||
challenges_beaten_before = {}
|
||||
|
||||
has_ever_been_beaten = challenges_beaten_before.get(test_name)
|
||||
challenges_beaten_before[test_name] = has_ever_been_beaten or current_run_successful
|
||||
|
||||
with open(challenges_already_beaten_file, "w") as f:
|
||||
json.dump(challenges_beaten_before, f, indent=4)
|
||||
|
||||
|
||||
def session_finish(agbenchmark_config: AgentBenchmarkConfig) -> None:
|
||||
SingletonReportManager().INFO_MANAGER.finalize_session_report(agbenchmark_config)
|
||||
SingletonReportManager().REGRESSION_MANAGER.save()
|
||||
SingletonReportManager().SUCCESS_RATE_TRACKER.save()
|
||||
@@ -1,18 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class TaskRequestBody(BaseModel):
|
||||
input: str = Field(
|
||||
min_length=1,
|
||||
description="Input prompt for the task.",
|
||||
examples=["Write the words you receive to the file 'output.txt'."],
|
||||
)
|
||||
additional_input: Optional[dict[str, Any]] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class TaskEvalRequestBody(TaskRequestBody):
|
||||
eval_id: str
|
||||
@@ -1,46 +0,0 @@
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class DifficultyLevel(Enum):
|
||||
interface = "interface"
|
||||
basic = "basic"
|
||||
novice = "novice"
|
||||
intermediate = "intermediate"
|
||||
advanced = "advanced"
|
||||
expert = "expert"
|
||||
human = "human"
|
||||
|
||||
|
||||
# map from enum to difficulty level (numeric)
|
||||
DIFFICULTY_MAP = {
|
||||
DifficultyLevel.interface: 1,
|
||||
DifficultyLevel.basic: 2,
|
||||
DifficultyLevel.novice: 3,
|
||||
DifficultyLevel.intermediate: 4,
|
||||
DifficultyLevel.advanced: 5,
|
||||
DifficultyLevel.expert: 6,
|
||||
DifficultyLevel.human: 7,
|
||||
}
|
||||
|
||||
STRING_DIFFICULTY_MAP = {e.value: DIFFICULTY_MAP[e] for e in DifficultyLevel}
|
||||
|
||||
|
||||
class Category(str, Enum):
|
||||
GENERALIST = "general"
|
||||
DATA = "data"
|
||||
CODING = "coding"
|
||||
SCRAPE_SYNTHESIZE = "scrape_synthesize"
|
||||
WEB = "web"
|
||||
GAIA_1 = "GAIA_1"
|
||||
GAIA_2 = "GAIA_2"
|
||||
GAIA_3 = "GAIA_3"
|
||||
|
||||
|
||||
class EvalResult(BaseModel):
|
||||
result: str
|
||||
result_source: Literal["step_output"] | str
|
||||
score: float
|
||||
passed: bool
|
||||
@@ -1,206 +0,0 @@
|
||||
"""
|
||||
A module that provides the pytest hooks for this plugin.
|
||||
|
||||
The logic itself is in main.py.
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
import pytest
|
||||
from _pytest.config.argparsing import OptionGroup, Parser
|
||||
from _pytest.nodes import Item
|
||||
|
||||
from .main import DependencyManager
|
||||
|
||||
managers: list[DependencyManager] = []
|
||||
|
||||
|
||||
DEPENDENCY_PROBLEM_ACTIONS: dict[str, Callable[[str], None] | None] = {
|
||||
"run": None,
|
||||
"skip": lambda m: pytest.skip(m),
|
||||
"fail": lambda m: pytest.fail(m, False),
|
||||
"warning": lambda m: warnings.warn(m),
|
||||
}
|
||||
|
||||
|
||||
def _add_ini_and_option(
|
||||
parser: Any,
|
||||
group: OptionGroup,
|
||||
name: str,
|
||||
help: str,
|
||||
default: str | bool | int,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""
|
||||
Add an option to both the ini file and the command line flags.
|
||||
Command line flags/options takes precedence over the ini config.
|
||||
"""
|
||||
parser.addini(
|
||||
name,
|
||||
help + " This overrides the similarly named option from the config.",
|
||||
default=default,
|
||||
)
|
||||
group.addoption(f'--{name.replace("_", "-")}', help=help, default=None, **kwargs)
|
||||
|
||||
|
||||
def _get_ini_or_option(
|
||||
config: Any, name: str, choices: Optional[list[str]]
|
||||
) -> str | None:
|
||||
"""
|
||||
Get an option from either the ini file or the command line flags,
|
||||
with the latter taking precedence.
|
||||
"""
|
||||
value = config.getini(name)
|
||||
if value is not None and choices is not None and value not in choices:
|
||||
raise ValueError(
|
||||
f'Invalid ini value for {name}, choose from {", ".join(choices)}'
|
||||
)
|
||||
return config.getoption(name) or value
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
# get all current option strings
|
||||
current_options = []
|
||||
for action in parser._anonymous.options:
|
||||
current_options += action._short_opts + action._long_opts
|
||||
|
||||
for group in parser._groups:
|
||||
for action in group.options:
|
||||
current_options += action._short_opts + action._long_opts
|
||||
|
||||
group = parser.getgroup("depends")
|
||||
|
||||
# Add a flag to list all names + the tests they resolve to
|
||||
if "--list-dependency-names" not in current_options:
|
||||
group.addoption(
|
||||
"--list-dependency-names",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"List all non-nodeid dependency names + the tests they resolve to. "
|
||||
"Will also list all nodeid dependency names in verbose mode."
|
||||
),
|
||||
)
|
||||
|
||||
# Add a flag to list all (resolved) dependencies for all tests + unresolvable names
|
||||
if "--list-processed-dependencies" not in current_options:
|
||||
group.addoption(
|
||||
"--list-processed-dependencies",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"List all dependencies of all tests as a list of nodeids "
|
||||
"+ the names that could not be resolved."
|
||||
),
|
||||
)
|
||||
|
||||
# Add an ini option + flag to choose the action to take for failed dependencies
|
||||
if "--failed-dependency-action" not in current_options:
|
||||
_add_ini_and_option(
|
||||
parser,
|
||||
group,
|
||||
name="failed_dependency_action",
|
||||
help=(
|
||||
"The action to take when a test has dependencies that failed. "
|
||||
'Use "run" to run the test anyway, "skip" to skip the test, '
|
||||
'and "fail" to fail the test.'
|
||||
),
|
||||
default="skip",
|
||||
choices=DEPENDENCY_PROBLEM_ACTIONS.keys(),
|
||||
)
|
||||
|
||||
# Add an ini option + flag to choose the action to take for unresolved dependencies
|
||||
if "--missing-dependency-action" not in current_options:
|
||||
_add_ini_and_option(
|
||||
parser,
|
||||
group,
|
||||
name="missing_dependency_action",
|
||||
help=(
|
||||
"The action to take when a test has dependencies that cannot be found "
|
||||
"within the current scope. "
|
||||
'Use "run" to run the test anyway, "skip" to skip the test, '
|
||||
'and "fail" to fail the test.'
|
||||
),
|
||||
default="warning",
|
||||
choices=DEPENDENCY_PROBLEM_ACTIONS.keys(),
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config: Any) -> None:
|
||||
manager = DependencyManager()
|
||||
managers.append(manager)
|
||||
|
||||
# Setup the handling of problems with dependencies
|
||||
manager.options["failed_dependency_action"] = _get_ini_or_option(
|
||||
config,
|
||||
"failed_dependency_action",
|
||||
list(DEPENDENCY_PROBLEM_ACTIONS.keys()),
|
||||
)
|
||||
manager.options["missing_dependency_action"] = _get_ini_or_option(
|
||||
config,
|
||||
"missing_dependency_action",
|
||||
list(DEPENDENCY_PROBLEM_ACTIONS.keys()),
|
||||
)
|
||||
|
||||
# Register marker
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"depends(name='name', on=['other_name']): marks dependencies between tests.",
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(trylast=True)
|
||||
def pytest_collection_modifyitems(config: Any, items: list[pytest.Function]) -> None:
|
||||
manager = managers[-1]
|
||||
|
||||
# Register the founds tests on the manager
|
||||
manager.items = items
|
||||
|
||||
# Show the extra information if requested
|
||||
if config.getoption("list_dependency_names"):
|
||||
verbose = config.getoption("verbose") > 1
|
||||
manager.print_name_map(verbose)
|
||||
if config.getoption("list_processed_dependencies"):
|
||||
color = config.getoption("color")
|
||||
manager.print_processed_dependencies(color)
|
||||
|
||||
# Reorder the items so that tests run after their dependencies
|
||||
items[:] = manager.sorted_items
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
|
||||
def pytest_runtest_makereport(item: Item) -> Any:
|
||||
manager = managers[-1]
|
||||
|
||||
# Run the step
|
||||
outcome = yield
|
||||
|
||||
# Store the result on the manager
|
||||
manager.register_result(item, outcome.get_result())
|
||||
|
||||
|
||||
def pytest_runtest_call(item: Item) -> None:
|
||||
manager = managers[-1]
|
||||
|
||||
# Handle missing dependencies
|
||||
missing_dependency_action = DEPENDENCY_PROBLEM_ACTIONS[
|
||||
manager.options["missing_dependency_action"]
|
||||
]
|
||||
missing = manager.get_missing(item)
|
||||
if missing_dependency_action and missing:
|
||||
missing_dependency_action(
|
||||
f'{item.nodeid} depends on {", ".join(missing)}, which was not found'
|
||||
)
|
||||
|
||||
# Check whether all dependencies succeeded
|
||||
failed_dependency_action = DEPENDENCY_PROBLEM_ACTIONS[
|
||||
manager.options["failed_dependency_action"]
|
||||
]
|
||||
failed = manager.get_failed(item)
|
||||
if failed_dependency_action and failed:
|
||||
failed_dependency_action(f'{item.nodeid} depends on {", ".join(failed)}')
|
||||
|
||||
|
||||
def pytest_unconfigure() -> None:
|
||||
managers.pop()
|
||||
@@ -1,10 +0,0 @@
|
||||
""" Constants for this module. """
|
||||
|
||||
# The name of the marker used
|
||||
MARKER_NAME = "depends"
|
||||
|
||||
# The name of the kwarg for 'depends' markers that contains custom name(s) for the tests
|
||||
MARKER_KWARG_ID = "name"
|
||||
|
||||
# The name of the keyword argument for the marker that specifies the tests to depend on
|
||||
MARKER_KWARG_DEPENDENCIES = "on"
|
||||
@@ -1,453 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
import matplotlib.patches as patches
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
import numpy as np
|
||||
from pyvis.network import Network
|
||||
|
||||
from agbenchmark.generate_test import DATA_CATEGORY
|
||||
from agbenchmark.utils.utils import write_pretty_json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def bezier_curve(
|
||||
src: np.ndarray, ctrl: List[float], dst: np.ndarray
|
||||
) -> List[np.ndarray]:
|
||||
"""
|
||||
Generate Bézier curve points.
|
||||
|
||||
Args:
|
||||
- src (np.ndarray): The source point.
|
||||
- ctrl (List[float]): The control point.
|
||||
- dst (np.ndarray): The destination point.
|
||||
|
||||
Returns:
|
||||
- List[np.ndarray]: The Bézier curve points.
|
||||
"""
|
||||
curve = []
|
||||
for t in np.linspace(0, 1, num=100):
|
||||
curve_point = (
|
||||
np.outer((1 - t) ** 2, src)
|
||||
+ 2 * np.outer((1 - t) * t, ctrl)
|
||||
+ np.outer(t**2, dst)
|
||||
)
|
||||
curve.append(curve_point[0])
|
||||
return curve
|
||||
|
||||
|
||||
def curved_edges(
|
||||
G: nx.Graph, pos: Dict[Any, Tuple[float, float]], dist: float = 0.2
|
||||
) -> None:
|
||||
"""
|
||||
Draw curved edges for nodes on the same level.
|
||||
|
||||
Args:
|
||||
- G (Any): The graph object.
|
||||
- pos (Dict[Any, Tuple[float, float]]): Dictionary with node positions.
|
||||
- dist (float, optional): Distance for curvature. Defaults to 0.2.
|
||||
|
||||
Returns:
|
||||
- None
|
||||
"""
|
||||
ax = plt.gca()
|
||||
for u, v, data in G.edges(data=True):
|
||||
_src = pos[u]
|
||||
_dst = pos[v]
|
||||
src = np.array(_src)
|
||||
dst = np.array(_dst)
|
||||
|
||||
same_level = abs(src[1] - dst[1]) < 0.01
|
||||
|
||||
if same_level:
|
||||
control = [(src[0] + dst[0]) / 2, src[1] + dist]
|
||||
curve = bezier_curve(src, control, dst)
|
||||
arrow = patches.FancyArrowPatch(
|
||||
posA=curve[0], # type: ignore
|
||||
posB=curve[-1], # type: ignore
|
||||
connectionstyle="arc3,rad=0.2",
|
||||
color="gray",
|
||||
arrowstyle="-|>",
|
||||
mutation_scale=15.0,
|
||||
lw=1,
|
||||
shrinkA=10,
|
||||
shrinkB=10,
|
||||
)
|
||||
ax.add_patch(arrow)
|
||||
else:
|
||||
ax.annotate(
|
||||
"",
|
||||
xy=_dst,
|
||||
xytext=_src,
|
||||
arrowprops=dict(
|
||||
arrowstyle="-|>", color="gray", lw=1, shrinkA=10, shrinkB=10
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def tree_layout(graph: nx.DiGraph, root_node: Any) -> Dict[Any, Tuple[float, float]]:
|
||||
"""Compute positions as a tree layout centered on the root
|
||||
with alternating vertical shifts."""
|
||||
bfs_tree = nx.bfs_tree(graph, source=root_node)
|
||||
levels = {
|
||||
node: depth
|
||||
for node, depth in nx.single_source_shortest_path_length(
|
||||
bfs_tree, root_node
|
||||
).items()
|
||||
}
|
||||
|
||||
pos = {}
|
||||
max_depth = max(levels.values())
|
||||
level_positions = {i: 0 for i in range(max_depth + 1)} # type: ignore
|
||||
|
||||
# Count the number of nodes per level to compute the width
|
||||
level_count: Any = {}
|
||||
for node, level in levels.items():
|
||||
level_count[level] = level_count.get(level, 0) + 1
|
||||
|
||||
vertical_offset = (
|
||||
0.07 # The amount of vertical shift per node within the same level
|
||||
)
|
||||
|
||||
# Assign positions
|
||||
for node, level in sorted(levels.items(), key=lambda x: x[1]):
|
||||
total_nodes_in_level = level_count[level]
|
||||
horizontal_spacing = 1.0 / (total_nodes_in_level + 1)
|
||||
pos_x = (
|
||||
0.5
|
||||
- (total_nodes_in_level - 1) * horizontal_spacing / 2
|
||||
+ level_positions[level] * horizontal_spacing
|
||||
)
|
||||
|
||||
# Alternately shift nodes up and down within the same level
|
||||
pos_y = (
|
||||
-level
|
||||
+ (level_positions[level] % 2) * vertical_offset
|
||||
- ((level_positions[level] + 1) % 2) * vertical_offset
|
||||
)
|
||||
pos[node] = (pos_x, pos_y)
|
||||
|
||||
level_positions[level] += 1
|
||||
|
||||
return pos
|
||||
|
||||
|
||||
def graph_spring_layout(
|
||||
dag: nx.DiGraph, labels: Dict[Any, str], tree: bool = True
|
||||
) -> None:
|
||||
num_nodes = len(list(dag.nodes()))
|
||||
# Setting up the figure and axis
|
||||
fig, ax = plt.subplots()
|
||||
ax.axis("off") # Turn off the axis
|
||||
|
||||
base = 3.0
|
||||
|
||||
if num_nodes > 10:
|
||||
base /= 1 + math.log(num_nodes)
|
||||
font_size = base * 10
|
||||
|
||||
font_size = max(10, base * 10)
|
||||
node_size = max(300, base * 1000)
|
||||
|
||||
if tree:
|
||||
root_node = [node for node, degree in dag.in_degree() if degree == 0][0]
|
||||
pos = tree_layout(dag, root_node)
|
||||
else:
|
||||
# Adjust k for the spring layout based on node count
|
||||
k_value = 3 / math.sqrt(num_nodes)
|
||||
|
||||
pos = nx.spring_layout(dag, k=k_value, iterations=50)
|
||||
|
||||
# Draw nodes and labels
|
||||
nx.draw_networkx_nodes(dag, pos, node_color="skyblue", node_size=int(node_size))
|
||||
nx.draw_networkx_labels(dag, pos, labels=labels, font_size=int(font_size))
|
||||
|
||||
# Draw curved edges
|
||||
curved_edges(dag, pos) # type: ignore
|
||||
|
||||
plt.tight_layout()
|
||||
plt.show()
|
||||
|
||||
|
||||
def rgb_to_hex(rgb: Tuple[float, float, float]) -> str:
|
||||
return "#{:02x}{:02x}{:02x}".format(
|
||||
int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
|
||||
)
|
||||
|
||||
|
||||
def get_category_colors(categories: Dict[Any, str]) -> Dict[str, str]:
|
||||
unique_categories = set(categories.values())
|
||||
colormap = plt.cm.get_cmap("tab10", len(unique_categories)) # type: ignore
|
||||
return {
|
||||
category: rgb_to_hex(colormap(i)[:3])
|
||||
for i, category in enumerate(unique_categories)
|
||||
}
|
||||
|
||||
|
||||
def graph_interactive_network(
|
||||
dag: nx.DiGraph,
|
||||
labels: Dict[Any, Dict[str, Any]],
|
||||
html_graph_path: str = "",
|
||||
) -> None:
|
||||
nt = Network(notebook=True, width="100%", height="800px", directed=True)
|
||||
|
||||
category_colors = get_category_colors(DATA_CATEGORY)
|
||||
|
||||
# Add nodes and edges to the pyvis network
|
||||
for node, json_data in labels.items():
|
||||
label = json_data.get("name", "")
|
||||
# remove the first 4 letters of label
|
||||
label_without_test = label[4:]
|
||||
node_id_str = node.nodeid
|
||||
|
||||
# Get the category for this label
|
||||
category = DATA_CATEGORY.get(
|
||||
label, "unknown"
|
||||
) # Default to 'unknown' if label not found
|
||||
|
||||
# Get the color for this category
|
||||
color = category_colors.get(category, "grey")
|
||||
|
||||
nt.add_node(
|
||||
node_id_str,
|
||||
label=label_without_test,
|
||||
color=color,
|
||||
data=json_data,
|
||||
)
|
||||
|
||||
# Add edges to the pyvis network
|
||||
for edge in dag.edges():
|
||||
source_id_str = edge[0].nodeid
|
||||
target_id_str = edge[1].nodeid
|
||||
edge_id_str = (
|
||||
f"{source_id_str}_to_{target_id_str}" # Construct a unique edge id
|
||||
)
|
||||
if not (source_id_str in nt.get_nodes() and target_id_str in nt.get_nodes()):
|
||||
logger.warning(
|
||||
f"Skipping edge {source_id_str} -> {target_id_str} due to missing nodes"
|
||||
)
|
||||
continue
|
||||
nt.add_edge(source_id_str, target_id_str, id=edge_id_str)
|
||||
|
||||
# Configure physics for hierarchical layout
|
||||
hierarchical_options = {
|
||||
"enabled": True,
|
||||
"levelSeparation": 200, # Increased vertical spacing between levels
|
||||
"nodeSpacing": 250, # Increased spacing between nodes on the same level
|
||||
"treeSpacing": 250, # Increased spacing between different trees (for forest)
|
||||
"blockShifting": True,
|
||||
"edgeMinimization": True,
|
||||
"parentCentralization": True,
|
||||
"direction": "UD",
|
||||
"sortMethod": "directed",
|
||||
}
|
||||
|
||||
physics_options = {
|
||||
"stabilization": {
|
||||
"enabled": True,
|
||||
"iterations": 1000, # Default is often around 100
|
||||
},
|
||||
"hierarchicalRepulsion": {
|
||||
"centralGravity": 0.0,
|
||||
"springLength": 200, # Increased edge length
|
||||
"springConstant": 0.01,
|
||||
"nodeDistance": 250, # Increased minimum distance between nodes
|
||||
"damping": 0.09,
|
||||
},
|
||||
"solver": "hierarchicalRepulsion",
|
||||
"timestep": 0.5,
|
||||
}
|
||||
|
||||
nt.options = {
|
||||
"nodes": {
|
||||
"font": {
|
||||
"size": 20, # Increased font size for labels
|
||||
"color": "black", # Set a readable font color
|
||||
},
|
||||
"shapeProperties": {"useBorderWithImage": True},
|
||||
},
|
||||
"edges": {
|
||||
"length": 250, # Increased edge length
|
||||
},
|
||||
"physics": physics_options,
|
||||
"layout": {"hierarchical": hierarchical_options},
|
||||
}
|
||||
|
||||
# Serialize the graph to JSON and save in appropriate locations
|
||||
graph_data = {"nodes": nt.nodes, "edges": nt.edges}
|
||||
logger.debug(f"Generated graph data:\n{json.dumps(graph_data, indent=4)}")
|
||||
|
||||
# FIXME: use more reliable method to find the right location for these files.
|
||||
# This will fail in all cases except if run from the root of our repo.
|
||||
home_path = Path.cwd()
|
||||
write_pretty_json(graph_data, home_path / "frontend" / "public" / "graph.json")
|
||||
|
||||
flutter_app_path = home_path.parent / "frontend" / "assets"
|
||||
|
||||
# Optionally, save to a file
|
||||
# Sync with the flutter UI
|
||||
# this literally only works in the AutoGPT repo, but this part of the code
|
||||
# is not reached if BUILD_SKILL_TREE is false
|
||||
write_pretty_json(graph_data, flutter_app_path / "tree_structure.json")
|
||||
validate_skill_tree(graph_data, "")
|
||||
|
||||
# Extract node IDs with category "coding"
|
||||
|
||||
coding_tree = extract_subgraph_based_on_category(graph_data.copy(), "coding")
|
||||
validate_skill_tree(coding_tree, "coding")
|
||||
write_pretty_json(
|
||||
coding_tree,
|
||||
flutter_app_path / "coding_tree_structure.json",
|
||||
)
|
||||
|
||||
data_tree = extract_subgraph_based_on_category(graph_data.copy(), "data")
|
||||
# validate_skill_tree(data_tree, "data")
|
||||
write_pretty_json(
|
||||
data_tree,
|
||||
flutter_app_path / "data_tree_structure.json",
|
||||
)
|
||||
|
||||
general_tree = extract_subgraph_based_on_category(graph_data.copy(), "general")
|
||||
validate_skill_tree(general_tree, "general")
|
||||
write_pretty_json(
|
||||
general_tree,
|
||||
flutter_app_path / "general_tree_structure.json",
|
||||
)
|
||||
|
||||
scrape_synthesize_tree = extract_subgraph_based_on_category(
|
||||
graph_data.copy(), "scrape_synthesize"
|
||||
)
|
||||
validate_skill_tree(scrape_synthesize_tree, "scrape_synthesize")
|
||||
write_pretty_json(
|
||||
scrape_synthesize_tree,
|
||||
flutter_app_path / "scrape_synthesize_tree_structure.json",
|
||||
)
|
||||
|
||||
if html_graph_path:
|
||||
file_path = str(Path(html_graph_path).resolve())
|
||||
|
||||
nt.write_html(file_path)
|
||||
|
||||
|
||||
def extract_subgraph_based_on_category(graph, category):
    """
    Extract the sub-graph needed to reach every node tagged with `category`.

    Walks edges backwards (child -> parent) from each node whose
    ``data.category`` list contains the target category, collecting every
    ancestor node and the edges connecting them.

    :param graph: The original graph ({"nodes": [...], "edges": [...]}).
    :param category: The target category.
    :return: Subgraph with nodes and edges required to reach the nodes
        with the given category.
    """
    result = {"nodes": [], "edges": []}
    seen = set()

    def climb(target_id):
        # Reverse DFS: follow incoming edges, collecting ancestors.
        if target_id in seen:
            return
        seen.add(target_id)

        payload = next(n for n in graph["nodes"] if n["id"] == target_id)
        if payload not in result["nodes"]:
            result["nodes"].append(payload)

        for link in graph["edges"]:
            if link["to"] != target_id:
                continue
            if link not in result["edges"]:
                result["edges"].append(link)
            climb(link["from"])

    # Seed the reverse walk from every node carrying the target category.
    seeds = [n["id"] for n in graph["nodes"] if category in n["data"]["category"]]
    for seed in seeds:
        climb(seed)

    return result
|
||||
|
||||
|
||||
def is_circular(graph):
    """
    Detect a cycle in the graph.

    :param graph: A dict with "nodes" and "edges".
    :return: The node ids along the first cycle found (start node repeated at
        both ends), or None when the graph is acyclic.
    """

    def walk(current, seen, in_progress, parents):
        seen.add(current)
        in_progress.add(current)
        for link in graph["edges"]:
            if link["from"] != current:
                continue
            succ = link["to"]
            if succ in in_progress:
                # Back-edge found: rebuild the path back to `succ`.
                path = []
                step = current
                while step != succ:
                    path.append(step)
                    step = parents.get(step)
                path.append(succ)
                path.append(current)
                return path[::-1]
            if succ not in seen:
                parents[succ] = current
                found = walk(succ, seen, in_progress, parents)
                if found:
                    return found
        in_progress.remove(current)
        return None

    seen = set()
    in_progress = set()
    parents = {}
    for node in graph["nodes"]:
        if node["id"] not in seen:
            found = walk(node["id"], seen, in_progress, parents)
            if found:
                return found
    return None
|
||||
|
||||
|
||||
def get_roots(graph):
    """
    Return the roots of a graph. Roots are nodes with no incoming edges.
    """
    # Every node id, minus the ids that appear as an edge target.
    node_ids = {n["id"] for n in graph["nodes"]}
    edge_targets = {e["to"] for e in graph["edges"]}
    return list(node_ids - edge_targets)
|
||||
|
||||
|
||||
def validate_skill_tree(graph, skill_tree_name):
    """
    Validate if a given graph represents a valid skill tree
    and raise appropriate exceptions if not.

    A valid skill tree is acyclic and has exactly one root.

    :param graph: A dictionary representing the graph with 'nodes' and 'edges'.
    :raises ValueError: with a description of the invalidity.
    """
    # A skill tree must not contain cycles.
    loop = is_circular(graph)
    if loop:
        raise ValueError(
            f"{skill_tree_name} skill tree is circular! "
            f"Detected circular path: {' -> '.join(loop)}."
        )

    # A skill tree must have exactly one root node.
    roots = get_roots(graph)
    if not roots:
        raise ValueError(f"{skill_tree_name} skill tree has no roots.")
    if len(roots) > 1:
        raise ValueError(f"{skill_tree_name} skill tree has multiple roots: {roots}.")
|
||||
@@ -1,255 +0,0 @@
|
||||
"""
|
||||
A module to manage dependencies between pytest tests.
|
||||
|
||||
This module provides the methods implementing the main logic.
|
||||
These are used in the pytest hooks that are in __init__.py.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import os
|
||||
from typing import Any, Generator
|
||||
|
||||
import colorama
|
||||
import networkx
|
||||
from pytest import Function, Item
|
||||
|
||||
from agbenchmark.challenges.base import BaseChallenge
|
||||
|
||||
from .constants import MARKER_KWARG_DEPENDENCIES, MARKER_NAME
|
||||
from .graphs import graph_interactive_network
|
||||
from .util import clean_nodeid, get_absolute_nodeid, get_markers, get_name
|
||||
|
||||
|
||||
class TestResult(object):
    """Keeps track of the results of a single test."""

    # The steps a pytest test goes through, in order.
    STEPS = ["setup", "call", "teardown"]
    # Step outcomes that count as success.
    GOOD_OUTCOMES = ["passed"]

    def __init__(self, nodeid: str) -> None:
        """Create a new instance for a test with a given node id."""
        self.nodeid = nodeid
        self.results: dict[str, Any] = {}

    def register_result(self, result: Any) -> None:
        """Register a result of this test.

        :raises ValueError: for a result of an unknown step.
        :raises AttributeError: when a step's result is registered twice.
        """
        step = result.when
        if step not in self.STEPS:
            raise ValueError(
                f"Received result for unknown step {step} of test {self.nodeid}"
            )
        if step in self.results:
            raise AttributeError(
                f"Received multiple results for step {step} "
                f"of test {self.nodeid}"
            )
        self.results[step] = result.outcome

    @property
    def success(self) -> bool:
        """Whether the entire test was successful."""
        outcomes = (self.results.get(step, None) for step in self.STEPS)
        return all(outcome in self.GOOD_OUTCOMES for outcome in outcomes)
|
||||
|
||||
|
||||
class TestDependencies(object):
    """Information about the resolved dependencies of a single test."""

    def __init__(self, item: Item, manager: "DependencyManager") -> None:
        """Resolve the dependency markers of `item` against `manager`.

        Populates `self.dependencies` with the node ids of resolved
        dependencies and `self.unresolved` with names that could not be
        matched to any known test.
        """
        self.nodeid = clean_nodeid(item.nodeid)
        self.dependencies = set()
        self.unresolved = set()

        declared = [
            dep
            for marker in get_markers(item, MARKER_NAME)
            for dep in marker.kwargs.get(MARKER_KWARG_DEPENDENCIES, [])
        ]
        for dep in declared:
            # Unknown name: try making it absolute (file::[class::]method).
            if dep not in manager.name_to_nodeids:
                absolute = get_absolute_nodeid(dep, self.nodeid)
                if absolute in manager.name_to_nodeids:
                    dep = absolute

            if dep in manager.name_to_nodeids:
                # Add every item matching the (possibly absolutized) name.
                for nodeid in manager.name_to_nodeids[dep]:
                    self.dependencies.add(nodeid)
            else:
                self.unresolved.add(dep)
|
||||
|
||||
|
||||
class DependencyManager(object):
    """Keep track of tests, their names and their dependencies."""

    def __init__(self) -> None:
        """Create a new DependencyManager."""
        self.options: dict[str, Any] = {}
        # All of the mappings below are populated when `items` is assigned.
        self._items: list[Function] | None = None
        self._name_to_nodeids: Any = None
        self._nodeid_to_item: Any = None
        self._results: Any = None

    @property
    def items(self) -> list[Function]:
        """The collected tests that are managed by this instance."""
        if self._items is None:
            raise AttributeError("The items attribute has not been set yet")
        return self._items

    @items.setter
    def items(self, items: list[Function]) -> None:
        if self._items is not None:
            raise AttributeError("The items attribute has already been set")
        self._items = items

        self._name_to_nodeids = collections.defaultdict(list)
        self._nodeid_to_item = {}
        self._results = {}
        self._dependencies = {}

        for item in items:
            nodeid = clean_nodeid(item.nodeid)
            # Map the node id to the test item itself.
            self._nodeid_to_item[nodeid] = item
            # Map every name of the test to its node id.
            name = get_name(item)
            self._name_to_nodeids[name].append(nodeid)
            # Prepare the container that will hold this test's results.
            self._results[nodeid] = TestResult(clean_nodeid(item.nodeid))

        # Don't allow using unknown keys on the name_to_nodeids mapping
        self._name_to_nodeids.default_factory = None

        # Resolve dependencies in a second pass: this needs the complete
        # name/nodeid mappings built above, so it cannot be merged with
        # the previous loop.
        for item in items:
            nodeid = clean_nodeid(item.nodeid)
            self._dependencies[nodeid] = TestDependencies(item, self)

    @property
    def name_to_nodeids(self) -> dict[str, list[str]]:
        """A mapping from names to matching node id(s)."""
        assert self.items is not None
        return self._name_to_nodeids

    @property
    def nodeid_to_item(self) -> dict[str, Function]:
        """A mapping from node ids to test items."""
        assert self.items is not None
        return self._nodeid_to_item

    @property
    def results(self) -> dict[str, TestResult]:
        """The results of the tests."""
        assert self.items is not None
        return self._results

    @property
    def dependencies(self) -> dict[str, TestDependencies]:
        """The dependencies of the tests."""
        assert self.items is not None
        return self._dependencies

    def print_name_map(self, verbose: bool = False) -> None:
        """Print a human-readable version of the name -> test mapping."""
        print("Available dependency names:")
        for name, nodeids in sorted(self.name_to_nodeids.items(), key=lambda x: x[0]):
            if len(nodeids) == 1:
                if name == nodeids[0]:
                    # This is just the base name, only print this when verbose
                    if verbose:
                        print(f"  {name}")
                else:
                    # Name refers to a single node id, so use the short format
                    print(f"  {name} -> {nodeids[0]}")
            else:
                # Name refers to multiple node ids, so use the long format
                print(f"  {name} ->")
                for nodeid in sorted(nodeids):
                    print(f"    {nodeid}")

    def print_processed_dependencies(self, colors: bool = False) -> None:
        """Print a human-readable list of the processed dependencies."""
        missing = "MISSING"
        if colors:
            missing = f"{colorama.Fore.RED}{missing}{colorama.Fore.RESET}"
            colorama.init()
        try:
            print("Dependencies:")
            for nodeid, info in sorted(self.dependencies.items(), key=lambda x: x[0]):
                descriptions = []
                for dependency in info.dependencies:
                    descriptions.append(dependency)
                for dependency in info.unresolved:
                    descriptions.append(f"{dependency} ({missing})")
                if descriptions:
                    print(f"  {nodeid} depends on")
                    for description in sorted(descriptions):
                        print(f"    {description}")
        finally:
            if colors:
                colorama.deinit()

    @property
    def sorted_items(self) -> Generator:
        """
        Get a sorted list of tests where all tests are sorted after their dependencies.
        """
        # Env toggle controlling skill-tree generation.
        env_value = os.getenv("BUILD_SKILL_TREE")
        build_skill_tree = env_value.lower() == "true" if env_value else False

        dag = networkx.DiGraph()

        # Add every item as a node first, so tests without any dependency
        # relationship are not dropped from the graph.
        dag.add_nodes_from(self.items)

        # Add one edge per resolved dependency.
        for item in self.items:
            nodeid = clean_nodeid(item.nodeid)
            for dependency in self.dependencies[nodeid].dependencies:
                dag.add_edge(self.nodeid_to_item[dependency], item)

        labels = {}
        for item in self.items:
            assert item.cls and issubclass(item.cls, BaseChallenge)
            data = item.cls.info.model_dump()
            data["name"] = get_name(item)
            labels[item] = data

        # only build the tree if it's specified in the env and is a whole run
        if build_skill_tree:
            # graph_spring_layout(dag, labels)
            graph_interactive_network(dag, labels, html_graph_path="")

        # Sort based on the dependencies
        return networkx.topological_sort(dag)

    def register_result(self, item: Item, result: Any) -> None:
        """Register a result of a test."""
        nodeid = clean_nodeid(item.nodeid)
        self.results[nodeid].register_result(result)

    def get_failed(self, item: Item) -> Any:
        """Get a list of unfulfilled dependencies for a test."""
        nodeid = clean_nodeid(item.nodeid)
        return [
            dependency
            for dependency in self.dependencies[nodeid].dependencies
            if not self.results[dependency].success
        ]

    def get_missing(self, item: Item) -> Any:
        """Get a list of missing dependencies for a test."""
        nodeid = clean_nodeid(item.nodeid)
        return self.dependencies[nodeid].unresolved
|
||||
@@ -1,86 +0,0 @@
|
||||
""" Utility functions to process the identifiers of tests. """
|
||||
import re
|
||||
from typing import Iterator
|
||||
|
||||
from _pytest.mark.structures import Mark
|
||||
from _pytest.nodes import Item
|
||||
|
||||
from .constants import MARKER_KWARG_ID, MARKER_NAME
|
||||
|
||||
REGEX_PARAMETERS = re.compile(r"\[.+\]$")
|
||||
|
||||
|
||||
def clean_nodeid(nodeid: str) -> str:
    """
    Remove any superfluous ::() from a node id.

    >>> clean_nodeid('test_file.py::TestClass::()::test')
    'test_file.py::TestClass::test'
    >>> clean_nodeid('test_file.py::TestClass::test')
    'test_file.py::TestClass::test'
    >>> clean_nodeid('test_file.py::test')
    'test_file.py::test'
    """
    cleaned = nodeid.replace("::()::", "::")
    return cleaned
|
||||
|
||||
|
||||
def strip_nodeid_parameters(nodeid: str) -> str:
    """
    Strip parameters from a node id.

    >>> strip_nodeid_parameters('test_file.py::TestClass::test[foo]')
    'test_file.py::TestClass::test'
    >>> strip_nodeid_parameters('test_file.py::TestClass::test')
    'test_file.py::TestClass::test'
    """
    # A trailing "[...]" holds the parametrization; drop it when present.
    return re.sub(r"\[.+\]$", "", nodeid)
|
||||
|
||||
|
||||
def get_absolute_nodeid(nodeid: str, scope: str) -> str:
    """
    Transform a possibly relative node id to an absolute one
    using the scope in which it is used.

    >>> scope = 'test_file.py::TestClass::test'
    >>> get_absolute_nodeid('test2', scope)
    'test_file.py::TestClass::test2'
    >>> get_absolute_nodeid('TestClass2::test2', scope)
    'test_file.py::TestClass2::test2'
    >>> get_absolute_nodeid('test_file2.py::TestClass2::test2', scope)
    'test_file2.py::TestClass2::test2'
    """
    parts = nodeid.split("::")
    if len(parts) == 1:
        # Fully relative (test_name): prefix the scope minus its last part.
        base = scope.rsplit("::", 1)[0]
        nodeid = f"{base}::{nodeid}"
    elif "." not in parts[0]:
        # Partially scoped (Class::test_name): prefix only the file scope.
        base = scope.split("::", 1)[0]
        nodeid = f"{base}::{nodeid}"
    # Normalize away any "::()::" the concatenation may have produced.
    return nodeid.replace("::()::", "::")
|
||||
|
||||
|
||||
def get_name(item: Item) -> str:
    """
    Get all names for a test.

    This will use the following methods to determine the name of the test:
        - If given, the custom name(s) passed to the keyword argument name on the marker

    Returns "" when no custom name was set.
    """
    name = ""

    # Custom name: a later marker overrides an earlier one.
    for marker in get_markers(item, MARKER_NAME):
        if MARKER_KWARG_ID in marker.kwargs:
            name = marker.kwargs[MARKER_KWARG_ID]

    return name
|
||||
|
||||
|
||||
def get_markers(item: Item, name: str) -> Iterator[Mark]:
    """Get all markers with the given name for a given item."""
    yield from (marker for marker in item.iter_markers() if marker.name == name)
|
||||
@@ -1,84 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import Optional
|
||||
|
||||
import requests
|
||||
|
||||
from agbenchmark.__main__ import BENCHMARK_START_TIME
|
||||
from agbenchmark.agent_interface import HELICONE_GRAPHQL_LOGS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_data_from_helicone(challenge: str) -> Optional[float]:
    """
    Fetch the aggregated cost (in USD) of a challenge run from Helicone.

    Queries Helicone's GraphQL endpoint, filtering requests by agent name,
    benchmark start time, and the given challenge name.

    :param challenge: Name of the challenge to query costs for.
    :return: The aggregated `costUSD`, or None when the request fails or the
        response contains no data.
    """
    # Define the endpoint of your GraphQL server
    url = "https://www.helicone.ai/api/graphql"

    # Set the headers, usually you'd need to set the content type
    # and possibly an authorization token
    headers = {"authorization": f"Bearer {os.environ.get('HELICONE_API_KEY')}"}

    # Define the query, variables, and operation name
    query = """
query ExampleQuery($properties: [PropertyFilter!]){
  aggregatedHeliconeRequest(properties: $properties) {
    costUSD
  }
}
"""

    variables = {
        "properties": [
            {
                "value": {"equals": os.environ.get("AGENT_NAME")},
                "name": "agent",
            },
            {
                "value": {"equals": BENCHMARK_START_TIME},
                "name": "benchmark_start_time",
            },
            {"value": {"equals": challenge}, "name": "challenge"},
        ]
    }
    if HELICONE_GRAPHQL_LOGS:
        logger.debug(f"Executing Helicone query:\n{query.strip()}")
        logger.debug(f"Query variables:\n{json.dumps(variables, indent=4)}")

    operation_name = "ExampleQuery"

    data = {}
    response = None

    try:
        response = requests.post(
            url,
            headers=headers,
            json={
                "query": query,
                "variables": variables,
                "operationName": operation_name,
            },
        )
        # FIX: requests does not raise HTTPError on its own for 4xx/5xx
        # responses; without this call the HTTPError handler below was
        # unreachable and error bodies fell through to the JSON parser.
        response.raise_for_status()

        data = response.json()
    except requests.HTTPError as http_err:
        logger.error(f"Helicone returned an HTTP error: {http_err}")
        return None
    except json.JSONDecodeError:
        raw_response = response.text  # type: ignore
        logger.error(
            f"Helicone returned an invalid JSON response: '''{raw_response}'''"
        )
        return None
    except Exception as err:
        logger.error(f"Error while trying to get data from Helicone: {err}")
        return None

    if data is None or data.get("data") is None:
        logger.error("Invalid response received from Helicone: no data")
        logger.error(f"Offending response: {response}")
        return None
    return (
        data.get("data", {}).get("aggregatedHeliconeRequest", {}).get("costUSD", None)
    )
|
||||
@@ -1,74 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
SIMPLE_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(message)s"
|
||||
DEBUG_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(filename)s:%(lineno)03d %(message)s"
|
||||
|
||||
|
||||
def configure_logging(
    level: int = logging.INFO,
) -> None:
    """Configure the native logging module."""

    # Use the more detailed format when debugging.
    if level == logging.DEBUG:
        log_format = DEBUG_LOG_FORMAT
    else:
        log_format = SIMPLE_LOG_FORMAT

    handler = logging.StreamHandler()
    handler.setFormatter(FancyConsoleFormatter(log_format))

    # Configure the root logger
    logging.basicConfig(
        level=level,
        format=log_format,
        handlers=[handler],
    )
|
||||
|
||||
|
||||
class FancyConsoleFormatter(logging.Formatter):
    """
    A custom logging formatter designed for console output.

    This formatter enhances the standard logging output with color coding. The color
    coding is based on the level of the log message, making it easier to distinguish
    between different types of messages in the console output.

    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
    """

    # level -> (level & text color, title color)
    LEVEL_COLOR_MAP = {
        logging.DEBUG: Fore.LIGHTBLACK_EX,
        logging.INFO: Fore.BLUE,
        logging.WARNING: Fore.YELLOW,
        logging.ERROR: Fore.RED,
        logging.CRITICAL: Fore.RED + Style.BRIGHT,
    }

    def format(self, record: logging.LogRecord) -> str:
        # Coerce `msg` into a string so the colour wrapping below is safe.
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Justify the level name to 5 characters minimum
        record.levelname = record.levelname.ljust(5)

        # Colour the level name according to severity.
        level_color = self.LEVEL_COLOR_MAP.get(record.levelno, "")
        if level_color:
            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"

        # A record may carry an explicit colour override.
        color_is_specified = hasattr(record, "color")
        color = getattr(record, "color", level_color)

        # Don't color INFO messages unless the color is explicitly specified.
        if color and (record.levelno != logging.INFO or color_is_specified):
            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"

        return super().format(record)
|
||||
@@ -1,79 +0,0 @@
|
||||
# Maps a scoring-mode key to the instruction fragment that tells the LLM
# evaluator how to express its score.
SCORING_MAP = {
    "percentage": (
        "assign a float score that will represent a percentage out of 100. "
        "Use decimal points to be even more accurate. "
        "0 represents the worst possible generation, "
        "while 100 represents the ideal generation"
    ),
    "scale": (
        "assign an integer score from a scale of 1-10. "
        "1 represents a really bad generation, while 10 represents an ideal generation"
    ),
    "binary": (
        "assign a binary score of either 0 or 1. "
        "0 represents a failure, while 1 represents a success"
    ),
}


# Prompt template for scoring a response against an ideal reference answer.
# Placeholders: {scoring}, {task}, {answer}, {response}.
REFERENCE_PROMPT = """Ignore previous directions. You are now an expert at evaluating how close machine generated responses are to human answers. You essentially act as a hyper advanced BLEU score.
In order to score the machine generated response you will {scoring}. Make sure to factor in the distance to the ideal response into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.

Here is the given task for you to evaluate:
{task}

Here is the ideal response you're comparing to based on the task:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

# Prompt template for scoring a response against a rubric.
# Placeholders: {scoring}, {task}, {answer}, {response}.
RUBRIC_PROMPT = """Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.
In order to score the generated texts you will {scoring}. Make sure to factor in rubric into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.

Here is the given task for you to evaluate:
{task}

Use the below rubric to guide your thinking about scoring:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

# Prompt template for scoring a response against a verification question.
# Placeholders: {scoring}, {task}, {answer}, {response}.
QUESTION_PROMPT = """Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.
In order to score the generated texts you will {scoring}. Make sure to think about whether the generated response answers the question well in order to score accurately. Return nothing but a float score.

Here is the given task:
{task}

Here is a question that checks if the task was completed correctly:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

# Optional few-shot section appended to a prompt. Placeholder: {examples}.
FEW_SHOT_EXAMPLES = """Here are some examples of how to score a machine generated response based on the above:
{examples}

"""  # noqa: E501

# Fully user-supplied prompt template. Placeholders: {custom}, {scoring}.
CUSTOM_PROMPT = """{custom}
{scoring}

"""

# Maps an evaluation type to its prompt template.
PROMPT_MAP = {
    "rubric": RUBRIC_PROMPT,
    "reference": REFERENCE_PROMPT,
    "question": QUESTION_PROMPT,
    "custom": CUSTOM_PROMPT,
}

# Closing instruction appended to every prompt.
END_PROMPT = """Remember to always end your response with nothing but a float score.
Float score:"""
|
||||
@@ -1,216 +0,0 @@
|
||||
# radio charts, logs, helper functions for tests, anything else relevant.
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Iterable, Optional, TypeVar, overload
|
||||
|
||||
import click
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel
|
||||
|
||||
from agbenchmark.reports.processing.report_types import Test
|
||||
from agbenchmark.utils.data_types import DIFFICULTY_MAP, DifficultyLevel
|
||||
|
||||
load_dotenv()
|
||||
|
||||
AGENT_NAME = os.getenv("AGENT_NAME")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
T = TypeVar("T")
|
||||
E = TypeVar("E", bound=Enum)
|
||||
|
||||
|
||||
def replace_backslash(value: Any) -> Any:
    """
    Recursively normalize Windows path separators to forward slashes.

    Strings have runs of one or more backslashes collapsed into a single "/";
    lists and dicts are processed recursively; any other value is returned
    unchanged.
    """
    if isinstance(value, str):
        # One or more backslashes become a single forward slash.
        return re.sub(r"\\+", "/", value)
    if isinstance(value, list):
        return [replace_backslash(item) for item in value]
    if isinstance(value, dict):
        return {key: replace_backslash(val) for key, val in value.items()}
    return value
|
||||
|
||||
|
||||
def get_test_path(json_file: str | Path) -> str:
|
||||
if isinstance(json_file, str):
|
||||
json_file = Path(json_file)
|
||||
|
||||
# Find the index of "agbenchmark" in the path parts
|
||||
try:
|
||||
agbenchmark_index = json_file.parts.index("benchmark")
|
||||
except ValueError:
|
||||
raise ValueError("Invalid challenge location.")
|
||||
|
||||
# Create the path from "agbenchmark" onwards
|
||||
challenge_location = Path(*json_file.parts[agbenchmark_index:])
|
||||
|
||||
formatted_location = replace_backslash(str(challenge_location))
|
||||
if isinstance(formatted_location, str):
|
||||
return formatted_location
|
||||
else:
|
||||
return str(challenge_location)
|
||||
|
||||
|
||||
def get_highest_success_difficulty(
    data: dict[str, Test], just_string: Optional[bool] = None
) -> str:
    """
    Return the hardest difficulty level among the tests in `data` that have
    at least one successful result.

    :param data: Mapping of test name to its report entry.
    :param just_string: When truthy, return only the difficulty name without
        the numeric level.
    :return: "<NAME>: <level>", just "<NAME>", or "No successful tests".
    """
    best_enum = None
    best_level = 0

    for test_name, test_data in data.items():
        try:
            # Only consider tests with at least one passing result.
            if not any(r.success for r in test_data.results):
                continue
            difficulty_str = test_data.difficulty
            if not difficulty_str:
                continue

            try:
                candidate = DifficultyLevel[difficulty_str.lower()]
                candidate_level = DIFFICULTY_MAP[candidate]
                if candidate_level > best_level:
                    best_enum = candidate
                    best_level = candidate_level
            except KeyError:
                logger.warning(
                    f"Unexpected difficulty level '{difficulty_str}' "
                    f"in test '{test_name}'"
                )
                continue
        except Exception as e:
            logger.warning(
                "An unexpected error [1] occurred while analyzing report [2]."
                "Please notify a maintainer.\n"
                f"Report data [1]: {data}\n"
                f"Error [2]: {e}"
            )
            logger.warning(
                "Make sure you selected the right test, no reports were generated."
            )
            break

    # Convert the winning enum (if any) to its display name.
    best_name = best_enum.name if best_enum is not None else ""

    if best_level and not just_string:
        return f"{best_name}: {best_level}"
    elif best_name:
        return best_name
    return "No successful tests"
|
||||
|
||||
|
||||
# def get_git_commit_sha(directory: Path) -> Optional[str]:
|
||||
# try:
|
||||
# repo = git.Repo(directory)
|
||||
# remote_url = repo.remotes.origin.url
|
||||
# if remote_url.endswith(".git"):
|
||||
# remote_url = remote_url[:-4]
|
||||
# git_commit_sha = f"{remote_url}/tree/{repo.head.commit.hexsha}"
|
||||
|
||||
# # logger.debug(f"GIT_COMMIT_SHA: {git_commit_sha}")
|
||||
# return git_commit_sha
|
||||
# except Exception:
|
||||
# # logger.error(f"{directory} is not a git repository!")
|
||||
# return None
|
||||
|
||||
|
||||
def write_pretty_json(data, json_file):
    """Serialize `data` to `json_file` as indented JSON with recursively
    sorted keys, followed by a trailing newline."""
    payload = json.dumps(deep_sort(data), indent=4)
    with open(json_file, "w") as fh:
        fh.write(payload)
        fh.write("\n")
|
||||
|
||||
|
||||
def pretty_print_model(model: BaseModel, include_header: bool = True) -> None:
    """
    Echo a pydantic model to the console as aligned `key = value` lines.

    When `include_header` is set, a header line naming the model (plus its
    id and/or name fields, when present) is printed first and the field
    lines are indented beneath it.
    """
    indent = ""
    if include_header:
        # Look for identifying attributes (an id and/or a name) on the model.
        model_id, model_name = None, None
        for field, value in model.model_dump().items():
            if field == "id" or field.endswith("_id"):
                model_id = value
            if field.endswith("name"):
                model_name = value
            if model_id and model_name:
                break
        identifiers = [v for v in [model_name, model_id] if v]
        click.echo(
            f"{model.__repr_name__()}{repr(identifiers) if identifiers else ''}:"
        )
        indent = " " * 2

    key_width = max(len(field) for field in model.model_dump().keys())
    for field, value in model.model_dump().items():
        rendered = repr(value)
        if value is None or value == "":
            rendered = click.style(rendered, fg="black")
        elif type(value) is bool:
            rendered = click.style(rendered, fg="green" if value else "red")
        elif type(value) is str and "\n" in value:
            # Multi-line strings get a gutter marker on every line.
            rendered = f"\n{value}".replace(
                "\n", f"\n{indent} {click.style('|', fg='black')} "
            )
        if isinstance(value, Enum):
            # Enums render as their value, overriding the repr above.
            rendered = click.style(value.value, fg="blue")
        elif type(value) is list and len(value) > 0 and isinstance(value[0], Enum):
            rendered = ", ".join(click.style(lv.value, fg="blue") for lv in value)
        click.echo(f"{indent}{field: <{key_width}} = {rendered}")
|
||||
|
||||
|
||||
def deep_sort(obj):
    """
    Recursively sort the keys in JSON object
    """
    if isinstance(obj, dict):
        return {key: deep_sort(value) for key, value in sorted(obj.items())}
    if isinstance(obj, list):
        return [deep_sort(item) for item in obj]
    return obj
|
||||
|
||||
|
||||
@overload
def sorted_by_enum_index(
    sortable: Iterable[E],
    enum: type[E],
    *,
    reverse: bool = False,
) -> list[E]:
    ...


@overload
def sorted_by_enum_index(
    sortable: Iterable[T],
    enum: type[Enum],
    *,
    key: Callable[[T], Enum | None],
    reverse: bool = False,
) -> list[T]:
    ...


def sorted_by_enum_index(
    sortable: Iterable[T],
    enum: type[Enum],
    *,
    key: Optional[Callable[[T], Enum | None]] = None,
    reverse: bool = False,
) -> list[T]:
    """
    Sort items by the declaration order of their corresponding enum member.

    Items whose member (the item itself, or `key(item)` when `key` is given)
    is falsy are pushed to the end via a large sentinel rank.
    """
    member_order = enum._member_names_  # declaration order of the enum

    def rank(item):
        member = key(item) if key else item
        # Falsy/missing members sort last.
        return member_order.index(member.name) if member else 420e3  # type: ignore

    return sorted(sortable, key=rank, reverse=reverse)
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"workspace": {"input": "auto_gpt_workspace", "output": "auto_gpt_workspace"},
|
||||
"host": "http://localhost:8000"
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
{
|
||||
"Auto-GPT": {
|
||||
"url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"branch": "master",
|
||||
"commit": "3a2d08fb415071cc94dd6fcee24cfbdd1fb487dd"
|
||||
},
|
||||
"gpt-engineer": {
|
||||
"url": "https://github.com/merwanehamadi/gpt-engineer.git",
|
||||
"branch": "benchmark-integration",
|
||||
"commit": "9bb81041ace9f09e8ea0e34e29f2e46bb9d46a36"
|
||||
},
|
||||
"mini-agi": {
|
||||
"url": "https://github.com/SilenNaihin/mini-agi.git",
|
||||
"branch": "benchmark-integration",
|
||||
"commit": "2fc70aa0032eec986dfb1020854a1b3b8aaf6780"
|
||||
},
|
||||
"smol-developer": {
|
||||
"url": "https://github.com/e2b-dev/smol-developer.git",
|
||||
"branch": "benchmarks",
|
||||
"commit": "a23d01369cea976e80b7889fdbf1096619471301"
|
||||
},
|
||||
"SuperAGI": {
|
||||
"url": "https://github.com/SilenNaihin/SuperAGI.git",
|
||||
"branch": "benchmark-integration",
|
||||
"commit": "48b2101374264b97dbdfc2c0bb0ae45e769e157d"
|
||||
},
|
||||
"babyagi": {
|
||||
"url": "https://github.com/SilenNaihin/babyagi.git",
|
||||
"branch": "benchmark-integration",
|
||||
"commit": "16f1b9519fea5543695203be0262a1b41c77cbba"
|
||||
},
|
||||
"beebot": {
|
||||
"url": "https://github.com/AutoPackAI/beebot.git",
|
||||
"branch": "main",
|
||||
"commit": "59d4e93c133612a0319d135bb0eb08bbcead9fa2"
|
||||
},
|
||||
"PolyGPT": {
|
||||
"url": "https://github.com/polywrap/PolyGPT.git",
|
||||
"branch": "nerfzael-use-local-wrap-library",
|
||||
"commit": "d621adf5f54cc0f9a6d191139fb67ac3d1436d7b"
|
||||
},
|
||||
"Auto-GPT-Turbo": {
|
||||
"url": "https://github.com/lc0rp/Auto-GPT-Turbo.git",
|
||||
"branch": "main",
|
||||
"commit": "8469e09ae204f2d5f41d489b217551544597ee14"
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
# Since the ".env" file is gitignored, you can use the ".env.example" file to
|
||||
# build a new ".env" file when you clone the repo. Keep this file up-to-date
|
||||
# when you add new variables to `.env`.
|
||||
|
||||
# This file will be committed to version control, so make sure not to have any
|
||||
# secrets in it. If you are cloning this repo, create a copy of this file named
|
||||
# ".env" and populate it with your secrets.
|
||||
|
||||
# When adding additional environment variables, the schema in "/src/env.mjs"
|
||||
# should be updated accordingly.
|
||||
|
||||
# Prisma
|
||||
# https://www.prisma.io/docs/reference/database-reference/connection-urls#env
|
||||
DATABASE_URL="file:./db.sqlite"
|
||||
42
classic/benchmark/frontend/.gitignore
vendored
42
classic/benchmark/frontend/.gitignore
vendored
@@ -1,42 +0,0 @@
|
||||
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
|
||||
|
||||
# dependencies
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
|
||||
# testing
|
||||
/coverage
|
||||
|
||||
# database
|
||||
/prisma/db.sqlite
|
||||
/prisma/db.sqlite-journal
|
||||
|
||||
# next.js
|
||||
/.next/
|
||||
/out/
|
||||
next-env.d.ts
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# misc
|
||||
.DS_Store
|
||||
*.pem
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
# do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables
|
||||
.env
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
@@ -1,7 +0,0 @@
|
||||
# agbenchmark-frontend
|
||||
|
||||
Frontend for https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks
|
||||
|
||||
Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.
|
||||
|
||||
Save time and money while doing it through smart dependencies. Best part? It's all automated.
|
||||
@@ -1,30 +0,0 @@
|
||||
/** @type {import("eslint").Linter.Config} */
|
||||
const config = {
|
||||
parser: "@typescript-eslint/parser",
|
||||
parserOptions: {
|
||||
project: true,
|
||||
},
|
||||
plugins: ["@typescript-eslint"],
|
||||
extends: [
|
||||
"next/core-web-vitals",
|
||||
"plugin:@typescript-eslint/recommended-type-checked",
|
||||
"plugin:@typescript-eslint/stylistic-type-checked",
|
||||
],
|
||||
rules: {
|
||||
// These opinionated rules are enabled in stylistic-type-checked above.
|
||||
// Feel free to reconfigure them to your own preference.
|
||||
"@typescript-eslint/array-type": "off",
|
||||
"@typescript-eslint/consistent-type-definitions": "off",
|
||||
|
||||
"@typescript-eslint/consistent-type-imports": [
|
||||
"warn",
|
||||
{
|
||||
prefer: "type-imports",
|
||||
fixStyle: "inline-type-imports",
|
||||
},
|
||||
],
|
||||
"@typescript-eslint/no-unused-vars": ["warn", { argsIgnorePattern: "^_" }],
|
||||
},
|
||||
};
|
||||
|
||||
module.exports = config;
|
||||
@@ -1,22 +0,0 @@
|
||||
/**
|
||||
* Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. This is especially useful
|
||||
* for Docker builds.
|
||||
*/
|
||||
await import("./src/env.mjs");
|
||||
|
||||
/** @type {import("next").NextConfig} */
|
||||
const config = {
|
||||
reactStrictMode: true,
|
||||
|
||||
/**
|
||||
* If you are using `appDir` then you must comment the below `i18n` config out.
|
||||
*
|
||||
* @see https://github.com/vercel/next.js/issues/41980
|
||||
*/
|
||||
i18n: {
|
||||
locales: ["en"],
|
||||
defaultLocale: "en",
|
||||
},
|
||||
};
|
||||
|
||||
export default config;
|
||||
@@ -1,47 +0,0 @@
|
||||
{
|
||||
"name": "my-t3-app",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"build": "next build",
|
||||
"dev": "next dev",
|
||||
"postinstall": "prisma generate",
|
||||
"lint": "next lint",
|
||||
"start": "next start"
|
||||
},
|
||||
"dependencies": {
|
||||
"@fortawesome/fontawesome-svg-core": "^6.4.2",
|
||||
"@fortawesome/free-solid-svg-icons": "^6.4.2",
|
||||
"@fortawesome/react-fontawesome": "^0.2.0",
|
||||
"@prisma/client": "^5.1.1",
|
||||
"@t3-oss/env-nextjs": "^0.3.1",
|
||||
"next": "^13.4.2",
|
||||
"react": "18.2.0",
|
||||
"react-dom": "18.2.0",
|
||||
"tailwind-styled-components": "^2.2.0",
|
||||
"vis-data": "^7.1.6",
|
||||
"vis-network": "^9.1.6",
|
||||
"zod": "^3.21.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/eslint": "^8.37.0",
|
||||
"@types/node": "^18.16.0",
|
||||
"@types/prettier": "^2.7.2",
|
||||
"@types/react": "^18.2.6",
|
||||
"@types/react-dom": "^18.2.4",
|
||||
"@typescript-eslint/eslint-plugin": "6.0.0",
|
||||
"@typescript-eslint/parser": "6.0.0",
|
||||
"autoprefixer": "^10.4.14",
|
||||
"eslint": "^8.40.0",
|
||||
"eslint-config-next": "^13.4.2",
|
||||
"postcss": "^8.4.27",
|
||||
"prettier": "^2.8.8",
|
||||
"prettier-plugin-tailwindcss": "^0.2.8",
|
||||
"prisma": "^5.1.1",
|
||||
"tailwindcss": "^3.3.3",
|
||||
"typescript": "^5.0.4"
|
||||
},
|
||||
"ct3aMetadata": {
|
||||
"initVersion": "7.18.0"
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
const config = {
|
||||
plugins: {
|
||||
tailwindcss: {},
|
||||
autoprefixer: {},
|
||||
},
|
||||
};
|
||||
|
||||
module.exports = config;
|
||||
@@ -1,6 +0,0 @@
|
||||
/** @type {import("prettier").Config} */
|
||||
const config = {
|
||||
plugins: [require.resolve("prettier-plugin-tailwindcss")],
|
||||
};
|
||||
|
||||
module.exports = config;
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 15 KiB |
File diff suppressed because one or more lines are too long
@@ -1,45 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
import RadarChart from "./dashboard/RadarChart";
|
||||
import CategorySuccess from "./dashboard/CategorySuccess";
|
||||
import CurrentEnv from "./dashboard/CurrentEnv";
|
||||
|
||||
interface DashboardProps {
|
||||
data: any;
|
||||
}
|
||||
|
||||
const Dashboard: React.FC<DashboardProps> = ({ data }) => {
|
||||
return (
|
||||
<DashboardContainer>
|
||||
<CardWrapper>
|
||||
<RadarChart />
|
||||
</CardWrapper>
|
||||
<CardWrapper>
|
||||
<CategorySuccess />
|
||||
</CardWrapper>
|
||||
<CardWrapper>
|
||||
<CurrentEnv />
|
||||
</CardWrapper>
|
||||
</DashboardContainer>
|
||||
);
|
||||
};
|
||||
|
||||
export default Dashboard;
|
||||
|
||||
const DashboardContainer = tw.div`
|
||||
w-full
|
||||
h-96
|
||||
flex
|
||||
justify-between
|
||||
items-center
|
||||
`;
|
||||
|
||||
const CardWrapper = tw.div`
|
||||
w-[30%]
|
||||
h-72
|
||||
rounded-xl
|
||||
shadow-lg
|
||||
border
|
||||
p-4
|
||||
`;
|
||||
@@ -1,28 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
interface ReportsProps {
|
||||
data: any;
|
||||
}
|
||||
|
||||
const Reports: React.FC<ReportsProps> = ({ data }) => {
|
||||
return (
|
||||
<ReportsContainer>
|
||||
<Table></Table>
|
||||
</ReportsContainer>
|
||||
);
|
||||
};
|
||||
|
||||
export default Reports;
|
||||
|
||||
const ReportsContainer = tw.div`
|
||||
w-full
|
||||
`;
|
||||
|
||||
const Table = tw.div`
|
||||
w-full
|
||||
border
|
||||
shadow-lg
|
||||
rounded-xl
|
||||
h-96
|
||||
`;
|
||||
@@ -1,16 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
interface CategorySuccessProps {
|
||||
data: any;
|
||||
}
|
||||
|
||||
const CategorySuccess: React.FC<CategorySuccessProps> = ({ data }) => {
|
||||
return <CategorySuccessContainer></CategorySuccessContainer>;
|
||||
};
|
||||
|
||||
export default CategorySuccess;
|
||||
|
||||
const CategorySuccessContainer = tw.div`
|
||||
|
||||
`;
|
||||
@@ -1,68 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
interface CurrentEnvProps {
|
||||
data: any;
|
||||
}
|
||||
|
||||
const CurrentEnv: React.FC<CurrentEnvProps> = ({ data }) => {
|
||||
const [agentName, setAgentName] = useState<string>("mini-agi");
|
||||
const [reportLocation, setReportLocation] = useState<string>(
|
||||
"../reports/mini-agi"
|
||||
);
|
||||
const [openAiKey, setOpenAiKey] = useState<string>();
|
||||
|
||||
return (
|
||||
<CurrentEnvContainer>
|
||||
<Title>Env Variables</Title>
|
||||
<EnvWrapper>
|
||||
<EnvLabel>Agent Name</EnvLabel>
|
||||
<EnvInput
|
||||
onChange={(e) => setAgentName(e.targetValue)}
|
||||
placeholder="mini-agi"
|
||||
/>
|
||||
</EnvWrapper>
|
||||
<EnvWrapper>
|
||||
<EnvLabel>Report Location</EnvLabel>
|
||||
<EnvInput placeholder="Location from root" />
|
||||
</EnvWrapper>
|
||||
<EnvWrapper>
|
||||
<EnvLabel>OpenAI Key</EnvLabel>
|
||||
<EnvInput type="password" placeholder="sk-" />
|
||||
</EnvWrapper>
|
||||
</CurrentEnvContainer>
|
||||
);
|
||||
};
|
||||
|
||||
export default CurrentEnv;
|
||||
|
||||
const CurrentEnvContainer = tw.div`
|
||||
w-full
|
||||
h-full
|
||||
flex
|
||||
flex-col
|
||||
justify-center
|
||||
`;
|
||||
|
||||
const Title = tw.h3`
|
||||
font-bold
|
||||
text-lg
|
||||
text-center
|
||||
`;
|
||||
|
||||
const EnvWrapper = tw.div`
|
||||
flex
|
||||
mt-4
|
||||
justify-between
|
||||
items-center
|
||||
`;
|
||||
|
||||
const EnvLabel = tw.label`
|
||||
|
||||
`;
|
||||
|
||||
const EnvInput = tw.input`
|
||||
border
|
||||
rounded
|
||||
px-2
|
||||
`;
|
||||
@@ -1,16 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
interface RadarChartProps {
|
||||
data: any;
|
||||
}
|
||||
|
||||
const RadarChart: React.FC<RadarChartProps> = ({ data }) => {
|
||||
return <RadarChartContainer></RadarChartContainer>;
|
||||
};
|
||||
|
||||
export default RadarChart;
|
||||
|
||||
const RadarChartContainer = tw.div`
|
||||
|
||||
`;
|
||||
@@ -1,112 +0,0 @@
|
||||
import React, { useEffect, useRef, useState } from "react";
|
||||
import { Network } from "vis-network";
|
||||
import { DataSet } from "vis-data";
|
||||
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
import { GraphNode, TaskData } from "../../lib/types";
|
||||
|
||||
interface GraphEdge {
|
||||
id: string;
|
||||
from: string;
|
||||
to: string;
|
||||
arrows: string;
|
||||
}
|
||||
|
||||
interface GraphProps {
|
||||
graphData: {
|
||||
nodes: GraphNode[];
|
||||
edges: GraphEdge[];
|
||||
};
|
||||
setSelectedTask: React.Dispatch<React.SetStateAction<TaskData | null>>;
|
||||
setIsTaskInfoExpanded: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
}
|
||||
|
||||
const Graph: React.FC<GraphProps> = ({
|
||||
graphData,
|
||||
setSelectedTask,
|
||||
setIsTaskInfoExpanded,
|
||||
}) => {
|
||||
const graphRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (!graphRef.current) {
|
||||
return;
|
||||
}
|
||||
const nodes = new DataSet<GraphNode>(graphData.nodes);
|
||||
const edges = new DataSet<GraphEdge>(graphData.edges);
|
||||
|
||||
const data = {
|
||||
nodes: nodes,
|
||||
edges: edges,
|
||||
};
|
||||
|
||||
const options = {
|
||||
nodes: {
|
||||
font: {
|
||||
size: 20, // Increased font size for labels
|
||||
color: "black", // Set a readable font color
|
||||
},
|
||||
shapeProperties: {
|
||||
useBorderWithImage: true,
|
||||
},
|
||||
},
|
||||
edges: {
|
||||
length: 250, // Increased edge length
|
||||
},
|
||||
layout: {
|
||||
hierarchical: {
|
||||
enabled: true,
|
||||
levelSeparation: 300,
|
||||
nodeSpacing: 250,
|
||||
treeSpacing: 250,
|
||||
blockShifting: true,
|
||||
edgeMinimization: true,
|
||||
parentCentralization: true,
|
||||
direction: "UD",
|
||||
sortMethod: "directed",
|
||||
},
|
||||
},
|
||||
physics: {
|
||||
stabilization: {
|
||||
enabled: true,
|
||||
iterations: 1000,
|
||||
},
|
||||
hierarchicalRepulsion: {
|
||||
centralGravity: 0.0,
|
||||
springLength: 200,
|
||||
springConstant: 0.01,
|
||||
nodeDistance: 300,
|
||||
damping: 0.09,
|
||||
},
|
||||
timestep: 0.5,
|
||||
},
|
||||
};
|
||||
|
||||
const network = new Network(graphRef.current, data, options);
|
||||
|
||||
// Add an event listener for node clicks
|
||||
network.on("click", (params) => {
|
||||
if (params.nodes.length) {
|
||||
const nodeId = params.nodes[0];
|
||||
const clickedNodeArray = nodes.get(nodeId);
|
||||
if (clickedNodeArray) {
|
||||
setSelectedTask((clickedNodeArray as any).data as TaskData);
|
||||
setIsTaskInfoExpanded(true);
|
||||
}
|
||||
} else {
|
||||
setSelectedTask(null);
|
||||
setIsTaskInfoExpanded(false);
|
||||
}
|
||||
});
|
||||
}, [graphData]);
|
||||
|
||||
return <GraphContainer ref={graphRef} />;
|
||||
};
|
||||
|
||||
export default Graph;
|
||||
|
||||
const GraphContainer = tw.div`
|
||||
w-full
|
||||
h-full
|
||||
`;
|
||||
@@ -1,39 +0,0 @@
|
||||
import React from "react";
|
||||
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
interface MockCheckboxProps {
|
||||
isMock: boolean;
|
||||
setIsMock: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
}
|
||||
|
||||
const MockCheckbox: React.FC<MockCheckboxProps> = ({ isMock, setIsMock }) => {
|
||||
return (
|
||||
<CheckboxWrapper>
|
||||
<MockCheckboxInput
|
||||
type="checkbox"
|
||||
checked={isMock}
|
||||
onChange={() => setIsMock(!isMock)}
|
||||
/>
|
||||
<span>Run mock test</span>
|
||||
</CheckboxWrapper>
|
||||
);
|
||||
};
|
||||
|
||||
export default MockCheckbox;
|
||||
|
||||
const MockCheckboxInput = tw.input`
|
||||
border
|
||||
rounded
|
||||
focus:border-blue-400
|
||||
focus:ring
|
||||
focus:ring-blue-200
|
||||
focus:ring-opacity-50
|
||||
`;
|
||||
|
||||
const CheckboxWrapper = tw.label`
|
||||
flex
|
||||
items-center
|
||||
space-x-2
|
||||
mt-2
|
||||
`;
|
||||
@@ -1,80 +0,0 @@
|
||||
import React, { useState, useEffect } from "react";
|
||||
|
||||
import tw from "tailwind-styled-components";
|
||||
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
|
||||
import { faCircleNotch } from "@fortawesome/free-solid-svg-icons";
|
||||
|
||||
interface RunButtonProps {
|
||||
testRun: () => Promise<void>;
|
||||
isLoading: boolean;
|
||||
cutoff?: string;
|
||||
isMock: boolean;
|
||||
}
|
||||
|
||||
const RunButton: React.FC<RunButtonProps> = ({
|
||||
testRun,
|
||||
isLoading,
|
||||
cutoff,
|
||||
isMock,
|
||||
}) => {
|
||||
const intCutoff = cutoff ? parseInt(cutoff) : null;
|
||||
const [timeElapsed, setTimeElapsed] = useState<number>(0);
|
||||
|
||||
useEffect(() => {
|
||||
let interval: NodeJS.Timeout | null = null;
|
||||
|
||||
if (isLoading) {
|
||||
interval = setInterval(() => {
|
||||
setTimeElapsed((prevTime) => prevTime + 1);
|
||||
}, 1000);
|
||||
} else {
|
||||
if (interval !== null) {
|
||||
clearInterval(interval);
|
||||
}
|
||||
setTimeElapsed(0); // Reset the timer when not loading
|
||||
}
|
||||
|
||||
return () => {
|
||||
if (interval !== null) {
|
||||
clearInterval(interval);
|
||||
}
|
||||
};
|
||||
}, [isLoading]);
|
||||
|
||||
const timeUntilCutoff = intCutoff ? intCutoff - timeElapsed : null;
|
||||
|
||||
return (
|
||||
<>
|
||||
<RunButtonWrapper onClick={testRun}>
|
||||
{!isLoading ? (
|
||||
"Run Task"
|
||||
) : (
|
||||
<FontAwesomeIcon size="lg" icon={faCircleNotch} spin />
|
||||
)}
|
||||
</RunButtonWrapper>
|
||||
{cutoff && isLoading && (
|
||||
<>
|
||||
{isMock ? (
|
||||
<p>Time elapsed: {timeElapsed} seconds</p>
|
||||
) : (
|
||||
<p>Time until cutoff: {timeUntilCutoff} seconds</p>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default RunButton;
|
||||
|
||||
const RunButtonWrapper = tw.button`
|
||||
border
|
||||
mt-4
|
||||
py-1
|
||||
px-3
|
||||
w-28
|
||||
rounded
|
||||
flex
|
||||
items-center
|
||||
justify-center
|
||||
`;
|
||||
@@ -1,129 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
import { LatestRun } from "../../lib/types";
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
const RecursiveDropdown: React.FC<{ data: any; skipKeys: string[] }> = ({
|
||||
data,
|
||||
skipKeys,
|
||||
}) => {
|
||||
if (typeof data !== "object" || data === null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{Object.entries(data).map(([key, value]) => {
|
||||
if (skipKeys.includes(key)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Special case for 'category' key
|
||||
if (key === "category" && Array.isArray(value)) {
|
||||
return (
|
||||
<Section key={key}>
|
||||
<Label>{key}:</Label>
|
||||
<Data>{value.join(", ")}</Data>
|
||||
</Section>
|
||||
);
|
||||
}
|
||||
|
||||
if (typeof value === "object" && value !== null) {
|
||||
return (
|
||||
<Dropdown key={key}>
|
||||
<DropdownSummary>{key}</DropdownSummary>
|
||||
<DropdownContent>
|
||||
<RecursiveDropdown data={value} skipKeys={skipKeys} />
|
||||
</DropdownContent>
|
||||
</Dropdown>
|
||||
);
|
||||
} else {
|
||||
return (
|
||||
<Section key={key}>
|
||||
<Label>{key}:</Label>
|
||||
<Data>
|
||||
{typeof value === "string" ? value : JSON.stringify(value)}
|
||||
</Data>
|
||||
</Section>
|
||||
);
|
||||
}
|
||||
})}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
const RunData: React.FC<{ latestRun: LatestRun }> = ({ latestRun }) => {
|
||||
const date = new Date(latestRun.benchmark_start_time);
|
||||
return (
|
||||
<Card>
|
||||
<Section>
|
||||
<Label>Command:</Label>
|
||||
<Data>{latestRun.command}</Data>
|
||||
</Section>
|
||||
<Section>
|
||||
<Label>Start time:</Label>
|
||||
<Data>{date.toLocaleString()}</Data>
|
||||
</Section>
|
||||
<Section>
|
||||
<Label>Run time:</Label>
|
||||
<Data>{latestRun.metrics.run_time}</Data>
|
||||
</Section>
|
||||
<Section>
|
||||
<Label>Highest difficulty:</Label>
|
||||
<Data>
|
||||
{latestRun.metrics.highest_difficulty.split(":")[1]?.slice(-1)}
|
||||
</Data>
|
||||
</Section>
|
||||
|
||||
{Object.keys(latestRun.tests).map((testKey) => (
|
||||
<Dropdown key={testKey}>
|
||||
<DropdownSummary>{testKey}</DropdownSummary>
|
||||
<DropdownContent>
|
||||
{latestRun.tests[testKey] && (
|
||||
<RecursiveDropdown
|
||||
data={latestRun.tests[testKey]}
|
||||
skipKeys={["cost", "data_path"]}
|
||||
/>
|
||||
)}
|
||||
</DropdownContent>
|
||||
</Dropdown>
|
||||
))}
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
|
||||
export default RunData;
|
||||
|
||||
const Card = tw.div`
|
||||
bg-white
|
||||
p-4
|
||||
rounded
|
||||
shadow-lg
|
||||
w-full
|
||||
mt-4
|
||||
`;
|
||||
|
||||
const Section = tw.div`
|
||||
mt-2
|
||||
`;
|
||||
|
||||
const Label = tw.span`
|
||||
font-medium
|
||||
`;
|
||||
|
||||
const Data = tw.span`
|
||||
ml-1
|
||||
`;
|
||||
|
||||
const Dropdown = tw.details`
|
||||
mt-4
|
||||
`;
|
||||
|
||||
const DropdownSummary = tw.summary`
|
||||
cursor-pointer
|
||||
text-blue-500
|
||||
`;
|
||||
|
||||
const DropdownContent = tw.div`
|
||||
pl-4
|
||||
mt-2
|
||||
`;
|
||||
@@ -1,112 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
import { TaskData } from "../../lib/types";
|
||||
import RunButton from "./RunButton";
|
||||
import MockCheckbox from "./MockCheckbox";
|
||||
|
||||
interface SelectedTaskProps {
|
||||
selectedTask: TaskData | null;
|
||||
isMock: boolean;
|
||||
setIsMock: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
cutoff: number | null;
|
||||
setResponseData: React.Dispatch<React.SetStateAction<any>>;
|
||||
allResponseData: any[];
|
||||
setAllResponseData: React.Dispatch<React.SetStateAction<any[]>>;
|
||||
}
|
||||
|
||||
const SelectedTask: React.FC<SelectedTaskProps> = ({
|
||||
selectedTask,
|
||||
isMock,
|
||||
setIsMock,
|
||||
cutoff,
|
||||
setResponseData,
|
||||
setAllResponseData,
|
||||
allResponseData,
|
||||
}) => {
|
||||
const [isLoading, setIsLoading] = useState<boolean>(false);
|
||||
|
||||
const runTest = async () => {
|
||||
// If there's no selected task, do nothing
|
||||
if (!selectedTask?.name) return;
|
||||
|
||||
const testParam = selectedTask.name;
|
||||
setIsLoading(true);
|
||||
try {
|
||||
let url = `http://localhost:8000/run_single_test?test=${testParam}&mock=${isMock}`;
|
||||
cutoff && !isMock && (url += `&cutoff=${cutoff}`);
|
||||
const response = await fetch(url);
|
||||
const data = await response.json();
|
||||
|
||||
if (data["returncode"] > 0) {
|
||||
throw new Error(data["stderr"]);
|
||||
} else {
|
||||
const jsonObject = JSON.parse(data["stdout"]);
|
||||
setAllResponseData([...allResponseData, jsonObject]);
|
||||
setResponseData(jsonObject);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("There was an error fetching the data", error);
|
||||
}
|
||||
setIsLoading(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<TaskName>{selectedTask?.name}</TaskName>
|
||||
<TaskPrompt>{selectedTask?.task}</TaskPrompt>
|
||||
<Detail>
|
||||
<b>Cutoff:</b> {selectedTask?.cutoff}
|
||||
</Detail>
|
||||
<Detail>
|
||||
<b>Description:</b> {selectedTask?.info?.description}
|
||||
</Detail>
|
||||
<Detail>
|
||||
<b>Difficulty:</b> {selectedTask?.info?.difficulty}
|
||||
</Detail>
|
||||
<Detail>
|
||||
<b>Category:</b> {selectedTask?.category.join(", ")}
|
||||
</Detail>
|
||||
<RunButton
|
||||
cutoff={selectedTask?.cutoff}
|
||||
isLoading={isLoading}
|
||||
testRun={runTest}
|
||||
isMock={isMock}
|
||||
/>
|
||||
<MockCheckbox isMock={isMock} setIsMock={setIsMock} />
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default SelectedTask;
|
||||
|
||||
const TaskName = tw.h1`
|
||||
font-bold
|
||||
text-2xl
|
||||
break-words
|
||||
`;
|
||||
|
||||
const TaskPrompt = tw.p`
|
||||
text-gray-900
|
||||
break-words
|
||||
`;
|
||||
const Detail = tw.p`
|
||||
mt-2
|
||||
`;
|
||||
|
||||
const MockCheckboxInput = tw.input`
|
||||
border
|
||||
rounded
|
||||
focus:border-blue-400
|
||||
focus:ring
|
||||
focus:ring-blue-200
|
||||
focus:ring-opacity-50
|
||||
`;
|
||||
|
||||
const CheckboxWrapper = tw.label`
|
||||
flex
|
||||
items-center
|
||||
space-x-2
|
||||
mt-2
|
||||
`;
|
||||
@@ -1,164 +0,0 @@
|
||||
import React, { useState } from "react";
|
||||
|
||||
import tw from "tailwind-styled-components";
|
||||
|
||||
import { TaskData } from "../../lib/types";
|
||||
import RunData from "./RunData";
|
||||
import SelectedTask from "./SelectedTask";
|
||||
import MockCheckbox from "./MockCheckbox";
|
||||
import RunButton from "./RunButton";
|
||||
|
||||
interface TaskInfoProps {
|
||||
selectedTask: TaskData | null;
|
||||
isTaskInfoExpanded: boolean;
|
||||
setIsTaskInfoExpanded: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
setSelectedTask: React.Dispatch<React.SetStateAction<TaskData | null>>;
|
||||
}
|
||||
|
||||
const TaskInfo: React.FC<TaskInfoProps> = ({
|
||||
selectedTask,
|
||||
isTaskInfoExpanded,
|
||||
setIsTaskInfoExpanded,
|
||||
setSelectedTask,
|
||||
}) => {
|
||||
const [isMock, setIsMock] = useState<boolean>(false);
|
||||
const [isLoading, setIsLoading] = useState<boolean>(false);
|
||||
const [allResponseData, setAllResponseData] = useState<any[]>([]);
|
||||
const [responseData, setResponseData] = useState<any>();
|
||||
const [cutoff, setCutoff] = useState<number | null>(null);
|
||||
|
||||
const runBenchmark = async () => {
|
||||
setIsLoading(true);
|
||||
try {
|
||||
let url = `http://localhost:8000/run?mock=${isMock}`;
|
||||
cutoff && !isMock && (url += `&cutoff=${cutoff}`);
|
||||
const response = await fetch(url);
|
||||
const data = await response.json();
|
||||
|
||||
if (data["returncode"] > 0) {
|
||||
throw new Error(data["stderr"]);
|
||||
} else {
|
||||
const jsonObject = JSON.parse(data["stdout"]);
|
||||
setAllResponseData([...allResponseData, jsonObject]);
|
||||
setResponseData(jsonObject);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("There was an error fetching the data", error);
|
||||
}
|
||||
setIsLoading(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<TaskDetails isExpanded={isTaskInfoExpanded}>
|
||||
{isTaskInfoExpanded ? (
|
||||
<ToggleButton
|
||||
onClick={() => {
|
||||
setIsTaskInfoExpanded(!isTaskInfoExpanded);
|
||||
setSelectedTask(null);
|
||||
}}
|
||||
>
|
||||
→
|
||||
</ToggleButton>
|
||||
) : (
|
||||
<BenchmarkWrapper>
|
||||
<RunButton
|
||||
cutoff={selectedTask?.cutoff}
|
||||
isLoading={isLoading}
|
||||
testRun={runBenchmark}
|
||||
isMock={isMock}
|
||||
/>
|
||||
<MockCheckbox isMock={isMock} setIsMock={setIsMock} />
|
||||
<Detail>
|
||||
<b>or click a node on the left</b>
|
||||
</Detail>
|
||||
</BenchmarkWrapper>
|
||||
)}
|
||||
|
||||
{selectedTask && (
|
||||
<SelectedTask
|
||||
selectedTask={selectedTask}
|
||||
isMock={isMock}
|
||||
setIsMock={setIsMock}
|
||||
cutoff={cutoff}
|
||||
setResponseData={setResponseData}
|
||||
allResponseData={allResponseData}
|
||||
setAllResponseData={setAllResponseData}
|
||||
/>
|
||||
)}
|
||||
{!isMock && (
|
||||
<CheckboxWrapper>
|
||||
<p>Custom cutoff</p>
|
||||
<CutoffInput
|
||||
type="number"
|
||||
placeholder="Leave blank for default"
|
||||
value={cutoff ?? ""}
|
||||
onChange={(e) =>
|
||||
setCutoff(e.target.value ? parseInt(e.target.value) : null)
|
||||
}
|
||||
/>
|
||||
</CheckboxWrapper>
|
||||
)}
|
||||
<Header>Previous Run</Header>
|
||||
{!responseData && <p>No runs yet</p>}
|
||||
{responseData && <RunData latestRun={responseData} />}
|
||||
<Header>All Runs</Header>
|
||||
{allResponseData.length === 0 && <p>No runs yet</p>}
|
||||
{allResponseData.length > 1 &&
|
||||
allResponseData
|
||||
.slice(0, -1)
|
||||
.map((responseData, index) => (
|
||||
<RunData key={index} latestRun={responseData} />
|
||||
))}
|
||||
</TaskDetails>
|
||||
);
|
||||
};
|
||||
|
||||
export default TaskInfo;
|
||||
|
||||
const TaskDetails = tw.div<{ isExpanded: boolean }>`
|
||||
${(p) => (p.isExpanded ? "w-1/2" : "w-1/4")}
|
||||
ml-5
|
||||
transition-all
|
||||
duration-500
|
||||
ease-in-out
|
||||
p-4
|
||||
border
|
||||
border-gray-400
|
||||
h-full
|
||||
overflow-x-hidden
|
||||
`;
|
||||
|
||||
const Header = tw.h5`
|
||||
text-xl
|
||||
font-semibold
|
||||
mt-4
|
||||
`;
|
||||
|
||||
const ToggleButton = tw.button`
|
||||
font-bold
|
||||
text-2xl
|
||||
`;
|
||||
|
||||
const BenchmarkWrapper = tw.div`
|
||||
flex
|
||||
flex-col
|
||||
items-center
|
||||
justify-center
|
||||
`;
|
||||
|
||||
const CutoffInput = tw.input`
|
||||
border rounded w-1/2 h-8 text-sm
|
||||
focus:outline-none focus:border-blue-400
|
||||
pl-2
|
||||
`;
|
||||
|
||||
const Detail = tw.p`
|
||||
mt-2
|
||||
`;
|
||||
|
||||
const CheckboxWrapper = tw.label`
|
||||
flex
|
||||
items-center
|
||||
space-x-2
|
||||
mt-2
|
||||
`;
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user