mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-13 16:25:05 -05:00
Compare commits
83 Commits
swiftyos/s
...
make-old-w
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b0754958f0 | ||
|
|
d437e756e9 | ||
|
|
9622ba8cbb | ||
|
|
053b92e72c | ||
|
|
24b38f2cdd | ||
|
|
1480183c47 | ||
|
|
711f0da63c | ||
|
|
ac7de17eb4 | ||
|
|
f56abcef4f | ||
|
|
6210b3259d | ||
|
|
60f506add9 | ||
|
|
b3f35953ed | ||
|
|
d8d87f2853 | ||
|
|
791e1d8982 | ||
|
|
0040636948 | ||
|
|
c671af851f | ||
|
|
7dd181f4b0 | ||
|
|
114856cef1 | ||
|
|
68b9bd0c51 | ||
|
|
ff076b1f15 | ||
|
|
57fbab500b | ||
|
|
6faabef24d | ||
|
|
a67d475a69 | ||
|
|
326554d89a | ||
|
|
5e22a1888a | ||
|
|
a4d7b0142f | ||
|
|
7d6375f59c | ||
|
|
aeec0ce509 | ||
|
|
b32bfcaac5 | ||
|
|
5373a6eb6e | ||
|
|
98cde46ccb | ||
|
|
bd10da10d9 | ||
|
|
60fdee1345 | ||
|
|
6f2783468c | ||
|
|
c1031b286d | ||
|
|
b849eafb7f | ||
|
|
572c3f5e0d | ||
|
|
89003a585d | ||
|
|
0e65785228 | ||
|
|
f07dff1cdd | ||
|
|
00e02a4696 | ||
|
|
634bff8277 | ||
|
|
d591f36c7b | ||
|
|
a347bed0b1 | ||
|
|
4eeb6ee2b0 | ||
|
|
7db962b9f9 | ||
|
|
9108b21541 | ||
|
|
ffe9325296 | ||
|
|
0a616d9267 | ||
|
|
ab95077e5b | ||
|
|
e477150979 | ||
|
|
804430e243 | ||
|
|
acb320d32d | ||
|
|
32f68d5999 | ||
|
|
49f56b4e8d | ||
|
|
bead811e73 | ||
|
|
013f728ebf | ||
|
|
cda9572acd | ||
|
|
e0784f8f6b | ||
|
|
3040f39136 | ||
|
|
515504c604 | ||
|
|
18edeaeaf4 | ||
|
|
44182aff9c | ||
|
|
864c5a7846 | ||
|
|
699fffb1a8 | ||
|
|
f0641c2d26 | ||
|
|
94b6f74c95 | ||
|
|
46aabab3ea | ||
|
|
0a65df5102 | ||
|
|
6fbd208fe3 | ||
|
|
8fc174ca87 | ||
|
|
cacc89790f | ||
|
|
b9113bee02 | ||
|
|
3f65da03e7 | ||
|
|
9e96d11b2d | ||
|
|
4c264b7ae9 | ||
|
|
0adbc0bd05 | ||
|
|
8f3291bc92 | ||
|
|
7a20de880d | ||
|
|
ef8a6d2528 | ||
|
|
fd66be2aaa | ||
|
|
ae2cc97dc4 | ||
|
|
ea521eed26 |
10
.claude/settings.json
Normal file
10
.claude/settings.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allowedTools": [
|
||||
"Read", "Grep", "Glob",
|
||||
"Bash(ls:*)", "Bash(cat:*)", "Bash(grep:*)", "Bash(find:*)",
|
||||
"Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)", "Bash(git worktree:*)",
|
||||
"Bash(tmux:*)", "Bash(sleep:*)", "Bash(branchlet:*)"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -5,13 +5,42 @@
|
||||
!docs/
|
||||
|
||||
# Platform - Libs
|
||||
!autogpt_platform/autogpt_libs/
|
||||
!autogpt_platform/autogpt_libs/autogpt_libs/
|
||||
!autogpt_platform/autogpt_libs/pyproject.toml
|
||||
!autogpt_platform/autogpt_libs/poetry.lock
|
||||
!autogpt_platform/autogpt_libs/README.md
|
||||
|
||||
# Platform - Backend
|
||||
!autogpt_platform/backend/
|
||||
!autogpt_platform/backend/backend/
|
||||
!autogpt_platform/backend/test/e2e_test_data.py
|
||||
!autogpt_platform/backend/migrations/
|
||||
!autogpt_platform/backend/schema.prisma
|
||||
!autogpt_platform/backend/pyproject.toml
|
||||
!autogpt_platform/backend/poetry.lock
|
||||
!autogpt_platform/backend/README.md
|
||||
!autogpt_platform/backend/.env
|
||||
!autogpt_platform/backend/gen_prisma_types_stub.py
|
||||
|
||||
# Platform - Market
|
||||
!autogpt_platform/market/market/
|
||||
!autogpt_platform/market/scripts.py
|
||||
!autogpt_platform/market/schema.prisma
|
||||
!autogpt_platform/market/pyproject.toml
|
||||
!autogpt_platform/market/poetry.lock
|
||||
!autogpt_platform/market/README.md
|
||||
|
||||
# Platform - Frontend
|
||||
!autogpt_platform/frontend/
|
||||
!autogpt_platform/frontend/src/
|
||||
!autogpt_platform/frontend/public/
|
||||
!autogpt_platform/frontend/scripts/
|
||||
!autogpt_platform/frontend/package.json
|
||||
!autogpt_platform/frontend/pnpm-lock.yaml
|
||||
!autogpt_platform/frontend/tsconfig.json
|
||||
!autogpt_platform/frontend/README.md
|
||||
## config
|
||||
!autogpt_platform/frontend/*.config.*
|
||||
!autogpt_platform/frontend/.env.*
|
||||
!autogpt_platform/frontend/.env
|
||||
|
||||
# Classic - AutoGPT
|
||||
!classic/original_autogpt/autogpt/
|
||||
@@ -35,38 +64,6 @@
|
||||
# Classic - Frontend
|
||||
!classic/frontend/build/web/
|
||||
|
||||
# Explicitly re-ignore unwanted files from whitelisted directories
|
||||
# Note: These patterns MUST come after the whitelist rules to take effect
|
||||
|
||||
# Hidden files and directories (but keep frontend .env files needed for build)
|
||||
**/.*
|
||||
!autogpt_platform/frontend/.env
|
||||
!autogpt_platform/frontend/.env.default
|
||||
!autogpt_platform/frontend/.env.production
|
||||
|
||||
# Python artifacts
|
||||
**/__pycache__/
|
||||
**/*.pyc
|
||||
**/*.pyo
|
||||
**/.venv/
|
||||
**/.ruff_cache/
|
||||
**/.pytest_cache/
|
||||
**/.coverage
|
||||
**/htmlcov/
|
||||
|
||||
# Node artifacts
|
||||
**/node_modules/
|
||||
**/.next/
|
||||
**/storybook-static/
|
||||
**/playwright-report/
|
||||
**/test-results/
|
||||
|
||||
# Build artifacts
|
||||
**/dist/
|
||||
**/build/
|
||||
!autogpt_platform/frontend/src/**/build/
|
||||
**/target/
|
||||
|
||||
# Logs and temp files
|
||||
**/*.log
|
||||
**/*.tmp
|
||||
# Explicitly re-ignore some folders
|
||||
.*
|
||||
**/__pycache__
|
||||
|
||||
74
.github/workflows/classic-autogpt-ci.yml
vendored
74
.github/workflows/classic-autogpt-ci.yml
vendored
@@ -6,11 +6,15 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/forge/**'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-autogpt-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/forge/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
@@ -19,47 +23,22 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/original_autogpt
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
- name: Start MinIO service
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -71,41 +50,23 @@ jobs:
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: "3.12"
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
@@ -116,12 +77,13 @@ jobs:
|
||||
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--numprocesses=logical --durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
tests/unit tests/integration
|
||||
original_autogpt/tests/unit original_autogpt/tests/integration
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
S3_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
@@ -135,11 +97,11 @@ jobs:
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: autogpt-agent,${{ runner.os }}
|
||||
flags: autogpt-agent
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: classic/original_autogpt/logs/
|
||||
path: classic/logs/
|
||||
|
||||
36
.github/workflows/classic-autogpts-ci.yml
vendored
36
.github/workflows/classic-autogpts-ci.yml
vendored
@@ -11,9 +11,6 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
@@ -22,9 +19,6 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
|
||||
defaults:
|
||||
@@ -35,13 +29,9 @@ defaults:
|
||||
jobs:
|
||||
serve-agent-protocol:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [ original_autogpt ]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
min-python-version: '3.12'
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -55,22 +45,22 @@ jobs:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Install Poetry
|
||||
working-directory: ./classic/${{ matrix.agent-name }}/
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Run regression tests
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run smoke tests with direct-benchmark
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
poetry run direct-benchmark run \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests ReadFile,WriteFile \
|
||||
--json
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
AGENT_NAME: ${{ matrix.agent-name }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
|
||||
HELICONE_CACHE_ENABLED: false
|
||||
HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
|
||||
REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
CI: true
|
||||
|
||||
194
.github/workflows/classic-benchmark-ci.yml
vendored
194
.github/workflows/classic-benchmark-ci.yml
vendored
@@ -1,17 +1,21 @@
|
||||
name: Classic - AGBenchmark CI
|
||||
name: Classic - Direct Benchmark CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, dev, ci-test* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/benchmark/agbenchmark/challenges/**'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- .github/workflows/classic-benchmark-ci.yml
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- 'classic/benchmark/**'
|
||||
- '!classic/benchmark/reports/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/benchmark/agbenchmark/challenges/**'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- .github/workflows/classic-benchmark-ci.yml
|
||||
|
||||
concurrency:
|
||||
@@ -23,23 +27,16 @@ defaults:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
min-python-version: '3.10'
|
||||
min-python-version: '3.12'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
benchmark-tests:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/benchmark
|
||||
working-directory: classic
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -47,71 +44,88 @@ jobs:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python ${{ env.min-python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run pytest with coverage
|
||||
- name: Run basic benchmark tests
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
tests
|
||||
echo "Testing ReadFile challenge with one_shot strategy..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests ReadFile \
|
||||
--json
|
||||
|
||||
echo "Testing WriteFile challenge..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--tests WriteFile \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
- name: Upload test results to Codecov
|
||||
if: ${{ !cancelled() }} # Run even if tests fail
|
||||
uses: codecov/test-results-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
- name: Test category filtering
|
||||
run: |
|
||||
echo "Testing coding category..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--categories coding \
|
||||
--tests ReadFile,WriteFile \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: agbenchmark,${{ runner.os }}
|
||||
- name: Test multiple strategies
|
||||
run: |
|
||||
echo "Testing multiple strategies..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot,plan_execute \
|
||||
--models claude \
|
||||
--tests ReadFile \
|
||||
--parallel 2 \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
self-test-with-agent:
|
||||
# Run regression tests on maintain challenges
|
||||
regression-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
agent-name: [forge]
|
||||
fail-fast: false
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 45
|
||||
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -126,51 +140,23 @@ jobs:
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Run regression tests
|
||||
working-directory: classic
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
|
||||
set +e # Ignore non-zero exit codes and continue execution
|
||||
echo "Running the following command: poetry run agbenchmark --maintain --mock"
|
||||
poetry run agbenchmark --maintain --mock
|
||||
EXIT_CODE=$?
|
||||
set -e # Stop ignoring non-zero exit codes
|
||||
# Check if the exit code was 5, and if so, exit with 0 instead
|
||||
if [ $EXIT_CODE -eq 5 ]; then
|
||||
echo "regression_tests.json is empty."
|
||||
fi
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock"
|
||||
poetry run agbenchmark --mock
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=data"
|
||||
poetry run agbenchmark --mock --category=data
|
||||
|
||||
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
|
||||
poetry run agbenchmark --mock --category=coding
|
||||
|
||||
# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
|
||||
# poetry run agbenchmark --test=WriteFile
|
||||
cd ../benchmark
|
||||
poetry install
|
||||
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
|
||||
export BUILD_SKILL_TREE=true
|
||||
|
||||
# poetry run agbenchmark --mock
|
||||
|
||||
# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
|
||||
# if [ ! -z "$CHANGED" ]; then
|
||||
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
|
||||
# echo "$CHANGED"
|
||||
# exit 1
|
||||
# else
|
||||
# echo "No unstaged changes."
|
||||
# fi
|
||||
echo "Running regression tests (previously beaten challenges)..."
|
||||
poetry run direct-benchmark run \
|
||||
--fresh \
|
||||
--strategies one_shot \
|
||||
--models claude \
|
||||
--maintain \
|
||||
--parallel 4 \
|
||||
--json
|
||||
env:
|
||||
CI: true
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
|
||||
TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
|
||||
NONINTERACTIVE_MODE: "true"
|
||||
|
||||
185
.github/workflows/classic-forge-ci.yml
vendored
185
.github/workflows/classic-forge-ci.yml
vendored
@@ -6,13 +6,11 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
paths:
|
||||
- '.github/workflows/classic-forge-ci.yml'
|
||||
- 'classic/forge/**'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
@@ -21,131 +19,60 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic/forge
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
test:
|
||||
permissions:
|
||||
contents: read
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Quite slow on macOS (2~4 minutes to set up Docker)
|
||||
# - name: Set up Docker (macOS)
|
||||
# if: runner.os == 'macOS'
|
||||
# uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Start MinIO service (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
- name: Start MinIO service
|
||||
working-directory: '.'
|
||||
run: |
|
||||
docker pull minio/minio:edge-cicd
|
||||
docker run -d -p 9000:9000 minio/minio:edge-cicd
|
||||
|
||||
- name: Start MinIO service (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
brew install minio/stable/minio
|
||||
mkdir data
|
||||
minio server ./data &
|
||||
|
||||
# No MinIO on Windows:
|
||||
# - Windows doesn't support running Linux Docker containers
|
||||
# - It doesn't seem possible to start background processes on Windows. They are
|
||||
# killed after the step returns.
|
||||
# See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
|
||||
- name: Checkout cassettes
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
env:
|
||||
PR_BASE: ${{ github.event.pull_request.base.ref }}
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
cassette_base_branch="${PR_BASE}"
|
||||
cd tests/vcr_cassettes
|
||||
|
||||
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
|
||||
cassette_base_branch="master"
|
||||
fi
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin $cassette_base_branch
|
||||
|
||||
git checkout $cassette_branch
|
||||
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '$cassette_base_branch'."
|
||||
else
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '$cassette_base_branch'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
# On Windows, unpacking cached dependencies takes longer than just installing them
|
||||
if: runner.os != 'Windows'
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
if [ "${{ runner.os }}" = "macOS" ]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
fi
|
||||
|
||||
- name: Install Poetry (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
|
||||
|
||||
$env:PATH += ";$env:APPDATA\Python\Scripts"
|
||||
echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry install
|
||||
|
||||
- name: Install Playwright browsers
|
||||
run: poetry run playwright install chromium
|
||||
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
poetry run pytest -vv \
|
||||
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
|
||||
--durations=10 \
|
||||
--junitxml=junit.xml -o junit_family=legacy \
|
||||
forge
|
||||
forge/forge forge/tests
|
||||
env:
|
||||
CI: true
|
||||
PLAIN_OUTPUT: True
|
||||
# API keys - tests that need these will skip if not available
|
||||
# Secrets are not available to fork PRs (GitHub security feature)
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
S3_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
AWS_ACCESS_KEY_ID: minioadmin
|
||||
AWS_SECRET_ACCESS_KEY: minioadmin
|
||||
|
||||
@@ -159,85 +86,11 @@ jobs:
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
flags: forge,${{ runner.os }}
|
||||
|
||||
- id: setup_git_auth
|
||||
name: Set up git token authentication
|
||||
# Cassettes may be pushed even when tests fail
|
||||
if: success() || failure()
|
||||
run: |
|
||||
config_key="http.${{ github.server_url }}/.extraheader"
|
||||
if [ "${{ runner.os }}" = 'macOS' ]; then
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
|
||||
else
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
|
||||
fi
|
||||
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
git config "$config_key" \
|
||||
"Authorization: Basic $base64_pat"
|
||||
|
||||
echo "config_key=$config_key" >> $GITHUB_OUTPUT
|
||||
|
||||
- id: push_cassettes
|
||||
name: Push updated cassettes
|
||||
# For pull requests, push updated cassettes even when tests fail
|
||||
if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
|
||||
env:
|
||||
PR_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
run: |
|
||||
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
|
||||
is_pull_request=true
|
||||
cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
|
||||
else
|
||||
cassette_branch="${{ github.ref_name }}"
|
||||
fi
|
||||
|
||||
cd tests/vcr_cassettes
|
||||
# Commit & push changes to cassettes if any
|
||||
if ! git diff --quiet; then
|
||||
git add .
|
||||
git commit -m "Auto-update cassettes"
|
||||
git push origin HEAD:$cassette_branch
|
||||
if [ ! $is_pull_request ]; then
|
||||
cd ../..
|
||||
git add tests/vcr_cassettes
|
||||
git commit -m "Update cassette submodule"
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
echo "updated=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "updated=false" >> $GITHUB_OUTPUT
|
||||
echo "No cassette changes to commit"
|
||||
fi
|
||||
|
||||
- name: Post Set up git token auth
|
||||
if: steps.setup_git_auth.outcome == 'success'
|
||||
run: |
|
||||
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
|
||||
- name: Apply "behaviour change" label and comment on PR
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
PR_NUMBER="${{ github.event.pull_request.number }}"
|
||||
TOKEN="${{ secrets.PAT_REVIEW }}"
|
||||
REPO="${{ github.repository }}"
|
||||
|
||||
if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
|
||||
echo "Adding label and comment..."
|
||||
echo $TOKEN | gh auth login --with-token
|
||||
gh issue edit $PR_NUMBER --add-label "behaviour change"
|
||||
gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
|
||||
fi
|
||||
flags: forge
|
||||
|
||||
- name: Upload logs to artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-logs
|
||||
path: classic/forge/logs/
|
||||
path: classic/logs/
|
||||
|
||||
60
.github/workflows/classic-frontend-ci.yml
vendored
60
.github/workflows/classic-frontend-ci.yml
vendored
@@ -1,60 +0,0 @@
|
||||
name: Classic - Frontend CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- dev
|
||||
- 'ci-test*' # This will match any branch that starts with "ci-test"
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
|
||||
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd classic/frontend
|
||||
flutter build web --base-href /app/
|
||||
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add classic/frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v8
|
||||
with:
|
||||
add-paths: classic/frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
67
.github/workflows/classic-python-checks.yml
vendored
67
.github/workflows/classic-python-checks.yml
vendored
@@ -7,7 +7,9 @@ on:
|
||||
- '.github/workflows/classic-python-checks-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/pyproject.toml'
|
||||
- 'classic/poetry.lock'
|
||||
- '**.py'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
pull_request:
|
||||
@@ -16,7 +18,9 @@ on:
|
||||
- '.github/workflows/classic-python-checks-ci.yml'
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/direct_benchmark/**'
|
||||
- 'classic/pyproject.toml'
|
||||
- 'classic/poetry.lock'
|
||||
- '**.py'
|
||||
- '!classic/forge/tests/vcr_cassettes'
|
||||
|
||||
@@ -27,44 +31,13 @@ concurrency:
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: classic
|
||||
|
||||
jobs:
|
||||
get-changed-parts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- id: changes-in
|
||||
name: Determine affected subprojects
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
original_autogpt:
|
||||
- classic/original_autogpt/autogpt/**
|
||||
- classic/original_autogpt/tests/**
|
||||
- classic/original_autogpt/poetry.lock
|
||||
forge:
|
||||
- classic/forge/forge/**
|
||||
- classic/forge/tests/**
|
||||
- classic/forge/poetry.lock
|
||||
benchmark:
|
||||
- classic/benchmark/agbenchmark/**
|
||||
- classic/benchmark/tests/**
|
||||
- classic/benchmark/poetry.lock
|
||||
outputs:
|
||||
changed-parts: ${{ steps.changes-in.outputs.changes }}
|
||||
|
||||
lint:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
min-python-version: "3.12"
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -81,42 +54,31 @@ jobs:
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C classic/${{ matrix.sub-package }} install
|
||||
run: poetry install
|
||||
|
||||
# Lint
|
||||
|
||||
- name: Lint (isort)
|
||||
run: poetry run isort --check .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Black)
|
||||
if: success() || failure()
|
||||
run: poetry run black --check .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
- name: Lint (Flake8)
|
||||
if: success() || failure()
|
||||
run: poetry run flake8 .
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
types:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
|
||||
fail-fast: false
|
||||
min-python-version: "3.12"
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -133,19 +95,16 @@ jobs:
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
|
||||
key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: curl -sSL https://install.python-poetry.org | python3 -
|
||||
|
||||
# Install dependencies
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: poetry -C classic/${{ matrix.sub-package }} install
|
||||
run: poetry install
|
||||
|
||||
# Typecheck
|
||||
|
||||
- name: Typecheck
|
||||
if: success() || failure()
|
||||
run: poetry run pyright
|
||||
working-directory: classic/${{ matrix.sub-package }}
|
||||
|
||||
42
.github/workflows/claude-ci-failure-auto-fix.yml
vendored
42
.github/workflows/claude-ci-failure-auto-fix.yml
vendored
@@ -40,48 +40,6 @@ jobs:
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
|
||||
|
||||
# Backend Python/Poetry setup (so Claude can run linting/tests)
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
cd autogpt_platform/backend
|
||||
HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install Python dependencies
|
||||
working-directory: autogpt_platform/backend
|
||||
run: poetry install
|
||||
|
||||
- name: Generate Prisma Client
|
||||
working-directory: autogpt_platform/backend
|
||||
run: poetry run prisma generate && poetry run gen-prisma-stub
|
||||
|
||||
# Frontend Node.js/pnpm setup (so Claude can run linting/tests)
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Install JavaScript dependencies
|
||||
working-directory: autogpt_platform/frontend
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Get CI failure details
|
||||
id: failure_details
|
||||
uses: actions/github-script@v8
|
||||
|
||||
22
.github/workflows/claude-dependabot.yml
vendored
22
.github/workflows/claude-dependabot.yml
vendored
@@ -77,15 +77,27 @@ jobs:
|
||||
run: poetry run prisma generate && poetry run gen-prisma-stub
|
||||
|
||||
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set pnpm store directory
|
||||
run: |
|
||||
pnpm config set store-dir ~/.pnpm-store
|
||||
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
|
||||
|
||||
- name: Cache frontend dependencies
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install JavaScript dependencies
|
||||
working-directory: autogpt_platform/frontend
|
||||
|
||||
22
.github/workflows/claude.yml
vendored
22
.github/workflows/claude.yml
vendored
@@ -93,15 +93,27 @@ jobs:
|
||||
run: poetry run prisma generate && poetry run gen-prisma-stub
|
||||
|
||||
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set pnpm store directory
|
||||
run: |
|
||||
pnpm config set store-dir ~/.pnpm-store
|
||||
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
|
||||
|
||||
- name: Cache frontend dependencies
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install JavaScript dependencies
|
||||
working-directory: autogpt_platform/frontend
|
||||
|
||||
4
.github/workflows/codeql.yml
vendored
4
.github/workflows/codeql.yml
vendored
@@ -62,7 +62,7 @@ jobs:
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v4
|
||||
uses: github/codeql-action/init@v3
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
build-mode: ${{ matrix.build-mode }}
|
||||
@@ -93,6 +93,6 @@ jobs:
|
||||
exit 1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v4
|
||||
uses: github/codeql-action/analyze@v3
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
||||
249
.github/workflows/platform-frontend-ci.yml
vendored
249
.github/workflows/platform-frontend-ci.yml
vendored
@@ -26,6 +26,7 @@ jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
cache-key: ${{ steps.cache-key.outputs.key }}
|
||||
components-changed: ${{ steps.filter.outputs.components }}
|
||||
|
||||
steps:
|
||||
@@ -40,17 +41,28 @@ jobs:
|
||||
components:
|
||||
- 'autogpt_platform/frontend/src/components/**'
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Install dependencies to populate cache
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Generate cache key
|
||||
id: cache-key
|
||||
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache dependencies
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ steps.cache-key.outputs.key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
lint:
|
||||
@@ -61,15 +73,22 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
@@ -92,15 +111,22 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
@@ -115,8 +141,10 @@ jobs:
|
||||
exitOnceUploaded: true
|
||||
|
||||
e2e_test:
|
||||
name: end-to-end tests
|
||||
runs-on: big-boi
|
||||
needs: setup
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -124,11 +152,19 @@ jobs:
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Platform - Copy default supabase .env
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Copy default supabase .env
|
||||
run: |
|
||||
cp ../.env.default ../.env
|
||||
|
||||
- name: Set up Platform - Copy backend .env and set OpenAI API key
|
||||
- name: Copy backend .env and set OpenAI API key
|
||||
run: |
|
||||
cp ../backend/.env.default ../backend/.env
|
||||
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
|
||||
@@ -136,125 +172,77 @@ jobs:
|
||||
# Used by E2E test data script to generate embeddings for approved store agents
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
- name: Set up Platform - Set up Docker Buildx
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Set up Platform - Expose GHA cache to docker buildx CLI
|
||||
uses: crazy-max/ghaction-github-runtime@v3
|
||||
|
||||
- name: Set up Platform - Build Docker images (with cache)
|
||||
working-directory: autogpt_platform
|
||||
run: |
|
||||
pip install pyyaml
|
||||
|
||||
# Resolve extends and generate a flat compose file that bake can understand
|
||||
docker compose -f docker-compose.yml config > docker-compose.resolved.yml
|
||||
|
||||
# Add cache configuration to the resolved compose file
|
||||
python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
|
||||
--source docker-compose.resolved.yml \
|
||||
--cache-from "type=gha" \
|
||||
--cache-to "type=gha,mode=max" \
|
||||
--backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
|
||||
--frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
|
||||
--git-ref "${{ github.ref }}"
|
||||
|
||||
# Build with bake using the resolved compose file (now includes cache config)
|
||||
docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Cache E2E test data
|
||||
id: e2e-data-cache
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: /tmp/e2e_test_data.sql
|
||||
key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-frontend-test-
|
||||
|
||||
- name: Set up Platform - Start Supabase DB + Auth
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
|
||||
echo "Waiting for auth service to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
|
||||
|
||||
- name: Set up Platform - Run migrations
|
||||
run: |
|
||||
echo "Running migrations..."
|
||||
docker compose -f ../docker-compose.resolved.yml run --rm migrate
|
||||
echo "✅ Migrations completed"
|
||||
NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
DOCKER_BUILDKIT: 1
|
||||
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
|
||||
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||
|
||||
- name: Set up tests - Load cached E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit == 'true'
|
||||
- name: Move cache
|
||||
run: |
|
||||
echo "✅ Found cached E2E test data, restoring..."
|
||||
{
|
||||
echo "SET session_replication_role = 'replica';"
|
||||
cat /tmp/e2e_test_data.sql
|
||||
echo "SET session_replication_role = 'origin';"
|
||||
} | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
|
||||
# Refresh materialized views after restore
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
|
||||
rm -rf /tmp/.buildx-cache
|
||||
if [ -d "/tmp/.buildx-cache-new" ]; then
|
||||
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||
fi
|
||||
|
||||
echo "✅ E2E test data restored from cache"
|
||||
|
||||
- name: Set up Platform - Start (all other services)
|
||||
- name: Wait for services to be ready
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d --no-build
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
|
||||
|
||||
- name: Set up tests - Create E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit != 'true'
|
||||
- name: Create E2E test data
|
||||
run: |
|
||||
echo "Creating E2E test data..."
|
||||
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
# First try to run the script from inside the container
|
||||
if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
|
||||
echo "✅ Found e2e_test_data.py in container, running it..."
|
||||
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
else
|
||||
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
|
||||
# Copy the script into the container and run it
|
||||
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
|
||||
echo "❌ Failed to copy script to container"
|
||||
exit 1
|
||||
}
|
||||
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
# Dump auth.users + platform schema for cache (two separate dumps)
|
||||
echo "Dumping database for cache..."
|
||||
{
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--table='auth.users' postgres
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--schema=platform \
|
||||
--exclude-table='platform._prisma_migrations' \
|
||||
--exclude-table='platform.apscheduler_jobs' \
|
||||
--exclude-table='platform.apscheduler_jobs_batched_notifications' \
|
||||
postgres
|
||||
} > /tmp/e2e_test_data.sql
|
||||
|
||||
echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
|
||||
|
||||
- name: Set up tests - Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up tests - Set up Node
|
||||
uses: actions/setup-node@v6
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Set up tests - Install dependencies
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Set up tests - Install browser 'chromium'
|
||||
- name: Install Browser 'chromium'
|
||||
run: pnpm playwright install --with-deps chromium
|
||||
|
||||
- name: Run Playwright tests
|
||||
@@ -281,7 +269,7 @@ jobs:
|
||||
|
||||
- name: Print Final Docker Compose logs
|
||||
if: always()
|
||||
run: docker compose -f ../docker-compose.resolved.yml logs
|
||||
run: docker compose -f ../docker-compose.yml logs
|
||||
|
||||
integration_test:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -293,15 +281,22 @@ jobs:
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Node
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
@@ -1,195 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add cache configuration to a resolved docker-compose file for all services
|
||||
that have a build key, and ensure image names match what docker compose expects.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
DEFAULT_BRANCH = "dev"
|
||||
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Add cache config to a resolved compose file"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--source",
|
||||
required=True,
|
||||
help="Source compose file to read (should be output of `docker compose config`)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cache-from",
|
||||
default="type=gha",
|
||||
help="Cache source configuration",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cache-to",
|
||||
default="type=gha,mode=max",
|
||||
help="Cache destination configuration",
|
||||
)
|
||||
for component in CACHE_BUILDS_FOR_COMPONENTS:
|
||||
parser.add_argument(
|
||||
f"--{component}-hash",
|
||||
default="",
|
||||
help=f"Hash for {component} cache scope (e.g., from hashFiles())",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--git-ref",
|
||||
default="",
|
||||
help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
|
||||
git_ref_scope = ""
|
||||
if args.git_ref:
|
||||
git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")
|
||||
|
||||
with open(args.source, "r") as f:
|
||||
compose = yaml.safe_load(f)
|
||||
|
||||
# Get project name from compose file or default
|
||||
project_name = compose.get("name", "autogpt_platform")
|
||||
|
||||
def get_image_name(dockerfile: str, target: str) -> str:
|
||||
"""Generate image name based on Dockerfile folder and build target."""
|
||||
dockerfile_parts = dockerfile.replace("\\", "/").split("/")
|
||||
if len(dockerfile_parts) >= 2:
|
||||
folder_name = dockerfile_parts[-2] # e.g., "backend" or "frontend"
|
||||
else:
|
||||
folder_name = "app"
|
||||
return f"{project_name}-{folder_name}:{target}"
|
||||
|
||||
def get_build_key(dockerfile: str, target: str) -> str:
|
||||
"""Generate a unique key for a Dockerfile+target combination."""
|
||||
return f"{dockerfile}:{target}"
|
||||
|
||||
def get_component(dockerfile: str) -> str | None:
|
||||
"""Get component name (frontend/backend) from dockerfile path."""
|
||||
for component in CACHE_BUILDS_FOR_COMPONENTS:
|
||||
if component in dockerfile:
|
||||
return component
|
||||
return None
|
||||
|
||||
# First pass: collect all services with build configs and identify duplicates
|
||||
# Track which (dockerfile, target) combinations we've seen
|
||||
build_key_to_first_service: dict[str, str] = {}
|
||||
services_to_build: list[str] = []
|
||||
services_to_dedupe: list[str] = []
|
||||
|
||||
for service_name, service_config in compose.get("services", {}).items():
|
||||
if "build" not in service_config:
|
||||
continue
|
||||
|
||||
build_config = service_config["build"]
|
||||
dockerfile = build_config.get("dockerfile", "Dockerfile")
|
||||
target = build_config.get("target", "default")
|
||||
build_key = get_build_key(dockerfile, target)
|
||||
|
||||
if build_key not in build_key_to_first_service:
|
||||
# First service with this build config - it will do the actual build
|
||||
build_key_to_first_service[build_key] = service_name
|
||||
services_to_build.append(service_name)
|
||||
else:
|
||||
# Duplicate - will just use the image from the first service
|
||||
services_to_dedupe.append(service_name)
|
||||
|
||||
# Second pass: configure builds and deduplicate
|
||||
modified_services = []
|
||||
for service_name, service_config in compose.get("services", {}).items():
|
||||
if "build" not in service_config:
|
||||
continue
|
||||
|
||||
build_config = service_config["build"]
|
||||
dockerfile = build_config.get("dockerfile", "Dockerfile")
|
||||
target = build_config.get("target", "latest")
|
||||
image_name = get_image_name(dockerfile, target)
|
||||
|
||||
# Set image name for all services (needed for both builders and deduped)
|
||||
service_config["image"] = image_name
|
||||
|
||||
if service_name in services_to_dedupe:
|
||||
# Remove build config - this service will use the pre-built image
|
||||
del service_config["build"]
|
||||
continue
|
||||
|
||||
# This service will do the actual build - add cache config
|
||||
cache_from_list = []
|
||||
cache_to_list = []
|
||||
|
||||
component = get_component(dockerfile)
|
||||
if not component:
|
||||
# Skip services that don't clearly match frontend/backend
|
||||
continue
|
||||
|
||||
# Get the hash for this component
|
||||
component_hash = getattr(args, f"{component}_hash")
|
||||
|
||||
# Scope format: platform-{component}-{target}-{hash|ref}
|
||||
# Example: platform-backend-server-abc123
|
||||
|
||||
if "type=gha" in args.cache_from:
|
||||
# 1. Primary: exact hash match (most specific)
|
||||
if component_hash:
|
||||
hash_scope = f"platform-{component}-{target}-{component_hash}"
|
||||
cache_from_list.append(f"{args.cache_from},scope={hash_scope}")
|
||||
|
||||
# 2. Fallback: branch-based cache
|
||||
if git_ref_scope:
|
||||
ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
|
||||
cache_from_list.append(f"{args.cache_from},scope={ref_scope}")
|
||||
|
||||
# 3. Fallback: dev branch cache (for PRs/feature branches)
|
||||
if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
|
||||
master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
|
||||
cache_from_list.append(f"{args.cache_from},scope={master_scope}")
|
||||
|
||||
if "type=gha" in args.cache_to:
|
||||
# Write to both hash-based and branch-based scopes
|
||||
if component_hash:
|
||||
hash_scope = f"platform-{component}-{target}-{component_hash}"
|
||||
cache_to_list.append(f"{args.cache_to},scope={hash_scope}")
|
||||
|
||||
if git_ref_scope:
|
||||
ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
|
||||
cache_to_list.append(f"{args.cache_to},scope={ref_scope}")
|
||||
|
||||
# Ensure we have at least one cache source/target
|
||||
if not cache_from_list:
|
||||
cache_from_list.append(args.cache_from)
|
||||
if not cache_to_list:
|
||||
cache_to_list.append(args.cache_to)
|
||||
|
||||
build_config["cache_from"] = cache_from_list
|
||||
build_config["cache_to"] = cache_to_list
|
||||
modified_services.append(service_name)
|
||||
|
||||
# Write back to the same file
|
||||
with open(args.source, "w") as f:
|
||||
yaml.dump(compose, f, default_flow_style=False, sort_keys=False)
|
||||
|
||||
print(f"Added cache config to {len(modified_services)} services in {args.source}:")
|
||||
for svc in modified_services:
|
||||
svc_config = compose["services"][svc]
|
||||
build_cfg = svc_config.get("build", {})
|
||||
cache_from_list = build_cfg.get("cache_from", ["none"])
|
||||
cache_to_list = build_cfg.get("cache_to", ["none"])
|
||||
print(f" - {svc}")
|
||||
print(f" image: {svc_config.get('image', 'N/A')}")
|
||||
print(f" cache_from: {cache_from_list}")
|
||||
print(f" cache_to: {cache_to_list}")
|
||||
if services_to_dedupe:
|
||||
print(
|
||||
f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
|
||||
)
|
||||
for svc in services_to_dedupe:
|
||||
print(f" - {svc} -> {compose['services'][svc].get('image', 'N/A')}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
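To make the scope naming in the script above concrete, here is a small illustrative snippet (not part of the repo) that reproduces the three-level cache_from fallback it builds for a backend `server` target on a feature branch:

# Illustrative only - reproduces the scope naming from the removed cache-config script.
DEFAULT_BRANCH = "dev"

def cache_from_scopes(component: str, target: str, component_hash: str, git_ref: str) -> list[str]:
    """Scope format: platform-{component}-{target}-{hash|ref}, most specific first."""
    ref = git_ref.replace("refs/heads/", "").replace("/", "-")
    scopes = []
    if component_hash:                    # 1. exact content-hash match
        scopes.append(f"platform-{component}-{target}-{component_hash}")
    if ref:                               # 2. same-branch cache
        scopes.append(f"platform-{component}-{target}-{ref}")
    if ref and ref != DEFAULT_BRANCH:     # 3. dev-branch fallback for PRs/feature branches
        scopes.append(f"platform-{component}-{target}-{DEFAULT_BRANCH}")
    return [f"type=gha,scope={s}" for s in scopes]

print(cache_from_scopes("backend", "server", "abc123", "refs/heads/feature/x"))
# ['type=gha,scope=platform-backend-server-abc123',
#  'type=gha,scope=platform-backend-server-feature-x',
#  'type=gha,scope=platform-backend-server-dev']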
.gitignore (11 changes, vendored)
@@ -3,6 +3,7 @@
classic/original_autogpt/keys.py
classic/original_autogpt/*.json
auto_gpt_workspace/*
.autogpt/
*.mpeg
.env
# Root .env files
@@ -159,6 +160,10 @@ CURRENT_BULLETIN.md

# AgBenchmark
classic/benchmark/agbenchmark/reports/
classic/reports/
classic/direct_benchmark/reports/
classic/.benchmark_workspaces/
classic/direct_benchmark/.benchmark_workspaces/

# Nodejs
package-lock.json
@@ -177,7 +182,11 @@ autogpt_platform/backend/settings.py

*.ign.*
.test-contents
**/.claude/settings.local.json
.claude/settings.local.json
CLAUDE.local.md
/autogpt_platform/backend/logs
.next

# Test database
test.db
.next
.gitmodules (3 changes, vendored)
@@ -1,3 +0,0 @@
[submodule "classic/forge/tests/vcr_cassettes"]
	path = classic/forge/tests/vcr_cassettes
	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
@@ -43,29 +43,10 @@ repos:
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - AutoGPT
|
||||
alias: poetry-install-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt install
|
||||
# include forge source (since it's a path dependency)
|
||||
files: ^classic/(original_autogpt|forge)/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Forge
|
||||
alias: poetry-install-classic-forge
|
||||
entry: poetry -C classic/forge install
|
||||
files: ^classic/forge/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Benchmark
|
||||
alias: poetry-install-classic-benchmark
|
||||
entry: poetry -C classic/benchmark install
|
||||
files: ^classic/benchmark/poetry\.lock$
|
||||
name: Check & Install dependencies - Classic
|
||||
alias: poetry-install-classic
|
||||
entry: poetry -C classic install
|
||||
files: ^classic/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
@@ -116,26 +97,10 @@ repos:
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - AutoGPT
|
||||
alias: isort-classic-autogpt
|
||||
entry: poetry -P classic/original_autogpt run isort -p autogpt
|
||||
files: ^classic/original_autogpt/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Forge
|
||||
alias: isort-classic-forge
|
||||
entry: poetry -P classic/forge run isort -p forge
|
||||
files: ^classic/forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Benchmark
|
||||
alias: isort-classic-benchmark
|
||||
entry: poetry -P classic/benchmark run isort -p agbenchmark
|
||||
files: ^classic/benchmark/
|
||||
name: Lint (isort) - Classic
|
||||
alias: isort-classic
|
||||
entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' --
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
@@ -149,26 +114,13 @@ repos:
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
# To have flake8 load the config of the individual subprojects, we have to call
|
||||
# them separately.
|
||||
# Use consolidated flake8 config at classic/.flake8
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - AutoGPT
|
||||
alias: flake8-classic-autogpt
|
||||
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=classic/original_autogpt/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Forge
|
||||
alias: flake8-classic-forge
|
||||
files: ^classic/forge/(forge|tests)/
|
||||
args: [--config=classic/forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Benchmark
|
||||
alias: flake8-classic-benchmark
|
||||
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=classic/benchmark/.flake8]
|
||||
name: Lint (Flake8) - Classic
|
||||
alias: flake8-classic
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/
|
||||
args: [--config=classic/.flake8]
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
@@ -204,29 +156,10 @@ repos:
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - AutoGPT
|
||||
alias: pyright-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt run pyright
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Forge
|
||||
alias: pyright-classic-forge
|
||||
entry: poetry -C classic/forge run pyright
|
||||
files: ^classic/forge/(forge/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Benchmark
|
||||
alias: pyright-classic-benchmark
|
||||
entry: poetry -C classic/benchmark run pyright
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
name: Typecheck - Classic
|
||||
alias: pyright-classic
|
||||
entry: poetry -C classic run pyright
|
||||
files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
@@ -45,11 +45,6 @@ AutoGPT Platform is a monorepo containing:
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

### Branching Strategy

- **`dev`** is the main development branch. All PRs should target `dev`.
- **`master`** is the production branch. Only used for production releases.

### Creating Pull Requests

- Create the PR against the `dev` branch of the repository.
autogpt_platform/autogpt_libs/poetry.lock (169 changes, generated)
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "46.0.5"
|
||||
version = "46.0.4"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
optional = false
|
||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
|
||||
{file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
|
||||
{file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
|
||||
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
||||
sdist = ["build (>=1.0.0)"]
|
||||
ssh = ["bcrypt (>=3.1.5)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test-randomorder = ["pytest-randomly"]
|
||||
|
||||
[[package]]
|
||||
@@ -570,25 +570,24 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.128.7"
|
||||
version = "0.128.0"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
|
||||
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
|
||||
{file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
|
||||
{file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
annotated-doc = ">=0.0.2"
|
||||
pydantic = ">=2.7.0"
|
||||
starlette = ">=0.40.0,<1.0.0"
|
||||
starlette = ">=0.40.0,<0.51.0"
|
||||
typing-extensions = ">=4.8.0"
|
||||
typing-inspection = ">=0.4.2"
|
||||
|
||||
[package.extras]
|
||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||
|
||||
@@ -1063,14 +1062,14 @@ urllib3 = ">=1.26.0,<3"
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-server-sdk"
|
||||
version = "9.15.0"
|
||||
version = "9.14.1"
|
||||
description = "LaunchDarkly SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
|
||||
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1479,14 +1478,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
|
||||
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
|
||||
{file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
|
||||
{file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2249,14 +2248,14 @@ cli = ["click (>=5.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "realtime"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
|
||||
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
|
||||
{file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
|
||||
{file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2437,14 +2436,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
||||
|
||||
[[package]]
|
||||
name = "storage3"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Supabase Storage client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
|
||||
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
|
||||
{file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
|
||||
{file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2488,35 +2487,35 @@ python-dateutil = ">=2.6.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Supabase client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
|
||||
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
|
||||
{file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
|
||||
{file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.26,<0.29"
|
||||
postgrest = "2.28.0"
|
||||
realtime = "2.28.0"
|
||||
storage3 = "2.28.0"
|
||||
supabase-auth = "2.28.0"
|
||||
supabase-functions = "2.28.0"
|
||||
postgrest = "2.27.2"
|
||||
realtime = "2.27.2"
|
||||
storage3 = "2.27.2"
|
||||
supabase-auth = "2.27.2"
|
||||
supabase-functions = "2.27.2"
|
||||
yarl = ">=1.22.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase-auth"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Python Client Library for Supabase Auth"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
|
||||
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
|
||||
{file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
|
||||
{file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2526,14 +2525,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||
|
||||
[[package]]
|
||||
name = "supabase-functions"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Library for Supabase Functions"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
||||
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
||||
{file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
|
||||
{file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2912,4 +2911,4 @@ type = ["pytest-mypy"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<4.0"
|
||||
content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"
|
||||
content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
|
||||
|
||||
@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.7"
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.15.0"
launchdarkly-server-sdk = "^9.14.1"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.28.0"
supabase = "^2.27.2"
uvicorn = "^0.40.0"

[tool.poetry.group.dev.dependencies]
@@ -104,12 +104,6 @@ TWITTER_CLIENT_SECRET=
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_API_KEY=
# Linear project and team IDs for the feature request tracker.
# Find these in your Linear workspace URL: linear.app/<workspace>/project/<project-id>
# and in team settings. Used by the chat copilot to file and search feature requests.
LINEAR_FEATURE_REQUEST_PROJECT_ID=
LINEAR_FEATURE_REQUEST_TEAM_ID=
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=
@@ -1,5 +1,3 @@
# ============================ DEPENDENCY BUILDER ============================ #

FROM debian:13-slim AS builder

# Set environment variables
@@ -53,9 +51,7 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
RUN poetry run prisma generate && poetry run gen-prisma-stub

# ============================== BACKEND SERVER ============================== #

FROM debian:13-slim AS server
FROM debian:13-slim AS server_dependencies

WORKDIR /app

@@ -67,14 +63,15 @@ ENV POETRY_HOME=/opt/poetry \
ENV PATH=/opt/poetry/bin:$PATH

# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
RUN apt-get update && apt-get install -y --no-install-recommends \
RUN apt-get update && apt-get install -y \
    python3.13 \
    python3-pip \
    ffmpeg \
    imagemagick \
    && rm -rf /var/lib/apt/lists/*

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
# Copy Node.js installation for Prisma
@@ -84,54 +81,30 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
COPY --from=builder /usr/bin/npx /usr/bin/npx
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

WORKDIR /app/autogpt_platform/backend

# Copy only the .venv from builder (not the entire /app directory)
# The .venv includes the generated Prisma client
COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

# Copy dependency files + autogpt_libs (path dependency)
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
RUN mkdir -p /app/autogpt_platform/autogpt_libs
RUN mkdir -p /app/autogpt_platform/backend

# Copy backend code + docs (for Copilot docs search)
COPY autogpt_platform/backend ./
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs

COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend
COPY docs /app/docs
RUN poetry install --no-ansi --only-root

ENV PORT=8000

CMD ["poetry", "run", "rest"]

# =============================== DB MIGRATOR =============================== #

# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
FROM debian:13-slim AS migrate

WORKDIR /app/autogpt_platform/backend

ENV DEBIAN_FRONTEND=noninteractive

# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.13 \
    python3-pip \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

# Copy Node.js from builder (needed for Prisma CLI)
COPY --from=builder /usr/bin/node /usr/bin/node
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
COPY --from=builder /usr/bin/npm /usr/bin/npm

# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

# Install prisma-client-py directly (much smaller than copying full venv)
RUN pip3 install prisma>=0.15.0 --break-system-packages

COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
COPY autogpt_platform/backend/migrations ./migrations
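For orientation, a minimal sketch of building and exercising the lightweight `migrate` target shown above. The image tag follows the {project}-{folder}:{target} naming used by the CI cache script earlier in this diff; the DATABASE_URL value is a placeholder, and the assumption that the image is driven via `prisma migrate deploy` is mine, not something this diff states.

# run_migrate_stage.py - illustrative sketch only; tag, env values, and entry command are assumptions
import subprocess

IMAGE = "autogpt_platform-backend:migrate"

# Build only the migrate stage of the backend Dockerfile
subprocess.run(
    ["docker", "build", "--target", "migrate", "-t", IMAGE,
     "-f", "autogpt_platform/backend/Dockerfile", "."],
    check=True,
)

# Apply Prisma migrations against a reachable Postgres instance (URL is a placeholder)
subprocess.run(
    ["docker", "run", "--rm",
     "-e", "DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres",
     IMAGE, "prisma", "migrate", "deploy"],
    check=True,
)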
@@ -24,7 +24,6 @@ from .tools.models import (
|
||||
AgentPreviewResponse,
|
||||
AgentSavedResponse,
|
||||
AgentsFoundResponse,
|
||||
BlockDetailsResponse,
|
||||
BlockListResponse,
|
||||
BlockOutputResponse,
|
||||
ClarificationNeededResponse,
|
||||
@@ -972,7 +971,6 @@ ToolResponseUnion = (
|
||||
| AgentSavedResponse
|
||||
| ClarificationNeededResponse
|
||||
| BlockListResponse
|
||||
| BlockDetailsResponse
|
||||
| BlockOutputResponse
|
||||
| DocSearchResultsResponse
|
||||
| DocPageResponse
|
||||
|
||||
@@ -1245,7 +1245,6 @@ async def _stream_chat_chunks(
|
||||
return
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
|
||||
if _is_retryable_error(e) and retry_count < MAX_RETRIES:
|
||||
retry_count += 1
|
||||
# Calculate delay with exponential backoff
|
||||
@@ -1261,27 +1260,12 @@ async def _stream_chat_chunks(
|
||||
continue # Retry the stream
|
||||
else:
|
||||
# Non-retryable error or max retries exceeded
|
||||
_log_api_error(
|
||||
error=e,
|
||||
context="stream (not retrying)",
|
||||
session_id=session.session_id if session else None,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=model,
|
||||
retry_count=retry_count,
|
||||
logger.error(
|
||||
f"Error in stream (not retrying): {e!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
error_code = None
|
||||
error_text = str(e)
|
||||
|
||||
error_details = _extract_api_error_details(e)
|
||||
if error_details.get("response_body"):
|
||||
body = error_details["response_body"]
|
||||
if isinstance(body, dict):
|
||||
err = body.get("error")
|
||||
if isinstance(err, dict) and err.get("message"):
|
||||
error_text = err["message"]
|
||||
elif body.get("message"):
|
||||
error_text = body["message"]
|
||||
|
||||
if _is_region_blocked_error(e):
|
||||
error_code = "MODEL_NOT_AVAILABLE_REGION"
|
||||
error_text = (
|
||||
@@ -1298,13 +1282,9 @@ async def _stream_chat_chunks(
|
||||
|
||||
# If we exit the retry loop without returning, it means we exhausted retries
|
||||
if last_error:
|
||||
_log_api_error(
|
||||
error=last_error,
|
||||
context=f"stream (max retries {MAX_RETRIES} exceeded)",
|
||||
session_id=session.session_id if session else None,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=model,
|
||||
retry_count=MAX_RETRIES,
|
||||
logger.error(
|
||||
f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
|
||||
yield StreamFinish()
|
||||
@@ -1877,7 +1857,6 @@ async def _generate_llm_continuation(
|
||||
break # Success, exit retry loop
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
|
||||
if _is_retryable_error(e) and retry_count < MAX_RETRIES:
|
||||
retry_count += 1
|
||||
delay = min(
|
||||
@@ -1891,25 +1870,17 @@ async def _generate_llm_continuation(
|
||||
await asyncio.sleep(delay)
|
||||
continue
|
||||
else:
|
||||
# Non-retryable error - log details and exit gracefully
|
||||
_log_api_error(
|
||||
error=e,
|
||||
context="LLM continuation (not retrying)",
|
||||
session_id=session_id,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=config.model,
|
||||
retry_count=retry_count,
|
||||
# Non-retryable error - log and exit gracefully
|
||||
logger.error(
|
||||
f"Non-retryable error in LLM continuation: {e!s}",
|
||||
exc_info=True,
|
||||
)
|
||||
return
|
||||
|
||||
if last_error:
|
||||
_log_api_error(
|
||||
error=last_error,
|
||||
context=f"LLM continuation (max retries {MAX_RETRIES} exceeded)",
|
||||
session_id=session_id,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=config.model,
|
||||
retry_count=MAX_RETRIES,
|
||||
logger.error(
|
||||
f"Max retries ({MAX_RETRIES}) exceeded for LLM continuation. "
|
||||
f"Last error: {last_error!s}"
|
||||
)
|
||||
return
|
||||
|
||||
@@ -1949,91 +1920,6 @@ async def _generate_llm_continuation(
|
||||
logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)
|
||||
|
||||
|
||||
def _log_api_error(
|
||||
error: Exception,
|
||||
context: str,
|
||||
session_id: str | None = None,
|
||||
message_count: int | None = None,
|
||||
model: str | None = None,
|
||||
retry_count: int = 0,
|
||||
) -> None:
|
||||
"""Log detailed API error information for debugging."""
|
||||
details = _extract_api_error_details(error)
|
||||
details["context"] = context
|
||||
details["session_id"] = session_id
|
||||
details["message_count"] = message_count
|
||||
details["model"] = model
|
||||
details["retry_count"] = retry_count
|
||||
|
||||
if isinstance(error, RateLimitError):
|
||||
logger.warning(f"Rate limit error in {context}: {details}", exc_info=error)
|
||||
elif isinstance(error, APIConnectionError):
|
||||
logger.warning(f"API connection error in {context}: {details}", exc_info=error)
|
||||
elif isinstance(error, APIStatusError) and error.status_code >= 500:
|
||||
logger.error(f"API server error (5xx) in {context}: {details}", exc_info=error)
|
||||
else:
|
||||
logger.error(f"API error in {context}: {details}", exc_info=error)
|
||||
|
||||
|
||||
def _extract_api_error_details(error: Exception) -> dict[str, Any]:
|
||||
"""Extract detailed information from OpenAI/OpenRouter API errors."""
|
||||
error_msg = str(error)
|
||||
details: dict[str, Any] = {
|
||||
"error_type": type(error).__name__,
|
||||
"error_message": error_msg[:500] + "..." if len(error_msg) > 500 else error_msg,
|
||||
}
|
||||
|
||||
if hasattr(error, "code"):
|
||||
details["code"] = getattr(error, "code", None)
|
||||
if hasattr(error, "param"):
|
||||
details["param"] = getattr(error, "param", None)
|
||||
|
||||
if isinstance(error, APIStatusError):
|
||||
details["status_code"] = error.status_code
|
||||
details["request_id"] = getattr(error, "request_id", None)
|
||||
|
||||
if hasattr(error, "body") and error.body:
|
||||
details["response_body"] = _sanitize_error_body(error.body)
|
||||
|
||||
if hasattr(error, "response") and error.response:
|
||||
headers = error.response.headers
|
||||
details["openrouter_provider"] = headers.get("x-openrouter-provider")
|
||||
details["openrouter_model"] = headers.get("x-openrouter-model")
|
||||
details["retry_after"] = headers.get("retry-after")
|
||||
details["rate_limit_remaining"] = headers.get("x-ratelimit-remaining")
|
||||
|
||||
return details
|
||||
|
||||
|
||||
def _sanitize_error_body(
|
||||
body: Any, max_length: int = 2000
|
||||
) -> dict[str, Any] | str | None:
|
||||
"""Extract only safe fields from error response body to avoid logging sensitive data."""
|
||||
if not isinstance(body, dict):
|
||||
# Non-dict bodies (e.g., HTML error pages) - return truncated string
|
||||
if body is not None:
|
||||
body_str = str(body)
|
||||
if len(body_str) > max_length:
|
||||
return body_str[:max_length] + "...[truncated]"
|
||||
return body_str
|
||||
return None
|
||||
|
||||
safe_fields = ("message", "type", "code", "param", "error")
|
||||
sanitized: dict[str, Any] = {}
|
||||
|
||||
for field in safe_fields:
|
||||
if field in body:
|
||||
value = body[field]
|
||||
if field == "error" and isinstance(value, dict):
|
||||
sanitized[field] = _sanitize_error_body(value, max_length)
|
||||
elif isinstance(value, str) and len(value) > max_length:
|
||||
sanitized[field] = value[:max_length] + "...[truncated]"
|
||||
else:
|
||||
sanitized[field] = value
|
||||
|
||||
return sanitized if sanitized else None
|
||||
|
||||
|
||||
async def _generate_llm_continuation_with_streaming(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
|
||||
@@ -52,38 +52,6 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
|
||||
"delete_workspace_file": DeleteWorkspaceFileTool(),
|
||||
}
|
||||
|
||||
|
||||
def _register_feature_request_tools() -> None:
|
||||
"""Register feature request tools only if Linear configuration is available."""
|
||||
from backend.util.settings import Settings
|
||||
|
||||
try:
|
||||
secrets = Settings().secrets
|
||||
except Exception:
|
||||
logger.warning("Feature request tools disabled: failed to load settings")
|
||||
return
|
||||
|
||||
if not (
|
||||
secrets.linear_api_key
|
||||
and secrets.linear_feature_request_project_id
|
||||
and secrets.linear_feature_request_team_id
|
||||
):
|
||||
logger.info(
|
||||
"Feature request tools disabled: LINEAR_API_KEY, "
|
||||
"LINEAR_FEATURE_REQUEST_PROJECT_ID, or "
|
||||
"LINEAR_FEATURE_REQUEST_TEAM_ID is not configured"
|
||||
)
|
||||
return
|
||||
|
||||
from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool
|
||||
|
||||
TOOL_REGISTRY["search_feature_requests"] = SearchFeatureRequestsTool()
|
||||
TOOL_REGISTRY["create_feature_request"] = CreateFeatureRequestTool()
|
||||
logger.info("Feature request tools enabled")
|
||||
|
||||
|
||||
_register_feature_request_tools()
|
||||
|
||||
# Export individual tool instances for backwards compatibility
|
||||
find_agent_tool = TOOL_REGISTRY["find_agent"]
|
||||
run_agent_tool = TOOL_REGISTRY["run_agent"]
|
||||
|
||||
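To illustrate the effect of the conditional registration above, a small hypothetical usage sketch (not from the repo): callers look tools up via TOOL_REGISTRY and handle the case where the Linear-backed tools were never registered because the LINEAR_* settings are missing.

# Illustrative only - graceful handling of optionally registered tools.
from backend.api.features.chat.tools import TOOL_REGISTRY  # import path taken from this diff

def get_tool_or_none(name: str):
    """Return the registered tool instance, or None if it was not enabled."""
    return TOOL_REGISTRY.get(name)

feature_request_tool = get_tool_or_none("create_feature_request")
if feature_request_tool is None:
    print("Feature request tools are disabled; check the LINEAR_* settings.")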
@@ -1,448 +0,0 @@
"""Feature request tools - search and create feature requests via Linear."""

import logging
from typing import Any

from pydantic import SecretStr

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    FeatureRequestCreatedResponse,
    FeatureRequestInfo,
    FeatureRequestSearchResponse,
    NoResultsResponse,
    ToolResponseBase,
)
from backend.blocks.linear._api import LinearClient
from backend.data.model import APIKeyCredentials
from backend.data.user import get_user_email_by_id
from backend.util.settings import Settings

logger = logging.getLogger(__name__)

MAX_SEARCH_RESULTS = 10

# GraphQL queries/mutations
SEARCH_ISSUES_QUERY = """
query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) {
  searchIssues(term: $term, filter: $filter, first: $first) {
    nodes {
      id
      identifier
      title
      description
    }
  }
}
"""

CUSTOMER_UPSERT_MUTATION = """
mutation CustomerUpsert($input: CustomerUpsertInput!) {
  customerUpsert(input: $input) {
    success
    customer {
      id
      name
      externalIds
    }
  }
}
"""

ISSUE_CREATE_MUTATION = """
mutation IssueCreate($input: IssueCreateInput!) {
  issueCreate(input: $input) {
    success
    issue {
      id
      identifier
      title
      url
    }
  }
}
"""

CUSTOMER_NEED_CREATE_MUTATION = """
mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) {
  customerNeedCreate(input: $input) {
    success
    need {
      id
      body
      customer {
        id
        name
      }
      issue {
        id
        identifier
        title
        url
      }
    }
  }
}
"""
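# --- Illustrative sketch (not part of the original file) -----------------------
# How one of the documents above could be posted to Linear's GraphQL API. The
# production code goes through LinearClient; this stand-alone version uses only
# the standard library. The endpoint URL and header layout are assumptions based
# on Linear's public GraphQL API, not taken from this repository.
import json
import os
import urllib.request

SEARCH_QUERY = """
query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) {
  searchIssues(term: $term, filter: $filter, first: $first) {
    nodes { id identifier title description }
  }
}
"""


def search_issues(term: str, project_id: str, api_key: str) -> dict:
    # Variables mirror the shape built in SearchFeatureRequestsTool._execute below.
    payload = {
        "query": SEARCH_QUERY,
        "variables": {
            "term": term,
            "filter": {"project": {"id": {"eq": project_id}}},
            "first": 10,
        },
    }
    request = urllib.request.Request(
        "https://api.linear.app/graphql",  # assumed public endpoint
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json", "Authorization": api_key},
    )
    with urllib.request.urlopen(request) as response:
        return json.load(response)


if __name__ == "__main__" and os.environ.get("LINEAR_API_KEY"):
    print(search_issues("dark mode", "some-project-id", os.environ["LINEAR_API_KEY"]))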
_settings: Settings | None = None


def _get_settings() -> Settings:
    global _settings
    if _settings is None:
        _settings = Settings()
    return _settings


def _get_linear_config() -> tuple[LinearClient, str, str]:
    """Return a configured Linear client, project ID, and team ID.

    Raises RuntimeError if any required setting is missing.
    """
    secrets = _get_settings().secrets
    if not secrets.linear_api_key:
        raise RuntimeError("LINEAR_API_KEY is not configured")
    if not secrets.linear_feature_request_project_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_PROJECT_ID is not configured")
    if not secrets.linear_feature_request_team_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_TEAM_ID is not configured")

    credentials = APIKeyCredentials(
        id="system-linear",
        provider="linear",
        api_key=SecretStr(secrets.linear_api_key),
        title="System Linear API Key",
    )
    client = LinearClient(credentials=credentials)
    return (
        client,
        secrets.linear_feature_request_project_id,
        secrets.linear_feature_request_team_id,
    )

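# --- Illustrative sketch (not part of the original file) -----------------------
# Why the key is wrapped in SecretStr above: pydantic masks the value in str()
# and repr(), so accidentally logging the credentials object does not leak the
# raw API key. get_secret_value() is the explicit escape hatch. The key below
# is a made-up placeholder.
from pydantic import SecretStr

example_key = SecretStr("lin_api_example_key")
print(example_key)                     # **********
print(example_key.get_secret_value())  # lin_api_example_key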
class SearchFeatureRequestsTool(BaseTool):
    """Tool for searching existing feature requests in Linear."""

    @property
    def name(self) -> str:
        return "search_feature_requests"

    @property
    def description(self) -> str:
        return (
            "Search existing feature requests to check if a similar request "
            "already exists before creating a new one. Returns matching feature "
            "requests with their ID, title, and description."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search term to find matching feature requests.",
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        query = kwargs.get("query", "").strip()
        session_id = session.session_id if session else None

        if not query:
            return ErrorResponse(
                message="Please provide a search query.",
                error="Missing query parameter",
                session_id=session_id,
            )

        try:
            client, project_id, _team_id = _get_linear_config()
            data = await client.query(
                SEARCH_ISSUES_QUERY,
                {
                    "term": query,
                    "filter": {
                        "project": {"id": {"eq": project_id}},
                    },
                    "first": MAX_SEARCH_RESULTS,
                },
            )

            nodes = data.get("searchIssues", {}).get("nodes", [])

            if not nodes:
                return NoResultsResponse(
                    message=f"No feature requests found matching '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use broader search terms",
                        "You can create a new feature request if none exists",
                    ],
                    session_id=session_id,
                )

            results = [
                FeatureRequestInfo(
                    id=node["id"],
                    identifier=node["identifier"],
                    title=node["title"],
                    description=node.get("description"),
                )
                for node in nodes
            ]

            return FeatureRequestSearchResponse(
                message=f"Found {len(results)} feature request(s) matching '{query}'.",
                results=results,
                count=len(results),
                query=query,
                session_id=session_id,
            )
        except Exception as e:
            logger.exception("Failed to search feature requests")
            return ErrorResponse(
                message="Failed to search feature requests.",
                error=str(e),
                session_id=session_id,
            )

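# --- Illustrative sketch (not part of the original file) -----------------------
# The shape of a tool call against SearchFeatureRequestsTool above, as an LLM
# would issue it, plus minimal argument validation mirroring the parameters
# schema. Only "query" is defined by the schema above; the rest is illustrative.
import json


def validate_search_arguments(raw_arguments: str) -> dict:
    args = json.loads(raw_arguments)
    if not str(args.get("query", "")).strip():
        raise ValueError("Missing query parameter")
    return args


if __name__ == "__main__":
    tool_call = {"name": "search_feature_requests", "arguments": '{"query": "dark mode"}'}
    print(validate_search_arguments(tool_call["arguments"]))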
class CreateFeatureRequestTool(BaseTool):
    """Tool for creating feature requests (or adding needs to existing ones)."""

    @property
    def name(self) -> str:
        return "create_feature_request"

    @property
    def description(self) -> str:
        return (
            "Create a new feature request or add a customer need to an existing one. "
            "Always search first with search_feature_requests to avoid duplicates. "
            "If a matching request exists, pass its ID as existing_issue_id to add "
            "the user's need to it instead of creating a duplicate."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "title": {
                    "type": "string",
                    "description": "Title for the feature request.",
                },
                "description": {
                    "type": "string",
                    "description": "Detailed description of what the user wants and why.",
                },
                "existing_issue_id": {
                    "type": "string",
                    "description": (
                        "If adding a need to an existing feature request, "
                        "provide its Linear issue ID (from search results). "
                        "Omit to create a new feature request."
                    ),
                },
            },
            "required": ["title", "description"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _find_or_create_customer(
        self, client: LinearClient, user_id: str, name: str
    ) -> dict:
        """Find existing customer by user_id or create a new one via upsert.

        Args:
            client: Linear API client.
            user_id: Stable external ID used to deduplicate customers.
            name: Human-readable display name (e.g. the user's email).
        """
        data = await client.mutate(
            CUSTOMER_UPSERT_MUTATION,
            {
                "input": {
                    "name": name,
                    "externalId": user_id,
                },
            },
        )
        result = data.get("customerUpsert", {})
        if not result.get("success"):
            raise RuntimeError(f"Failed to upsert customer: {data}")
        return result["customer"]

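# --- Illustrative sketch (not part of the original file) -----------------------
# The customerUpsert call above is keyed on externalId (the platform user_id),
# so repeated calls for the same user resolve to the same Linear customer. A
# minimal in-memory model of that idempotency, purely for illustration:
_customers: dict[str, dict] = {}


def upsert_customer(external_id: str, name: str) -> dict:
    customer = _customers.setdefault(
        external_id, {"id": f"cust-{len(_customers) + 1}", "externalIds": [external_id]}
    )
    customer["name"] = name  # an upsert refreshes the display name
    return customer


if __name__ == "__main__":
    first = upsert_customer("user-123", "user@example.com")
    second = upsert_customer("user-123", "user@example.com")
    assert first["id"] == second["id"]  # same customer, no duplicate
    print(first)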
    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        title = kwargs.get("title", "").strip()
        description = kwargs.get("description", "").strip()
        existing_issue_id = kwargs.get("existing_issue_id")
        session_id = session.session_id if session else None

        if not title or not description:
            return ErrorResponse(
                message="Both title and description are required.",
                error="Missing required parameters",
                session_id=session_id,
            )

        if not user_id:
            return ErrorResponse(
                message="Authentication required to create feature requests.",
                error="Missing user_id",
                session_id=session_id,
            )

        try:
            client, project_id, team_id = _get_linear_config()
        except Exception as e:
            logger.exception("Failed to initialize Linear client")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Resolve a human-readable name (email) for the Linear customer record.
        # Fall back to user_id if the lookup fails or returns None.
        try:
            customer_display_name = await get_user_email_by_id(user_id) or user_id
        except Exception:
            customer_display_name = user_id

        # Step 1: Find or create customer for this user
        try:
            customer = await self._find_or_create_customer(
                client, user_id, customer_display_name
            )
            customer_id = customer["id"]
            customer_name = customer["name"]
        except Exception as e:
            logger.exception("Failed to upsert customer in Linear")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Step 2: Create or reuse issue
        issue_id: str | None = None
        issue_identifier: str | None = None
        if existing_issue_id:
            # Add need to existing issue - we still need the issue details for response
            is_new_issue = False
            issue_id = existing_issue_id
        else:
            # Create new issue in the feature requests project
            try:
                data = await client.mutate(
                    ISSUE_CREATE_MUTATION,
                    {
                        "input": {
                            "title": title,
                            "description": description,
                            "teamId": team_id,
                            "projectId": project_id,
                        },
                    },
                )
                result = data.get("issueCreate", {})
                if not result.get("success"):
                    return ErrorResponse(
                        message="Failed to create feature request issue.",
                        error=str(data),
                        session_id=session_id,
                    )
                issue = result["issue"]
                issue_id = issue["id"]
                issue_identifier = issue.get("identifier")
            except Exception as e:
                logger.exception("Failed to create feature request issue")
                return ErrorResponse(
                    message="Failed to create feature request.",
                    error=str(e),
                    session_id=session_id,
                )
            is_new_issue = True

        # Step 3: Create customer need on the issue
        try:
            data = await client.mutate(
                CUSTOMER_NEED_CREATE_MUTATION,
                {
                    "input": {
                        "customerId": customer_id,
                        "issueId": issue_id,
                        "body": description,
                        "priority": 0,
                    },
                },
            )
            need_result = data.get("customerNeedCreate", {})
            if not need_result.get("success"):
                orphaned = (
                    {"issue_id": issue_id, "issue_identifier": issue_identifier}
                    if is_new_issue
                    else None
                )
                return ErrorResponse(
                    message="Failed to attach customer need to the feature request.",
                    error=str(data),
                    details=orphaned,
                    session_id=session_id,
                )
            need = need_result["need"]
            issue_info = need["issue"]
        except Exception as e:
            logger.exception("Failed to create customer need")
            orphaned = (
                {"issue_id": issue_id, "issue_identifier": issue_identifier}
                if is_new_issue
                else None
            )
            return ErrorResponse(
                message="Failed to attach customer need to the feature request.",
                error=str(e),
                details=orphaned,
                session_id=session_id,
            )

        return FeatureRequestCreatedResponse(
            message=(
                f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'}: "
                f"{issue_info['title']}."
            ),
            issue_id=issue_info["id"],
            issue_identifier=issue_info["identifier"],
            issue_title=issue_info["title"],
            issue_url=issue_info.get("url", ""),
            is_new_issue=is_new_issue,
            customer_name=customer_name,
            session_id=session_id,
        )
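# --- Illustrative sketch (not part of the original file) -----------------------
# The _execute method above runs three Linear mutations in order:
#   1. customerUpsert      -> customer_id
#   2. issueCreate         -> issue_id   (skipped when existing_issue_id is given)
#   3. customerNeedCreate  -> links the customer's need to the issue
# A condensed, self-contained version of that control flow with stubbed calls;
# all function names below are illustrative stand-ins, not repository APIs.
import asyncio


async def upsert_customer_stub(user_id: str) -> str:
    return f"cust-for-{user_id}"


async def create_issue_stub(title: str) -> str:
    return f"issue-for-{title}"


async def attach_need_stub(customer_id: str, issue_id: str, body: str) -> dict:
    return {"customer_id": customer_id, "issue_id": issue_id, "body": body}


async def create_feature_request(
    user_id: str, title: str, description: str, existing_issue_id: str | None = None
) -> dict:
    customer_id = await upsert_customer_stub(user_id)
    if existing_issue_id:
        issue_id, is_new_issue = existing_issue_id, False
    else:
        issue_id, is_new_issue = await create_issue_stub(title), True
    need = await attach_need_stub(customer_id, issue_id, description)
    return {"is_new_issue": is_new_issue, **need}


if __name__ == "__main__":
    print(asyncio.run(create_feature_request("user-123", "Dark mode", "Please add it")))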
@@ -1,615 +0,0 @@
|
||||
"""Tests for SearchFeatureRequestsTool and CreateFeatureRequestTool."""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.api.features.chat.tools.feature_requests import (
|
||||
CreateFeatureRequestTool,
|
||||
SearchFeatureRequestsTool,
|
||||
)
|
||||
from backend.api.features.chat.tools.models import (
|
||||
ErrorResponse,
|
||||
FeatureRequestCreatedResponse,
|
||||
FeatureRequestSearchResponse,
|
||||
NoResultsResponse,
|
||||
)
|
||||
|
||||
from ._test_data import make_session
|
||||
|
||||
_TEST_USER_ID = "test-user-feature-requests"
|
||||
_TEST_USER_EMAIL = "testuser@example.com"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
_FAKE_PROJECT_ID = "test-project-id"
|
||||
_FAKE_TEAM_ID = "test-team-id"
|
||||
|
||||
|
||||
def _mock_linear_config(*, query_return=None, mutate_return=None):
|
||||
"""Return a patched _get_linear_config that yields a mock LinearClient."""
|
||||
client = AsyncMock()
|
||||
if query_return is not None:
|
||||
client.query.return_value = query_return
|
||||
if mutate_return is not None:
|
||||
client.mutate.return_value = mutate_return
|
||||
return (
|
||||
patch(
|
||||
"backend.api.features.chat.tools.feature_requests._get_linear_config",
|
||||
return_value=(client, _FAKE_PROJECT_ID, _FAKE_TEAM_ID),
|
||||
),
|
||||
client,
|
||||
)
|
||||
|
||||
|
||||
def _search_response(nodes: list[dict]) -> dict:
|
||||
return {"searchIssues": {"nodes": nodes}}
|
||||
|
||||
|
||||
def _customer_upsert_response(
|
||||
customer_id: str = "cust-1", name: str = _TEST_USER_EMAIL, success: bool = True
|
||||
) -> dict:
|
||||
return {
|
||||
"customerUpsert": {
|
||||
"success": success,
|
||||
"customer": {"id": customer_id, "name": name, "externalIds": [name]},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _issue_create_response(
|
||||
issue_id: str = "issue-1",
|
||||
identifier: str = "FR-1",
|
||||
title: str = "New Feature",
|
||||
success: bool = True,
|
||||
) -> dict:
|
||||
return {
|
||||
"issueCreate": {
|
||||
"success": success,
|
||||
"issue": {
|
||||
"id": issue_id,
|
||||
"identifier": identifier,
|
||||
"title": title,
|
||||
"url": f"https://linear.app/issue/{identifier}",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _need_create_response(
|
||||
need_id: str = "need-1",
|
||||
issue_id: str = "issue-1",
|
||||
identifier: str = "FR-1",
|
||||
title: str = "New Feature",
|
||||
success: bool = True,
|
||||
) -> dict:
|
||||
return {
|
||||
"customerNeedCreate": {
|
||||
"success": success,
|
||||
"need": {
|
||||
"id": need_id,
|
||||
"body": "description",
|
||||
"customer": {"id": "cust-1", "name": _TEST_USER_EMAIL},
|
||||
"issue": {
|
||||
"id": issue_id,
|
||||
"identifier": identifier,
|
||||
"title": title,
|
||||
"url": f"https://linear.app/issue/{identifier}",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# ===========================================================================
|
||||
# SearchFeatureRequestsTool
|
||||
# ===========================================================================
|
||||
|
||||
|
||||
class TestSearchFeatureRequestsTool:
|
||||
"""Tests for SearchFeatureRequestsTool._execute."""
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_successful_search(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
nodes = [
|
||||
{
|
||||
"id": "id-1",
|
||||
"identifier": "FR-1",
|
||||
"title": "Dark mode",
|
||||
"description": "Add dark mode support",
|
||||
},
|
||||
{
|
||||
"id": "id-2",
|
||||
"identifier": "FR-2",
|
||||
"title": "Dark theme",
|
||||
"description": None,
|
||||
},
|
||||
]
|
||||
patcher, _ = _mock_linear_config(query_return=_search_response(nodes))
|
||||
with patcher:
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="dark mode"
|
||||
)
|
||||
|
||||
assert isinstance(resp, FeatureRequestSearchResponse)
|
||||
assert resp.count == 2
|
||||
assert resp.results[0].id == "id-1"
|
||||
assert resp.results[1].identifier == "FR-2"
|
||||
assert resp.query == "dark mode"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_no_results(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, _ = _mock_linear_config(query_return=_search_response([]))
|
||||
with patcher:
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="nonexistent"
|
||||
)
|
||||
|
||||
assert isinstance(resp, NoResultsResponse)
|
||||
assert "nonexistent" in resp.message
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_empty_query_returns_error(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(user_id=_TEST_USER_ID, session=session, query=" ")
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "query" in resp.error.lower()
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_missing_query_returns_error(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(user_id=_TEST_USER_ID, session=session)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_api_failure(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.query.side_effect = RuntimeError("Linear API down")
|
||||
with patcher:
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "Linear API down" in resp.error
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_malformed_node_returns_error(self):
|
||||
"""A node missing required keys should be caught by the try/except."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
# Node missing 'identifier' key
|
||||
bad_nodes = [{"id": "id-1", "title": "Missing identifier"}]
|
||||
patcher, _ = _mock_linear_config(query_return=_search_response(bad_nodes))
|
||||
with patcher:
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_linear_client_init_failure(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.feature_requests._get_linear_config",
|
||||
side_effect=RuntimeError("No API key"),
|
||||
):
|
||||
tool = SearchFeatureRequestsTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "No API key" in resp.error
|
||||
|
||||
|
||||
# ===========================================================================
|
||||
# CreateFeatureRequestTool
|
||||
# ===========================================================================
|
||||
|
||||
|
||||
class TestCreateFeatureRequestTool:
|
||||
"""Tests for CreateFeatureRequestTool._execute."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _patch_email_lookup(self):
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.feature_requests.get_user_email_by_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=_TEST_USER_EMAIL,
|
||||
):
|
||||
yield
|
||||
|
||||
# ---- Happy paths -------------------------------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_create_new_issue(self):
|
||||
"""Full happy path: upsert customer -> create issue -> attach need."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_issue_create_response(),
|
||||
_need_create_response(),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="New Feature",
|
||||
description="Please add this",
|
||||
)
|
||||
|
||||
assert isinstance(resp, FeatureRequestCreatedResponse)
|
||||
assert resp.is_new_issue is True
|
||||
assert resp.issue_identifier == "FR-1"
|
||||
assert resp.customer_name == _TEST_USER_EMAIL
|
||||
assert client.mutate.call_count == 3
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_need_to_existing_issue(self):
|
||||
"""When existing_issue_id is provided, skip issue creation."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_need_create_response(issue_id="existing-1", identifier="FR-99"),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Existing Feature",
|
||||
description="Me too",
|
||||
existing_issue_id="existing-1",
|
||||
)
|
||||
|
||||
assert isinstance(resp, FeatureRequestCreatedResponse)
|
||||
assert resp.is_new_issue is False
|
||||
assert resp.issue_id == "existing-1"
|
||||
# Only 2 mutations: customer upsert + need create (no issue create)
|
||||
assert client.mutate.call_count == 2
|
||||
|
||||
# ---- Validation errors -------------------------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_missing_title(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="",
|
||||
description="some desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "required" in resp.error.lower()
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_missing_description(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Some title",
|
||||
description="",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "required" in resp.error.lower()
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_missing_user_id(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=None,
|
||||
session=session,
|
||||
title="Some title",
|
||||
description="Some desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "user_id" in resp.error.lower()
|
||||
|
||||
# ---- Linear client init failure ----------------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_linear_client_init_failure(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.feature_requests._get_linear_config",
|
||||
side_effect=RuntimeError("No API key"),
|
||||
):
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "No API key" in resp.error
|
||||
|
||||
# ---- Customer upsert failures ------------------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_customer_upsert_api_error(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = RuntimeError("Customer API error")
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "Customer API error" in resp.error
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_customer_upsert_not_success(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.return_value = _customer_upsert_response(success=False)
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_customer_malformed_response(self):
|
||||
"""Customer dict missing 'id' key should be caught."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
# success=True but customer has no 'id'
|
||||
client.mutate.return_value = {
|
||||
"customerUpsert": {
|
||||
"success": True,
|
||||
"customer": {"name": _TEST_USER_ID},
|
||||
}
|
||||
}
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
|
||||
# ---- Issue creation failures -------------------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_issue_create_api_error(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
RuntimeError("Issue create failed"),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "Issue create failed" in resp.error
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_issue_create_not_success(self):
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_issue_create_response(success=False),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert "Failed to create feature request issue" in resp.message
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_issue_create_malformed_response(self):
|
||||
"""issueCreate success=True but missing 'issue' key."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
{"issueCreate": {"success": True}}, # no 'issue' key
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
|
||||
# ---- Customer need attachment failures ---------------------------------
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_need_create_api_error_new_issue(self):
|
||||
"""Need creation fails after new issue was created -> orphaned issue info."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_issue_create_response(issue_id="orphan-1", identifier="FR-10"),
|
||||
RuntimeError("Need attach failed"),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.error is not None
|
||||
assert "Need attach failed" in resp.error
|
||||
assert resp.details is not None
|
||||
assert resp.details["issue_id"] == "orphan-1"
|
||||
assert resp.details["issue_identifier"] == "FR-10"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_need_create_api_error_existing_issue(self):
|
||||
"""Need creation fails on existing issue -> no orphaned info."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
RuntimeError("Need attach failed"),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
existing_issue_id="existing-1",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.details is None
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_need_create_not_success_includes_orphaned_info(self):
|
||||
"""customerNeedCreate returns success=False -> includes orphaned issue."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_issue_create_response(issue_id="orphan-2", identifier="FR-20"),
|
||||
_need_create_response(success=False),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.details is not None
|
||||
assert resp.details["issue_id"] == "orphan-2"
|
||||
assert resp.details["issue_identifier"] == "FR-20"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_need_create_not_success_existing_issue_no_details(self):
|
||||
"""customerNeedCreate fails on existing issue -> no orphaned info."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_need_create_response(success=False),
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
existing_issue_id="existing-1",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.details is None
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_need_create_malformed_response(self):
|
||||
"""need_result missing 'need' key after success=True."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
patcher, client = _mock_linear_config()
|
||||
client.mutate.side_effect = [
|
||||
_customer_upsert_response(),
|
||||
_issue_create_response(),
|
||||
{"customerNeedCreate": {"success": True}}, # no 'need' key
|
||||
]
|
||||
|
||||
with patcher:
|
||||
tool = CreateFeatureRequestTool()
|
||||
resp = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
title="Title",
|
||||
description="Desc",
|
||||
)
|
||||
|
||||
assert isinstance(resp, ErrorResponse)
|
||||
assert resp.details is not None
|
||||
assert resp.details["issue_id"] == "issue-1"
|
||||
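# --- Illustrative sketch (not part of the original file) -----------------------
# The pattern used throughout the tests above: patch the config helper so the
# tool receives an AsyncMock client, then script the client's responses with
# side_effect. The example below is a generic, self-contained demonstration of
# the same technique, not the actual test fixtures.
import asyncio
from unittest.mock import AsyncMock


async def call_with_scripted_client() -> list:
    client = AsyncMock()
    client.mutate.side_effect = [
        {"customerUpsert": {"success": True, "customer": {"id": "cust-1"}}},
        {"issueCreate": {"success": True, "issue": {"id": "issue-1"}}},
    ]
    # Each await pops the next scripted response, mirroring the multi-step flow.
    first = await client.mutate("CUSTOMER_UPSERT_MUTATION", {})
    second = await client.mutate("ISSUE_CREATE_MUTATION", {})
    return [first, second]


if __name__ == "__main__":
    print(asyncio.run(call_with_scripted_client()))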
@@ -7,6 +7,7 @@ from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
|
||||
from backend.api.features.chat.tools.models import (
|
||||
BlockInfoSummary,
|
||||
BlockInputFieldInfo,
|
||||
BlockListResponse,
|
||||
ErrorResponse,
|
||||
NoResultsResponse,
|
||||
@@ -54,8 +55,7 @@ class FindBlockTool(BaseTool):
|
||||
"Blocks are reusable components that perform specific tasks like "
|
||||
"sending emails, making API calls, processing text, etc. "
|
||||
"IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
|
||||
"The response includes each block's id, name, and description. "
|
||||
"Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
|
||||
"The response includes each block's id, required_inputs, and input_schema."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -124,7 +124,7 @@ class FindBlockTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Enrich results with block information
|
||||
# Enrich results with full block information
|
||||
blocks: list[BlockInfoSummary] = []
|
||||
for result in results:
|
||||
block_id = result["content_id"]
|
||||
@@ -141,11 +141,65 @@ class FindBlockTool(BaseTool):
|
||||
):
|
||||
continue
|
||||
|
||||
# Get input/output schemas
|
||||
input_schema = {}
|
||||
output_schema = {}
|
||||
try:
|
||||
input_schema = block.input_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to generate input schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
try:
|
||||
output_schema = block.output_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to generate output schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
|
||||
# Get categories from block instance
|
||||
categories = []
|
||||
if hasattr(block, "categories") and block.categories:
|
||||
categories = [cat.value for cat in block.categories]
|
||||
|
||||
# Extract required inputs for easier use
|
||||
required_inputs: list[BlockInputFieldInfo] = []
|
||||
if input_schema:
|
||||
properties = input_schema.get("properties", {})
|
||||
required_fields = set(input_schema.get("required", []))
|
||||
# Get credential field names to exclude from required inputs
|
||||
credentials_fields = set(
|
||||
block.input_schema.get_credentials_fields().keys()
|
||||
)
|
||||
|
||||
for field_name, field_schema in properties.items():
|
||||
# Skip credential fields - they're handled separately
|
||||
if field_name in credentials_fields:
|
||||
continue
|
||||
|
||||
required_inputs.append(
|
||||
BlockInputFieldInfo(
|
||||
name=field_name,
|
||||
type=field_schema.get("type", "string"),
|
||||
description=field_schema.get("description", ""),
|
||||
required=field_name in required_fields,
|
||||
default=field_schema.get("default"),
|
||||
)
|
||||
)
|
||||
|
||||
blocks.append(
|
||||
BlockInfoSummary(
|
||||
id=block_id,
|
||||
name=block.name,
|
||||
description=block.description or "",
|
||||
categories=categories,
|
||||
input_schema=input_schema,
|
||||
output_schema=output_schema,
|
||||
required_inputs=required_inputs,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -174,7 +228,8 @@ class FindBlockTool(BaseTool):
|
||||
return BlockListResponse(
|
||||
message=(
|
||||
f"Found {len(blocks)} block(s) matching '{query}'. "
|
||||
"To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
|
||||
"To execute a block, use run_block with the block's 'id' field "
|
||||
"and provide 'input_data' matching the block's input_schema."
|
||||
),
|
||||
blocks=blocks,
|
||||
count=len(blocks),
|
||||
|
||||
@@ -18,13 +18,7 @@ _TEST_USER_ID = "test-user-find-block"
|
||||
|
||||
|
||||
def make_mock_block(
|
||||
block_id: str,
|
||||
name: str,
|
||||
block_type: BlockType,
|
||||
disabled: bool = False,
|
||||
input_schema: dict | None = None,
|
||||
output_schema: dict | None = None,
|
||||
credentials_fields: dict | None = None,
|
||||
block_id: str, name: str, block_type: BlockType, disabled: bool = False
|
||||
):
|
||||
"""Create a mock block for testing."""
|
||||
mock = MagicMock()
|
||||
@@ -34,13 +28,10 @@ def make_mock_block(
|
||||
mock.block_type = block_type
|
||||
mock.disabled = disabled
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = input_schema or {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
|
||||
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
|
||||
mock.input_schema.get_credentials_fields.return_value = {}
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = output_schema or {}
|
||||
mock.output_schema.jsonschema.return_value = {}
|
||||
mock.categories = []
|
||||
return mock
|
||||
|
||||
@@ -146,241 +137,3 @@ class TestFindBlockFiltering:
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert len(response.blocks) == 1
|
||||
assert response.blocks[0].id == "normal-block-id"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_response_size_average_chars_per_block(self):
|
||||
"""Measure average chars per block in the serialized response."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
# Realistic block definitions modeled after real blocks
|
||||
block_defs = [
|
||||
{
|
||||
"id": "http-block-id",
|
||||
"name": "Send Web Request",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The URL to send the request to",
|
||||
},
|
||||
"method": {
|
||||
"type": "string",
|
||||
"description": "The HTTP method to use",
|
||||
},
|
||||
"headers": {
|
||||
"type": "object",
|
||||
"description": "Headers to include in the request",
|
||||
},
|
||||
"json_format": {
|
||||
"type": "boolean",
|
||||
"description": "If true, send the body as JSON",
|
||||
},
|
||||
"body": {
|
||||
"type": "object",
|
||||
"description": "Form/JSON body payload",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "HTTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["url", "method"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "object",
|
||||
"description": "The response from the server",
|
||||
},
|
||||
"client_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 4xx status codes",
|
||||
},
|
||||
"server_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 5xx status codes",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Errors for all other exceptions",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "email-block-id",
|
||||
"name": "Send Email",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"to_email": {
|
||||
"type": "string",
|
||||
"description": "Recipient email address",
|
||||
},
|
||||
"subject": {
|
||||
"type": "string",
|
||||
"description": "Subject of the email",
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "Body of the email",
|
||||
},
|
||||
"config": {
|
||||
"type": "object",
|
||||
"description": "SMTP Config",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "SMTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["to_email", "subject", "body", "credentials"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Status of the email sending operation",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if sending failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "claude-code-block-id",
|
||||
"name": "Claude Code",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"e2b_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for E2B platform",
|
||||
},
|
||||
"anthropic_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for Anthropic",
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "Task or instruction for Claude Code",
|
||||
},
|
||||
"timeout": {
|
||||
"type": "integer",
|
||||
"description": "Sandbox timeout in seconds",
|
||||
},
|
||||
"setup_commands": {
|
||||
"type": "array",
|
||||
"description": "Shell commands to run before execution",
|
||||
},
|
||||
"working_directory": {
|
||||
"type": "string",
|
||||
"description": "Working directory for Claude Code",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID to resume a conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "Sandbox ID to reconnect to",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Previous conversation history",
|
||||
},
|
||||
"dispose_sandbox": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to dispose sandbox after execution",
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"e2b_credentials",
|
||||
"anthropic_credentials",
|
||||
"prompt",
|
||||
],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "string",
|
||||
"description": "Output from Claude Code execution",
|
||||
},
|
||||
"files": {
|
||||
"type": "array",
|
||||
"description": "Files created/modified by Claude Code",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Full conversation history",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID for this conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "ID of the sandbox instance",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if execution failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {
|
||||
"e2b_credentials": True,
|
||||
"anthropic_credentials": True,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
search_results = [
|
||||
{"content_id": d["id"], "score": 0.9 - i * 0.1}
|
||||
for i, d in enumerate(block_defs)
|
||||
]
|
||||
mock_blocks = {
|
||||
d["id"]: make_mock_block(
|
||||
block_id=d["id"],
|
||||
name=d["name"],
|
||||
block_type=BlockType.STANDARD,
|
||||
input_schema=d["input_schema"],
|
||||
output_schema=d["output_schema"],
|
||||
credentials_fields=d["credentials_fields"],
|
||||
)
|
||||
for d in block_defs
|
||||
}
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(search_results, len(search_results)),
|
||||
), patch(
|
||||
"backend.api.features.chat.tools.find_block.get_block",
|
||||
side_effect=lambda bid: mock_blocks.get(bid),
|
||||
):
|
||||
tool = FindBlockTool()
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert response.count == len(block_defs)
|
||||
|
||||
total_chars = len(response.model_dump_json())
|
||||
avg_chars = total_chars // response.count
|
||||
|
||||
# Print for visibility in test output
|
||||
print(f"\nTotal response size: {total_chars} chars")
|
||||
print(f"Number of blocks: {response.count}")
|
||||
print(f"Average chars per block: {avg_chars}")
|
||||
|
||||
# The old response was ~90K for 10 blocks (~9K per block).
|
||||
# Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
|
||||
# Now with only id/name/description, we expect ~300 chars per block.
|
||||
assert avg_chars < 500, (
|
||||
f"Average chars per block ({avg_chars}) exceeds 500. "
|
||||
f"Total response: {total_chars} chars for {response.count} blocks."
|
||||
)
|
||||
|
||||
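# --- Illustrative sketch (not part of the original file) -----------------------
# The size check above keeps tool responses small by budgeting serialized
# characters per block (len(model_dump_json()) // count must stay under 500).
# The same measurement in isolation, with throwaway pydantic models purely for
# illustration:
from pydantic import BaseModel


class BlockSummarySketch(BaseModel):
    id: str
    name: str
    description: str


class BlockListSketch(BaseModel):
    blocks: list[BlockSummarySketch]
    count: int


if __name__ == "__main__":
    listing = BlockListSketch(
        blocks=[
            BlockSummarySketch(id=f"block-{i}", name=f"Block {i}", description="does a thing")
            for i in range(10)
        ],
        count=10,
    )
    total_chars = len(listing.model_dump_json())
    print(total_chars, total_chars // listing.count)  # budget: < 500 chars per block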
@@ -25,7 +25,6 @@ class ResponseType(str, Enum):
    AGENT_SAVED = "agent_saved"
    CLARIFICATION_NEEDED = "clarification_needed"
    BLOCK_LIST = "block_list"
    BLOCK_DETAILS = "block_details"
    BLOCK_OUTPUT = "block_output"
    DOC_SEARCH_RESULTS = "doc_search_results"
    DOC_PAGE = "doc_page"
@@ -41,9 +40,6 @@ class ResponseType(str, Enum):
    OPERATION_IN_PROGRESS = "operation_in_progress"
    # Input validation
    INPUT_VALIDATION_ERROR = "input_validation_error"
    # Feature request types
    FEATURE_REQUEST_SEARCH = "feature_request_search"
    FEATURE_REQUEST_CREATED = "feature_request_created"


# Base response model
@@ -338,6 +334,13 @@ class BlockInfoSummary(BaseModel):
    id: str
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
    )


class BlockListResponse(ToolResponseBase):
@@ -347,25 +350,10 @@ class BlockListResponse(ToolResponseBase):
    blocks: list[BlockInfoSummary]
    count: int
    query: str


class BlockDetails(BaseModel):
    """Detailed block information."""

    id: str
    name: str
    description: str
    inputs: dict[str, Any] = {}
    outputs: dict[str, Any] = {}
    credentials: list[CredentialsMetaInput] = []


class BlockDetailsResponse(ToolResponseBase):
    """Response for block details (first run_block attempt)."""

    type: ResponseType = ResponseType.BLOCK_DETAILS
    block: BlockDetails
    user_authenticated: bool = False
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
    )


class BlockOutputResponse(ToolResponseBase):
@@ -433,34 +421,3 @@ class AsyncProcessingResponse(ToolResponseBase):
    status: str = "accepted"  # Must be "accepted" for detection
    operation_id: str | None = None
    task_id: str | None = None


# Feature request models
class FeatureRequestInfo(BaseModel):
    """Information about a feature request issue."""

    id: str
    identifier: str
    title: str
    description: str | None = None


class FeatureRequestSearchResponse(ToolResponseBase):
    """Response for search_feature_requests tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH
    results: list[FeatureRequestInfo]
    count: int
    query: str


class FeatureRequestCreatedResponse(ToolResponseBase):
    """Response for create_feature_request tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED
    issue_id: str
    issue_identifier: str
    issue_title: str
    issue_url: str
    is_new_issue: bool  # False if added to existing
    customer_name: str

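# --- Illustrative sketch (not part of the original file) -----------------------
# Each tool response above carries a ResponseType value in its `type` field so
# the frontend can dispatch on it after JSON serialization. A minimal,
# self-contained version of that pattern (the model and enum names below are
# illustrative stand-ins, not the repository classes):
from enum import Enum

from pydantic import BaseModel


class ResponseTypeSketch(str, Enum):
    FEATURE_REQUEST_CREATED = "feature_request_created"


class FeatureRequestCreatedSketch(BaseModel):
    type: ResponseTypeSketch = ResponseTypeSketch.FEATURE_REQUEST_CREATED
    issue_identifier: str
    is_new_issue: bool


if __name__ == "__main__":
    payload = FeatureRequestCreatedSketch(issue_identifier="FR-1", is_new_issue=True)
    print(payload.model_dump_json())  # {"type":"feature_request_created",...}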
@@ -23,11 +23,8 @@ from backend.util.exceptions import BlockError
|
||||
from .base import BaseTool
|
||||
from .helpers import get_inputs_from_schema
|
||||
from .models import (
|
||||
BlockDetails,
|
||||
BlockDetailsResponse,
|
||||
BlockOutputResponse,
|
||||
ErrorResponse,
|
||||
InputValidationErrorResponse,
|
||||
SetupInfo,
|
||||
SetupRequirementsResponse,
|
||||
ToolResponseBase,
|
||||
@@ -54,8 +51,8 @@ class RunBlockTool(BaseTool):
|
||||
"Execute a specific block with the provided input data. "
|
||||
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
|
||||
"do NOT guess or make up block IDs. "
|
||||
"On first attempt (without input_data), returns detailed schema showing "
|
||||
"required inputs and outputs. Then call again with proper input_data to execute."
|
||||
"Use the 'id' from find_block results and provide input_data "
|
||||
"matching the block's required_inputs."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -70,19 +67,11 @@ class RunBlockTool(BaseTool):
|
||||
"NEVER guess this - always get it from find_block first."
|
||||
),
|
||||
},
|
||||
"block_name": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The block's human-readable name from find_block results. "
|
||||
"Used for display purposes in the UI."
|
||||
),
|
||||
},
|
||||
"input_data": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Input values for the block. "
|
||||
"First call with empty {} to see the block's schema, "
|
||||
"then call again with proper values to execute."
|
||||
"Input values for the block. Use the 'required_inputs' field "
|
||||
"from find_block to see what fields are needed."
|
||||
),
|
||||
},
|
||||
},
|
||||
@@ -167,34 +156,6 @@ class RunBlockTool(BaseTool):
|
||||
await self._resolve_block_credentials(user_id, block, input_data)
|
||||
)
|
||||
|
||||
# Get block schemas for details/validation
|
||||
try:
|
||||
input_schema: dict[str, Any] = block.input_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Failed to generate input schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=f"Block '{block.name}' has an invalid input schema",
|
||||
error=str(e),
|
||||
session_id=session_id,
|
||||
)
|
||||
try:
|
||||
output_schema: dict[str, Any] = block.output_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Failed to generate output schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=f"Block '{block.name}' has an invalid output schema",
|
||||
error=str(e),
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if missing_credentials:
|
||||
# Return setup requirements response with missing credentials
|
||||
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
||||
@@ -227,53 +188,6 @@ class RunBlockTool(BaseTool):
|
||||
graph_version=None,
|
||||
)
|
||||
|
||||
# Check if this is a first attempt (required inputs missing)
|
||||
# Return block details so user can see what inputs are needed
|
||||
credentials_fields = set(block.input_schema.get_credentials_fields().keys())
|
||||
required_keys = set(input_schema.get("required", []))
|
||||
required_non_credential_keys = required_keys - credentials_fields
|
||||
provided_input_keys = set(input_data.keys()) - credentials_fields
|
||||
|
||||
# Check for unknown input fields
|
||||
valid_fields = (
|
||||
set(input_schema.get("properties", {}).keys()) - credentials_fields
|
||||
)
|
||||
unrecognized_fields = provided_input_keys - valid_fields
|
||||
if unrecognized_fields:
|
||||
return InputValidationErrorResponse(
|
||||
message=(
|
||||
f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
|
||||
f"Block was not executed. Please use the correct field names from the schema."
|
||||
),
|
||||
session_id=session_id,
|
||||
unrecognized_fields=sorted(unrecognized_fields),
|
||||
inputs=input_schema,
|
||||
)
|
||||
|
||||
# Show details when not all required non-credential inputs are provided
|
||||
if not (required_non_credential_keys <= provided_input_keys):
|
||||
# Get credentials info for the response
|
||||
credentials_meta = []
|
||||
for field_name, cred_meta in matched_credentials.items():
|
||||
credentials_meta.append(cred_meta)
|
||||
|
||||
return BlockDetailsResponse(
|
||||
message=(
|
||||
f"Block '{block.name}' details. "
|
||||
"Provide input_data matching the inputs schema to execute the block."
|
||||
),
|
||||
session_id=session_id,
|
||||
block=BlockDetails(
|
||||
id=block_id,
|
||||
name=block.name,
|
||||
description=block.description or "",
|
||||
inputs=input_schema,
|
||||
outputs=output_schema,
|
||||
credentials=credentials_meta,
|
||||
),
|
||||
user_authenticated=True,
|
||||
)
|
||||
|
||||
try:
|
||||
# Get or create user's workspace for CoPilot file operations
|
||||
workspace = await get_or_create_workspace(user_id)
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
"""Tests for block execution guards and input validation in RunBlockTool."""
|
||||
"""Tests for block execution guards in RunBlockTool."""
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.api.features.chat.tools.models import (
|
||||
BlockDetailsResponse,
|
||||
BlockOutputResponse,
|
||||
ErrorResponse,
|
||||
InputValidationErrorResponse,
|
||||
)
|
||||
from backend.api.features.chat.tools.models import ErrorResponse
|
||||
from backend.api.features.chat.tools.run_block import RunBlockTool
|
||||
from backend.blocks._base import BlockType
|
||||
|
||||
@@ -33,39 +28,6 @@ def make_mock_block(
|
||||
return mock
|
||||
|
||||
|
||||
def make_mock_block_with_schema(
|
||||
block_id: str,
|
||||
name: str,
|
||||
input_properties: dict,
|
||||
required_fields: list[str],
|
||||
output_properties: dict | None = None,
|
||||
):
|
||||
"""Create a mock block with a defined input/output schema for validation tests."""
|
||||
mock = MagicMock()
|
||||
mock.id = block_id
|
||||
mock.name = name
|
||||
mock.block_type = BlockType.STANDARD
|
||||
mock.disabled = False
|
||||
mock.description = f"Test block: {name}"
|
||||
|
||||
input_schema = {
|
||||
"properties": input_properties,
|
||||
"required": required_fields,
|
||||
}
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = input_schema
|
||||
mock.input_schema.get_credentials_fields_info.return_value = {}
|
||||
mock.input_schema.get_credentials_fields.return_value = {}
|
||||
|
||||
output_schema = {
|
||||
"properties": output_properties or {"result": {"type": "string"}},
|
||||
}
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = output_schema
|
||||
|
||||
return mock
|
||||
|
||||
|
||||
class TestRunBlockFiltering:
|
||||
"""Tests for block execution guards in RunBlockTool."""
|
||||
|
||||
@@ -142,221 +104,3 @@ class TestRunBlockFiltering:
|
||||
# (may be other errors like missing credentials, but not the exclusion guard)
|
||||
if isinstance(response, ErrorResponse):
|
||||
assert "cannot be run directly in CoPilot" not in response.message
|
||||
|
||||
|
||||
class TestRunBlockInputValidation:
|
||||
"""Tests for input field validation in RunBlockTool.
|
||||
|
||||
run_block rejects unknown input field names with InputValidationErrorResponse,
|
||||
preventing silent failures where incorrect keys would be ignored and the block
|
||||
would execute with default values instead of the caller's intended values.
|
||||
"""
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_unknown_input_fields_are_rejected(self):
|
||||
"""run_block rejects unknown input fields instead of silently ignoring them.
|
||||
|
||||
Scenario: The AI Text Generator block has a field called 'model' (for LLM model
|
||||
selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
|
||||
instead. The block should reject the request and return the valid schema.
|
||||
"""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
mock_block = make_mock_block_with_schema(
|
||||
block_id="ai-text-gen-id",
|
||||
name="AI Text Generator",
|
||||
input_properties={
|
||||
"prompt": {"type": "string", "description": "The prompt to send"},
|
||||
"model": {
|
||||
"type": "string",
|
||||
"description": "The LLM model to use",
|
||||
"default": "gpt-4o-mini",
|
||||
},
|
||||
"sys_prompt": {
|
||||
"type": "string",
|
||||
"description": "System prompt",
|
||||
"default": "",
|
||||
},
|
||||
},
|
||||
required_fields=["prompt"],
|
||||
output_properties={"response": {"type": "string"}},
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
# Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"prompt": "Write a haiku about coding",
|
||||
"LLM_Model": "claude-opus-4-6", # WRONG KEY - should be 'model'
|
||||
},
|
||||
)
|
||||
|
||||
assert isinstance(response, InputValidationErrorResponse)
|
||||
assert "LLM_Model" in response.unrecognized_fields
|
||||
assert "Block was not executed" in response.message
|
||||
assert "inputs" in response.model_dump() # valid schema included
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_multiple_wrong_keys_are_all_reported(self):
|
||||
"""All unrecognized field names are reported in a single error response."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
mock_block = make_mock_block_with_schema(
|
||||
block_id="ai-text-gen-id",
|
||||
name="AI Text Generator",
|
||||
input_properties={
|
||||
"prompt": {"type": "string"},
|
||||
"model": {"type": "string", "default": "gpt-4o-mini"},
|
||||
"sys_prompt": {"type": "string", "default": ""},
|
||||
"retry": {"type": "integer", "default": 3},
|
||||
},
|
||||
required_fields=["prompt"],
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"prompt": "Hello", # correct
|
||||
"llm_model": "claude-opus-4-6", # WRONG - should be 'model'
|
||||
"system_prompt": "Be helpful", # WRONG - should be 'sys_prompt'
|
||||
"retries": 5, # WRONG - should be 'retry'
|
||||
},
|
||||
)
|
||||
|
||||
assert isinstance(response, InputValidationErrorResponse)
|
||||
assert set(response.unrecognized_fields) == {
|
||||
"llm_model",
|
||||
"system_prompt",
|
||||
"retries",
|
||||
}
|
||||
assert "Block was not executed" in response.message
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_unknown_fields_rejected_even_with_missing_required(self):
|
||||
"""Unknown fields are caught before the missing-required-fields check."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
mock_block = make_mock_block_with_schema(
|
||||
block_id="ai-text-gen-id",
|
||||
name="AI Text Generator",
|
||||
input_properties={
|
||||
"prompt": {"type": "string"},
|
||||
"model": {"type": "string", "default": "gpt-4o-mini"},
|
||||
},
|
||||
required_fields=["prompt"],
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
# 'prompt' is missing AND 'LLM_Model' is an unknown field
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"LLM_Model": "claude-opus-4-6", # wrong key, and 'prompt' is missing
|
||||
},
|
||||
)
|
||||
|
||||
# Unknown fields are caught first
|
||||
assert isinstance(response, InputValidationErrorResponse)
|
||||
assert "LLM_Model" in response.unrecognized_fields
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_correct_inputs_still_execute(self):
|
||||
"""Correct input field names pass validation and the block executes."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
mock_block = make_mock_block_with_schema(
|
||||
block_id="ai-text-gen-id",
|
||||
name="AI Text Generator",
|
||||
input_properties={
|
||||
"prompt": {"type": "string"},
|
||||
"model": {"type": "string", "default": "gpt-4o-mini"},
|
||||
},
|
||||
required_fields=["prompt"],
|
||||
)
|
||||
|
||||
async def mock_execute(input_data, **kwargs):
|
||||
yield "response", "Generated text"
|
||||
|
||||
mock_block.execute = mock_execute
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
),
|
||||
patch(
|
||||
"backend.api.features.chat.tools.run_block.get_or_create_workspace",
|
||||
new_callable=AsyncMock,
|
||||
return_value=MagicMock(id="test-workspace-id"),
|
||||
),
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"prompt": "Write a haiku",
|
||||
"model": "gpt-4o-mini", # correct field name
|
||||
},
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockOutputResponse)
|
||||
assert response.success is True
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_missing_required_fields_returns_details(self):
|
||||
"""Missing required fields returns BlockDetailsResponse with schema."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
mock_block = make_mock_block_with_schema(
|
||||
block_id="ai-text-gen-id",
|
||||
name="AI Text Generator",
|
||||
input_properties={
|
||||
"prompt": {"type": "string"},
|
||||
"model": {"type": "string", "default": "gpt-4o-mini"},
|
||||
},
|
||||
required_fields=["prompt"],
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock_block,
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
|
||||
# Only provide valid optional field, missing required 'prompt'
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="ai-text-gen-id",
|
||||
input_data={
|
||||
"model": "gpt-4o-mini", # valid but optional
|
||||
},
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockDetailsResponse)
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
"""Tests for BlockDetailsResponse in RunBlockTool."""
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.api.features.chat.tools.models import BlockDetailsResponse
|
||||
from backend.api.features.chat.tools.run_block import RunBlockTool
|
||||
from backend.blocks._base import BlockType
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
from ._test_data import make_session
|
||||
|
||||
_TEST_USER_ID = "test-user-run-block-details"
|
||||
|
||||
|
||||
def make_mock_block_with_inputs(
|
||||
block_id: str, name: str, description: str = "Test description"
|
||||
):
|
||||
"""Create a mock block with input/output schemas for testing."""
|
||||
mock = MagicMock()
|
||||
mock.id = block_id
|
||||
mock.name = name
|
||||
mock.description = description
|
||||
mock.block_type = BlockType.STANDARD
|
||||
mock.disabled = False
|
||||
|
||||
# Input schema with non-credential fields
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = {
|
||||
"properties": {
|
||||
"url": {"type": "string", "description": "URL to fetch"},
|
||||
"method": {"type": "string", "description": "HTTP method"},
|
||||
},
|
||||
"required": ["url"],
|
||||
}
|
||||
mock.input_schema.get_credentials_fields.return_value = {}
|
||||
mock.input_schema.get_credentials_fields_info.return_value = {}
|
||||
|
||||
# Output schema
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = {
|
||||
"properties": {
|
||||
"response": {"type": "object", "description": "HTTP response"},
|
||||
"error": {"type": "string", "description": "Error message"},
|
||||
}
|
||||
}
|
||||
|
||||
return mock
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_run_block_returns_details_when_no_input_provided():
|
||||
"""When run_block is called without input_data, it should return BlockDetailsResponse."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
# Create a block with inputs
|
||||
http_block = make_mock_block_with_inputs(
|
||||
"http-block-id", "HTTP Request", "Send HTTP requests"
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=http_block,
|
||||
):
|
||||
# Mock credentials check to return no missing credentials
|
||||
with patch.object(
|
||||
RunBlockTool,
|
||||
"_resolve_block_credentials",
|
||||
new_callable=AsyncMock,
|
||||
return_value=({}, []), # (matched_credentials, missing_credentials)
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="http-block-id",
|
||||
input_data={}, # Empty input data
|
||||
)
|
||||
|
||||
# Should return BlockDetailsResponse showing the schema
|
||||
assert isinstance(response, BlockDetailsResponse)
|
||||
assert response.block.id == "http-block-id"
|
||||
assert response.block.name == "HTTP Request"
|
||||
assert response.block.description == "Send HTTP requests"
|
||||
assert "url" in response.block.inputs["properties"]
|
||||
assert "method" in response.block.inputs["properties"]
|
||||
assert "response" in response.block.outputs["properties"]
|
||||
assert response.user_authenticated is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_run_block_returns_details_when_only_credentials_provided():
|
||||
"""When only credentials are provided (no actual input), should return details."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
# Create a block with both credential and non-credential inputs
|
||||
mock = MagicMock()
|
||||
mock.id = "api-block-id"
|
||||
mock.name = "API Call"
|
||||
mock.description = "Make API calls"
|
||||
mock.block_type = BlockType.STANDARD
|
||||
mock.disabled = False
|
||||
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = {
|
||||
"properties": {
|
||||
"credentials": {"type": "object", "description": "API credentials"},
|
||||
"endpoint": {"type": "string", "description": "API endpoint"},
|
||||
},
|
||||
"required": ["credentials", "endpoint"],
|
||||
}
|
||||
mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
|
||||
mock.input_schema.get_credentials_fields_info.return_value = {}
|
||||
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = {
|
||||
"properties": {"result": {"type": "object"}}
|
||||
}
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.run_block.get_block",
|
||||
return_value=mock,
|
||||
):
|
||||
with patch.object(
|
||||
RunBlockTool,
|
||||
"_resolve_block_credentials",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(
|
||||
{
|
||||
"credentials": CredentialsMetaInput(
|
||||
id="cred-id",
|
||||
provider=ProviderName("test_provider"),
|
||||
type="api_key",
|
||||
title="Test Credential",
|
||||
)
|
||||
},
|
||||
[],
|
||||
),
|
||||
):
|
||||
tool = RunBlockTool()
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID,
|
||||
session=session,
|
||||
block_id="api-block-id",
|
||||
input_data={"credentials": {"some": "cred"}}, # Only credential
|
||||
)
|
||||
|
||||
# Should return details because no non-credential inputs provided
|
||||
assert isinstance(response, BlockDetailsResponse)
|
||||
assert response.block.id == "api-block-id"
|
||||
assert response.block.name == "API Call"
|
||||
@@ -126,7 +126,6 @@ class PrintToConsoleBlock(Block):
|
||||
output_schema=PrintToConsoleBlock.Output,
|
||||
test_input={"text": "Hello, World!"},
|
||||
is_sensitive_action=True,
|
||||
disabled=True, # Disabled per Nick Tindle's request (OPEN-3000)
|
||||
test_output=[
|
||||
("output", "Hello, World!"),
|
||||
("status", "printed"),
|
||||
|
||||
@@ -662,17 +662,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
|
||||
mem0_api_key: str = Field(default="", description="Mem0 API key")
|
||||
elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")
|
||||
|
||||
linear_api_key: str = Field(
|
||||
default="", description="Linear API key for system-level operations"
|
||||
)
|
||||
linear_feature_request_project_id: str = Field(
|
||||
default="",
|
||||
description="Linear project ID where feature requests are tracked",
|
||||
)
|
||||
linear_feature_request_team_id: str = Field(
|
||||
default="",
|
||||
description="Linear team ID used when creating feature request issues",
|
||||
)
|
||||
linear_client_id: str = Field(default="", description="Linear client ID")
|
||||
linear_client_secret: str = Field(default="", description="Linear client secret")
|
||||
|
||||
|
||||
68
autogpt_platform/backend/poetry.lock
generated
68
autogpt_platform/backend/poetry.lock
generated
@@ -441,14 +441,14 @@ develop = true
|
||||
colorama = "^0.4.6"
|
||||
cryptography = "^46.0"
|
||||
expiringdict = "^1.2.2"
|
||||
fastapi = "^0.128.7"
|
||||
fastapi = "^0.128.0"
|
||||
google-cloud-logging = "^3.13.0"
|
||||
launchdarkly-server-sdk = "^9.15.0"
|
||||
launchdarkly-server-sdk = "^9.14.1"
|
||||
pydantic = "^2.12.5"
|
||||
pydantic-settings = "^2.12.0"
|
||||
pyjwt = {version = "^2.11.0", extras = ["crypto"]}
|
||||
redis = "^6.2.0"
|
||||
supabase = "^2.28.0"
|
||||
supabase = "^2.27.2"
|
||||
uvicorn = "^0.40.0"
|
||||
|
||||
[package.source]
|
||||
@@ -1382,14 +1382,14 @@ tzdata = "*"
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.128.7"
|
||||
version = "0.128.6"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
|
||||
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
|
||||
{file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
|
||||
{file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3117,14 +3117,14 @@ urllib3 = ">=1.26.0,<3"
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-server-sdk"
|
||||
version = "9.15.0"
|
||||
version = "9.14.1"
|
||||
description = "LaunchDarkly SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
|
||||
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
|
||||
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
|
||||
{file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
|
||||
{file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -6260,14 +6260,14 @@ all = ["numpy"]
|
||||
|
||||
[[package]]
|
||||
name = "realtime"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
|
||||
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
|
||||
{file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
|
||||
{file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
||||
|
||||
[[package]]
|
||||
name = "storage3"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Supabase Storage client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
|
||||
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
|
||||
{file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
|
||||
{file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}
|
||||
|
||||
[[package]]
|
||||
name = "supabase"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Supabase client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
|
||||
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
|
||||
{file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
|
||||
{file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.26,<0.29"
|
||||
postgrest = "2.28.0"
|
||||
realtime = "2.28.0"
|
||||
storage3 = "2.28.0"
|
||||
supabase-auth = "2.28.0"
|
||||
supabase-functions = "2.28.0"
|
||||
postgrest = "2.27.3"
|
||||
realtime = "2.27.3"
|
||||
storage3 = "2.27.3"
|
||||
supabase-auth = "2.27.3"
|
||||
supabase-functions = "2.27.3"
|
||||
yarl = ">=1.22.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase-auth"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Python Client Library for Supabase Auth"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
|
||||
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
|
||||
{file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
|
||||
{file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||
|
||||
[[package]]
|
||||
name = "supabase-functions"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Library for Supabase Functions"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
||||
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
||||
{file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
|
||||
{file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f"
|
||||
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
|
||||
|
||||
@@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
|
||||
sqlalchemy = "^2.0.40"
|
||||
strenum = "^0.4.9"
|
||||
stripe = "^11.5.0"
|
||||
supabase = "2.28.0"
|
||||
supabase = "2.27.3"
|
||||
tenacity = "^9.1.4"
|
||||
todoist-api-python = "^2.1.7"
|
||||
tweepy = "^4.16.0"
|
||||
|
||||
@@ -37,7 +37,7 @@ services:
|
||||
context: ../
|
||||
dockerfile: autogpt_platform/backend/Dockerfile
|
||||
target: migrate
|
||||
command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"]
|
||||
command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
|
||||
develop:
|
||||
watch:
|
||||
- path: ./
|
||||
@@ -56,7 +56,7 @@ services:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"prisma migrate status | grep -q 'No pending migrations' || exit 1",
|
||||
"poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1",
|
||||
]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
|
||||
@@ -15,10 +15,6 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
|
||||
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
|
||||
import {
|
||||
CreateFeatureRequestTool,
|
||||
SearchFeatureRequestsTool,
|
||||
} from "../../tools/FeatureRequests/FeatureRequests";
|
||||
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
|
||||
import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
|
||||
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
|
||||
@@ -258,20 +254,6 @@ export const ChatMessagesContainer = ({
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-search_feature_requests":
|
||||
return (
|
||||
<SearchFeatureRequestsTool
|
||||
key={`${message.id}-${i}`}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-create_feature_request":
|
||||
return (
|
||||
<CreateFeatureRequestTool
|
||||
key={`${message.id}-${i}`}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -14,10 +14,6 @@ import { Text } from "@/components/atoms/Text/Text";
|
||||
import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider";
|
||||
import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent";
|
||||
import { EditAgentTool } from "../tools/EditAgent/EditAgent";
|
||||
import {
|
||||
CreateFeatureRequestTool,
|
||||
SearchFeatureRequestsTool,
|
||||
} from "../tools/FeatureRequests/FeatureRequests";
|
||||
import { FindAgentsTool } from "../tools/FindAgents/FindAgents";
|
||||
import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks";
|
||||
import { RunAgentTool } from "../tools/RunAgent/RunAgent";
|
||||
@@ -49,8 +45,6 @@ const SECTIONS = [
|
||||
"Tool: Create Agent",
|
||||
"Tool: Edit Agent",
|
||||
"Tool: View Agent Output",
|
||||
"Tool: Search Feature Requests",
|
||||
"Tool: Create Feature Request",
|
||||
"Full Conversation Example",
|
||||
] as const;
|
||||
|
||||
@@ -1427,235 +1421,6 @@ export default function StyleguidePage() {
|
||||
</SubSection>
|
||||
</Section>
|
||||
|
||||
{/* ============================================================= */}
|
||||
{/* SEARCH FEATURE REQUESTS */}
|
||||
{/* ============================================================= */}
|
||||
|
||||
<Section title="Tool: Search Feature Requests">
|
||||
<SubSection label="Input streaming">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "input-streaming",
|
||||
input: { query: "dark mode" },
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Input available">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "input-available",
|
||||
input: { query: "dark mode" },
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (with results)">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: { query: "dark mode" },
|
||||
output: {
|
||||
type: "feature_request_search",
|
||||
message:
|
||||
'Found 2 feature request(s) matching "dark mode".',
|
||||
query: "dark mode",
|
||||
count: 2,
|
||||
results: [
|
||||
{
|
||||
id: "fr-001",
|
||||
identifier: "INT-42",
|
||||
title: "Add dark mode to the platform",
|
||||
description:
|
||||
"Users have requested a dark mode option for the builder and copilot interfaces to reduce eye strain during long sessions.",
|
||||
},
|
||||
{
|
||||
id: "fr-002",
|
||||
identifier: "INT-87",
|
||||
title: "Dark theme for agent output viewer",
|
||||
description:
|
||||
"Specifically requesting dark theme support for the agent output/execution viewer panel.",
|
||||
},
|
||||
],
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (no results)">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: { query: "teleportation" },
|
||||
output: {
|
||||
type: "no_results",
|
||||
message:
|
||||
"No feature requests found matching 'teleportation'.",
|
||||
suggestions: [
|
||||
"Try different keywords",
|
||||
"Use broader search terms",
|
||||
"You can create a new feature request if none exists",
|
||||
],
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (error)">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: { query: "dark mode" },
|
||||
output: {
|
||||
type: "error",
|
||||
message: "Failed to search feature requests.",
|
||||
error: "LINEAR_API_KEY environment variable is not set",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output error">
|
||||
<SearchFeatureRequestsTool
|
||||
part={{
|
||||
type: "tool-search_feature_requests",
|
||||
toolCallId: uid(),
|
||||
state: "output-error",
|
||||
input: { query: "dark mode" },
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
</Section>
|
||||
|
||||
{/* ============================================================= */}
|
||||
{/* CREATE FEATURE REQUEST */}
|
||||
{/* ============================================================= */}
|
||||
|
||||
<Section title="Tool: Create Feature Request">
|
||||
<SubSection label="Input streaming">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "input-streaming",
|
||||
input: {
|
||||
title: "Add dark mode",
|
||||
description: "I would love dark mode for the platform.",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Input available">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "input-available",
|
||||
input: {
|
||||
title: "Add dark mode",
|
||||
description: "I would love dark mode for the platform.",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (new issue created)">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: {
|
||||
title: "Add dark mode",
|
||||
description: "I would love dark mode for the platform.",
|
||||
},
|
||||
output: {
|
||||
type: "feature_request_created",
|
||||
message:
|
||||
"Created new feature request [INT-105] Add dark mode.",
|
||||
issue_id: "issue-new-123",
|
||||
issue_identifier: "INT-105",
|
||||
issue_title: "Add dark mode",
|
||||
issue_url:
|
||||
"https://linear.app/autogpt/issue/INT-105/add-dark-mode",
|
||||
is_new_issue: true,
|
||||
customer_name: "user-abc-123",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (added to existing issue)">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: {
|
||||
title: "Dark mode support",
|
||||
description:
|
||||
"Please add dark mode, it would help with long sessions.",
|
||||
existing_issue_id: "fr-001",
|
||||
},
|
||||
output: {
|
||||
type: "feature_request_created",
|
||||
message:
|
||||
"Added your request to existing feature request [INT-42] Add dark mode to the platform.",
|
||||
issue_id: "fr-001",
|
||||
issue_identifier: "INT-42",
|
||||
issue_title: "Add dark mode to the platform",
|
||||
issue_url:
|
||||
"https://linear.app/autogpt/issue/INT-42/add-dark-mode-to-the-platform",
|
||||
is_new_issue: false,
|
||||
customer_name: "user-xyz-789",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output available (error)">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "output-available",
|
||||
input: {
|
||||
title: "Add dark mode",
|
||||
description: "I would love dark mode.",
|
||||
},
|
||||
output: {
|
||||
type: "error",
|
||||
message:
|
||||
"Failed to attach customer need to the feature request.",
|
||||
error: "Linear API request failed (500): Internal error",
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
|
||||
<SubSection label="Output error">
|
||||
<CreateFeatureRequestTool
|
||||
part={{
|
||||
type: "tool-create_feature_request",
|
||||
toolCallId: uid(),
|
||||
state: "output-error",
|
||||
input: { title: "Add dark mode" },
|
||||
}}
|
||||
/>
|
||||
</SubSection>
|
||||
</Section>
|
||||
|
||||
{/* ============================================================= */}
|
||||
{/* FULL CONVERSATION EXAMPLE */}
|
||||
{/* ============================================================= */}
|
||||
|
||||
@@ -1,227 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import type { ToolUIPart } from "ai";
|
||||
import { useMemo } from "react";
|
||||
|
||||
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
|
||||
import {
|
||||
ContentBadge,
|
||||
ContentCard,
|
||||
ContentCardDescription,
|
||||
ContentCardHeader,
|
||||
ContentCardTitle,
|
||||
ContentGrid,
|
||||
ContentMessage,
|
||||
ContentSuggestionsList,
|
||||
} from "../../components/ToolAccordion/AccordionContent";
|
||||
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
|
||||
import {
|
||||
AccordionIcon,
|
||||
getAccordionTitle,
|
||||
getAnimationText,
|
||||
getFeatureRequestOutput,
|
||||
isCreatedOutput,
|
||||
isErrorOutput,
|
||||
isNoResultsOutput,
|
||||
isSearchResultsOutput,
|
||||
ToolIcon,
|
||||
type FeatureRequestToolType,
|
||||
} from "./helpers";
|
||||
|
||||
export interface FeatureRequestToolPart {
|
||||
type: FeatureRequestToolType;
|
||||
toolCallId: string;
|
||||
state: ToolUIPart["state"];
|
||||
input?: unknown;
|
||||
output?: unknown;
|
||||
}
|
||||
|
||||
interface Props {
|
||||
part: FeatureRequestToolPart;
|
||||
}
|
||||
|
||||
function truncate(text: string, maxChars: number): string {
|
||||
const trimmed = text.trim();
|
||||
if (trimmed.length <= maxChars) return trimmed;
|
||||
return `${trimmed.slice(0, maxChars).trimEnd()}…`;
|
||||
}
|
||||
|
||||
export function SearchFeatureRequestsTool({ part }: Props) {
|
||||
const output = getFeatureRequestOutput(part);
|
||||
const text = getAnimationText(part);
|
||||
const isStreaming =
|
||||
part.state === "input-streaming" || part.state === "input-available";
|
||||
const isError =
|
||||
part.state === "output-error" || (!!output && isErrorOutput(output));
|
||||
|
||||
const normalized = useMemo(() => {
|
||||
if (!output) return null;
|
||||
return { title: getAccordionTitle(part.type, output) };
|
||||
}, [output, part.type]);
|
||||
|
||||
const isOutputAvailable = part.state === "output-available" && !!output;
|
||||
|
||||
const searchOutput =
|
||||
isOutputAvailable && output && isSearchResultsOutput(output)
|
||||
? output
|
||||
: null;
|
||||
const noResultsOutput =
|
||||
isOutputAvailable && output && isNoResultsOutput(output) ? output : null;
|
||||
const errorOutput =
|
||||
isOutputAvailable && output && isErrorOutput(output) ? output : null;
|
||||
|
||||
const hasExpandableContent =
|
||||
isOutputAvailable &&
|
||||
((!!searchOutput && searchOutput.count > 0) ||
|
||||
!!noResultsOutput ||
|
||||
!!errorOutput);
|
||||
|
||||
const accordionDescription =
|
||||
hasExpandableContent && searchOutput
|
||||
? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"`
|
||||
: hasExpandableContent && (noResultsOutput || errorOutput)
|
||||
? ((noResultsOutput ?? errorOutput)?.message ?? null)
|
||||
: null;
|
||||
|
||||
return (
|
||||
<div className="py-2">
|
||||
<div className="flex items-center gap-2 text-sm text-muted-foreground">
|
||||
<ToolIcon
|
||||
toolType={part.type}
|
||||
isStreaming={isStreaming}
|
||||
isError={isError}
|
||||
/>
|
||||
<MorphingTextAnimation
|
||||
text={text}
|
||||
className={isError ? "text-red-500" : undefined}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{hasExpandableContent && normalized && (
|
||||
<ToolAccordion
|
||||
icon={<AccordionIcon toolType={part.type} />}
|
||||
title={normalized.title}
|
||||
description={accordionDescription}
|
||||
>
|
||||
{searchOutput && (
|
||||
<ContentGrid>
|
||||
{searchOutput.results.map((r) => (
|
||||
<ContentCard key={r.id}>
|
||||
<ContentCardHeader>
|
||||
<ContentCardTitle>{r.title}</ContentCardTitle>
|
||||
</ContentCardHeader>
|
||||
{r.description && (
|
||||
<ContentCardDescription>
|
||||
{truncate(r.description, 200)}
|
||||
</ContentCardDescription>
|
||||
)}
|
||||
</ContentCard>
|
||||
))}
|
||||
</ContentGrid>
|
||||
)}
|
||||
|
||||
{noResultsOutput && (
|
||||
<div>
|
||||
<ContentMessage>{noResultsOutput.message}</ContentMessage>
|
||||
{noResultsOutput.suggestions &&
|
||||
noResultsOutput.suggestions.length > 0 && (
|
||||
<ContentSuggestionsList items={noResultsOutput.suggestions} />
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{errorOutput && (
|
||||
<div>
|
||||
<ContentMessage>{errorOutput.message}</ContentMessage>
|
||||
{errorOutput.error && (
|
||||
<ContentCardDescription>
|
||||
{errorOutput.error}
|
||||
</ContentCardDescription>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</ToolAccordion>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export function CreateFeatureRequestTool({ part }: Props) {
|
||||
const output = getFeatureRequestOutput(part);
|
||||
const text = getAnimationText(part);
|
||||
const isStreaming =
|
||||
part.state === "input-streaming" || part.state === "input-available";
|
||||
const isError =
|
||||
part.state === "output-error" || (!!output && isErrorOutput(output));
|
||||
|
||||
const normalized = useMemo(() => {
|
||||
if (!output) return null;
|
||||
return { title: getAccordionTitle(part.type, output) };
|
||||
}, [output, part.type]);
|
||||
|
||||
const isOutputAvailable = part.state === "output-available" && !!output;
|
||||
|
||||
const createdOutput =
|
||||
isOutputAvailable && output && isCreatedOutput(output) ? output : null;
|
||||
const errorOutput =
|
||||
isOutputAvailable && output && isErrorOutput(output) ? output : null;
|
||||
|
||||
const hasExpandableContent =
|
||||
isOutputAvailable && (!!createdOutput || !!errorOutput);
|
||||
|
||||
const accordionDescription =
|
||||
hasExpandableContent && createdOutput
|
||||
? createdOutput.issue_title
|
||||
: hasExpandableContent && errorOutput
|
||||
? errorOutput.message
|
||||
: null;
|
||||
|
||||
return (
|
||||
<div className="py-2">
|
||||
<div className="flex items-center gap-2 text-sm text-muted-foreground">
|
||||
<ToolIcon
|
||||
toolType={part.type}
|
||||
isStreaming={isStreaming}
|
||||
isError={isError}
|
||||
/>
|
||||
<MorphingTextAnimation
|
||||
text={text}
|
||||
className={isError ? "text-red-500" : undefined}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{hasExpandableContent && normalized && (
|
||||
<ToolAccordion
|
||||
icon={<AccordionIcon toolType={part.type} />}
|
||||
title={normalized.title}
|
||||
description={accordionDescription}
|
||||
>
|
||||
{createdOutput && (
|
||||
<ContentCard>
|
||||
<ContentCardHeader>
|
||||
<ContentCardTitle>{createdOutput.issue_title}</ContentCardTitle>
|
||||
</ContentCardHeader>
|
||||
<div className="mt-2 flex items-center gap-2">
|
||||
<ContentBadge>
|
||||
{createdOutput.is_new_issue ? "New" : "Existing"}
|
||||
</ContentBadge>
|
||||
</div>
|
||||
<ContentMessage>{createdOutput.message}</ContentMessage>
|
||||
</ContentCard>
|
||||
)}
|
||||
|
||||
{errorOutput && (
|
||||
<div>
|
||||
<ContentMessage>{errorOutput.message}</ContentMessage>
|
||||
{errorOutput.error && (
|
||||
<ContentCardDescription>
|
||||
{errorOutput.error}
|
||||
</ContentCardDescription>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</ToolAccordion>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,271 +0,0 @@
|
||||
import {
|
||||
CheckCircleIcon,
|
||||
LightbulbIcon,
|
||||
MagnifyingGlassIcon,
|
||||
PlusCircleIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import type { ToolUIPart } from "ai";
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Types (local until API client is regenerated) */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
interface FeatureRequestInfo {
|
||||
id: string;
|
||||
identifier: string;
|
||||
title: string;
|
||||
description?: string | null;
|
||||
}
|
||||
|
||||
export interface FeatureRequestSearchResponse {
|
||||
type: "feature_request_search";
|
||||
message: string;
|
||||
results: FeatureRequestInfo[];
|
||||
count: number;
|
||||
query: string;
|
||||
}
|
||||
|
||||
export interface FeatureRequestCreatedResponse {
|
||||
type: "feature_request_created";
|
||||
message: string;
|
||||
issue_id: string;
|
||||
issue_identifier: string;
|
||||
issue_title: string;
|
||||
issue_url: string;
|
||||
is_new_issue: boolean;
|
||||
customer_name: string;
|
||||
}
|
||||
|
||||
interface NoResultsResponse {
|
||||
type: "no_results";
|
||||
message: string;
|
||||
suggestions?: string[];
|
||||
}
|
||||
|
||||
interface ErrorResponse {
|
||||
type: "error";
|
||||
message: string;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export type FeatureRequestOutput =
|
||||
| FeatureRequestSearchResponse
|
||||
| FeatureRequestCreatedResponse
|
||||
| NoResultsResponse
|
||||
| ErrorResponse;
|
||||
|
||||
export type FeatureRequestToolType =
|
||||
| "tool-search_feature_requests"
|
||||
| "tool-create_feature_request"
|
||||
| string;
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Output parsing */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
function parseOutput(output: unknown): FeatureRequestOutput | null {
|
||||
if (!output) return null;
|
||||
if (typeof output === "string") {
|
||||
const trimmed = output.trim();
|
||||
if (!trimmed) return null;
|
||||
try {
|
||||
return parseOutput(JSON.parse(trimmed) as unknown);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
if (typeof output === "object") {
|
||||
const type = (output as { type?: unknown }).type;
|
||||
if (
|
||||
type === "feature_request_search" ||
|
||||
type === "feature_request_created" ||
|
||||
type === "no_results" ||
|
||||
type === "error"
|
||||
) {
|
||||
return output as FeatureRequestOutput;
|
||||
}
|
||||
// Fallback structural checks
|
||||
if ("results" in output && "query" in output)
|
||||
return output as FeatureRequestSearchResponse;
|
||||
if ("issue_identifier" in output)
|
||||
return output as FeatureRequestCreatedResponse;
|
||||
if ("suggestions" in output && !("error" in output))
|
||||
return output as NoResultsResponse;
|
||||
if ("error" in output || "details" in output)
|
||||
return output as ErrorResponse;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export function getFeatureRequestOutput(
|
||||
part: unknown,
|
||||
): FeatureRequestOutput | null {
|
||||
if (!part || typeof part !== "object") return null;
|
||||
return parseOutput((part as { output?: unknown }).output);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Type guards */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export function isSearchResultsOutput(
|
||||
output: FeatureRequestOutput,
|
||||
): output is FeatureRequestSearchResponse {
|
||||
return (
|
||||
output.type === "feature_request_search" ||
|
||||
("results" in output && "query" in output)
|
||||
);
|
||||
}
|
||||
|
||||
export function isCreatedOutput(
|
||||
output: FeatureRequestOutput,
|
||||
): output is FeatureRequestCreatedResponse {
|
||||
return (
|
||||
output.type === "feature_request_created" || "issue_identifier" in output
|
||||
);
|
||||
}
|
||||
|
||||
export function isNoResultsOutput(
|
||||
output: FeatureRequestOutput,
|
||||
): output is NoResultsResponse {
|
||||
return (
|
||||
output.type === "no_results" ||
|
||||
("suggestions" in output && !("error" in output))
|
||||
);
|
||||
}
|
||||
|
||||
export function isErrorOutput(
|
||||
output: FeatureRequestOutput,
|
||||
): output is ErrorResponse {
|
||||
return output.type === "error" || "error" in output;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Accordion metadata */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export function getAccordionTitle(
|
||||
toolType: FeatureRequestToolType,
|
||||
output: FeatureRequestOutput,
|
||||
): string {
|
||||
if (toolType === "tool-search_feature_requests") {
|
||||
if (isSearchResultsOutput(output)) return "Feature requests";
|
||||
if (isNoResultsOutput(output)) return "No feature requests found";
|
||||
return "Feature request search error";
|
||||
}
|
||||
if (isCreatedOutput(output)) {
|
||||
return output.is_new_issue
|
||||
? "Feature request created"
|
||||
: "Added to feature request";
|
||||
}
|
||||
if (isErrorOutput(output)) return "Feature request error";
|
||||
return "Feature request";
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Animation text */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
interface AnimationPart {
|
||||
type: FeatureRequestToolType;
|
||||
state: ToolUIPart["state"];
|
||||
input?: unknown;
|
||||
output?: unknown;
|
||||
}
|
||||
|
||||
export function getAnimationText(part: AnimationPart): string {
|
||||
if (part.type === "tool-search_feature_requests") {
|
||||
const query = (part.input as { query?: string } | undefined)?.query?.trim();
|
||||
const queryText = query ? ` for "${query}"` : "";
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available":
|
||||
return `Searching feature requests${queryText}`;
|
||||
case "output-available": {
|
||||
const output = parseOutput(part.output);
|
||||
if (!output) return `Searching feature requests${queryText}`;
|
||||
if (isSearchResultsOutput(output)) {
|
||||
return `Found ${output.count} feature request${output.count === 1 ? "" : "s"}${queryText}`;
|
||||
}
|
||||
if (isNoResultsOutput(output))
|
||||
return `No feature requests found${queryText}`;
|
||||
return `Error searching feature requests${queryText}`;
|
||||
}
|
||||
case "output-error":
|
||||
return `Error searching feature requests${queryText}`;
|
||||
default:
|
||||
return "Searching feature requests";
|
||||
}
|
||||
}
|
||||
|
||||
// create_feature_request
|
||||
const title = (part.input as { title?: string } | undefined)?.title?.trim();
|
||||
const titleText = title ? ` "${title}"` : "";
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available":
|
||||
return `Creating feature request${titleText}`;
|
||||
case "output-available": {
|
||||
const output = parseOutput(part.output);
|
||||
if (!output) return `Creating feature request${titleText}`;
|
||||
if (isCreatedOutput(output)) {
|
||||
return output.is_new_issue
|
||||
? "Feature request created"
|
||||
: "Added to existing feature request";
|
||||
}
|
||||
if (isErrorOutput(output)) return "Error creating feature request";
|
||||
return `Created feature request${titleText}`;
|
||||
}
|
||||
case "output-error":
|
||||
return "Error creating feature request";
|
||||
default:
|
||||
return "Creating feature request";
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Icons */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export function ToolIcon({
|
||||
toolType,
|
||||
isStreaming,
|
||||
isError,
|
||||
}: {
|
||||
toolType: FeatureRequestToolType;
|
||||
isStreaming?: boolean;
|
||||
isError?: boolean;
|
||||
}) {
|
||||
const IconComponent =
|
||||
toolType === "tool-create_feature_request"
|
||||
? PlusCircleIcon
|
||||
: MagnifyingGlassIcon;
|
||||
|
||||
return (
|
||||
<IconComponent
|
||||
size={14}
|
||||
weight="regular"
|
||||
className={
|
||||
isError
|
||||
? "text-red-500"
|
||||
: isStreaming
|
||||
? "text-neutral-500"
|
||||
: "text-neutral-400"
|
||||
}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function AccordionIcon({
|
||||
toolType,
|
||||
}: {
|
||||
toolType: FeatureRequestToolType;
|
||||
}) {
|
||||
const IconComponent =
|
||||
toolType === "tool-create_feature_request"
|
||||
? CheckCircleIcon
|
||||
: LightbulbIcon;
|
||||
return <IconComponent size={32} weight="light" />;
|
||||
}
|
||||
@@ -3,7 +3,6 @@
|
||||
import type { ToolUIPart } from "ai";
|
||||
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
|
||||
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
|
||||
import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
|
||||
import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
|
||||
import { ErrorCard } from "./components/ErrorCard/ErrorCard";
|
||||
import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
|
||||
@@ -12,7 +11,6 @@ import {
|
||||
getAnimationText,
|
||||
getRunBlockToolOutput,
|
||||
isRunBlockBlockOutput,
|
||||
isRunBlockDetailsOutput,
|
||||
isRunBlockErrorOutput,
|
||||
isRunBlockSetupRequirementsOutput,
|
||||
ToolIcon,
|
||||
@@ -43,7 +41,6 @@ export function RunBlockTool({ part }: Props) {
|
||||
part.state === "output-available" &&
|
||||
!!output &&
|
||||
(isRunBlockBlockOutput(output) ||
|
||||
isRunBlockDetailsOutput(output) ||
|
||||
isRunBlockSetupRequirementsOutput(output) ||
|
||||
isRunBlockErrorOutput(output));
|
||||
|
||||
@@ -61,10 +58,6 @@ export function RunBlockTool({ part }: Props) {
|
||||
<ToolAccordion {...getAccordionMeta(output)}>
|
||||
{isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
|
||||
|
||||
{isRunBlockDetailsOutput(output) && (
|
||||
<BlockDetailsCard output={output} />
|
||||
)}
|
||||
|
||||
{isRunBlockSetupRequirementsOutput(output) && (
|
||||
<SetupRequirementsCard output={output} />
|
||||
)}
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
import type { Meta, StoryObj } from "@storybook/nextjs";
|
||||
import { ResponseType } from "@/app/api/__generated__/models/responseType";
|
||||
import type { BlockDetailsResponse } from "../../helpers";
|
||||
import { BlockDetailsCard } from "./BlockDetailsCard";
|
||||
|
||||
const meta: Meta<typeof BlockDetailsCard> = {
|
||||
title: "Copilot/RunBlock/BlockDetailsCard",
|
||||
component: BlockDetailsCard,
|
||||
parameters: {
|
||||
layout: "centered",
|
||||
},
|
||||
tags: ["autodocs"],
|
||||
decorators: [
|
||||
(Story) => (
|
||||
<div style={{ maxWidth: 480 }}>
|
||||
<Story />
|
||||
</div>
|
||||
),
|
||||
],
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj<typeof meta>;
|
||||
|
||||
const baseBlock: BlockDetailsResponse = {
|
||||
type: ResponseType.block_details,
|
||||
message:
|
||||
"Here are the details for the GetWeather block. Provide the required inputs to run it.",
|
||||
session_id: "session-123",
|
||||
user_authenticated: true,
|
||||
block: {
|
||||
id: "block-abc-123",
|
||||
name: "GetWeather",
|
||||
description: "Fetches current weather data for a given location.",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: {
|
||||
title: "Location",
|
||||
type: "string",
|
||||
description:
|
||||
"City name or coordinates (e.g. 'London' or '51.5,-0.1')",
|
||||
},
|
||||
units: {
|
||||
title: "Units",
|
||||
type: "string",
|
||||
description: "Temperature units: 'metric' or 'imperial'",
|
||||
},
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
temperature: {
|
||||
title: "Temperature",
|
||||
type: "number",
|
||||
description: "Current temperature in the requested units",
|
||||
},
|
||||
condition: {
|
||||
title: "Condition",
|
||||
type: "string",
|
||||
description: "Weather condition description (e.g. 'Sunny', 'Rain')",
|
||||
},
|
||||
},
|
||||
},
|
||||
credentials: [],
|
||||
},
|
||||
};
|
||||
|
||||
export const Default: Story = {
|
||||
args: {
|
||||
output: baseBlock,
|
||||
},
|
||||
};
|
||||
|
||||
export const InputsOnly: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "This block requires inputs. No outputs are defined.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
outputs: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const OutputsOnly: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "This block has no required inputs.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
inputs: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const ManyFields: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "Block with many input and output fields.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
name: "SendEmail",
|
||||
description: "Sends an email via SMTP.",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
to: {
|
||||
title: "To",
|
||||
type: "string",
|
||||
description: "Recipient email address",
|
||||
},
|
||||
subject: {
|
||||
title: "Subject",
|
||||
type: "string",
|
||||
description: "Email subject line",
|
||||
},
|
||||
body: {
|
||||
title: "Body",
|
||||
type: "string",
|
||||
description: "Email body content",
|
||||
},
|
||||
cc: {
|
||||
title: "CC",
|
||||
type: "string",
|
||||
description: "CC recipients (comma-separated)",
|
||||
},
|
||||
bcc: {
|
||||
title: "BCC",
|
||||
type: "string",
|
||||
description: "BCC recipients (comma-separated)",
|
||||
},
|
||||
},
|
||||
required: ["to", "subject", "body"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
message_id: {
|
||||
title: "Message ID",
|
||||
type: "string",
|
||||
description: "Unique ID of the sent email",
|
||||
},
|
||||
status: {
|
||||
title: "Status",
|
||||
type: "string",
|
||||
description: "Delivery status",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const NoFieldDescriptions: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "Fields without descriptions.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
name: "SimpleBlock",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
input_a: { title: "Input A", type: "string" },
|
||||
input_b: { title: "Input B", type: "number" },
|
||||
},
|
||||
required: ["input_a"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
result: { title: "Result", type: "string" },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -1,103 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import type { BlockDetailsResponse } from "../../helpers";
|
||||
import {
|
||||
ContentBadge,
|
||||
ContentCard,
|
||||
ContentCardDescription,
|
||||
ContentCardTitle,
|
||||
ContentGrid,
|
||||
ContentMessage,
|
||||
} from "../../../../components/ToolAccordion/AccordionContent";
|
||||
|
||||
interface Props {
|
||||
output: BlockDetailsResponse;
|
||||
}
|
||||
|
||||
function SchemaFieldList({
|
||||
title,
|
||||
properties,
|
||||
required,
|
||||
}: {
|
||||
title: string;
|
||||
properties: Record<string, unknown>;
|
||||
required?: string[];
|
||||
}) {
|
||||
const entries = Object.entries(properties);
|
||||
if (entries.length === 0) return null;
|
||||
|
||||
const requiredSet = new Set(required ?? []);
|
||||
|
||||
return (
|
||||
<ContentCard>
|
||||
<ContentCardTitle className="text-xs">{title}</ContentCardTitle>
|
||||
<div className="mt-2 grid gap-2">
|
||||
{entries.map(([name, schema]) => {
|
||||
const field = schema as Record<string, unknown> | undefined;
|
||||
const fieldTitle =
|
||||
typeof field?.title === "string" ? field.title : name;
|
||||
const fieldType =
|
||||
typeof field?.type === "string" ? field.type : "unknown";
|
||||
const description =
|
||||
typeof field?.description === "string"
|
||||
? field.description
|
||||
: undefined;
|
||||
|
||||
return (
|
||||
<div key={name} className="rounded-xl border p-2">
|
||||
<div className="flex items-center justify-between gap-2">
|
||||
<ContentCardTitle className="text-xs">
|
||||
{fieldTitle}
|
||||
</ContentCardTitle>
|
||||
<div className="flex gap-1">
|
||||
<ContentBadge>{fieldType}</ContentBadge>
|
||||
{requiredSet.has(name) && (
|
||||
<ContentBadge>Required</ContentBadge>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{description && (
|
||||
<ContentCardDescription className="mt-1 text-xs">
|
||||
{description}
|
||||
</ContentCardDescription>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</ContentCard>
|
||||
);
|
||||
}
|
||||
|
||||
export function BlockDetailsCard({ output }: Props) {
|
||||
const inputs = output.block.inputs as {
|
||||
properties?: Record<string, unknown>;
|
||||
required?: string[];
|
||||
} | null;
|
||||
const outputs = output.block.outputs as {
|
||||
properties?: Record<string, unknown>;
|
||||
required?: string[];
|
||||
} | null;
|
||||
|
||||
return (
|
||||
<ContentGrid>
|
||||
<ContentMessage>{output.message}</ContentMessage>
|
||||
|
||||
{inputs?.properties && Object.keys(inputs.properties).length > 0 && (
|
||||
<SchemaFieldList
|
||||
title="Inputs"
|
||||
properties={inputs.properties}
|
||||
required={inputs.required}
|
||||
/>
|
||||
)}
|
||||
|
||||
{outputs?.properties && Object.keys(outputs.properties).length > 0 && (
|
||||
<SchemaFieldList
|
||||
title="Outputs"
|
||||
properties={outputs.properties}
|
||||
required={outputs.required}
|
||||
/>
|
||||
)}
|
||||
</ContentGrid>
|
||||
);
|
||||
}
|
||||
@@ -10,37 +10,18 @@ import {
|
||||
import type { ToolUIPart } from "ai";
|
||||
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
|
||||
|
||||
/** Block details returned on first run_block attempt (before input_data provided). */
|
||||
export interface BlockDetailsResponse {
|
||||
type: typeof ResponseType.block_details;
|
||||
message: string;
|
||||
session_id?: string | null;
|
||||
block: {
|
||||
id: string;
|
||||
name: string;
|
||||
description: string;
|
||||
inputs: Record<string, unknown>;
|
||||
outputs: Record<string, unknown>;
|
||||
credentials: unknown[];
|
||||
};
|
||||
user_authenticated: boolean;
|
||||
}
|
||||
|
||||
export interface RunBlockInput {
|
||||
block_id?: string;
|
||||
block_name?: string;
|
||||
input_data?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export type RunBlockToolOutput =
|
||||
| SetupRequirementsResponse
|
||||
| BlockDetailsResponse
|
||||
| BlockOutputResponse
|
||||
| ErrorResponse;
|
||||
|
||||
const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
|
||||
ResponseType.setup_requirements,
|
||||
ResponseType.block_details,
|
||||
ResponseType.block_output,
|
||||
ResponseType.error,
|
||||
]);
|
||||
@@ -54,15 +35,6 @@ export function isRunBlockSetupRequirementsOutput(
|
||||
);
|
||||
}
|
||||
|
||||
export function isRunBlockDetailsOutput(
|
||||
output: RunBlockToolOutput,
|
||||
): output is BlockDetailsResponse {
|
||||
return (
|
||||
output.type === ResponseType.block_details ||
|
||||
("block" in output && typeof output.block === "object")
|
||||
);
|
||||
}
|
||||
|
||||
export function isRunBlockBlockOutput(
|
||||
output: RunBlockToolOutput,
|
||||
): output is BlockOutputResponse {
|
||||
@@ -92,7 +64,6 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
|
||||
return output as RunBlockToolOutput;
|
||||
}
|
||||
if ("block_id" in output) return output as BlockOutputResponse;
|
||||
if ("block" in output) return output as BlockDetailsResponse;
|
||||
if ("setup_info" in output) return output as SetupRequirementsResponse;
|
||||
if ("error" in output || "details" in output)
|
||||
return output as ErrorResponse;
|
||||
@@ -113,25 +84,17 @@ export function getAnimationText(part: {
|
||||
output?: unknown;
|
||||
}): string {
|
||||
const input = part.input as RunBlockInput | undefined;
|
||||
const blockName = input?.block_name?.trim();
|
||||
const blockId = input?.block_id?.trim();
|
||||
// Prefer block_name if available, otherwise fall back to block_id
|
||||
const blockText = blockName
|
||||
? ` "${blockName}"`
|
||||
: blockId
|
||||
? ` "${blockId}"`
|
||||
: "";
|
||||
const blockText = blockId ? ` "${blockId}"` : "";
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available":
|
||||
return `Running${blockText}`;
|
||||
return `Running the block${blockText}`;
|
||||
case "output-available": {
|
||||
const output = parseOutput(part.output);
|
||||
if (!output) return `Running${blockText}`;
|
||||
if (!output) return `Running the block${blockText}`;
|
||||
if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
|
||||
if (isRunBlockDetailsOutput(output))
|
||||
return `Details for "${output.block.name}"`;
|
||||
if (isRunBlockSetupRequirementsOutput(output)) {
|
||||
return `Setup needed for "${output.setup_info.agent_name}"`;
|
||||
}
|
||||
@@ -195,21 +158,6 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
|
||||
};
|
||||
}
|
||||
|
||||
if (isRunBlockDetailsOutput(output)) {
|
||||
const inputKeys = Object.keys(
|
||||
(output.block.inputs as { properties?: Record<string, unknown> })
|
||||
?.properties ?? {},
|
||||
);
|
||||
return {
|
||||
icon,
|
||||
title: output.block.name,
|
||||
description:
|
||||
inputKeys.length > 0
|
||||
? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
|
||||
: output.message,
|
||||
};
|
||||
}
|
||||
|
||||
if (isRunBlockSetupRequirementsOutput(output)) {
|
||||
const missingCredsCount = Object.keys(
|
||||
(output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
|
||||
|
||||
@@ -1053,7 +1053,6 @@
|
||||
"$ref": "#/components/schemas/ClarificationNeededResponse"
|
||||
},
|
||||
{ "$ref": "#/components/schemas/BlockListResponse" },
|
||||
{ "$ref": "#/components/schemas/BlockDetailsResponse" },
|
||||
{ "$ref": "#/components/schemas/BlockOutputResponse" },
|
||||
{ "$ref": "#/components/schemas/DocSearchResultsResponse" },
|
||||
{ "$ref": "#/components/schemas/DocPageResponse" },
|
||||
@@ -6959,58 +6958,6 @@
|
||||
"enum": ["run", "byte", "second"],
|
||||
"title": "BlockCostType"
|
||||
},
|
||||
"BlockDetails": {
|
||||
"properties": {
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"description": { "type": "string", "title": "Description" },
|
||||
"inputs": {
|
||||
"additionalProperties": true,
|
||||
"type": "object",
|
||||
"title": "Inputs",
|
||||
"default": {}
|
||||
},
|
||||
"outputs": {
|
||||
"additionalProperties": true,
|
||||
"type": "object",
|
||||
"title": "Outputs",
|
||||
"default": {}
|
||||
},
|
||||
"credentials": {
|
||||
"items": { "$ref": "#/components/schemas/CredentialsMetaInput" },
|
||||
"type": "array",
|
||||
"title": "Credentials",
|
||||
"default": []
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["id", "name", "description"],
|
||||
"title": "BlockDetails",
|
||||
"description": "Detailed block information."
|
||||
},
|
||||
"BlockDetailsResponse": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"$ref": "#/components/schemas/ResponseType",
|
||||
"default": "block_details"
|
||||
},
|
||||
"message": { "type": "string", "title": "Message" },
|
||||
"session_id": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Session Id"
|
||||
},
|
||||
"block": { "$ref": "#/components/schemas/BlockDetails" },
|
||||
"user_authenticated": {
|
||||
"type": "boolean",
|
||||
"title": "User Authenticated",
|
||||
"default": false
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["message", "block"],
|
||||
"title": "BlockDetailsResponse",
|
||||
"description": "Response for block details (first run_block attempt)."
|
||||
},
|
||||
"BlockInfo": {
|
||||
"properties": {
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
@@ -7066,13 +7013,62 @@
|
||||
"properties": {
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"description": { "type": "string", "title": "Description" }
|
||||
"description": { "type": "string", "title": "Description" },
|
||||
"categories": {
|
||||
"items": { "type": "string" },
|
||||
"type": "array",
|
||||
"title": "Categories"
|
||||
},
|
||||
"input_schema": {
|
||||
"additionalProperties": true,
|
||||
"type": "object",
|
||||
"title": "Input Schema"
|
||||
},
|
||||
"output_schema": {
|
||||
"additionalProperties": true,
|
||||
"type": "object",
|
||||
"title": "Output Schema"
|
||||
},
|
||||
"required_inputs": {
|
||||
"items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
|
||||
"type": "array",
|
||||
"title": "Required Inputs",
|
||||
"description": "List of required input fields for this block"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["id", "name", "description"],
|
||||
"required": [
|
||||
"id",
|
||||
"name",
|
||||
"description",
|
||||
"categories",
|
||||
"input_schema",
|
||||
"output_schema"
|
||||
],
|
||||
"title": "BlockInfoSummary",
|
||||
"description": "Summary of a block for search results."
|
||||
},
|
||||
"BlockInputFieldInfo": {
|
||||
"properties": {
|
||||
"name": { "type": "string", "title": "Name" },
|
||||
"type": { "type": "string", "title": "Type" },
|
||||
"description": {
|
||||
"type": "string",
|
||||
"title": "Description",
|
||||
"default": ""
|
||||
},
|
||||
"required": {
|
||||
"type": "boolean",
|
||||
"title": "Required",
|
||||
"default": false
|
||||
},
|
||||
"default": { "anyOf": [{}, { "type": "null" }], "title": "Default" }
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["name", "type"],
|
||||
"title": "BlockInputFieldInfo",
|
||||
"description": "Information about a block input field."
|
||||
},
|
||||
"BlockListResponse": {
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -7090,7 +7086,12 @@
|
||||
"title": "Blocks"
|
||||
},
|
||||
"count": { "type": "integer", "title": "Count" },
|
||||
"query": { "type": "string", "title": "Query" }
|
||||
"query": { "type": "string", "title": "Query" },
|
||||
"usage_hint": {
|
||||
"type": "string",
|
||||
"title": "Usage Hint",
|
||||
"default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["message", "blocks", "count", "query"],
|
||||
@@ -10483,7 +10484,6 @@
|
||||
"agent_saved",
|
||||
"clarification_needed",
|
||||
"block_list",
|
||||
"block_details",
|
||||
"block_output",
|
||||
"doc_search_results",
|
||||
"doc_page",
|
||||
@@ -10495,9 +10495,7 @@
|
||||
"operation_started",
|
||||
"operation_pending",
|
||||
"operation_in_progress",
|
||||
"input_validation_error",
|
||||
"feature_request_search",
|
||||
"feature_request_created"
|
||||
"input_validation_error"
|
||||
],
|
||||
"title": "ResponseType",
|
||||
"description": "Types of tool responses."
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
extend-ignore = E203
|
||||
exclude =
|
||||
.tox,
|
||||
__pycache__,
|
||||
*.pyc,
|
||||
.env
|
||||
venv*/*,
|
||||
.venv/*,
|
||||
reports/*,
|
||||
dist/*,
|
||||
data/*,
|
||||
.env,
|
||||
venv*,
|
||||
.venv,
|
||||
reports,
|
||||
dist,
|
||||
data,
|
||||
.benchmark_workspaces,
|
||||
.autogpt,
|
||||
|
||||
291
classic/CLAUDE.md
Normal file
291
classic/CLAUDE.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
AutoGPT Classic is an experimental, **unsupported** project demonstrating autonomous GPT-4 operation. Dependencies will not be updated, and the codebase contains known vulnerabilities. This is preserved for educational/historical purposes.
|
||||
|
||||
## Repository Structure
|
||||
|
||||
```
|
||||
classic/
|
||||
├── pyproject.toml # Single consolidated Poetry project
|
||||
├── poetry.lock # Single lock file
|
||||
├── forge/
|
||||
│ └── forge/ # Core agent framework package
|
||||
├── original_autogpt/
|
||||
│ └── autogpt/ # AutoGPT agent package
|
||||
├── direct_benchmark/
|
||||
│ └── direct_benchmark/ # Benchmark harness package
|
||||
└── benchmark/ # Challenge definitions (data, not code)
|
||||
```
|
||||
|
||||
All packages are managed by a single `pyproject.toml` at the classic/ root.
|
||||
|
||||
## Common Commands
|
||||
|
||||
### Setup & Install
|
||||
```bash
|
||||
# Install everything from classic/ directory
|
||||
cd classic
|
||||
poetry install
|
||||
```
|
||||
|
||||
### Running Agents
|
||||
```bash
|
||||
# Run forge agent
|
||||
poetry run python -m forge
|
||||
|
||||
# Run original autogpt server
|
||||
poetry run serve --debug
|
||||
|
||||
# Run autogpt CLI
|
||||
poetry run autogpt
|
||||
```
|
||||
|
||||
Agents run on `http://localhost:8000` by default.
|
||||
|
||||
### Benchmarking
|
||||
```bash
|
||||
# Run benchmarks
|
||||
poetry run direct-benchmark run
|
||||
|
||||
# Run specific strategies and models
|
||||
poetry run direct-benchmark run \
|
||||
--strategies one_shot,rewoo \
|
||||
--models claude \
|
||||
--parallel 4
|
||||
|
||||
# Run a single test
|
||||
poetry run direct-benchmark run --tests ReadFile
|
||||
|
||||
# List available commands
|
||||
poetry run direct-benchmark --help
|
||||
```
|
||||
|
||||
### Testing
|
||||
```bash
|
||||
poetry run pytest # All tests
|
||||
poetry run pytest forge/tests/ # Forge tests only
|
||||
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
|
||||
poetry run pytest -k test_name # Single test by name
|
||||
poetry run pytest path/to/test.py # Specific test file
|
||||
poetry run pytest --cov # With coverage
|
||||
```
|
||||
|
||||
### Linting & Formatting
|
||||
|
||||
Run from the classic/ directory:
|
||||
|
||||
```bash
|
||||
# Format everything (recommended to run together)
|
||||
poetry run black . && poetry run isort .
|
||||
|
||||
# Check formatting (CI-style, no changes)
|
||||
poetry run black --check . && poetry run isort --check-only .
|
||||
|
||||
# Lint
|
||||
poetry run flake8 # Style linting
|
||||
|
||||
# Type check
|
||||
poetry run pyright # Type checking (some errors are expected in infrastructure code)
|
||||
```
|
||||
|
||||
Note: Always run linters over the entire directory, not specific files, for best results.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Forge (Core Framework)
|
||||
The `forge` package is the foundation that other components depend on:
|
||||
- `forge/agent/` - Agent implementation and protocols
|
||||
- `forge/llm/` - Multi-provider LLM integrations (OpenAI, Anthropic, Groq, LiteLLM)
|
||||
- `forge/components/` - Reusable agent components
|
||||
- `forge/file_storage/` - File system abstraction
|
||||
- `forge/config/` - Configuration management
|
||||
|
||||
### Original AutoGPT
|
||||
- `original_autogpt/autogpt/app/` - CLI application entry points
|
||||
- `original_autogpt/autogpt/agents/` - Agent implementations
|
||||
- `original_autogpt/autogpt/agent_factory/` - Agent creation logic
|
||||
|
||||
### Direct Benchmark
|
||||
Benchmark harness for testing agent performance:
|
||||
- `direct_benchmark/direct_benchmark/` - CLI and harness code
|
||||
- `benchmark/agbenchmark/challenges/` - Test cases organized by category (code, retrieval, data, etc.)
|
||||
- Reports generated in `direct_benchmark/reports/`
|
||||
|
||||
### Package Structure
|
||||
All three packages are included in a single Poetry project. Imports are fully qualified:
|
||||
- `from forge.agent.base import BaseAgent`
|
||||
- `from autogpt.agents.agent import Agent`
|
||||
- `from direct_benchmark.harness import BenchmarkHarness`
|
||||
|
||||
## Code Style
|
||||
|
||||
- Python 3.12 target
|
||||
- Line length: 88 characters (Black default)
|
||||
- Black for formatting, isort for imports (profile="black")
|
||||
- Type hints with Pyright checking
|
||||
|
||||
## Testing Patterns
|
||||
|
||||
- Async support via pytest-asyncio
|
||||
- Fixtures defined in `conftest.py` files provide: `tmp_project_root`, `storage`, `config`, `llm_provider`, `agent`
|
||||
- Tests requiring API keys (OPENAI_API_KEY, ANTHROPIC_API_KEY) will skip if not set
|
||||
|
||||
## Environment Setup
|
||||
|
||||
Copy `.env.example` to `.env` in the relevant directory and add your API keys:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Edit .env with your OPENAI_API_KEY, etc.
|
||||
```
|
||||
|
||||
## Workspaces
|
||||
|
||||
Agents operate within a **workspace** - a directory containing all agent data and files. The workspace root defaults to the current working directory.
|
||||
|
||||
### Workspace Structure
|
||||
|
||||
```
|
||||
{workspace}/
|
||||
├── .autogpt/
|
||||
│ ├── autogpt.yaml # Workspace-level permissions
|
||||
│ ├── ap_server.db # Agent Protocol database (server mode)
|
||||
│ └── agents/
|
||||
│ └── AutoGPT-{agent_id}/
|
||||
│ ├── state.json # Agent profile, directives, action history
|
||||
│ ├── permissions.yaml # Agent-specific permission overrides
|
||||
│ └── workspace/ # Agent's sandboxed working directory
|
||||
```
|
||||
|
||||
### Key Concepts
|
||||
|
||||
- **Multiple agents** can coexist in the same workspace (each gets its own subdirectory)
|
||||
- **File access** is sandboxed to the agent's `workspace/` directory by default
|
||||
- **State persistence** - agent state saves to `state.json` and survives across sessions
|
||||
- **Storage backends** - supports local filesystem, S3, and GCS (via `FILE_STORAGE_BACKEND` env var)
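
A small Python sketch of how the documented layout maps to paths (the helper is hypothetical and simply mirrors the tree above, not the real code):

```python
from pathlib import Path


def agent_paths(workspace_root: Path, agent_id: str) -> dict[str, Path]:
    """Resolve the documented per-agent files under a workspace root (illustrative)."""
    agent_root = workspace_root / ".autogpt" / "agents" / f"AutoGPT-{agent_id}"
    return {
        "state": agent_root / "state.json",              # profile, directives, history
        "permissions": agent_root / "permissions.yaml",  # agent-specific overrides
        "sandbox": agent_root / "workspace",             # sandboxed working directory
    }


# agent_paths(Path.cwd(), "1234")["sandbox"] -> ./.autogpt/agents/AutoGPT-1234/workspace
```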
|
||||
|
||||
### Specifying a Workspace
|
||||
|
||||
```bash
|
||||
# Default: uses current directory
|
||||
cd /path/to/my/project && poetry run autogpt
|
||||
|
||||
# Or specify explicitly via CLI (if supported)
|
||||
poetry run autogpt --workspace /path/to/workspace
|
||||
```
|
||||
|
||||
## Settings Location
|
||||
|
||||
Configuration uses a **layered system** with three levels (in order of precedence):
|
||||
|
||||
### 1. Environment Variables (Global)
|
||||
|
||||
Loaded from `.env` file in the working directory:
|
||||
|
||||
```bash
|
||||
# Required
|
||||
OPENAI_API_KEY=sk-...
|
||||
|
||||
# Optional LLM settings
|
||||
SMART_LLM=gpt-4o # Model for complex reasoning
|
||||
FAST_LLM=gpt-4o-mini # Model for simple tasks
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
|
||||
# Optional search providers (for web search component)
|
||||
TAVILY_API_KEY=tvly-...
|
||||
SERPER_API_KEY=...
|
||||
GOOGLE_API_KEY=...
|
||||
GOOGLE_CUSTOM_SEARCH_ENGINE_ID=...
|
||||
|
||||
# Optional infrastructure
|
||||
LOG_LEVEL=DEBUG # DEBUG, INFO, WARNING, ERROR
|
||||
DATABASE_STRING=sqlite:///agent.db # Agent Protocol database
|
||||
PORT=8000 # Server port
|
||||
FILE_STORAGE_BACKEND=local # local, s3, or gcs
|
||||
```
|
||||
|
||||
### 2. Workspace Settings (`{workspace}/.autogpt/autogpt.yaml`)
|
||||
|
||||
Workspace-wide permissions that apply to **all agents** in this workspace:
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- read_file({workspace}/**)
|
||||
- write_to_file({workspace}/**)
|
||||
- list_folder({workspace}/**)
|
||||
- web_search(*)
|
||||
|
||||
deny:
|
||||
- read_file(**.env)
|
||||
- read_file(**.env.*)
|
||||
- read_file(**.key)
|
||||
- read_file(**.pem)
|
||||
- execute_shell(rm -rf:*)
|
||||
- execute_shell(sudo:*)
|
||||
```
|
||||
|
||||
Auto-generated with sensible defaults if missing.
|
||||
|
||||
### 3. Agent Settings (`{workspace}/.autogpt/agents/{id}/permissions.yaml`)
|
||||
|
||||
Agent-specific permission overrides:
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- execute_python(*)
|
||||
- web_search(*)
|
||||
|
||||
deny:
|
||||
- execute_shell(*)
|
||||
```
|
||||
|
||||
## Permissions
|
||||
|
||||
The permission system uses **pattern matching** with a **first-match-wins** evaluation order.
|
||||
|
||||
### Permission Check Order
|
||||
|
||||
1. Agent deny list → **Block**
|
||||
2. Workspace deny list → **Block**
|
||||
3. Agent allow list → **Allow**
|
||||
4. Workspace allow list → **Allow**
|
||||
5. Session denied list → **Block** (commands denied during this session)
|
||||
6. **Prompt user** → Interactive approval (if in interactive mode)
|
||||
|
||||
### Pattern Syntax
|
||||
|
||||
Format: `command_name(glob_pattern)`
|
||||
|
||||
| Pattern | Description |
|
||||
|---------|-------------|
|
||||
| `read_file({workspace}/**)` | Read any file in workspace (recursive) |
|
||||
| `write_to_file({workspace}/*.txt)` | Write only .txt files in workspace root |
|
||||
| `execute_shell(python:**)` | Execute Python commands only |
|
||||
| `execute_shell(git:*)` | Execute any git command |
|
||||
| `web_search(*)` | Allow all web searches |
|
||||
|
||||
Special tokens:
|
||||
- `{workspace}` - Replaced with actual workspace path
|
||||
- `**` - Matches any path including `/`
|
||||
- `*` - Matches any characters except `/`
|
||||
|
||||
### Interactive Approval Scopes
|
||||
|
||||
When prompted for permission, users can choose:
|
||||
|
||||
| Scope | Effect |
|
||||
|-------|--------|
|
||||
| **Once** | Allow this one time only (not saved) |
|
||||
| **Agent** | Always allow for this agent (saves to agent `permissions.yaml`) |
|
||||
| **Workspace** | Always allow for all agents (saves to `autogpt.yaml`) |
|
||||
| **Deny** | Deny this command (saves to appropriate deny list) |
|
||||
|
||||
### Default Security
|
||||
|
||||
Out of the box, the following are **denied by default**:
|
||||
- Reading sensitive files (`.env`, `.key`, `.pem`)
|
||||
- Destructive shell commands (`rm -rf`, `sudo`)
|
||||
- Operations outside the workspace directory
|
||||
@@ -1,182 +0,0 @@
|
||||
## CLI Documentation
|
||||
|
||||
This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agent stop` command will terminate any process running on port 8000.
|
||||
|
||||
### 1. Entry Point for the CLI
|
||||
|
||||
Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
|
||||
|
||||
```sh
|
||||
./run
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
agent Commands to create, start and stop agents
|
||||
benchmark Commands to start the benchmark and list tests and categories
|
||||
setup Installs dependencies needed for your system.
|
||||
```
|
||||
|
||||
If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
|
||||
|
||||
```sh
|
||||
./run COMMAND --help
|
||||
```
|
||||
|
||||
This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
|
||||
|
||||
### 2. Setup Command
|
||||
|
||||
```sh
|
||||
./run setup
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Setup initiated
|
||||
Installation has been completed.
|
||||
```
|
||||
|
||||
This command initializes the setup of the project.
|
||||
|
||||
### 3. Agents Commands
|
||||
|
||||
**a. List All Agents**
|
||||
|
||||
```sh
|
||||
./run agent list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available agents: 🤖
|
||||
🐙 forge
|
||||
🐙 autogpt
|
||||
```
|
||||
|
||||
Lists all the available agents.
|
||||
|
||||
**b. Create a New Agent**
|
||||
|
||||
```sh
|
||||
./run agent create my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
|
||||
```
|
||||
|
||||
Creates a new agent named 'my_agent'.
|
||||
|
||||
**c. Start an Agent**
|
||||
|
||||
```sh
|
||||
./run agent start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
... (ASCII Art representing the agent startup)
|
||||
[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
|
||||
[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
|
||||
```
|
||||
|
||||
Starts the 'my_agent' and displays startup ASCII art and logs.
|
||||
|
||||
**d. Stop an Agent**
|
||||
|
||||
```sh
|
||||
./run agent stop
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Agent stopped
|
||||
```
|
||||
|
||||
Stops the running agent.
|
||||
|
||||
### 4. Benchmark Commands
|
||||
|
||||
**a. List Benchmark Categories**
|
||||
|
||||
```sh
|
||||
./run benchmark categories list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available categories: 📚
|
||||
📖 code
|
||||
📖 safety
|
||||
📖 memory
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark categories.
|
||||
|
||||
**b. List Benchmark Tests**
|
||||
|
||||
```sh
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
Available tests: 📚
|
||||
📖 interface
|
||||
🔬 Search - TestSearch
|
||||
🔬 Write File - TestWriteFile
|
||||
... (and so on)
|
||||
```
|
||||
|
||||
Lists all available benchmark tests.
|
||||
|
||||
**c. Show Details of a Benchmark Test**
|
||||
|
||||
```sh
|
||||
./run benchmark tests details TestWriteFile
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
TestWriteFile
|
||||
-------------
|
||||
|
||||
Category: interface
|
||||
Task: Write the word 'Washington' to a .txt file
|
||||
... (and other details)
|
||||
```
|
||||
|
||||
Displays the details of the 'TestWriteFile' benchmark test.
|
||||
|
||||
**d. Start Benchmark for the Agent**
|
||||
|
||||
```sh
|
||||
./run benchmark start my_agent
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```
|
||||
(more details about the testing process are shown while the tests are running)
|
||||
============= 13 failed, 1 passed in 0.97s ============...
|
||||
```
|
||||
|
||||
Displays the results of the benchmark tests on 'my_agent'.
|
||||
@@ -2,7 +2,7 @@
|
||||
ARG BUILD_TYPE=dev
|
||||
|
||||
# Use an official Python base image from the Docker Hub
|
||||
FROM python:3.10-slim AS autogpt-base
|
||||
FROM python:3.12-slim AS autogpt-base
|
||||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
@@ -34,9 +34,6 @@ COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
|
||||
# Include forge so it can be used as a path dependency
|
||||
COPY forge/ ../forge
|
||||
|
||||
# Include frontend
|
||||
COPY frontend/ ../frontend
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["poetry", "run", "autogpt"]
|
||||
CMD []
|
||||
|
||||
@@ -1,173 +0,0 @@
|
||||
# Quickstart Guide
|
||||
|
||||
> For the complete getting-started tutorial series, [click here](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
|
||||
|
||||
Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
|
||||
|
||||
## System Requirements
|
||||
|
||||
This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
|
||||
|
||||
|
||||
## Getting Setup
|
||||
1. **Fork the Repository**
|
||||
To fork the repository, follow these steps:
|
||||
- Navigate to the main page of the repository.
|
||||
|
||||

|
||||
- In the top-right corner of the page, click Fork.
|
||||
|
||||

|
||||
- On the next page, select your GitHub account to create the fork.
|
||||
- Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
|
||||
|
||||
2. **Clone the Repository**
|
||||
To clone the repository, you need Git installed on your system; if you don't have it, download it from [here](https://git-scm.com/downloads). Then follow these steps:
|
||||
- Open your terminal.
|
||||
- Navigate to the directory where you want to clone the repository.
|
||||
- Run the git clone command for the fork you just created
|
||||
|
||||

|
||||
|
||||
- Then open the project in your IDE
|
||||
|
||||

|
||||
|
||||
3. **Setup the Project**
|
||||
Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
|
||||
It can be accessed by typing `./run` in the terminal.
|
||||
|
||||
The first command you need to use is `./run setup`. This will guide you through setting up your system.
|
||||
Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token, as shown in the following image:
|
||||
|
||||

|
||||
|
||||
### For Windows Users
|
||||
|
||||
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
|
||||
|
||||
#### Update WSL
|
||||
Run the following command in PowerShell or Command Prompt. It will:
|
||||
1. Enable the optional WSL and Virtual Machine Platform components.
|
||||
2. Download and install the latest Linux kernel.
|
||||
3. Set WSL 2 as the default.
|
||||
4. Download and install the Ubuntu Linux distribution (a reboot may be required).
|
||||
|
||||
```shell
|
||||
wsl --install
|
||||
```
|
||||
|
||||
For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
|
||||
|
||||
#### Resolve FileNotFoundError or "No such file or directory" Errors
|
||||
When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
|
||||
|
||||
To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script:
|
||||
|
||||
```shell
|
||||
sudo apt update
|
||||
sudo apt install dos2unix
|
||||
dos2unix ./run
|
||||
```
|
||||
|
||||
After executing the above commands, running `./run setup` should work successfully.
|
||||
|
||||
#### Store Project Files within the WSL File System
|
||||
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
|
||||
|
||||
You can keep running the command to get feedback on how far along your setup is.
|
||||
When setup is complete, the command will return output like this:
|
||||
|
||||

|
||||
|
||||
## Creating Your Agent
|
||||
|
||||
After completing the setup, the next step is to create your agent template.
|
||||
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
|
||||
|
||||
Tips for naming your agent:
|
||||
* Give it its own unique name, or name it after yourself
|
||||
* Include an important aspect of your agent in the name, such as its purpose
|
||||
|
||||
Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
|
||||
|
||||

|
||||
|
||||
## Running your Agent
|
||||
|
||||
Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
|
||||
|
||||
This starts the agent on the URL: `http://localhost:8000/`
|
||||
|
||||

|
||||
|
||||
The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
|
||||
|
||||

|
||||
|
||||
Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
|
||||
|
||||

|
||||
|
||||
When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.
|
||||
|
||||
If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which kills the process using port 8000 (which should be the agent).
|
||||
|
||||
## Benchmarking your Agent
|
||||
|
||||
The benchmarking system can also be accessed using the CLI:
|
||||
|
||||
```bash
|
||||
agpt % ./run benchmark
|
||||
Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Commands to start the benchmark and list tests and categories
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
categories Benchmark categories group command
|
||||
start Starts the benchmark command
|
||||
tests Benchmark tests group command
|
||||
agpt % ./run benchmark categories
|
||||
Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark categories group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
list List benchmark categories command
|
||||
agpt % ./run benchmark tests
|
||||
Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Benchmark tests group command
|
||||
|
||||
Options:
|
||||
--help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
details Benchmark test details command
|
||||
list List benchmark tests command
|
||||
```
|
||||
|
||||
The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
|
||||
```bash
|
||||
./run benchmark categories list
|
||||
# And what tests are available with
|
||||
./run benchmark tests list
|
||||
```
|
||||
|
||||

|
||||
|
||||
|
||||
Finally, you can run the benchmark with
|
||||
|
||||
```bash
|
||||
./run benchmark start YOUR_AGENT_NAME
|
||||
|
||||
```
|
||||
|
||||
>
|
||||
@@ -4,7 +4,7 @@ AutoGPT Classic was an experimental project to demonstrate autonomous GPT-4 oper
|
||||
|
||||
## Project Status
|
||||
|
||||
⚠️ **This project is unsupported, and dependencies will not be updated. It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform)**
|
||||
**This project is unsupported, and dependencies will not be updated.** It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform).
|
||||
|
||||
For those interested in autonomous AI agents, we recommend exploring more actively maintained alternatives or referring to this codebase for educational purposes only.
|
||||
|
||||
@@ -16,37 +16,171 @@ AutoGPT Classic was one of the first implementations of autonomous AI agents - A
|
||||
- Learn from the results and adjust its approach
|
||||
- Chain multiple actions together to achieve an objective
|
||||
|
||||
## Key Features
|
||||
|
||||
- 🔄 Autonomous task chaining
|
||||
- 🛠 Tool and API integration capabilities
|
||||
- 💾 Memory management for context retention
|
||||
- 🔍 Web browsing and information gathering
|
||||
- 📝 File operations and content creation
|
||||
- 🔄 Self-prompting and task breakdown
|
||||
|
||||
## Structure
|
||||
|
||||
The project is organized into several key components:
|
||||
- `/benchmark` - Performance testing tools
|
||||
- `/forge` - Core autonomous agent framework
|
||||
- `/frontend` - User interface components
|
||||
- `/original_autogpt` - Original implementation
|
||||
```
|
||||
classic/
|
||||
├── pyproject.toml # Single consolidated Poetry project
|
||||
├── poetry.lock # Single lock file
|
||||
├── forge/ # Core autonomous agent framework
|
||||
├── original_autogpt/ # Original implementation
|
||||
├── direct_benchmark/ # Benchmark harness
|
||||
└── benchmark/ # Challenge definitions (data)
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
While this project is no longer actively maintained, you can still explore the codebase:
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.12+
|
||||
- [Poetry](https://python-poetry.org/docs/#installation)
|
||||
|
||||
### Installation
|
||||
|
||||
1. Clone the repository:
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/Significant-Gravitas/AutoGPT.git
|
||||
cd classic
|
||||
|
||||
# Install everything
|
||||
poetry install
|
||||
```
|
||||
|
||||
2. Review the documentation:
|
||||
- For reference, see the [documentation](https://docs.agpt.co). You can browse the docs as of this commit, so they won't have changed.
|
||||
- Check `CLI-USAGE.md` for command-line interface details
|
||||
- Refer to `TROUBLESHOOTING.md` for common issues
|
||||
### Configuration
|
||||
|
||||
Configuration uses a layered system:
|
||||
|
||||
1. **Environment variables** (`.env` file)
|
||||
2. **Workspace settings** (`.autogpt/autogpt.yaml`)
|
||||
3. **Agent settings** (`.autogpt/agents/{id}/permissions.yaml`)
|
||||
|
||||
Copy the example environment file and add your API keys:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
Key environment variables:
|
||||
```bash
|
||||
# Required
|
||||
OPENAI_API_KEY=sk-...
|
||||
|
||||
# Optional LLM settings
|
||||
SMART_LLM=gpt-4o # Model for complex reasoning
|
||||
FAST_LLM=gpt-4o-mini # Model for simple tasks
|
||||
|
||||
# Optional search providers
|
||||
TAVILY_API_KEY=tvly-...
|
||||
SERPER_API_KEY=...
|
||||
|
||||
# Optional infrastructure
|
||||
LOG_LEVEL=DEBUG
|
||||
PORT=8000
|
||||
FILE_STORAGE_BACKEND=local # local, s3, or gcs
|
||||
```
|
||||
|
||||
### Running
|
||||
|
||||
All commands run from the `classic/` directory:
|
||||
|
||||
```bash
|
||||
# Run forge agent
|
||||
poetry run python -m forge
|
||||
|
||||
# Run original autogpt server
|
||||
poetry run serve --debug
|
||||
|
||||
# Run autogpt CLI
|
||||
poetry run autogpt
|
||||
```
|
||||
|
||||
Agents run on `http://localhost:8000` by default.
|
||||
|
||||
### Benchmarking
|
||||
|
||||
```bash
|
||||
poetry run direct-benchmark run
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
poetry run pytest # All tests
|
||||
poetry run pytest forge/tests/ # Forge tests only
|
||||
poetry run pytest original_autogpt/tests/ # AutoGPT tests only
|
||||
```
|
||||
|
||||
## Workspaces
|
||||
|
||||
Agents operate within a **workspace** directory that contains all agent data and files:
|
||||
|
||||
```
|
||||
{workspace}/
|
||||
├── .autogpt/
|
||||
│ ├── autogpt.yaml # Workspace-level permissions
|
||||
│ ├── ap_server.db # Agent Protocol database (server mode)
|
||||
│ └── agents/
|
||||
│ └── AutoGPT-{agent_id}/
|
||||
│ ├── state.json # Agent profile, directives, history
|
||||
│ ├── permissions.yaml # Agent-specific permissions
|
||||
│ └── workspace/ # Agent's sandboxed working directory
|
||||
```
|
||||
|
||||
- The workspace defaults to the current working directory
|
||||
- Multiple agents can coexist in the same workspace
|
||||
- Agent file access is sandboxed to their `workspace/` subdirectory
|
||||
- State persists across sessions via `state.json`
|
||||
|
||||
## Permissions
|
||||
|
||||
AutoGPT uses a **layered permission system** with pattern matching:
|
||||
|
||||
### Permission Files
|
||||
|
||||
| File | Scope | Location |
|
||||
|------|-------|----------|
|
||||
| `autogpt.yaml` | All agents in workspace | `.autogpt/autogpt.yaml` |
|
||||
| `permissions.yaml` | Single agent | `.autogpt/agents/{id}/permissions.yaml` |
|
||||
|
||||
### Permission Format
|
||||
|
||||
```yaml
|
||||
allow:
|
||||
- read_file({workspace}/**) # Read any file in workspace
|
||||
- write_to_file({workspace}/**) # Write any file in workspace
|
||||
- web_search(*) # All web searches
|
||||
|
||||
deny:
|
||||
- read_file(**.env) # Block .env files
|
||||
- execute_shell(sudo:*) # Block sudo commands
|
||||
```
|
||||
|
||||
### Check Order (First Match Wins)
|
||||
|
||||
1. Agent deny → Block
|
||||
2. Workspace deny → Block
|
||||
3. Agent allow → Allow
|
||||
4. Workspace allow → Allow
|
||||
5. Prompt user → Interactive approval
|
||||
|
||||
### Interactive Approval
|
||||
|
||||
When prompted, users can approve commands with different scopes:
|
||||
- **Once** - Allow this one time only
|
||||
- **Agent** - Always allow for this agent
|
||||
- **Workspace** - Always allow for all agents
|
||||
- **Deny** - Block this command
|
||||
|
||||
### Default Security
|
||||
|
||||
Denied by default:
|
||||
- Sensitive files (`.env`, `.key`, `.pem`)
|
||||
- Destructive commands (`rm -rf`, `sudo`)
|
||||
- Operations outside the workspace
|
||||
|
||||
## Security Notice
|
||||
|
||||
This codebase has **known vulnerabilities** and issues with its dependencies. It will not be updated to new dependencies. Use for educational purposes only.
|
||||
|
||||
## License
|
||||
|
||||
@@ -55,27 +189,3 @@ This project segment is licensed under the MIT License - see the [LICENSE](LICEN
|
||||
## Documentation
|
||||
|
||||
Please refer to the [documentation](https://docs.agpt.co) for more detailed information about the project's architecture and concepts.
|
||||
You can browse the docs as of this commit, so they won't have changed.
|
||||
|
||||
## Historical Impact
|
||||
|
||||
AutoGPT Classic played a significant role in advancing the field of autonomous AI agents:
|
||||
- Demonstrated practical implementation of AI autonomy
|
||||
- Inspired numerous derivative projects and research
|
||||
- Contributed to the development of AI agent architectures
|
||||
- Helped identify key challenges in AI autonomy
|
||||
|
||||
## Security Notice
|
||||
|
||||
If you're studying this codebase, please understand this has KNOWN vulnerabilities and issues with its dependencies. It will not be updated to new dependencies.
|
||||
|
||||
## Community & Support
|
||||
|
||||
While active development has concluded:
|
||||
- The codebase remains available for study and reference
|
||||
- Historical discussions can be found in project issues
|
||||
- Related research and developments continue in the broader AI agent community
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
Thanks to all contributors who participated in this experimental project and helped advance the field of autonomous AI agents.
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
# Ignore rules that conflict with Black code style
|
||||
extend-ignore = E203, W503
|
||||
exclude =
|
||||
__pycache__/,
|
||||
*.pyc,
|
||||
.pytest_cache/,
|
||||
venv*/,
|
||||
.venv/,
|
||||
reports/,
|
||||
agbenchmark/reports/,
|
||||
174
classic/benchmark/.gitignore
vendored
@@ -1,174 +0,0 @@
|
||||
agbenchmark_config/workspace/
|
||||
backend/backend_stdout.txt
|
||||
reports/df*.pkl
|
||||
reports/raw*
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
.idea/
|
||||
.DS_Store
|
||||
```
|
||||
secrets.json
|
||||
agbenchmark_config/challenges_already_beaten.json
|
||||
agbenchmark_config/challenges/pri_*
|
||||
agbenchmark_config/updates.json
|
||||
agbenchmark_config/reports/*
|
||||
agbenchmark_config/reports/success_rate.json
|
||||
agbenchmark_config/reports/regression_tests.json
|
||||
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 AutoGPT
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,25 +0,0 @@
|
||||
# Auto-GPT Benchmarks
|
||||
|
||||
Built for the purpose of benchmarking the performance of agents regardless of how they work.
|
||||
|
||||
Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.
|
||||
|
||||
Save time and money while doing it through smart dependencies. The best part? It's all automated.
|
||||
|
||||
## Scores:
|
||||
|
||||
<img width="733" alt="Screenshot 2023-07-25 at 10 35 01 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/98963e0b-18b9-4b17-9a6a-4d3e4418af70">
|
||||
|
||||
## Ranking overall:
|
||||
|
||||
- 1- [Beebot](https://github.com/AutoPackAI/beebot)
|
||||
- 2- [mini-agi](https://github.com/muellerberndt/mini-agi)
|
||||
- 3- [Auto-GPT](https://github.com/Significant-Gravitas/AutoGPT)
|
||||
|
||||
## Detailed results:
|
||||
|
||||
<img width="733" alt="Screenshot 2023-07-25 at 10 42 15 AM" src="https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/assets/9652976/39be464c-c842-4437-b28a-07d878542a83">
|
||||
|
||||
[Click here to see the results and the raw data!](https://docs.google.com/spreadsheets/d/1WXm16P2AHNbKpkOI0LYBpcsGG0O7D8HYTG5Uj0PaJjA/edit#gid=203558751)
|
||||
|
||||
More agents coming soon!
|
||||
@@ -1,69 +0,0 @@
|
||||
## As a user
|
||||
|
||||
1. `pip install auto-gpt-benchmarks`
|
||||
2. Add boilerplate code to run and kill agent
|
||||
3. `agbenchmark`
|
||||
- `--category challenge_category` to run tests in a specific category
|
||||
- `--mock` to only run mock tests if they exist for each test
|
||||
- `--noreg` to skip any tests that have passed in the past. If you run without this flag and a previously passing challenge fails, it will no longer be counted as a regression test
|
||||
4. We call boilerplate code for your agent
|
||||
5. Show pass rate of tests, logs, and any other metrics
|
||||
|
||||
## Contributing
|
||||
|
||||
##### Diagrams: https://whimsical.com/agbenchmark-5n4hXBq1ZGzBwRsK4TVY7x
|
||||
|
||||
### To run the existing mocks
|
||||
|
||||
1. clone the repo `auto-gpt-benchmarks`
|
||||
2. `pip install poetry`
|
||||
3. `poetry shell`
|
||||
4. `poetry install`
|
||||
5. `cp .env_example .env`
|
||||
6. `git submodule update --init --remote --recursive`
|
||||
7. `uvicorn server:app --reload`
|
||||
8. `agbenchmark --mock`
|
||||
Keep config the same and watch the logs :)
|
||||
|
||||
### To run with mini-agi
|
||||
|
||||
1. Navigate to `auto-gpt-benchmarks/agent/mini-agi`
|
||||
2. `pip install -r requirements.txt`
|
||||
3. `cp .env_example .env`, set `PROMPT_USER=false` and add your `OPENAI_API_KEY=`. Set `MODEL="gpt-3.5-turbo"` if you don't have access to `gpt-4` yet. Also make sure you have Python 3.10+ installed
|
||||
4. Set `AGENT_NAME=mini-agi` in the `.env` file, along with where you want your `REPORTS_FOLDER` to be
|
||||
5. Make sure to follow the commands above, then run `agbenchmark` without the `--mock` flag
|
||||
|
||||
- To add a requirement, use `poetry add <requirement>`.
|
||||
|
||||
Feel free to create PRs to merge with `main` at will (but also feel free to ask for review); if you can't, send a message in the R&D chat for access.
|
||||
|
||||
If you push at any point and break things (it happens to everyone), fix it ASAP. Step 1 is to revert `master` to the last working commit.
|
||||
|
||||
Let people know what your beautiful code does; document everything well.
|
||||
|
||||
Share your progress :)
|
||||
|
||||
#### Dataset
|
||||
|
||||
Manually created challenges, existing challenges within Auto-GPT, and https://osu-nlp-group.github.io/Mind2Web/
|
||||
|
||||
## How do I add new agents to agbenchmark?
|
||||
|
||||
Example with smol developer.
|
||||
|
||||
1- Create a GitHub branch with your agent, following the same pattern as this example:
|
||||
|
||||
https://github.com/smol-ai/developer/pull/114/files
|
||||
|
||||
2- Create the submodule and the GitHub workflow by following the same pattern as this example:
|
||||
|
||||
https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/pull/48/files
|
||||
|
||||
## How do I run an agent in different environments?
|
||||
|
||||
**To just use the benchmark for your agent**, `pip install` the package and run `agbenchmark`.
|
||||
|
||||
**For internal Auto-GPT CI runs**, specify the `AGENT_NAME` you want to use and set the `HOME_ENV`.
|
||||
Ex. `AGENT_NAME=mini-agi`
|
||||
|
||||
**To develop an agent alongside the benchmark**, specify the `AGENT_NAME` you want to use and add it as a submodule to the repo.
|
||||
@@ -1,352 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
import click
|
||||
from click_default_group import DefaultGroup
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.utils.logging import configure_logging
|
||||
|
||||
load_dotenv()
|
||||
|
||||
# try:
|
||||
# if os.getenv("HELICONE_API_KEY"):
|
||||
# import helicone # noqa
|
||||
|
||||
# helicone_enabled = True
|
||||
# else:
|
||||
# helicone_enabled = False
|
||||
# except ImportError:
|
||||
# helicone_enabled = False
|
||||
|
||||
|
||||
class InvalidInvocationError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BENCHMARK_START_TIME_DT = datetime.now(timezone.utc)
|
||||
BENCHMARK_START_TIME = BENCHMARK_START_TIME_DT.strftime("%Y-%m-%dT%H:%M:%S+00:00")
|
||||
|
||||
|
||||
# if helicone_enabled:
|
||||
# from helicone.lock import HeliconeLockManager
|
||||
|
||||
# HeliconeLockManager.write_custom_property(
|
||||
# "benchmark_start_time", BENCHMARK_START_TIME
|
||||
# )
|
||||
|
||||
|
||||
@click.group(cls=DefaultGroup, default_if_no_args=True)
|
||||
@click.option("--debug", is_flag=True, help="Enable debug output")
|
||||
def cli(
|
||||
debug: bool,
|
||||
) -> Any:
|
||||
configure_logging(logging.DEBUG if debug else logging.INFO)
|
||||
|
||||
|
||||
@cli.command(hidden=True)
|
||||
def start():
|
||||
raise DeprecationWarning(
|
||||
"`agbenchmark start` is deprecated. Use `agbenchmark run` instead."
|
||||
)
|
||||
|
||||
|
||||
@cli.command(default=True)
|
||||
@click.option(
|
||||
"-N", "--attempts", default=1, help="Number of times to run each challenge."
|
||||
)
|
||||
@click.option(
|
||||
"-c",
|
||||
"--category",
|
||||
multiple=True,
|
||||
help="(+) Select a category to run.",
|
||||
)
|
||||
@click.option(
|
||||
"-s",
|
||||
"--skip-category",
|
||||
multiple=True,
|
||||
help="(+) Exclude a category from running.",
|
||||
)
|
||||
@click.option("--test", multiple=True, help="(+) Select a test to run.")
|
||||
@click.option("--maintain", is_flag=True, help="Run only regression tests.")
|
||||
@click.option("--improve", is_flag=True, help="Run only non-regression tests.")
|
||||
@click.option(
|
||||
"--explore",
|
||||
is_flag=True,
|
||||
help="Run only challenges that have never been beaten.",
|
||||
)
|
||||
@click.option(
|
||||
"--no-dep",
|
||||
is_flag=True,
|
||||
help="Run all (selected) challenges, regardless of dependency success/failure.",
|
||||
)
|
||||
@click.option("--cutoff", type=int, help="Override the challenge time limit (seconds).")
|
||||
@click.option("--nc", is_flag=True, help="Disable the challenge time limit.")
|
||||
@click.option("--mock", is_flag=True, help="Run with mock")
|
||||
@click.option("--keep-answers", is_flag=True, help="Keep answers")
|
||||
@click.option(
|
||||
"--backend",
|
||||
is_flag=True,
|
||||
help="Write log output to a file instead of the terminal.",
|
||||
)
|
||||
# @click.argument(
|
||||
# "agent_path",
|
||||
# type=click.Path(exists=True, file_okay=False, path_type=Path),
|
||||
# required=False,
|
||||
# )
|
||||
def run(
|
||||
maintain: bool,
|
||||
improve: bool,
|
||||
explore: bool,
|
||||
mock: bool,
|
||||
no_dep: bool,
|
||||
nc: bool,
|
||||
keep_answers: bool,
|
||||
test: tuple[str],
|
||||
category: tuple[str],
|
||||
skip_category: tuple[str],
|
||||
attempts: int,
|
||||
cutoff: Optional[int] = None,
|
||||
backend: Optional[bool] = False,
|
||||
# agent_path: Optional[Path] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Run the benchmark on the agent in the current directory.
|
||||
|
||||
Options marked with (+) can be specified multiple times, to select multiple items.
|
||||
"""
|
||||
from agbenchmark.main import run_benchmark, validate_args
|
||||
|
||||
agbenchmark_config = AgentBenchmarkConfig.load()
|
||||
logger.debug(f"agbenchmark_config: {agbenchmark_config.agbenchmark_config_dir}")
|
||||
try:
|
||||
validate_args(
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
no_cutoff=nc,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
except InvalidInvocationError as e:
|
||||
logger.error("Error: " + "\n".join(e.args))
|
||||
sys.exit(1)
|
||||
|
||||
original_stdout = sys.stdout # Save the original standard output
|
||||
exit_code = None
|
||||
|
||||
if backend:
|
||||
with open("backend/backend_stdout.txt", "w") as f:
|
||||
sys.stdout = f
|
||||
exit_code = run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
mock=mock,
|
||||
no_dep=no_dep,
|
||||
no_cutoff=nc,
|
||||
keep_answers=keep_answers,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
attempts_per_challenge=attempts,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
|
||||
sys.stdout = original_stdout
|
||||
|
||||
else:
|
||||
exit_code = run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
mock=mock,
|
||||
no_dep=no_dep,
|
||||
no_cutoff=nc,
|
||||
keep_answers=keep_answers,
|
||||
tests=test,
|
||||
categories=category,
|
||||
skip_categories=skip_category,
|
||||
attempts_per_challenge=attempts,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--port", type=int, help="Port to run the API on.")
|
||||
def serve(port: Optional[int] = None):
|
||||
"""Serve the benchmark frontend and API on port 8080."""
|
||||
import uvicorn
|
||||
|
||||
from agbenchmark.app import setup_fastapi_app
|
||||
|
||||
config = AgentBenchmarkConfig.load()
|
||||
app = setup_fastapi_app(config)
|
||||
|
||||
# Run the FastAPI application using uvicorn
|
||||
port = port or int(os.getenv("PORT", 8080))
|
||||
uvicorn.run(app, host="0.0.0.0", port=port)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def config():
|
||||
"""Displays info regarding the present AGBenchmark config."""
|
||||
from .utils.utils import pretty_print_model
|
||||
|
||||
try:
|
||||
config = AgentBenchmarkConfig.load()
|
||||
except FileNotFoundError as e:
|
||||
click.echo(e, err=True)
|
||||
return 1
|
||||
|
||||
pretty_print_model(config, include_header=False)
|
||||
|
||||
|
||||
@cli.group()
|
||||
def challenge():
|
||||
logging.getLogger().setLevel(logging.WARNING)
|
||||
|
||||
|
||||
@challenge.command("list")
|
||||
@click.option(
|
||||
"--all", "include_unavailable", is_flag=True, help="Include unavailable challenges."
|
||||
)
|
||||
@click.option(
|
||||
"--names", "only_names", is_flag=True, help="List only the challenge names."
|
||||
)
|
||||
@click.option("--json", "output_json", is_flag=True)
|
||||
def list_challenges(include_unavailable: bool, only_names: bool, output_json: bool):
|
||||
"""Lists [available|all] challenges."""
|
||||
import json
|
||||
|
||||
from tabulate import tabulate
|
||||
|
||||
from .challenges.builtin import load_builtin_challenges
|
||||
from .challenges.webarena import load_webarena_challenges
|
||||
from .utils.data_types import Category, DifficultyLevel
|
||||
from .utils.utils import sorted_by_enum_index
|
||||
|
||||
DIFFICULTY_COLORS = {
|
||||
difficulty: color
|
||||
for difficulty, color in zip(
|
||||
DifficultyLevel,
|
||||
["black", "blue", "cyan", "green", "yellow", "red", "magenta", "white"],
|
||||
)
|
||||
}
|
||||
CATEGORY_COLORS = {
|
||||
category: f"bright_{color}"
|
||||
for category, color in zip(
|
||||
Category,
|
||||
["blue", "cyan", "green", "yellow", "magenta", "red", "white", "black"],
|
||||
)
|
||||
}
|
||||
|
||||
# Load challenges
|
||||
challenges = filter(
|
||||
lambda c: c.info.available or include_unavailable,
|
||||
[
|
||||
*load_builtin_challenges(),
|
||||
*load_webarena_challenges(skip_unavailable=False),
|
||||
],
|
||||
)
|
||||
challenges = sorted_by_enum_index(
|
||||
challenges, DifficultyLevel, key=lambda c: c.info.difficulty
|
||||
)
|
||||
|
||||
if only_names:
|
||||
if output_json:
|
||||
click.echo(json.dumps([c.info.name for c in challenges]))
|
||||
return
|
||||
|
||||
for c in challenges:
|
||||
click.echo(
|
||||
click.style(c.info.name, fg=None if c.info.available else "black")
|
||||
)
|
||||
return
|
||||
|
||||
if output_json:
|
||||
click.echo(
|
||||
json.dumps([json.loads(c.info.model_dump_json()) for c in challenges])
|
||||
)
|
||||
return
|
||||
|
||||
headers = tuple(
|
||||
click.style(h, bold=True) for h in ("Name", "Difficulty", "Categories")
|
||||
)
|
||||
table = [
|
||||
tuple(
|
||||
v if challenge.info.available else click.style(v, fg="black")
|
||||
for v in (
|
||||
challenge.info.name,
|
||||
(
|
||||
click.style(
|
||||
challenge.info.difficulty.value,
|
||||
fg=DIFFICULTY_COLORS[challenge.info.difficulty],
|
||||
)
|
||||
if challenge.info.difficulty
|
||||
else click.style("-", fg="black")
|
||||
),
|
||||
" ".join(
|
||||
click.style(cat.value, fg=CATEGORY_COLORS[cat])
|
||||
for cat in sorted_by_enum_index(challenge.info.category, Category)
|
||||
),
|
||||
)
|
||||
)
|
||||
for challenge in challenges
|
||||
]
|
||||
click.echo(tabulate(table, headers=headers))
|
||||
|
||||
|
||||
@challenge.command()
|
||||
@click.option("--json", is_flag=True)
|
||||
@click.argument("name")
|
||||
def info(name: str, json: bool):
|
||||
from itertools import chain
|
||||
|
||||
from .challenges.builtin import load_builtin_challenges
|
||||
from .challenges.webarena import load_webarena_challenges
|
||||
from .utils.utils import pretty_print_model
|
||||
|
||||
for challenge in chain(
|
||||
load_builtin_challenges(),
|
||||
load_webarena_challenges(skip_unavailable=False),
|
||||
):
|
||||
if challenge.info.name != name:
|
||||
continue
|
||||
|
||||
if json:
|
||||
click.echo(challenge.info.model_dump_json())
|
||||
break
|
||||
|
||||
pretty_print_model(challenge.info)
|
||||
break
|
||||
else:
|
||||
click.echo(click.style(f"Unknown challenge '{name}'", fg="red"), err=True)
|
||||
|
||||
|
||||
@cli.command()
|
||||
def version():
|
||||
"""Print version info for the AGBenchmark application."""
|
||||
import toml
|
||||
|
||||
package_root = Path(__file__).resolve().parent.parent
|
||||
pyproject = toml.load(package_root / "pyproject.toml")
|
||||
version = pyproject["tool"]["poetry"]["version"]
|
||||
click.echo(f"AGBenchmark version {version}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
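# Hedged usage sketch, not part of the diff above: the `run` command can be exercised
# without a live agent via click's test runner. The challenge name "WriteFile" is a
# placeholder; an agbenchmark_config folder is expected in the working directory or
# one of its ancestors.
from click.testing import CliRunner


def _smoke_test_cli() -> None:
    runner = CliRunner()
    # Equivalent to `agbenchmark run --mock --test WriteFile` on the command line.
    result = runner.invoke(cli, ["run", "--mock", "--test", "WriteFile"])
    print(result.exit_code)
    print(result.output)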
@@ -1,111 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import AsyncIterator, Optional
|
||||
|
||||
from agent_protocol_client import (
|
||||
AgentApi,
|
||||
ApiClient,
|
||||
Configuration,
|
||||
Step,
|
||||
TaskRequestBody,
|
||||
)
|
||||
|
||||
from agbenchmark.agent_interface import get_list_of_file_paths
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def run_api_agent(
|
||||
task: str,
|
||||
config: AgentBenchmarkConfig,
|
||||
timeout: int,
|
||||
artifacts_location: Optional[Path] = None,
|
||||
*,
|
||||
mock: bool = False,
|
||||
) -> AsyncIterator[Step]:
|
||||
configuration = Configuration(host=config.host)
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
task_request_body = TaskRequestBody(input=task, additional_input=None)
|
||||
|
||||
start_time = time.time()
|
||||
response = await api_instance.create_agent_task(
|
||||
task_request_body=task_request_body
|
||||
)
|
||||
task_id = response.task_id
|
||||
|
||||
if artifacts_location:
|
||||
logger.debug("Uploading task input artifacts to agent...")
|
||||
await upload_artifacts(
|
||||
api_instance, artifacts_location, task_id, "artifacts_in"
|
||||
)
|
||||
|
||||
logger.debug("Running agent until finished or timeout...")
|
||||
while True:
|
||||
step = await api_instance.execute_agent_task_step(task_id=task_id)
|
||||
yield step
|
||||
|
||||
if time.time() - start_time > timeout:
|
||||
raise TimeoutError("Time limit exceeded")
|
||||
if step and mock:
|
||||
step.is_last = True
|
||||
if not step or step.is_last:
|
||||
break
|
||||
|
||||
if artifacts_location:
|
||||
# In "mock" mode, we cheat by giving the correct artifacts to pass the test
|
||||
if mock:
|
||||
logger.debug("Uploading mock artifacts to agent...")
|
||||
await upload_artifacts(
|
||||
api_instance, artifacts_location, task_id, "artifacts_out"
|
||||
)
|
||||
|
||||
logger.debug("Downloading agent artifacts...")
|
||||
await download_agent_artifacts_into_folder(
|
||||
api_instance, task_id, config.temp_folder
|
||||
)
|
||||
|
||||
|
||||
async def download_agent_artifacts_into_folder(
|
||||
api_instance: AgentApi, task_id: str, folder: Path
|
||||
):
|
||||
artifacts = await api_instance.list_agent_task_artifacts(task_id=task_id)
|
||||
|
||||
for artifact in artifacts.artifacts:
|
||||
# Determine the folder to download this artifact into, based on its relative path
|
||||
if artifact.relative_path:
|
||||
path: str = (
|
||||
artifact.relative_path
|
||||
if not artifact.relative_path.startswith("/")
|
||||
else artifact.relative_path[1:]
|
||||
)
|
||||
folder = (folder / path).parent
|
||||
|
||||
if not folder.exists():
|
||||
folder.mkdir(parents=True)
|
||||
|
||||
file_path = folder / artifact.file_name
|
||||
logger.debug(f"Downloading agent artifact {artifact.file_name} to {folder}")
|
||||
with open(file_path, "wb") as f:
|
||||
content = await api_instance.download_agent_task_artifact(
|
||||
task_id=task_id, artifact_id=artifact.artifact_id
|
||||
)
|
||||
|
||||
f.write(content)
|
||||
|
||||
|
||||
async def upload_artifacts(
|
||||
api_instance: AgentApi, artifacts_location: Path, task_id: str, type: str
|
||||
) -> None:
|
||||
for file_path in get_list_of_file_paths(artifacts_location, type):
|
||||
relative_path: Optional[str] = "/".join(
|
||||
str(file_path).split(f"{type}/", 1)[-1].split("/")[:-1]
|
||||
)
|
||||
if not relative_path:
|
||||
relative_path = None
|
||||
|
||||
await api_instance.upload_agent_task_artifacts(
|
||||
task_id=task_id, file=str(file_path), relative_path=relative_path
|
||||
)
|
||||
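# Hedged usage sketch, not part of the module above: run_api_agent() is an async
# generator, so a caller drives the agent one step at a time. The task string and
# timeout are illustrative values; a compliant agent must be listening at config.host.
import asyncio


async def _demo_run(config: AgentBenchmarkConfig) -> None:
    async for step in run_api_agent(
        task="Write the word 'Washington' to a .txt file",
        config=config,
        timeout=60,
    ):
        print(f"Step completed (is_last={step.is_last})")


if __name__ == "__main__":
    asyncio.run(_demo_run(AgentBenchmarkConfig.load()))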
@@ -1,27 +0,0 @@
|
||||
import os
import shutil
from pathlib import Path

from dotenv import load_dotenv

load_dotenv()

HELICONE_GRAPHQL_LOGS = os.getenv("HELICONE_GRAPHQL_LOGS", "").lower() == "true"


def get_list_of_file_paths(
    challenge_dir_path: str | Path, artifact_folder_name: str
) -> list[Path]:
    source_dir = Path(challenge_dir_path) / artifact_folder_name
    if not source_dir.exists():
        return []
    return list(source_dir.iterdir())


def copy_challenge_artifacts_into_workspace(
    challenge_dir_path: str | Path, artifact_folder_name: str, workspace: str | Path
) -> None:
    file_paths = get_list_of_file_paths(challenge_dir_path, artifact_folder_name)
    for file_path in file_paths:
        if file_path.is_file():
            shutil.copy(file_path, workspace)
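# Hedged usage sketch, not part of the module above: copies a challenge's input
# artifacts into an agent workspace. Both paths are placeholders; "artifacts_in" is
# the folder name used by the callers elsewhere in agbenchmark.
if __name__ == "__main__":
    copy_challenge_artifacts_into_workspace(
        challenge_dir_path=Path("challenges/abilities/write_file"),  # placeholder
        artifact_folder_name="artifacts_in",
        workspace=Path("/tmp/agent_workspace"),  # placeholder, must exist
    )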
@@ -1,339 +0,0 @@
|
||||
import datetime
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from collections import deque
|
||||
from multiprocessing import Process
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
import psutil
|
||||
from agent_protocol_client import AgentApi, ApiClient, ApiException, Configuration
|
||||
from agent_protocol_client.models import Task, TaskRequestBody
|
||||
from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel, ConfigDict, ValidationError
|
||||
|
||||
from agbenchmark.challenges import ChallengeInfo
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.report_types_v2 import (
|
||||
BenchmarkRun,
|
||||
Metrics,
|
||||
RepositoryInfo,
|
||||
RunDetails,
|
||||
TaskInfo,
|
||||
)
|
||||
from agbenchmark.schema import TaskEvalRequestBody
|
||||
from agbenchmark.utils.utils import write_pretty_json
|
||||
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CHALLENGES: dict[str, ChallengeInfo] = {}
|
||||
challenges_path = Path(__file__).parent / "challenges"
|
||||
challenge_spec_files = deque(
|
||||
glob.glob(
|
||||
f"{challenges_path}/**/data.json",
|
||||
recursive=True,
|
||||
)
|
||||
)
|
||||
|
||||
logger.debug("Loading challenges...")
|
||||
while challenge_spec_files:
|
||||
challenge_spec_file = Path(challenge_spec_files.popleft())
|
||||
challenge_relpath = challenge_spec_file.relative_to(challenges_path.parent)
|
||||
if challenge_relpath.is_relative_to("challenges/deprecated"):
|
||||
continue
|
||||
|
||||
logger.debug(f"Loading {challenge_relpath}...")
|
||||
try:
|
||||
challenge_info = ChallengeInfo.model_validate_json(
|
||||
challenge_spec_file.read_text()
|
||||
)
|
||||
except ValidationError as e:
|
||||
if logging.getLogger().level == logging.DEBUG:
|
||||
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
|
||||
logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
|
||||
continue
|
||||
|
||||
if not challenge_info.eval_id:
|
||||
challenge_info.eval_id = str(uuid.uuid4())
|
||||
# this will sort all the keys of the JSON systematically
|
||||
# so that the order is always the same
|
||||
write_pretty_json(challenge_info.model_dump(), challenge_spec_file)
|
||||
|
||||
CHALLENGES[challenge_info.eval_id] = challenge_info
|
||||
|
||||
|
||||
class BenchmarkTaskInfo(BaseModel):
|
||||
task_id: str
|
||||
start_time: datetime.datetime
|
||||
challenge_info: ChallengeInfo
|
||||
|
||||
|
||||
task_informations: dict[str, BenchmarkTaskInfo] = {}
|
||||
|
||||
|
||||
def find_agbenchmark_without_uvicorn():
|
||||
pids = []
|
||||
for process in psutil.process_iter(
|
||||
attrs=[
|
||||
"pid",
|
||||
"cmdline",
|
||||
"name",
|
||||
"username",
|
||||
"status",
|
||||
"cpu_percent",
|
||||
"memory_info",
|
||||
"create_time",
|
||||
"cwd",
|
||||
"connections",
|
||||
]
|
||||
):
|
||||
try:
|
||||
# Convert the process.info dictionary values to strings and concatenate them
|
||||
full_info = " ".join([str(v) for k, v in process.as_dict().items()])
|
||||
|
||||
if "agbenchmark" in full_info and "uvicorn" not in full_info:
|
||||
pids.append(process.pid)
|
||||
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
|
||||
pass
|
||||
return pids
|
||||
|
||||
|
||||
class CreateReportRequest(BaseModel):
|
||||
test: str
|
||||
test_run_id: str
|
||||
# category: Optional[str] = []
|
||||
mock: Optional[bool] = False
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
|
||||
updates_list = []
|
||||
|
||||
origins = [
|
||||
"http://localhost:8000",
|
||||
"http://localhost:8080",
|
||||
"http://127.0.0.1:5000",
|
||||
"http://localhost:5000",
|
||||
]
|
||||
|
||||
|
||||
def stream_output(pipe):
|
||||
for line in pipe:
|
||||
print(line, end="")
|
||||
|
||||
|
||||
def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
|
||||
from agbenchmark.agent_api_interface import upload_artifacts
|
||||
from agbenchmark.challenges import get_challenge_from_source_uri
|
||||
from agbenchmark.main import run_benchmark
|
||||
|
||||
configuration = Configuration(
|
||||
host=agbenchmark_config.host or "http://localhost:8000"
|
||||
)
|
||||
app = FastAPI()
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=origins,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
router = APIRouter()
|
||||
|
||||
@router.post("/reports")
|
||||
def run_single_test(body: CreateReportRequest) -> dict:
|
||||
pids = find_agbenchmark_without_uvicorn()
|
||||
logger.info(f"pids already running with agbenchmark: {pids}")
|
||||
|
||||
logger.debug(f"Request to /reports: {body.model_dump()}")
|
||||
|
||||
# Start the benchmark in a separate thread
|
||||
benchmark_process = Process(
|
||||
target=lambda: run_benchmark(
|
||||
config=agbenchmark_config,
|
||||
tests=(body.test,),
|
||||
mock=body.mock or False,
|
||||
)
|
||||
)
|
||||
benchmark_process.start()
|
||||
|
||||
# Wait for the benchmark to finish, with a timeout of 200 seconds
|
||||
timeout = 200
|
||||
start_time = time.time()
|
||||
while benchmark_process.is_alive():
|
||||
if time.time() - start_time > timeout:
|
||||
logger.warning(f"Benchmark run timed out after {timeout} seconds")
|
||||
benchmark_process.terminate()
|
||||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
logger.debug(f"Benchmark finished running in {time.time() - start_time} s")
|
||||
|
||||
# List all folders in the current working directory
|
||||
reports_folder = agbenchmark_config.reports_folder
|
||||
folders = [folder for folder in reports_folder.iterdir() if folder.is_dir()]
|
||||
|
||||
# Sort the folders based on their names
|
||||
sorted_folders = sorted(folders, key=lambda x: x.name)
|
||||
|
||||
# Get the last folder
|
||||
latest_folder = sorted_folders[-1] if sorted_folders else None
|
||||
|
||||
# Read report.json from this folder
|
||||
if latest_folder:
|
||||
report_path = latest_folder / "report.json"
|
||||
logger.debug(f"Getting latest report from {report_path}")
|
||||
if report_path.exists():
|
||||
with report_path.open() as file:
|
||||
data = json.load(file)
|
||||
logger.debug(f"Report data: {data}")
|
||||
else:
|
||||
raise HTTPException(
|
||||
502,
|
||||
"Could not get result after running benchmark: "
|
||||
f"'report.json' does not exist in '{latest_folder}'",
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
504, "Could not get result after running benchmark: no reports found"
|
||||
)
|
||||
|
||||
return data
|
||||
|
||||
@router.post("/agent/tasks", tags=["agent"])
|
||||
async def create_agent_task(task_eval_request: TaskEvalRequestBody) -> Task:
|
||||
"""
|
||||
Creates a new task using the provided TaskEvalRequestBody and returns a Task.
|
||||
|
||||
Args:
|
||||
task_eval_request: `TaskRequestBody` including an eval_id.
|
||||
|
||||
Returns:
|
||||
Task: A new task with task_id, input, additional_input,
|
||||
and empty lists for artifacts and steps.
|
||||
|
||||
Example:
|
||||
Request (TaskEvalRequestBody defined in schema.py):
|
||||
{
|
||||
...,
|
||||
"eval_id": "50da533e-3904-4401-8a07-c49adf88b5eb"
|
||||
}
|
||||
|
||||
Response (Task defined in `agent_protocol_client.models`):
|
||||
{
|
||||
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
|
||||
"input": "Write the word 'Washington' to a .txt file",
|
||||
"artifacts": []
|
||||
}
|
||||
"""
|
||||
try:
|
||||
challenge_info = CHALLENGES[task_eval_request.eval_id]
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
task_input = challenge_info.task
|
||||
|
||||
task_request_body = TaskRequestBody(
|
||||
input=task_input, additional_input=None
|
||||
)
|
||||
task_response = await api_instance.create_agent_task(
|
||||
task_request_body=task_request_body
|
||||
)
|
||||
task_info = BenchmarkTaskInfo(
|
||||
task_id=task_response.task_id,
|
||||
start_time=datetime.datetime.now(datetime.timezone.utc),
|
||||
challenge_info=challenge_info,
|
||||
)
|
||||
task_informations[task_info.task_id] = task_info
|
||||
|
||||
if input_artifacts_dir := challenge_info.task_artifacts_dir:
|
||||
await upload_artifacts(
|
||||
api_instance,
|
||||
input_artifacts_dir,
|
||||
task_response.task_id,
|
||||
"artifacts_in",
|
||||
)
|
||||
return task_response
|
||||
except ApiException as e:
|
||||
logger.error(f"Error whilst trying to create a task:\n{e}")
|
||||
logger.error(
|
||||
"The above error was caused while processing request: "
|
||||
f"{task_eval_request}"
|
||||
)
|
||||
raise HTTPException(500)
|
||||
|
||||
@router.post("/agent/tasks/{task_id}/steps")
|
||||
async def proxy(request: Request, task_id: str):
|
||||
timeout = httpx.Timeout(300.0, read=300.0) # 5 minutes
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
# Construct the new URL
|
||||
new_url = f"{configuration.host}/ap/v1/agent/tasks/{task_id}/steps"
|
||||
|
||||
# Forward the request
|
||||
response = await client.post(
|
||||
new_url,
|
||||
content=await request.body(),
|
||||
headers=dict(request.headers),
|
||||
)
|
||||
|
||||
# Return the response from the forwarded request
|
||||
return Response(content=response.content, status_code=response.status_code)
|
||||
|
||||
@router.post("/agent/tasks/{task_id}/evaluations")
|
||||
async def create_evaluation(task_id: str) -> BenchmarkRun:
|
||||
task_info = task_informations[task_id]
|
||||
challenge = get_challenge_from_source_uri(task_info.challenge_info.source_uri)
|
||||
try:
|
||||
async with ApiClient(configuration) as api_client:
|
||||
api_instance = AgentApi(api_client)
|
||||
eval_results = await challenge.evaluate_task_state(
|
||||
api_instance, task_id
|
||||
)
|
||||
|
||||
eval_info = BenchmarkRun(
|
||||
repository_info=RepositoryInfo(),
|
||||
run_details=RunDetails(
|
||||
command=f"agbenchmark --test={challenge.info.name}",
|
||||
benchmark_start_time=(
|
||||
task_info.start_time.strftime("%Y-%m-%dT%H:%M:%S+00:00")
|
||||
),
|
||||
test_name=challenge.info.name,
|
||||
),
|
||||
task_info=TaskInfo(
|
||||
data_path=challenge.info.source_uri,
|
||||
is_regression=None,
|
||||
category=[c.value for c in challenge.info.category],
|
||||
task=challenge.info.task,
|
||||
answer=challenge.info.reference_answer or "",
|
||||
description=challenge.info.description or "",
|
||||
),
|
||||
metrics=Metrics(
|
||||
success=all(e.passed for e in eval_results),
|
||||
success_percentage=(
|
||||
100 * sum(e.score for e in eval_results) / len(eval_results)
|
||||
if eval_results # avoid division by 0
|
||||
else 0
|
||||
),
|
||||
attempted=True,
|
||||
),
|
||||
config={},
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"Returning evaluation data:\n{eval_info.model_dump_json(indent=4)}"
|
||||
)
|
||||
return eval_info
|
||||
except ApiException as e:
|
||||
logger.error(f"Error {e} whilst trying to evaluate task: {task_id}")
|
||||
raise HTTPException(500)
|
||||
|
||||
app.include_router(router, prefix="/ap/v1")
|
||||
|
||||
return app
|
||||
@@ -1,128 +0,0 @@
|
||||
import json
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import Field, ValidationInfo, field_validator
|
||||
from pydantic_settings import BaseSettings
|
||||
|
||||
|
||||
def _calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
|
||||
"""
|
||||
Calculates the path to the directory where the test report will be saved.
|
||||
"""
|
||||
# Ensure the reports path exists
|
||||
base_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Get current UTC date-time stamp
|
||||
date_stamp = benchmark_start_time.strftime("%Y%m%dT%H%M%S")
|
||||
|
||||
# Default run name
|
||||
run_name = "full_run"
|
||||
|
||||
# Map command-line arguments to their respective labels
|
||||
arg_labels = {
|
||||
"--test": None,
|
||||
"--category": None,
|
||||
"--maintain": "maintain",
|
||||
"--improve": "improve",
|
||||
"--explore": "explore",
|
||||
}
|
||||
|
||||
# Identify the relevant command-line argument
|
||||
for arg, label in arg_labels.items():
|
||||
if arg in sys.argv:
|
||||
test_arg = sys.argv[sys.argv.index(arg) + 1] if label is None else None
|
||||
run_name = arg.strip("--")
|
||||
if test_arg:
|
||||
run_name = f"{run_name}_{test_arg}"
|
||||
break
|
||||
|
||||
# Create the full new directory path with ISO standard UTC date-time stamp
|
||||
report_path = base_path / f"{date_stamp}_{run_name}"
|
||||
|
||||
# Ensure the new directory is created
|
||||
# FIXME: this is not a desirable side-effect of loading the config
|
||||
report_path.mkdir(exist_ok=True)
|
||||
|
||||
return report_path
|
||||
|
||||
|
||||
class AgentBenchmarkConfig(BaseSettings, extra="allow"):
|
||||
"""
|
||||
Configuration model and loader for the AGBenchmark.
|
||||
|
||||
Projects that want to use AGBenchmark should contain an agbenchmark_config folder
|
||||
with a config.json file that - at minimum - specifies the `host` at which the
|
||||
subject application exposes an Agent Protocol compliant API.
|
||||
"""
|
||||
|
||||
agbenchmark_config_dir: Path = Field(exclude=True)
|
||||
"""Path to the agbenchmark_config folder of the subject agent application."""
|
||||
|
||||
categories: list[str] | None = None
|
||||
"""Categories to benchmark the agent for. If omitted, all categories are assumed."""
|
||||
|
||||
host: str
|
||||
"""Host (scheme://address:port) of the subject agent application."""
|
||||
|
||||
reports_folder: Path = Field(None)
|
||||
"""
|
||||
Path to the folder where new reports should be stored.
|
||||
Defaults to {agbenchmark_config_dir}/reports.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def load(cls, config_dir: Optional[Path] = None) -> "AgentBenchmarkConfig":
|
||||
config_dir = config_dir or cls.find_config_folder()
|
||||
with (config_dir / "config.json").open("r") as f:
|
||||
return cls(
|
||||
agbenchmark_config_dir=config_dir,
|
||||
**json.load(f),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def find_config_folder(for_dir: Path = Path.cwd()) -> Path:
|
||||
"""
|
||||
Find the closest ancestor folder containing an agbenchmark_config folder,
and return the path of that agbenchmark_config folder.
|
||||
"""
|
||||
current_directory = for_dir
|
||||
while current_directory != Path("/"):
|
||||
if (path := current_directory / "agbenchmark_config").exists():
|
||||
if (path / "config.json").is_file():
|
||||
return path
|
||||
current_directory = current_directory.parent
|
||||
raise FileNotFoundError(
|
||||
"No 'agbenchmark_config' directory found in the path hierarchy."
|
||||
)
|
||||
|
||||
@property
|
||||
def config_file(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "config.json"
|
||||
|
||||
@field_validator("reports_folder", mode="before")
|
||||
def set_reports_folder(cls, value: Path, info: ValidationInfo):
|
||||
if not value:
|
||||
return info.data["agbenchmark_config_dir"] / "reports"
|
||||
return value
|
||||
|
||||
def get_report_dir(self, benchmark_start_time: datetime) -> Path:
|
||||
return _calculate_info_test_path(self.reports_folder, benchmark_start_time)
|
||||
|
||||
@property
|
||||
def regression_tests_file(self) -> Path:
|
||||
return self.reports_folder / "regression_tests.json"
|
||||
|
||||
@property
|
||||
def success_rate_file(self) -> Path:
|
||||
return self.reports_folder / "success_rate.json"
|
||||
|
||||
@property
|
||||
def challenges_already_beaten_file(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "challenges_already_beaten.json"
|
||||
|
||||
@property
|
||||
def temp_folder(self) -> Path:
|
||||
return self.agbenchmark_config_dir / "temp_folder"
|
||||
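# Hedged usage sketch, not part of the module above: a minimal config.json only needs
# `host`; reports_folder is then derived from the config directory by the validator
# above. The host value is an assumption.
import tempfile


def _demo_load_config() -> None:
    with tempfile.TemporaryDirectory() as tmp:
        config_dir = Path(tmp) / "agbenchmark_config"
        config_dir.mkdir()
        (config_dir / "config.json").write_text('{"host": "http://localhost:8000"}')
        config = AgentBenchmarkConfig.load(config_dir)
        print(config.host, config.reports_folder)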
@@ -1,339 +0,0 @@
|
||||
import contextlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
|
||||
from agbenchmark.challenges import OPTIONAL_CATEGORIES, BaseChallenge
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.report_types import Test
|
||||
from agbenchmark.reports.ReportManager import RegressionTestsTracker
|
||||
from agbenchmark.reports.reports import (
|
||||
add_test_result_to_report,
|
||||
make_empty_test_report,
|
||||
session_finish,
|
||||
)
|
||||
from agbenchmark.utils.data_types import Category
|
||||
|
||||
GLOBAL_TIMEOUT = (
|
||||
1500 # The tests will stop after 25 minutes so we can send the reports.
|
||||
)
|
||||
|
||||
agbenchmark_config = AgentBenchmarkConfig.load()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
pytest_plugins = ["agbenchmark.utils.dependencies"]
|
||||
collect_ignore = ["challenges"]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def config() -> AgentBenchmarkConfig:
|
||||
return agbenchmark_config
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def temp_folder() -> Generator[Path, None, None]:
|
||||
"""
|
||||
Pytest fixture that sets up and tears down the temporary folder for each test.
|
||||
It is automatically used in every test due to the 'autouse=True' parameter.
|
||||
"""
|
||||
|
||||
# create output directory if it doesn't exist
|
||||
if not os.path.exists(agbenchmark_config.temp_folder):
|
||||
os.makedirs(agbenchmark_config.temp_folder, exist_ok=True)
|
||||
|
||||
yield agbenchmark_config.temp_folder
|
||||
# teardown after test function completes
|
||||
if not os.getenv("KEEP_TEMP_FOLDER_FILES"):
|
||||
for filename in os.listdir(agbenchmark_config.temp_folder):
|
||||
file_path = os.path.join(agbenchmark_config.temp_folder, filename)
|
||||
try:
|
||||
if os.path.isfile(file_path) or os.path.islink(file_path):
|
||||
os.unlink(file_path)
|
||||
elif os.path.isdir(file_path):
|
||||
shutil.rmtree(file_path)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to delete {file_path}. Reason: {e}")
|
||||
|
||||
|
||||
def pytest_addoption(parser: pytest.Parser) -> None:
|
||||
"""
|
||||
Pytest hook that adds command-line options to the `pytest` command.
|
||||
The added options are specific to agbenchmark and control its behavior:
|
||||
* `--mock` is used to run the tests in mock mode.
|
||||
* `--host` is used to specify the host for the tests.
|
||||
* `--category` is used to run only tests of a specific category.
|
||||
* `--nc` is used to run the tests without the challenge time limit (no cutoff).
|
||||
* `--cutoff` is used to specify a cutoff time for the tests.
|
||||
* `--improve` is used to run only the tests that are marked for improvement.
|
||||
* `--maintain` is used to run only the tests that are marked for maintenance.
|
||||
* `--explore` is used to run the tests in exploration mode.
|
||||
* `--test` is used to run a specific test.
|
||||
* `--no-dep` is used to run the tests without dependencies.
|
||||
* `--keep-answers` is used to keep the answers of the tests.
|
||||
|
||||
Args:
|
||||
parser: The Pytest CLI parser to which the command-line options are added.
|
||||
"""
|
||||
parser.addoption("-N", "--attempts", action="store")
|
||||
parser.addoption("--no-dep", action="store_true")
|
||||
parser.addoption("--mock", action="store_true")
|
||||
parser.addoption("--host", default=None)
|
||||
parser.addoption("--nc", action="store_true")
|
||||
parser.addoption("--cutoff", action="store")
|
||||
parser.addoption("--category", action="append")
|
||||
parser.addoption("--test", action="append")
|
||||
parser.addoption("--improve", action="store_true")
|
||||
parser.addoption("--maintain", action="store_true")
|
||||
parser.addoption("--explore", action="store_true")
|
||||
parser.addoption("--keep-answers", action="store_true")
|
||||
|
||||
|
||||
def pytest_configure(config: pytest.Config) -> None:
|
||||
# Register category markers to prevent "unknown marker" warnings
|
||||
for category in Category:
|
||||
config.addinivalue_line("markers", f"{category.value}: {category}")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def check_regression(request: pytest.FixtureRequest) -> None:
|
||||
"""
|
||||
Fixture that checks for every test if it should be treated as a regression test,
|
||||
and whether to skip it based on that.
|
||||
|
||||
The test name is retrieved from the `request` object. Regression reports are loaded
|
||||
from the path specified in the benchmark configuration.
|
||||
|
||||
Effect:
|
||||
* If the `--improve` option is used and the current test is considered a regression
|
||||
test, it is skipped.
|
||||
* If the `--maintain` option is used and the current test is not considered a
|
||||
regression test, it is also skipped.
|
||||
|
||||
Args:
|
||||
request: The request object from which the test name and the benchmark
|
||||
configuration are retrieved.
|
||||
"""
|
||||
with contextlib.suppress(FileNotFoundError):
|
||||
rt_tracker = RegressionTestsTracker(agbenchmark_config.regression_tests_file)
|
||||
|
||||
assert isinstance(request.node, pytest.Function)
|
||||
assert isinstance(request.node.parent, pytest.Class)
|
||||
test_name = request.node.parent.name
|
||||
challenge_location = getattr(request.node.cls, "CHALLENGE_LOCATION", "")
|
||||
skip_string = f"Skipping {test_name} at {challenge_location}"
|
||||
|
||||
# Check if the test name exists in the regression tests
|
||||
is_regression_test = rt_tracker.has_regression_test(test_name)
|
||||
if request.config.getoption("--improve") and is_regression_test:
|
||||
pytest.skip(f"{skip_string} because it's a regression test")
|
||||
elif request.config.getoption("--maintain") and not is_regression_test:
|
||||
pytest.skip(f"{skip_string} because it's not a regression test")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True, scope="session")
|
||||
def mock(request: pytest.FixtureRequest) -> bool:
|
||||
"""
|
||||
Pytest fixture that retrieves the value of the `--mock` command-line option.
|
||||
The `--mock` option is used to run the tests in mock mode.
|
||||
|
||||
Args:
|
||||
request: The `pytest.FixtureRequest` from which the `--mock` option value
|
||||
is retrieved.
|
||||
|
||||
Returns:
|
||||
bool: Whether `--mock` is set for this session.
|
||||
"""
|
||||
mock = request.config.getoption("--mock")
|
||||
assert isinstance(mock, bool)
|
||||
return mock
|
||||
|
||||
|
||||
test_reports: dict[str, Test] = {}
|
||||
|
||||
|
||||
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo) -> None:
|
||||
"""
|
||||
Pytest hook that is called when a test report is being generated.
|
||||
It is used to generate and finalize reports for each test.
|
||||
|
||||
Args:
|
||||
item: The test item for which the report is being generated.
|
||||
call: The call object from which the test result is retrieved.
|
||||
"""
|
||||
challenge: type[BaseChallenge] = item.cls # type: ignore
|
||||
challenge_id = challenge.info.eval_id
|
||||
|
||||
if challenge_id not in test_reports:
|
||||
test_reports[challenge_id] = make_empty_test_report(challenge.info)
|
||||
|
||||
if call.when == "setup":
|
||||
test_name = item.nodeid.split("::")[1]
|
||||
item.user_properties.append(("test_name", test_name))
|
||||
|
||||
if call.when == "call":
|
||||
add_test_result_to_report(
|
||||
test_reports[challenge_id], item, call, agbenchmark_config
|
||||
)
|
||||
|
||||
|
||||
def timeout_monitor(start_time: int) -> None:
|
||||
"""
|
||||
Function that limits the total execution time of the test suite.
|
||||
This function is supposed to be run in a separate thread and calls `pytest.exit`
|
||||
if the total execution time has exceeded the global timeout.
|
||||
|
||||
Args:
|
||||
start_time (int): The start time of the test suite.
|
||||
"""
|
||||
while time.time() - start_time < GLOBAL_TIMEOUT:
|
||||
time.sleep(1) # check every second
|
||||
|
||||
pytest.exit("Test suite exceeded the global timeout", returncode=1)
|
||||
|
||||
|
||||
def pytest_sessionstart(session: pytest.Session) -> None:
|
||||
"""
|
||||
Pytest hook that is called at the start of a test session.
|
||||
|
||||
Sets up and runs a `timeout_monitor` in a separate thread.
|
||||
"""
|
||||
start_time = time.time()
|
||||
t = threading.Thread(target=timeout_monitor, args=(start_time,))
|
||||
t.daemon = True # Daemon threads are abruptly stopped at shutdown
|
||||
t.start()
|
||||
|
||||
|
||||
def pytest_sessionfinish(session: pytest.Session) -> None:
|
||||
"""
|
||||
Pytest hook that is called at the end of a test session.
|
||||
|
||||
Finalizes and saves the test reports.
|
||||
"""
|
||||
session_finish(agbenchmark_config)
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc: pytest.Metafunc):
|
||||
n = metafunc.config.getoption("-N")
|
||||
metafunc.parametrize("i_attempt", range(int(n)) if type(n) is str else [0])
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(
|
||||
items: list[pytest.Function], config: pytest.Config
|
||||
) -> None:
|
||||
"""
|
||||
Pytest hook that is called after initial test collection has been performed.
|
||||
Modifies the collected test items based on the agent benchmark configuration,
|
||||
adding the dependency marker and category markers.
|
||||
|
||||
Args:
|
||||
items: The collected test items to be modified.
|
||||
config: The active pytest configuration.
|
||||
"""
|
||||
rt_tracker = RegressionTestsTracker(agbenchmark_config.regression_tests_file)
|
||||
|
||||
try:
|
||||
challenges_beaten_in_the_past = json.loads(
|
||||
agbenchmark_config.challenges_already_beaten_file.read_bytes()
|
||||
)
|
||||
except FileNotFoundError:
|
||||
challenges_beaten_in_the_past = {}
|
||||
|
||||
selected_tests: tuple[str] = config.getoption("--test") # type: ignore
|
||||
selected_categories: tuple[str] = config.getoption("--category") # type: ignore
|
||||
|
||||
# Can't use a for-loop to remove items in-place
|
||||
i = 0
|
||||
while i < len(items):
|
||||
item = items[i]
|
||||
assert item.cls and issubclass(item.cls, BaseChallenge)
|
||||
challenge = item.cls
|
||||
challenge_name = challenge.info.name
|
||||
|
||||
if not issubclass(challenge, BaseChallenge):
|
||||
item.warn(
|
||||
pytest.PytestCollectionWarning(
|
||||
f"Non-challenge item collected: {challenge}"
|
||||
)
|
||||
)
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# --test: remove the test from the set if it's not specifically selected
|
||||
if selected_tests and challenge.info.name not in selected_tests:
|
||||
items.remove(item)
|
||||
continue
|
||||
|
||||
# Filter challenges for --maintain, --improve, and --explore:
|
||||
# --maintain -> only challenges expected to be passed (= regression tests)
|
||||
# --improve -> only challenges that so far are not passed (reliably)
|
||||
# --explore -> only challenges that have never been passed
|
||||
is_regression_test = rt_tracker.has_regression_test(challenge.info.name)
|
||||
has_been_passed = challenges_beaten_in_the_past.get(challenge.info.name, False)
|
||||
if (
|
||||
(config.getoption("--maintain") and not is_regression_test)
|
||||
or (config.getoption("--improve") and is_regression_test)
|
||||
or (config.getoption("--explore") and has_been_passed)
|
||||
):
|
||||
items.remove(item)
|
||||
continue
|
||||
|
||||
dependencies = challenge.info.dependencies
|
||||
if (
|
||||
config.getoption("--test")
|
||||
or config.getoption("--no-dep")
|
||||
or config.getoption("--maintain")
|
||||
):
|
||||
# Ignore dependencies:
|
||||
# --test -> user selected specific tests to run, don't care about deps
|
||||
# --no-dep -> ignore dependency relations regardless of test selection
|
||||
# --maintain -> all "regression" tests must pass, so run all of them
|
||||
dependencies = []
|
||||
elif config.getoption("--improve"):
|
||||
# Filter dependencies, keep only deps that are not "regression" tests
|
||||
dependencies = [
|
||||
d for d in dependencies if not rt_tracker.has_regression_test(d)
|
||||
]
|
||||
|
||||
# Set category markers
|
||||
challenge_categories = set(c.value for c in challenge.info.category)
|
||||
for category in challenge_categories:
|
||||
item.add_marker(category)
|
||||
|
||||
# Enforce category selection
|
||||
if selected_categories:
|
||||
if not challenge_categories.intersection(set(selected_categories)):
|
||||
items.remove(item)
|
||||
continue
|
||||
# # Filter dependencies, keep only deps from selected categories
|
||||
# dependencies = [
|
||||
# d for d in dependencies
|
||||
# if not set(d.categories).intersection(set(selected_categories))
|
||||
# ]
|
||||
|
||||
# Skip items in optional categories that are not selected for the subject agent
|
||||
challenge_optional_categories = challenge_categories & set(OPTIONAL_CATEGORIES)
|
||||
if challenge_optional_categories and not (
|
||||
agbenchmark_config.categories
|
||||
and challenge_optional_categories.issubset(
|
||||
set(agbenchmark_config.categories)
|
||||
)
|
||||
):
|
||||
logger.debug(
|
||||
f"Skipping {challenge_name}: "
|
||||
f"category {' and '.join(challenge_optional_categories)} is optional, "
|
||||
"and not explicitly selected in the benchmark config."
|
||||
)
|
||||
items.remove(item)
|
||||
continue
|
||||
|
||||
# Add marker for the DependencyManager
|
||||
item.add_marker(pytest.mark.depends(on=dependencies, name=challenge_name))
|
||||
|
||||
i += 1
|
||||
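# Hedged sketch, not part of the conftest above: the pytest_addoption hook is what
# makes flags like --mock, --test and --attempts valid when agbenchmark.main invokes
# pytest. A minimal manual equivalent (challenge name and file path are placeholders;
# pytest is already imported at the top of this module):
def _run_single_challenge() -> int:
    return int(pytest.main(["-vs", "--mock", "--test=WriteFile", "generate_test.py"]))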
@@ -1,26 +0,0 @@
|
||||
"""
|
||||
AGBenchmark's test discovery endpoint for Pytest.
|
||||
|
||||
This module is picked up by Pytest's *_test.py file matching pattern, and all challenge
|
||||
classes in the module that conform to the `Test*` pattern are collected.
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
from itertools import chain
|
||||
|
||||
from agbenchmark.challenges.builtin import load_builtin_challenges
|
||||
from agbenchmark.challenges.webarena import load_webarena_challenges
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DATA_CATEGORY = {}
|
||||
|
||||
# Load challenges and attach them to this module
|
||||
for challenge in chain(load_builtin_challenges(), load_webarena_challenges()):
|
||||
# Attach the Challenge class to this module so it can be discovered by pytest
|
||||
module = importlib.import_module(__name__)
|
||||
setattr(module, challenge.__name__, challenge)
|
||||
|
||||
# Build a map of challenge names and their primary category
|
||||
DATA_CATEGORY[challenge.info.name] = challenge.info.category[0].value
|
||||
@@ -1,158 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional, Sequence
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from agbenchmark.challenges import get_unique_categories
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def run_benchmark(
|
||||
config: AgentBenchmarkConfig,
|
||||
maintain: bool = False,
|
||||
improve: bool = False,
|
||||
explore: bool = False,
|
||||
tests: tuple[str, ...] = tuple(),
|
||||
categories: tuple[str, ...] = tuple(),
|
||||
skip_categories: tuple[str, ...] = tuple(),
|
||||
attempts_per_challenge: int = 1,
|
||||
mock: bool = False,
|
||||
no_dep: bool = False,
|
||||
no_cutoff: bool = False,
|
||||
cutoff: Optional[int] = None,
|
||||
keep_answers: bool = False,
|
||||
server: bool = False,
|
||||
) -> int:
|
||||
"""
|
||||
Starts the benchmark. If a category flag is provided, only challenges with the
|
||||
corresponding mark will be run.
|
||||
"""
|
||||
import pytest
|
||||
|
||||
from agbenchmark.reports.ReportManager import SingletonReportManager
|
||||
|
||||
validate_args(
|
||||
maintain=maintain,
|
||||
improve=improve,
|
||||
explore=explore,
|
||||
tests=tests,
|
||||
categories=categories,
|
||||
skip_categories=skip_categories,
|
||||
no_cutoff=no_cutoff,
|
||||
cutoff=cutoff,
|
||||
)
|
||||
|
||||
SingletonReportManager()
|
||||
|
||||
for key, value in vars(config).items():
|
||||
logger.debug(f"config.{key} = {repr(value)}")
|
||||
|
||||
pytest_args = ["-vs"]
|
||||
|
||||
if tests:
|
||||
logger.info(f"Running specific test(s): {' '.join(tests)}")
|
||||
pytest_args += [f"--test={t}" for t in tests]
|
||||
else:
|
||||
all_categories = get_unique_categories()
|
||||
|
||||
if categories or skip_categories:
|
||||
categories_to_run = set(categories) or all_categories
|
||||
if skip_categories:
|
||||
categories_to_run = categories_to_run.difference(set(skip_categories))
|
||||
assert categories_to_run, "Error: You can't skip all categories"
|
||||
pytest_args += [f"--category={c}" for c in categories_to_run]
|
||||
logger.info(f"Running tests of category: {categories_to_run}")
|
||||
else:
|
||||
logger.info("Running all categories")
|
||||
|
||||
if maintain:
|
||||
logger.info("Running only regression tests")
|
||||
elif improve:
|
||||
logger.info("Running only non-regression tests")
|
||||
elif explore:
|
||||
logger.info("Only attempt challenges that have never been beaten")
|
||||
|
||||
if mock:
|
||||
# TODO: unhack
|
||||
os.environ[
|
||||
"IS_MOCK"
|
||||
] = "True" # ugly hack to make the mock work when calling from API
|
||||
|
||||
# Pass through flags
|
||||
for flag, active in {
|
||||
"--maintain": maintain,
|
||||
"--improve": improve,
|
||||
"--explore": explore,
|
||||
"--no-dep": no_dep,
|
||||
"--mock": mock,
|
||||
"--nc": no_cutoff,
|
||||
"--keep-answers": keep_answers,
|
||||
}.items():
|
||||
if active:
|
||||
pytest_args.append(flag)
|
||||
|
||||
if attempts_per_challenge > 1:
|
||||
pytest_args.append(f"--attempts={attempts_per_challenge}")
|
||||
|
||||
if cutoff:
|
||||
pytest_args.append(f"--cutoff={cutoff}")
|
||||
logger.debug(f"Setting cuttoff override to {cutoff} seconds.")
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
pytest_args.append(str(current_dir / "generate_test.py"))
|
||||
|
||||
pytest_args.append("--cache-clear")
|
||||
logger.debug(f"Running Pytest with args: {pytest_args}")
|
||||
exit_code = pytest.main(pytest_args)
|
||||
|
||||
SingletonReportManager.clear_instance()
|
||||
return exit_code
|
||||
|
||||
|
||||
class InvalidInvocationError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
def validate_args(
|
||||
maintain: bool,
|
||||
improve: bool,
|
||||
explore: bool,
|
||||
tests: Sequence[str],
|
||||
categories: Sequence[str],
|
||||
skip_categories: Sequence[str],
|
||||
no_cutoff: bool,
|
||||
cutoff: Optional[int],
|
||||
) -> None:
|
||||
if categories:
|
||||
all_categories = get_unique_categories()
|
||||
invalid_categories = set(categories) - all_categories
|
||||
if invalid_categories:
|
||||
raise InvalidInvocationError(
|
||||
"One or more invalid categories were specified: "
|
||||
f"{', '.join(invalid_categories)}.\n"
|
||||
f"Valid categories are: {', '.join(all_categories)}."
|
||||
)
|
||||
|
||||
if (maintain + improve + explore) > 1:
|
||||
raise InvalidInvocationError(
|
||||
"You can't use --maintain, --improve or --explore at the same time. "
|
||||
"Please choose one."
|
||||
)
|
||||
|
||||
if tests and (categories or skip_categories or maintain or improve or explore):
|
||||
raise InvalidInvocationError(
|
||||
"If you're running a specific test make sure no other options are "
|
||||
"selected. Please just pass the --test."
|
||||
)
|
||||
|
||||
if no_cutoff and cutoff:
|
||||
raise InvalidInvocationError(
|
||||
"You can't use both --nc and --cutoff at the same time. "
|
||||
"Please choose one."
|
||||
)
|
||||
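# Hedged usage sketch, not part of the module above: run_benchmark() can be called
# directly instead of going through the CLI. The "coding" category is a placeholder;
# invalid argument combinations raise InvalidInvocationError via validate_args().
def _run_mock_suite() -> int:
    config = AgentBenchmarkConfig.load()
    return run_benchmark(config=config, categories=("coding",), mock=True)


if __name__ == "__main__":
    raise SystemExit(_run_mock_suite())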
@@ -1,217 +0,0 @@
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from agbenchmark.config import AgentBenchmarkConfig
|
||||
from agbenchmark.reports.processing.graphs import save_single_radar_chart
|
||||
from agbenchmark.reports.processing.process_report import (
|
||||
get_highest_achieved_difficulty_per_category,
|
||||
)
|
||||
from agbenchmark.reports.processing.report_types import MetricsOverall, Report, Test
|
||||
from agbenchmark.utils.utils import get_highest_success_difficulty
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SingletonReportManager:
|
||||
instance = None
|
||||
|
||||
INFO_MANAGER: "SessionReportManager"
|
||||
REGRESSION_MANAGER: "RegressionTestsTracker"
|
||||
SUCCESS_RATE_TRACKER: "SuccessRatesTracker"
|
||||
|
||||
def __new__(cls):
|
||||
if not cls.instance:
|
||||
cls.instance = super(SingletonReportManager, cls).__new__(cls)
|
||||
|
||||
agent_benchmark_config = AgentBenchmarkConfig.load()
|
||||
benchmark_start_time_dt = datetime.now(
|
||||
timezone.utc
|
||||
) # or any logic to fetch the datetime
|
||||
|
||||
# Make the Managers class attributes
|
||||
cls.INFO_MANAGER = SessionReportManager(
|
||||
agent_benchmark_config.get_report_dir(benchmark_start_time_dt)
|
||||
/ "report.json",
|
||||
benchmark_start_time_dt,
|
||||
)
|
||||
cls.REGRESSION_MANAGER = RegressionTestsTracker(
|
||||
agent_benchmark_config.regression_tests_file
|
||||
)
|
||||
cls.SUCCESS_RATE_TRACKER = SuccessRatesTracker(
|
||||
agent_benchmark_config.success_rate_file
|
||||
)
|
||||
|
||||
return cls.instance
|
||||
|
||||
@classmethod
|
||||
def clear_instance(cls):
|
||||
cls.instance = None
|
||||
del cls.INFO_MANAGER
|
||||
del cls.REGRESSION_MANAGER
|
||||
del cls.SUCCESS_RATE_TRACKER
|
||||
|
||||
|
||||
class BaseReportManager:
|
||||
"""Abstracts interaction with the regression tests file"""
|
||||
|
||||
tests: dict[str, Any]
|
||||
|
||||
def __init__(self, report_file: Path):
|
||||
self.report_file = report_file
|
||||
|
||||
self.load()
|
||||
|
||||
def load(self) -> None:
|
||||
if not self.report_file.exists():
|
||||
self.report_file.parent.mkdir(exist_ok=True)
|
||||
|
||||
try:
|
||||
with self.report_file.open("r") as f:
|
||||
data = json.load(f)
|
||||
self.tests = {k: data[k] for k in sorted(data)}
|
||||
except FileNotFoundError:
|
||||
self.tests = {}
|
||||
except json.decoder.JSONDecodeError as e:
|
||||
logger.warning(f"Could not parse {self.report_file}: {e}")
|
||||
self.tests = {}
|
||||
|
||||
def save(self) -> None:
|
||||
with self.report_file.open("w") as f:
|
||||
json.dump(self.tests, f, indent=4)
|
||||
|
||||
def remove_test(self, test_name: str) -> None:
|
||||
if test_name in self.tests:
|
||||
del self.tests[test_name]
|
||||
self.save()
|
||||
|
||||
def reset(self) -> None:
|
||||
self.tests = {}
|
||||
self.save()
|
||||
|
||||
|
||||
class SessionReportManager(BaseReportManager):
|
||||
"""Abstracts interaction with the regression tests file"""
|
||||
|
||||
tests: dict[str, Test]
|
||||
report: Report | None = None
|
||||
|
||||
def __init__(self, report_file: Path, benchmark_start_time: datetime):
|
||||
super().__init__(report_file)
|
||||
|
||||
self.start_time = time.time()
|
||||
self.benchmark_start_time = benchmark_start_time
|
||||
|
||||
def save(self) -> None:
|
||||
with self.report_file.open("w") as f:
|
||||
if self.report:
|
||||
f.write(self.report.model_dump_json(indent=4))
|
||||
else:
|
||||
json.dump(
|
||||
{k: v.model_dump() for k, v in self.tests.items()}, f, indent=4
|
||||
)
|
||||
|
||||
def load(self) -> None:
|
||||
super().load()
|
||||
|
||||
if "tests" in self.tests:
|
||||
self.report = Report.model_validate(self.tests)
|
||||
else:
|
||||
self.tests = {n: Test.model_validate(d) for n, d in self.tests.items()}
|
||||
|
||||
def add_test_report(self, test_name: str, test_report: Test) -> None:
|
||||
if self.report:
|
||||
raise RuntimeError("Session report already finalized")
|
||||
|
||||
if test_name.startswith("Test"):
|
||||
test_name = test_name[4:]
|
||||
self.tests[test_name] = test_report
|
||||
|
||||
self.save()
|
||||
|
||||
def finalize_session_report(self, config: AgentBenchmarkConfig) -> None:
|
||||
command = " ".join(sys.argv)
|
||||
|
||||
if self.report:
|
||||
raise RuntimeError("Session report already finalized")
|
||||
|
||||
self.report = Report(
|
||||
command=command.split(os.sep)[-1],
|
||||
benchmark_git_commit_sha="---",
|
||||
agent_git_commit_sha="---",
|
||||
completion_time=datetime.now(timezone.utc).strftime(
|
||||
"%Y-%m-%dT%H:%M:%S+00:00"
|
||||
),
|
||||
benchmark_start_time=self.benchmark_start_time.strftime(
|
||||
"%Y-%m-%dT%H:%M:%S+00:00"
|
||||
),
|
||||
metrics=MetricsOverall(
|
||||
run_time=str(round(time.time() - self.start_time, 2)) + " seconds",
|
||||
highest_difficulty=get_highest_success_difficulty(self.tests),
|
||||
total_cost=self.get_total_costs(),
|
||||
),
|
||||
tests=copy.copy(self.tests),
|
||||
config=config.model_dump(exclude={"reports_folder"}, exclude_none=True),
|
||||
)
|
||||
|
||||
agent_categories = get_highest_achieved_difficulty_per_category(self.report)
|
||||
if len(agent_categories) > 1:
|
||||
save_single_radar_chart(
|
||||
agent_categories,
|
||||
config.get_report_dir(self.benchmark_start_time) / "radar_chart.png",
|
||||
)
|
||||
|
||||
self.save()
|
||||
|
||||
def get_total_costs(self):
|
||||
if self.report:
|
||||
tests = self.report.tests
|
||||
else:
|
||||
tests = self.tests
|
||||
|
||||
total_cost = 0
|
||||
all_costs_none = True
|
||||
for test_data in tests.values():
|
||||
cost = sum(r.cost or 0 for r in test_data.results)
|
||||
|
||||
if cost is not None: # check if cost is not None
|
||||
all_costs_none = False
|
||||
total_cost += cost # add cost to total
|
||||
if all_costs_none:
|
||||
total_cost = None
|
||||
return total_cost
|
||||
|
||||
|
||||
class RegressionTestsTracker(BaseReportManager):
|
||||
"""Abstracts interaction with the regression tests file"""
|
||||
|
||||
tests: dict[str, dict]
|
||||
|
||||
def add_test(self, test_name: str, test_details: dict) -> None:
|
||||
if test_name.startswith("Test"):
|
||||
test_name = test_name[4:]
|
||||
|
||||
self.tests[test_name] = test_details
|
||||
self.save()
|
||||
|
||||
def has_regression_test(self, test_name: str) -> bool:
|
||||
return self.tests.get(test_name) is not None
|
||||
|
||||
|
||||
class SuccessRatesTracker(BaseReportManager):
|
||||
"""Abstracts interaction with the regression tests file"""
|
||||
|
||||
tests: dict[str, list[bool | None]]
|
||||
|
||||
def update(self, test_name: str, success_history: list[bool | None]) -> None:
|
||||
if test_name.startswith("Test"):
|
||||
test_name = test_name[4:]
|
||||
|
||||
self.tests[test_name] = success_history
|
||||
self.save()
|
||||
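# Hedged usage sketch, not part of the module above: the trackers are thin wrappers
# around JSON files. The file path and test details are placeholders; note that a
# leading "Test" prefix is stripped from test names.
def _demo_regression_tracker() -> None:
    tracker = RegressionTestsTracker(Path("reports/regression_tests.json"))
    tracker.add_test("TestWriteFile", {"difficulty": "basic"})  # stored as "WriteFile"
    print(tracker.has_regression_test("WriteFile"))  # True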
@@ -1,45 +0,0 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from agbenchmark.reports.processing.graphs import (
|
||||
save_combined_bar_chart,
|
||||
save_combined_radar_chart,
|
||||
)
|
||||
from agbenchmark.reports.processing.process_report import (
|
||||
all_agent_categories,
|
||||
get_reports_data,
|
||||
)
|
||||
|
||||
|
||||
def generate_combined_chart() -> None:
|
||||
all_agents_path = Path(__file__).parent.parent.parent.parent / "reports"
|
||||
|
||||
combined_charts_folder = all_agents_path / "combined_charts"
|
||||
|
||||
reports_data = get_reports_data(str(all_agents_path))
|
||||
|
||||
categories = all_agent_categories(reports_data)
|
||||
|
||||
# Count the number of directories in this directory
|
||||
num_dirs = len([f for f in combined_charts_folder.iterdir() if f.is_dir()])
|
||||
|
||||
run_charts_folder = combined_charts_folder / f"run{num_dirs + 1}"
|
||||
|
||||
if not os.path.exists(run_charts_folder):
|
||||
os.makedirs(run_charts_folder)
|
||||
|
||||
info_data = {
|
||||
report_name: data.benchmark_start_time
|
||||
for report_name, data in reports_data.items()
|
||||
if report_name in categories
|
||||
}
|
||||
with open(Path(run_charts_folder) / "run_info.json", "w") as f:
|
||||
json.dump(info_data, f)
|
||||
|
||||
save_combined_radar_chart(categories, Path(run_charts_folder) / "radar_chart.png")
|
||||
save_combined_bar_chart(categories, Path(run_charts_folder) / "bar_chart.png")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
generate_combined_chart()
|
||||
@@ -1,34 +0,0 @@
|
||||
import os


def get_last_subdirectory(directory_path: str) -> str | None:
    # Get all subdirectories in the directory
    subdirs = [
        os.path.join(directory_path, name)
        for name in os.listdir(directory_path)
        if os.path.isdir(os.path.join(directory_path, name))
    ]

    # Sort the subdirectories by creation time
    subdirs.sort(key=os.path.getctime)

    # Return the last subdirectory in the list
    return subdirs[-1] if subdirs else None


def get_latest_report_from_agent_directories(
    directory_path: str,
) -> list[tuple[os.DirEntry[str], str]]:
    latest_reports = []

    for subdir in os.scandir(directory_path):
        if subdir.is_dir():
            # Get the most recently created subdirectory within this agent's directory
            latest_subdir = get_last_subdirectory(subdir.path)
            if latest_subdir is not None:
                # Look for 'report.json' in the subdirectory
                report_file = os.path.join(latest_subdir, "report.json")
                if os.path.isfile(report_file):
                    latest_reports.append((subdir, report_file))

    return latest_reports
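# Hedged usage sketch, not part of the module above: scans one sub-directory per agent
# and picks that agent's newest run folder that contains a report.json. The "reports"
# path is a placeholder.
if __name__ == "__main__":
    for agent_dir, report_file in get_latest_report_from_agent_directories("reports"):
        print(f"{agent_dir.name}: {report_file}")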
@@ -1,199 +0,0 @@
from pathlib import Path
from typing import Any

import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


def save_combined_radar_chart(
    categories: dict[str, Any], save_path: str | Path
) -> None:
    categories = {k: v for k, v in categories.items() if v}
    if not all(categories.values()):
        raise Exception("No data to plot")
    labels = np.array(
        list(next(iter(categories.values())).keys())
    )  # We use the first category to get the keys
    num_vars = len(labels)
    angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()
    angles += angles[
        :1
    ]  # Add the first angle to the end of the list to ensure the polygon is closed

    # Create radar chart
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.set_theta_offset(np.pi / 2)  # type: ignore
    ax.set_theta_direction(-1)  # type: ignore
    ax.spines["polar"].set_visible(False)  # Remove border

    cmap = plt.cm.get_cmap("nipy_spectral", len(categories))  # type: ignore

    colors = [cmap(i) for i in range(len(categories))]

    for i, (cat_name, cat_values) in enumerate(
        categories.items()
    ):  # Iterating through each category (series)
        values = np.array(list(cat_values.values()))
        values = np.concatenate((values, values[:1]))  # Ensure the polygon is closed

        ax.fill(angles, values, color=colors[i], alpha=0.25)  # Draw the filled polygon
        ax.plot(angles, values, color=colors[i], linewidth=2)  # Draw polygon
        ax.plot(
            angles,
            values,
            "o",
            color="white",
            markersize=7,
            markeredgecolor=colors[i],
            markeredgewidth=2,
        )  # Draw points

    # Draw legend
    ax.legend(
        handles=[
            mpatches.Patch(color=color, label=cat_name, alpha=0.25)
            for cat_name, color in zip(categories.keys(), colors)
        ],
        loc="upper left",
        bbox_to_anchor=(0.7, 1.3),
    )

    # Adjust layout to make room for the legend
    plt.tight_layout()

    lines, labels = plt.thetagrids(
        np.degrees(angles[:-1]), (list(next(iter(categories.values())).keys()))
    )  # We use the first category to get the keys

    highest_score = 7

    # Set y-axis limit to 7
    ax.set_ylim(top=highest_score)

    # Move labels away from the plot
    for label in labels:
        label.set_position(
            (label.get_position()[0], label.get_position()[1] + -0.05)
        )  # adjust 0.1 as needed

    # Move radial labels away from the plot
    ax.set_rlabel_position(180)  # type: ignore

    ax.set_yticks([])  # Remove default yticks

    # Manually create gridlines
    for y in np.arange(0, highest_score + 1, 1):
        if y != highest_score:
            ax.plot(
                angles, [y] * len(angles), color="gray", linewidth=0.5, linestyle=":"
            )
            # Add labels for manually created gridlines
            ax.text(
                angles[0],
                y + 0.2,
                str(int(y)),
                color="black",
                size=9,
                horizontalalignment="center",
                verticalalignment="center",
            )

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory


def save_single_radar_chart(
    category_dict: dict[str, int], save_path: str | Path
) -> None:
    labels = np.array(list(category_dict.keys()))
    values = np.array(list(category_dict.values()))

    num_vars = len(labels)

    angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()

    angles += angles[:1]
    values = np.concatenate((values, values[:1]))

    colors = ["#1f77b4"]

    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.set_theta_offset(np.pi / 2)  # type: ignore
    ax.set_theta_direction(-1)  # type: ignore

    ax.spines["polar"].set_visible(False)

    lines, labels = plt.thetagrids(
        np.degrees(angles[:-1]), (list(category_dict.keys()))
    )

    highest_score = 7

    # Set y-axis limit to 7
    ax.set_ylim(top=highest_score)

    for label in labels:
        label.set_position((label.get_position()[0], label.get_position()[1] + -0.05))

    ax.fill(angles, values, color=colors[0], alpha=0.25)
    ax.plot(angles, values, color=colors[0], linewidth=2)

    for i, (angle, value) in enumerate(zip(angles, values)):
        ha = "left"
        if angle in {0, np.pi}:
            ha = "center"
        elif np.pi < angle < 2 * np.pi:
            ha = "right"
        ax.text(
            angle,
            value - 0.5,
            f"{value}",
            size=10,
            horizontalalignment=ha,
            verticalalignment="center",
            color="black",
        )

    ax.set_yticklabels([])

    ax.set_yticks([])

    if values.size == 0:
        return

    for y in np.arange(0, highest_score, 1):
        ax.plot(angles, [y] * len(angles), color="gray", linewidth=0.5, linestyle=":")

    for angle, value in zip(angles, values):
        ax.plot(
            angle,
            value,
            "o",
            color="white",
            markersize=7,
            markeredgecolor=colors[0],
            markeredgewidth=2,
        )

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory


def save_combined_bar_chart(categories: dict[str, Any], save_path: str | Path) -> None:
    if not all(categories.values()):
        raise Exception("No data to plot")

    # Convert dictionary to DataFrame
    df = pd.DataFrame(categories)

    # Create a grouped bar chart
    df.plot(kind="bar", figsize=(10, 7))

    plt.title("Performance by Category for Each Agent")
    plt.xlabel("Category")
    plt.ylabel("Performance")

    plt.savefig(save_path, dpi=300)  # Save the figure as a PNG file
    plt.close()  # Close the figure to free up memory
@@ -1,67 +0,0 @@
import json
import logging
import os
from pathlib import Path
from typing import Any

from agbenchmark.reports.processing.get_files import (
    get_latest_report_from_agent_directories,
)
from agbenchmark.reports.processing.report_types import Report
from agbenchmark.utils.data_types import STRING_DIFFICULTY_MAP

logger = logging.getLogger(__name__)


def get_reports_data(report_path: str) -> dict[str, Any]:
    latest_files = get_latest_report_from_agent_directories(report_path)

    reports_data = {}

    if latest_files is None:
        raise Exception("No files found in the reports directory")

    # This will take the latest file in each subdirectory
    # and add it to the reports_data dictionary
    for subdir, file in latest_files:
        subdir_name = os.path.basename(os.path.normpath(subdir))
        with open(Path(subdir) / file, "r") as f:
            # Load the JSON data from the file
            json_data = json.load(f)
            converted_data = Report.model_validate(json_data)
            # get the last directory name in the path as key
            reports_data[subdir_name] = converted_data

    return reports_data


def get_highest_achieved_difficulty_per_category(report: Report) -> dict[str, Any]:
    categories: dict[str, Any] = {}

    for _, test_data in report.tests.items():
        for category in test_data.category:
            if category in ("interface", "iterate", "product_advisor"):
                continue
            categories.setdefault(category, 0)
            if (
                test_data.results
                and all(r.success for r in test_data.results)
                and test_data.difficulty
            ):
                num_dif = STRING_DIFFICULTY_MAP[test_data.difficulty]
                if num_dif > categories[category]:
                    categories[category] = num_dif

    return categories


def all_agent_categories(reports_data: dict[str, Any]) -> dict[str, Any]:
    all_categories: dict[str, Any] = {}

    for name, report in reports_data.items():
        categories = get_highest_achieved_difficulty_per_category(report)
        if categories:  # only add to all_categories if categories is not empty
            logger.debug(f"Adding {name}: {categories}")
            all_categories[name] = categories

    return all_categories
@@ -1,106 +0,0 @@
"""
Model definitions used internally and for reports generated during command-line runs.
"""

import logging
from typing import Annotated, Any, Dict, List

from agent_protocol_client import Step
from pydantic import (
    BaseModel,
    Field,
    StringConstraints,
    ValidationInfo,
    field_validator,
)

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
logger = logging.getLogger(__name__)


class TestResult(BaseModel):
    """Result details for a single run of a test/challenge."""

    success: bool | None = None
    """Whether the run was successful"""
    run_time: str | None = None
    """The (formatted) duration of the run"""
    fail_reason: str | None = None
    """If applicable, the reason why the run was not successful"""
    reached_cutoff: bool | None = None  # None if in progress
    """Whether the run had to be stopped due to reaching the timeout"""
    n_steps: int | None = None
    """The number of steps executed by the agent"""
    steps: list[Step] = []
    """The steps generated by the agent"""
    cost: float | None = None
    """The (known) cost incurred by the run, e.g. from using paid LLM APIs"""

    @field_validator("fail_reason")
    def success_xor_fail_reason(cls, value, info: ValidationInfo):
        if bool(value) == bool(info.data["success"]):
            logger.error(
                "Error validating `success ^ fail_reason` on TestResult: "
                f"success = {repr(info.data['success'])}; "
                f"fail_reason = {repr(value)}"
            )
        if value:
            success = info.data["success"]
            assert not success, "fail_reason must only be specified if success=False"
        else:
            assert info.data["success"], "fail_reason is required if success=False"
        return value


class TestMetrics(BaseModel):
    """
    Result metrics for a set of runs for a test/challenge. Should be an aggregate of all
    results for the same test/challenge within a benchmarking session.
    """

    attempted: bool
    """Whether the challenge was attempted during this session"""
    is_regression: bool
    """Whether the challenge was considered a regression test at the time of running"""
    success_percentage: float | None = Field(default=None, alias="success_%")
    """Success rate (0-100) for this challenge within the session"""


class MetricsOverall(BaseModel):
    """Global metrics concerning a benchmarking session"""

    run_time: str
    """Duration from beginning to end of the session"""
    highest_difficulty: str
    """
    Difficulty of the most difficult challenge that succeeded at least once this session
    """
    total_cost: float | None = None
    """Total known cost of the session"""


class Test(BaseModel):
    category: List[str]
    difficulty: str | None
    data_path: str
    description: str
    task: str
    answer: str
    metrics: TestMetrics
    results: list[TestResult]
    metadata: dict[str, Any] | None = Field(default_factory=dict)


class ReportBase(BaseModel):
    command: str
    completion_time: str | None = None
    benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
    metrics: MetricsOverall
    config: Dict[str, str | dict[str, str]]
    agent_git_commit_sha: str | None = None
    benchmark_git_commit_sha: str | None = None
    repo_url: str | None = None


class Report(ReportBase):
    tests: Dict[str, Test]
@@ -1,49 +0,0 @@
"""Model definitions for use in the API"""
from typing import Annotated

from pydantic import BaseModel, StringConstraints

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"


class TaskInfo(BaseModel):
    data_path: str
    is_regression: bool | None
    answer: str
    description: str
    category: list[str]
    task: str


class RepositoryInfo(BaseModel):
    repo_url: str | None = None
    team_name: str | None = None
    agent_git_commit_sha: str | None = None
    benchmark_git_commit_sha: str | None = None


class Metrics(BaseModel):
    cost: float | None = None
    success: bool
    attempted: bool
    difficulty: str | None = None
    run_time: str | None = None
    fail_reason: str | None = None
    success_percentage: float | None = None


class RunDetails(BaseModel):
    test_name: str
    run_id: str | None = None
    command: str
    completion_time: str | None = None
    benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]


class BenchmarkRun(BaseModel):
    repository_info: RepositoryInfo
    run_details: RunDetails
    task_info: TaskInfo
    metrics: Metrics
    reached_cutoff: bool | None = None
    config: dict[str, str | dict[str, str]]
@@ -1,157 +0,0 @@
import json
import logging
import os
from pathlib import Path

import pytest
from pydantic import ValidationError

from agbenchmark.challenges import ChallengeInfo
from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.processing.report_types import Test, TestMetrics, TestResult
from agbenchmark.reports.ReportManager import SingletonReportManager
from agbenchmark.utils.data_types import DifficultyLevel

# from agbenchmark.utils.get_data_from_helicone import get_data_from_helicone

logger = logging.getLogger(__name__)


def get_and_update_success_history(
    test_name: str, success: bool | None
) -> list[bool | None]:
    mock = os.getenv("IS_MOCK")  # Check if --mock is in sys.argv

    prev_test_results = SingletonReportManager().SUCCESS_RATE_TRACKER.tests.get(
        test_name, []
    )

    if not mock:
        # only add if it's an actual test
        prev_test_results.append(success)
        SingletonReportManager().SUCCESS_RATE_TRACKER.update(
            test_name, prev_test_results
        )

    return prev_test_results


def update_regression_tests(
    prev_test_results: list[bool | None],
    test_report: Test,
    test_name: str,
) -> None:
    if len(prev_test_results) >= 3 and prev_test_results[-3:] == [True, True, True]:
        # if the last 3 tests were successful, add to the regression tests
        test_report.metrics.is_regression = True
        SingletonReportManager().REGRESSION_MANAGER.add_test(
            test_name, test_report.model_dump(include={"difficulty", "data_path"})
        )


def make_empty_test_report(
    challenge_info: ChallengeInfo,
) -> Test:
    difficulty = challenge_info.difficulty
    if isinstance(difficulty, DifficultyLevel):
        difficulty = difficulty.value

    return Test(
        category=[c.value for c in challenge_info.category],
        difficulty=difficulty,
        data_path=challenge_info.source_uri,
        description=challenge_info.description or "",
        task=challenge_info.task,
        answer=challenge_info.reference_answer or "",
        metrics=TestMetrics(attempted=False, is_regression=False),
        results=[],
    )


def add_test_result_to_report(
    test_report: Test,
    item: pytest.Item,
    call: pytest.CallInfo,
    config: AgentBenchmarkConfig,
) -> None:
    user_properties: dict = dict(item.user_properties)
    test_name: str = user_properties.get("test_name", "")

    mock = os.getenv("IS_MOCK")  # Check if --mock is in sys.argv

    if call.excinfo:
        if not mock:
            SingletonReportManager().REGRESSION_MANAGER.remove_test(test_name)

        test_report.metrics.attempted = call.excinfo.typename != "Skipped"
    else:
        test_report.metrics.attempted = True

    try:
        test_report.results.append(
            TestResult(
                success=call.excinfo is None,
                run_time=f"{str(round(call.duration, 3))} seconds",
                fail_reason=(
                    str(call.excinfo.value) if call.excinfo is not None else None
                ),
                reached_cutoff=user_properties.get("timed_out", False),
                n_steps=user_properties.get("n_steps"),
                steps=user_properties.get("steps", []),
                cost=user_properties.get("agent_task_cost"),
            )
        )
        test_report.metrics.success_percentage = (
            sum(r.success or False for r in test_report.results)
            / len(test_report.results)
            * 100
        )
    except ValidationError:
        if call.excinfo:
            logger.error(
                "Validation failed on TestResult; "
                f"call.excinfo = {repr(call.excinfo)};\n{call.excinfo.getrepr()})"
            )
        raise

    prev_test_results: list[bool | None] = get_and_update_success_history(
        test_name, test_report.results[-1].success
    )

    update_regression_tests(prev_test_results, test_report, test_name)

    if test_report and test_name:
        # if "--mock" not in sys.argv and os.environ.get("HELICONE_API_KEY"):
        #     logger.debug("Getting cost from Helicone")
        #     test_report.metrics.cost = get_data_from_helicone(test_name)
        #     logger.debug(f"Cost: {cost}")

        if not mock:
            update_challenges_already_beaten(
                config.challenges_already_beaten_file, test_report, test_name
            )

        SingletonReportManager().INFO_MANAGER.add_test_report(test_name, test_report)


def update_challenges_already_beaten(
    challenges_already_beaten_file: Path, test_report: Test, test_name: str
) -> None:
    current_run_successful = any(r.success for r in test_report.results)
    try:
        with open(challenges_already_beaten_file, "r") as f:
            challenges_beaten_before = json.load(f)
    except FileNotFoundError:
        challenges_beaten_before = {}

    has_ever_been_beaten = challenges_beaten_before.get(test_name)
    challenges_beaten_before[test_name] = has_ever_been_beaten or current_run_successful

    with open(challenges_already_beaten_file, "w") as f:
        json.dump(challenges_beaten_before, f, indent=4)


def session_finish(agbenchmark_config: AgentBenchmarkConfig) -> None:
    SingletonReportManager().INFO_MANAGER.finalize_session_report(agbenchmark_config)
    SingletonReportManager().REGRESSION_MANAGER.save()
    SingletonReportManager().SUCCESS_RATE_TRACKER.save()
@@ -1,18 +0,0 @@
from __future__ import annotations

from typing import Any, Optional

from pydantic import BaseModel, Field


class TaskRequestBody(BaseModel):
    input: str = Field(
        min_length=1,
        description="Input prompt for the task.",
        examples=["Write the words you receive to the file 'output.txt'."],
    )
    additional_input: Optional[dict[str, Any]] = Field(default_factory=dict)


class TaskEvalRequestBody(TaskRequestBody):
    eval_id: str
@@ -1,46 +0,0 @@
from enum import Enum
from typing import Literal

from pydantic import BaseModel


class DifficultyLevel(Enum):
    interface = "interface"
    basic = "basic"
    novice = "novice"
    intermediate = "intermediate"
    advanced = "advanced"
    expert = "expert"
    human = "human"


# map from enum to difficulty level (numeric)
DIFFICULTY_MAP = {
    DifficultyLevel.interface: 1,
    DifficultyLevel.basic: 2,
    DifficultyLevel.novice: 3,
    DifficultyLevel.intermediate: 4,
    DifficultyLevel.advanced: 5,
    DifficultyLevel.expert: 6,
    DifficultyLevel.human: 7,
}

STRING_DIFFICULTY_MAP = {e.value: DIFFICULTY_MAP[e] for e in DifficultyLevel}


class Category(str, Enum):
    GENERALIST = "general"
    DATA = "data"
    CODING = "coding"
    SCRAPE_SYNTHESIZE = "scrape_synthesize"
    WEB = "web"
    GAIA_1 = "GAIA_1"
    GAIA_2 = "GAIA_2"
    GAIA_3 = "GAIA_3"


class EvalResult(BaseModel):
    result: str
    result_source: Literal["step_output"] | str
    score: float
    passed: bool
@@ -1,206 +0,0 @@
|
||||
"""
|
||||
A module that provides the pytest hooks for this plugin.
|
||||
|
||||
The logic itself is in main.py.
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
import pytest
|
||||
from _pytest.config.argparsing import OptionGroup, Parser
|
||||
from _pytest.nodes import Item
|
||||
|
||||
from .main import DependencyManager
|
||||
|
||||
managers: list[DependencyManager] = []
|
||||
|
||||
|
||||
DEPENDENCY_PROBLEM_ACTIONS: dict[str, Callable[[str], None] | None] = {
|
||||
"run": None,
|
||||
"skip": lambda m: pytest.skip(m),
|
||||
"fail": lambda m: pytest.fail(m, False),
|
||||
"warning": lambda m: warnings.warn(m),
|
||||
}
|
||||
|
||||
|
||||
def _add_ini_and_option(
|
||||
parser: Any,
|
||||
group: OptionGroup,
|
||||
name: str,
|
||||
help: str,
|
||||
default: str | bool | int,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""
|
||||
Add an option to both the ini file and the command line flags.
|
||||
Command line flags/options take precedence over the ini config.
|
||||
"""
|
||||
parser.addini(
|
||||
name,
|
||||
help + " This overrides the similarly named option from the config.",
|
||||
default=default,
|
||||
)
|
||||
group.addoption(f'--{name.replace("_", "-")}', help=help, default=None, **kwargs)
|
||||
|
||||
|
||||
def _get_ini_or_option(
|
||||
config: Any, name: str, choices: Optional[list[str]]
|
||||
) -> str | None:
|
||||
"""
|
||||
Get an option from either the ini file or the command line flags,
|
||||
with the latter taking precedence.
|
||||
"""
|
||||
value = config.getini(name)
|
||||
if value is not None and choices is not None and value not in choices:
|
||||
raise ValueError(
|
||||
f'Invalid ini value for {name}, choose from {", ".join(choices)}'
|
||||
)
|
||||
return config.getoption(name) or value
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
# get all current option strings
|
||||
current_options = []
|
||||
for action in parser._anonymous.options:
|
||||
current_options += action._short_opts + action._long_opts
|
||||
|
||||
for group in parser._groups:
|
||||
for action in group.options:
|
||||
current_options += action._short_opts + action._long_opts
|
||||
|
||||
group = parser.getgroup("depends")
|
||||
|
||||
# Add a flag to list all names + the tests they resolve to
|
||||
if "--list-dependency-names" not in current_options:
|
||||
group.addoption(
|
||||
"--list-dependency-names",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"List all non-nodeid dependency names + the tests they resolve to. "
|
||||
"Will also list all nodeid dependency names in verbose mode."
|
||||
),
|
||||
)
|
||||
|
||||
# Add a flag to list all (resolved) dependencies for all tests + unresolvable names
|
||||
if "--list-processed-dependencies" not in current_options:
|
||||
group.addoption(
|
||||
"--list-processed-dependencies",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"List all dependencies of all tests as a list of nodeids "
|
||||
"+ the names that could not be resolved."
|
||||
),
|
||||
)
|
||||
|
||||
# Add an ini option + flag to choose the action to take for failed dependencies
|
||||
if "--failed-dependency-action" not in current_options:
|
||||
_add_ini_and_option(
|
||||
parser,
|
||||
group,
|
||||
name="failed_dependency_action",
|
||||
help=(
|
||||
"The action to take when a test has dependencies that failed. "
|
||||
'Use "run" to run the test anyway, "skip" to skip the test, '
|
||||
'and "fail" to fail the test.'
|
||||
),
|
||||
default="skip",
|
||||
choices=DEPENDENCY_PROBLEM_ACTIONS.keys(),
|
||||
)
|
||||
|
||||
# Add an ini option + flag to choose the action to take for unresolved dependencies
|
||||
if "--missing-dependency-action" not in current_options:
|
||||
_add_ini_and_option(
|
||||
parser,
|
||||
group,
|
||||
name="missing_dependency_action",
|
||||
help=(
|
||||
"The action to take when a test has dependencies that cannot be found "
|
||||
"within the current scope. "
|
||||
'Use "run" to run the test anyway, "skip" to skip the test, '
|
||||
'and "fail" to fail the test.'
|
||||
),
|
||||
default="warning",
|
||||
choices=DEPENDENCY_PROBLEM_ACTIONS.keys(),
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config: Any) -> None:
|
||||
manager = DependencyManager()
|
||||
managers.append(manager)
|
||||
|
||||
# Setup the handling of problems with dependencies
|
||||
manager.options["failed_dependency_action"] = _get_ini_or_option(
|
||||
config,
|
||||
"failed_dependency_action",
|
||||
list(DEPENDENCY_PROBLEM_ACTIONS.keys()),
|
||||
)
|
||||
manager.options["missing_dependency_action"] = _get_ini_or_option(
|
||||
config,
|
||||
"missing_dependency_action",
|
||||
list(DEPENDENCY_PROBLEM_ACTIONS.keys()),
|
||||
)
|
||||
|
||||
# Register marker
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"depends(name='name', on=['other_name']): marks dependencies between tests.",
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(trylast=True)
|
||||
def pytest_collection_modifyitems(config: Any, items: list[pytest.Function]) -> None:
|
||||
manager = managers[-1]
|
||||
|
||||
# Register the found tests on the manager
|
||||
manager.items = items
|
||||
|
||||
# Show the extra information if requested
|
||||
if config.getoption("list_dependency_names"):
|
||||
verbose = config.getoption("verbose") > 1
|
||||
manager.print_name_map(verbose)
|
||||
if config.getoption("list_processed_dependencies"):
|
||||
color = config.getoption("color")
|
||||
manager.print_processed_dependencies(color)
|
||||
|
||||
# Reorder the items so that tests run after their dependencies
|
||||
items[:] = manager.sorted_items
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
|
||||
def pytest_runtest_makereport(item: Item) -> Any:
|
||||
manager = managers[-1]
|
||||
|
||||
# Run the step
|
||||
outcome = yield
|
||||
|
||||
# Store the result on the manager
|
||||
manager.register_result(item, outcome.get_result())
|
||||
|
||||
|
||||
def pytest_runtest_call(item: Item) -> None:
|
||||
manager = managers[-1]
|
||||
|
||||
# Handle missing dependencies
|
||||
missing_dependency_action = DEPENDENCY_PROBLEM_ACTIONS[
|
||||
manager.options["missing_dependency_action"]
|
||||
]
|
||||
missing = manager.get_missing(item)
|
||||
if missing_dependency_action and missing:
|
||||
missing_dependency_action(
|
||||
f'{item.nodeid} depends on {", ".join(missing)}, which was not found'
|
||||
)
|
||||
|
||||
# Check whether all dependencies succeeded
|
||||
failed_dependency_action = DEPENDENCY_PROBLEM_ACTIONS[
|
||||
manager.options["failed_dependency_action"]
|
||||
]
|
||||
failed = manager.get_failed(item)
|
||||
if failed_dependency_action and failed:
|
||||
failed_dependency_action(f'{item.nodeid} depends on {", ".join(failed)}')
|
||||
|
||||
|
||||
def pytest_unconfigure() -> None:
|
||||
managers.pop()
|
||||
@@ -1,10 +0,0 @@
""" Constants for this module. """

# The name of the marker used
MARKER_NAME = "depends"

# The name of the kwarg for 'depends' markers that contains custom name(s) for the tests
MARKER_KWARG_ID = "name"

# The name of the keyword argument for the marker that specifies the tests to depend on
MARKER_KWARG_DEPENDENCIES = "on"
@@ -1,453 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
import matplotlib.patches as patches
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
import numpy as np
|
||||
from pyvis.network import Network
|
||||
|
||||
from agbenchmark.generate_test import DATA_CATEGORY
|
||||
from agbenchmark.utils.utils import write_pretty_json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def bezier_curve(
|
||||
src: np.ndarray, ctrl: List[float], dst: np.ndarray
|
||||
) -> List[np.ndarray]:
|
||||
"""
|
||||
Generate Bézier curve points.
|
||||
|
||||
Args:
|
||||
- src (np.ndarray): The source point.
|
||||
- ctrl (List[float]): The control point.
|
||||
- dst (np.ndarray): The destination point.
|
||||
|
||||
Returns:
|
||||
- List[np.ndarray]: The Bézier curve points.
|
||||
"""
|
||||
curve = []
|
||||
for t in np.linspace(0, 1, num=100):
|
||||
curve_point = (
|
||||
np.outer((1 - t) ** 2, src)
|
||||
+ 2 * np.outer((1 - t) * t, ctrl)
|
||||
+ np.outer(t**2, dst)
|
||||
)
|
||||
curve.append(curve_point[0])
|
||||
return curve
|
||||
|
||||
|
||||
def curved_edges(
|
||||
G: nx.Graph, pos: Dict[Any, Tuple[float, float]], dist: float = 0.2
|
||||
) -> None:
|
||||
"""
|
||||
Draw curved edges for nodes on the same level.
|
||||
|
||||
Args:
|
||||
- G (Any): The graph object.
|
||||
- pos (Dict[Any, Tuple[float, float]]): Dictionary with node positions.
|
||||
- dist (float, optional): Distance for curvature. Defaults to 0.2.
|
||||
|
||||
Returns:
|
||||
- None
|
||||
"""
|
||||
ax = plt.gca()
|
||||
for u, v, data in G.edges(data=True):
|
||||
_src = pos[u]
|
||||
_dst = pos[v]
|
||||
src = np.array(_src)
|
||||
dst = np.array(_dst)
|
||||
|
||||
same_level = abs(src[1] - dst[1]) < 0.01
|
||||
|
||||
if same_level:
|
||||
control = [(src[0] + dst[0]) / 2, src[1] + dist]
|
||||
curve = bezier_curve(src, control, dst)
|
||||
arrow = patches.FancyArrowPatch(
|
||||
posA=curve[0], # type: ignore
|
||||
posB=curve[-1], # type: ignore
|
||||
connectionstyle="arc3,rad=0.2",
|
||||
color="gray",
|
||||
arrowstyle="-|>",
|
||||
mutation_scale=15.0,
|
||||
lw=1,
|
||||
shrinkA=10,
|
||||
shrinkB=10,
|
||||
)
|
||||
ax.add_patch(arrow)
|
||||
else:
|
||||
ax.annotate(
|
||||
"",
|
||||
xy=_dst,
|
||||
xytext=_src,
|
||||
arrowprops=dict(
|
||||
arrowstyle="-|>", color="gray", lw=1, shrinkA=10, shrinkB=10
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def tree_layout(graph: nx.DiGraph, root_node: Any) -> Dict[Any, Tuple[float, float]]:
|
||||
"""Compute positions as a tree layout centered on the root
|
||||
with alternating vertical shifts."""
|
||||
bfs_tree = nx.bfs_tree(graph, source=root_node)
|
||||
levels = {
|
||||
node: depth
|
||||
for node, depth in nx.single_source_shortest_path_length(
|
||||
bfs_tree, root_node
|
||||
).items()
|
||||
}
|
||||
|
||||
pos = {}
|
||||
max_depth = max(levels.values())
|
||||
level_positions = {i: 0 for i in range(max_depth + 1)} # type: ignore
|
||||
|
||||
# Count the number of nodes per level to compute the width
|
||||
level_count: Any = {}
|
||||
for node, level in levels.items():
|
||||
level_count[level] = level_count.get(level, 0) + 1
|
||||
|
||||
vertical_offset = (
|
||||
0.07 # The amount of vertical shift per node within the same level
|
||||
)
|
||||
|
||||
# Assign positions
|
||||
for node, level in sorted(levels.items(), key=lambda x: x[1]):
|
||||
total_nodes_in_level = level_count[level]
|
||||
horizontal_spacing = 1.0 / (total_nodes_in_level + 1)
|
||||
pos_x = (
|
||||
0.5
|
||||
- (total_nodes_in_level - 1) * horizontal_spacing / 2
|
||||
+ level_positions[level] * horizontal_spacing
|
||||
)
|
||||
|
||||
# Alternately shift nodes up and down within the same level
|
||||
pos_y = (
|
||||
-level
|
||||
+ (level_positions[level] % 2) * vertical_offset
|
||||
- ((level_positions[level] + 1) % 2) * vertical_offset
|
||||
)
|
||||
pos[node] = (pos_x, pos_y)
|
||||
|
||||
level_positions[level] += 1
|
||||
|
||||
return pos
|
||||
|
||||
|
||||
def graph_spring_layout(
|
||||
dag: nx.DiGraph, labels: Dict[Any, str], tree: bool = True
|
||||
) -> None:
|
||||
num_nodes = len(list(dag.nodes()))
|
||||
# Setting up the figure and axis
|
||||
fig, ax = plt.subplots()
|
||||
ax.axis("off") # Turn off the axis
|
||||
|
||||
base = 3.0
|
||||
|
||||
if num_nodes > 10:
|
||||
base /= 1 + math.log(num_nodes)
|
||||
font_size = base * 10
|
||||
|
||||
font_size = max(10, base * 10)
|
||||
node_size = max(300, base * 1000)
|
||||
|
||||
if tree:
|
||||
root_node = [node for node, degree in dag.in_degree() if degree == 0][0]
|
||||
pos = tree_layout(dag, root_node)
|
||||
else:
|
||||
# Adjust k for the spring layout based on node count
|
||||
k_value = 3 / math.sqrt(num_nodes)
|
||||
|
||||
pos = nx.spring_layout(dag, k=k_value, iterations=50)
|
||||
|
||||
# Draw nodes and labels
|
||||
nx.draw_networkx_nodes(dag, pos, node_color="skyblue", node_size=int(node_size))
|
||||
nx.draw_networkx_labels(dag, pos, labels=labels, font_size=int(font_size))
|
||||
|
||||
# Draw curved edges
|
||||
curved_edges(dag, pos) # type: ignore
|
||||
|
||||
plt.tight_layout()
|
||||
plt.show()
|
||||
|
||||
|
||||
def rgb_to_hex(rgb: Tuple[float, float, float]) -> str:
|
||||
return "#{:02x}{:02x}{:02x}".format(
|
||||
int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
|
||||
)
|
||||
|
||||
|
||||
def get_category_colors(categories: Dict[Any, str]) -> Dict[str, str]:
|
||||
unique_categories = set(categories.values())
|
||||
colormap = plt.cm.get_cmap("tab10", len(unique_categories)) # type: ignore
|
||||
return {
|
||||
category: rgb_to_hex(colormap(i)[:3])
|
||||
for i, category in enumerate(unique_categories)
|
||||
}
|
||||
|
||||
|
||||
def graph_interactive_network(
|
||||
dag: nx.DiGraph,
|
||||
labels: Dict[Any, Dict[str, Any]],
|
||||
html_graph_path: str = "",
|
||||
) -> None:
|
||||
nt = Network(notebook=True, width="100%", height="800px", directed=True)
|
||||
|
||||
category_colors = get_category_colors(DATA_CATEGORY)
|
||||
|
||||
# Add nodes and edges to the pyvis network
|
||||
for node, json_data in labels.items():
|
||||
label = json_data.get("name", "")
|
||||
# remove the first 4 letters of label
|
||||
label_without_test = label[4:]
|
||||
node_id_str = node.nodeid
|
||||
|
||||
# Get the category for this label
|
||||
category = DATA_CATEGORY.get(
|
||||
label, "unknown"
|
||||
) # Default to 'unknown' if label not found
|
||||
|
||||
# Get the color for this category
|
||||
color = category_colors.get(category, "grey")
|
||||
|
||||
nt.add_node(
|
||||
node_id_str,
|
||||
label=label_without_test,
|
||||
color=color,
|
||||
data=json_data,
|
||||
)
|
||||
|
||||
# Add edges to the pyvis network
|
||||
for edge in dag.edges():
|
||||
source_id_str = edge[0].nodeid
|
||||
target_id_str = edge[1].nodeid
|
||||
edge_id_str = (
|
||||
f"{source_id_str}_to_{target_id_str}" # Construct a unique edge id
|
||||
)
|
||||
if not (source_id_str in nt.get_nodes() and target_id_str in nt.get_nodes()):
|
||||
logger.warning(
|
||||
f"Skipping edge {source_id_str} -> {target_id_str} due to missing nodes"
|
||||
)
|
||||
continue
|
||||
nt.add_edge(source_id_str, target_id_str, id=edge_id_str)
|
||||
|
||||
# Configure physics for hierarchical layout
|
||||
hierarchical_options = {
|
||||
"enabled": True,
|
||||
"levelSeparation": 200, # Increased vertical spacing between levels
|
||||
"nodeSpacing": 250, # Increased spacing between nodes on the same level
|
||||
"treeSpacing": 250, # Increased spacing between different trees (for forest)
|
||||
"blockShifting": True,
|
||||
"edgeMinimization": True,
|
||||
"parentCentralization": True,
|
||||
"direction": "UD",
|
||||
"sortMethod": "directed",
|
||||
}
|
||||
|
||||
physics_options = {
|
||||
"stabilization": {
|
||||
"enabled": True,
|
||||
"iterations": 1000, # Default is often around 100
|
||||
},
|
||||
"hierarchicalRepulsion": {
|
||||
"centralGravity": 0.0,
|
||||
"springLength": 200, # Increased edge length
|
||||
"springConstant": 0.01,
|
||||
"nodeDistance": 250, # Increased minimum distance between nodes
|
||||
"damping": 0.09,
|
||||
},
|
||||
"solver": "hierarchicalRepulsion",
|
||||
"timestep": 0.5,
|
||||
}
|
||||
|
||||
nt.options = {
|
||||
"nodes": {
|
||||
"font": {
|
||||
"size": 20, # Increased font size for labels
|
||||
"color": "black", # Set a readable font color
|
||||
},
|
||||
"shapeProperties": {"useBorderWithImage": True},
|
||||
},
|
||||
"edges": {
|
||||
"length": 250, # Increased edge length
|
||||
},
|
||||
"physics": physics_options,
|
||||
"layout": {"hierarchical": hierarchical_options},
|
||||
}
|
||||
|
||||
# Serialize the graph to JSON and save in appropriate locations
|
||||
graph_data = {"nodes": nt.nodes, "edges": nt.edges}
|
||||
logger.debug(f"Generated graph data:\n{json.dumps(graph_data, indent=4)}")
|
||||
|
||||
# FIXME: use more reliable method to find the right location for these files.
|
||||
# This will fail in all cases except if run from the root of our repo.
|
||||
home_path = Path.cwd()
|
||||
write_pretty_json(graph_data, home_path / "frontend" / "public" / "graph.json")
|
||||
|
||||
flutter_app_path = home_path.parent / "frontend" / "assets"
|
||||
|
||||
# Optionally, save to a file
|
||||
# Sync with the flutter UI
|
||||
# this literally only works in the AutoGPT repo, but this part of the code
|
||||
# is not reached if BUILD_SKILL_TREE is false
|
||||
write_pretty_json(graph_data, flutter_app_path / "tree_structure.json")
|
||||
validate_skill_tree(graph_data, "")
|
||||
|
||||
# Extract node IDs with category "coding"
|
||||
|
||||
coding_tree = extract_subgraph_based_on_category(graph_data.copy(), "coding")
|
||||
validate_skill_tree(coding_tree, "coding")
|
||||
write_pretty_json(
|
||||
coding_tree,
|
||||
flutter_app_path / "coding_tree_structure.json",
|
||||
)
|
||||
|
||||
data_tree = extract_subgraph_based_on_category(graph_data.copy(), "data")
|
||||
# validate_skill_tree(data_tree, "data")
|
||||
write_pretty_json(
|
||||
data_tree,
|
||||
flutter_app_path / "data_tree_structure.json",
|
||||
)
|
||||
|
||||
general_tree = extract_subgraph_based_on_category(graph_data.copy(), "general")
|
||||
validate_skill_tree(general_tree, "general")
|
||||
write_pretty_json(
|
||||
general_tree,
|
||||
flutter_app_path / "general_tree_structure.json",
|
||||
)
|
||||
|
||||
scrape_synthesize_tree = extract_subgraph_based_on_category(
|
||||
graph_data.copy(), "scrape_synthesize"
|
||||
)
|
||||
validate_skill_tree(scrape_synthesize_tree, "scrape_synthesize")
|
||||
write_pretty_json(
|
||||
scrape_synthesize_tree,
|
||||
flutter_app_path / "scrape_synthesize_tree_structure.json",
|
||||
)
|
||||
|
||||
if html_graph_path:
|
||||
file_path = str(Path(html_graph_path).resolve())
|
||||
|
||||
nt.write_html(file_path)
|
||||
|
||||
|
||||
def extract_subgraph_based_on_category(graph, category):
|
||||
"""
|
||||
Extracts a subgraph that includes all nodes and edges required to reach all nodes
|
||||
with a specified category.
|
||||
|
||||
:param graph: The original graph.
|
||||
:param category: The target category.
|
||||
:return: Subgraph with nodes and edges required to reach the nodes
|
||||
with the given category.
|
||||
"""
|
||||
|
||||
subgraph = {"nodes": [], "edges": []}
|
||||
visited = set()
|
||||
|
||||
def reverse_dfs(node_id):
|
||||
if node_id in visited:
|
||||
return
|
||||
visited.add(node_id)
|
||||
|
||||
node_data = next(node for node in graph["nodes"] if node["id"] == node_id)
|
||||
|
||||
# Add the node to the subgraph if it's not already present.
|
||||
if node_data not in subgraph["nodes"]:
|
||||
subgraph["nodes"].append(node_data)
|
||||
|
||||
for edge in graph["edges"]:
|
||||
if edge["to"] == node_id:
|
||||
if edge not in subgraph["edges"]:
|
||||
subgraph["edges"].append(edge)
|
||||
reverse_dfs(edge["from"])
|
||||
|
||||
# Identify nodes with the target category and initiate reverse DFS from them.
|
||||
nodes_with_target_category = [
|
||||
node["id"] for node in graph["nodes"] if category in node["data"]["category"]
|
||||
]
|
||||
|
||||
for node_id in nodes_with_target_category:
|
||||
reverse_dfs(node_id)
|
||||
|
||||
return subgraph
|
||||
|
||||
|
||||
def is_circular(graph):
|
||||
def dfs(node, visited, stack, parent_map):
|
||||
visited.add(node)
|
||||
stack.add(node)
|
||||
for edge in graph["edges"]:
|
||||
if edge["from"] == node:
|
||||
if edge["to"] in stack:
|
||||
# Detected a cycle
|
||||
cycle_path = []
|
||||
current = node
|
||||
while current != edge["to"]:
|
||||
cycle_path.append(current)
|
||||
current = parent_map.get(current)
|
||||
cycle_path.append(edge["to"])
|
||||
cycle_path.append(node)
|
||||
return cycle_path[::-1]
|
||||
elif edge["to"] not in visited:
|
||||
parent_map[edge["to"]] = node
|
||||
cycle_path = dfs(edge["to"], visited, stack, parent_map)
|
||||
if cycle_path:
|
||||
return cycle_path
|
||||
stack.remove(node)
|
||||
return None
|
||||
|
||||
visited = set()
|
||||
stack = set()
|
||||
parent_map = {}
|
||||
for node in graph["nodes"]:
|
||||
node_id = node["id"]
|
||||
if node_id not in visited:
|
||||
cycle_path = dfs(node_id, visited, stack, parent_map)
|
||||
if cycle_path:
|
||||
return cycle_path
|
||||
return None
|
||||
|
||||
|
||||
def get_roots(graph):
|
||||
"""
|
||||
Return the roots of a graph. Roots are nodes with no incoming edges.
|
||||
"""
|
||||
# Create a set of all node IDs
|
||||
all_nodes = {node["id"] for node in graph["nodes"]}
|
||||
|
||||
# Create a set of nodes with incoming edges
|
||||
nodes_with_incoming_edges = {edge["to"] for edge in graph["edges"]}
|
||||
|
||||
# Roots are nodes that have no incoming edges
|
||||
roots = all_nodes - nodes_with_incoming_edges
|
||||
|
||||
return list(roots)
|
||||
|
||||
|
||||
def validate_skill_tree(graph, skill_tree_name):
|
||||
"""
|
||||
Validate if a given graph represents a valid skill tree
|
||||
and raise appropriate exceptions if not.
|
||||
|
||||
:param graph: A dictionary representing the graph with 'nodes' and 'edges'.
|
||||
:raises: ValueError with a description of the invalidity.
|
||||
"""
|
||||
# Check for circularity
|
||||
cycle_path = is_circular(graph)
|
||||
if cycle_path:
|
||||
cycle_str = " -> ".join(cycle_path)
|
||||
raise ValueError(
|
||||
f"{skill_tree_name} skill tree is circular! "
|
||||
f"Detected circular path: {cycle_str}."
|
||||
)
|
||||
|
||||
# Check for multiple roots
|
||||
roots = get_roots(graph)
|
||||
if len(roots) > 1:
|
||||
raise ValueError(f"{skill_tree_name} skill tree has multiple roots: {roots}.")
|
||||
elif not roots:
|
||||
raise ValueError(f"{skill_tree_name} skill tree has no roots.")
|
||||
@@ -1,255 +0,0 @@
|
||||
"""
|
||||
A module to manage dependencies between pytest tests.
|
||||
|
||||
This module provides the methods implementing the main logic.
|
||||
These are used in the pytest hooks that are in __init__.py.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import os
|
||||
from typing import Any, Generator
|
||||
|
||||
import colorama
|
||||
import networkx
|
||||
from pytest import Function, Item
|
||||
|
||||
from agbenchmark.challenges.base import BaseChallenge
|
||||
|
||||
from .constants import MARKER_KWARG_DEPENDENCIES, MARKER_NAME
|
||||
from .graphs import graph_interactive_network
|
||||
from .util import clean_nodeid, get_absolute_nodeid, get_markers, get_name
|
||||
|
||||
|
||||
class TestResult(object):
|
||||
"""Keeps track of the results of a single test."""
|
||||
|
||||
STEPS = ["setup", "call", "teardown"]
|
||||
GOOD_OUTCOMES = ["passed"]
|
||||
|
||||
def __init__(self, nodeid: str) -> None:
|
||||
"""Create a new instance for a test with a given node id."""
|
||||
self.nodeid = nodeid
|
||||
self.results: dict[str, Any] = {}
|
||||
|
||||
def register_result(self, result: Any) -> None:
|
||||
"""Register a result of this test."""
|
||||
if result.when not in self.STEPS:
|
||||
raise ValueError(
|
||||
f"Received result for unknown step {result.when} of test {self.nodeid}"
|
||||
)
|
||||
if result.when in self.results:
|
||||
raise AttributeError(
|
||||
f"Received multiple results for step {result.when} "
|
||||
f"of test {self.nodeid}"
|
||||
)
|
||||
self.results[result.when] = result.outcome
|
||||
|
||||
@property
|
||||
def success(self) -> bool:
|
||||
"""Whether the entire test was successful."""
|
||||
return all(
|
||||
self.results.get(step, None) in self.GOOD_OUTCOMES for step in self.STEPS
|
||||
)
|
||||
|
||||
|
||||
class TestDependencies(object):
|
||||
"""Information about the resolved dependencies of a single test."""
|
||||
|
||||
def __init__(self, item: Item, manager: "DependencyManager") -> None:
|
||||
"""Create a new instance for a given test."""
|
||||
self.nodeid = clean_nodeid(item.nodeid)
|
||||
self.dependencies = set()
|
||||
self.unresolved = set()
|
||||
|
||||
markers = get_markers(item, MARKER_NAME)
|
||||
dependencies = [
|
||||
dep
|
||||
for marker in markers
|
||||
for dep in marker.kwargs.get(MARKER_KWARG_DEPENDENCIES, [])
|
||||
]
|
||||
for dependency in dependencies:
|
||||
# If the name is not known, try to make it absolute (file::[class::]method)
|
||||
if dependency not in manager.name_to_nodeids:
|
||||
absolute_dependency = get_absolute_nodeid(dependency, self.nodeid)
|
||||
if absolute_dependency in manager.name_to_nodeids:
|
||||
dependency = absolute_dependency
|
||||
|
||||
# Add all items matching the name
|
||||
if dependency in manager.name_to_nodeids:
|
||||
for nodeid in manager.name_to_nodeids[dependency]:
|
||||
self.dependencies.add(nodeid)
|
||||
else:
|
||||
self.unresolved.add(dependency)
|
||||
|
||||
|
||||
class DependencyManager(object):
|
||||
"""Keep track of tests, their names and their dependencies."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Create a new DependencyManager."""
|
||||
self.options: dict[str, Any] = {}
|
||||
self._items: list[Function] | None = None
|
||||
self._name_to_nodeids: Any = None
|
||||
self._nodeid_to_item: Any = None
|
||||
self._results: Any = None
|
||||
|
||||
@property
|
||||
def items(self) -> list[Function]:
|
||||
"""The collected tests that are managed by this instance."""
|
||||
if self._items is None:
|
||||
raise AttributeError("The items attribute has not been set yet")
|
||||
return self._items
|
||||
|
||||
@items.setter
|
||||
def items(self, items: list[Function]) -> None:
|
||||
if self._items is not None:
|
||||
raise AttributeError("The items attribute has already been set")
|
||||
self._items = items
|
||||
|
||||
self._name_to_nodeids = collections.defaultdict(list)
|
||||
self._nodeid_to_item = {}
|
||||
self._results = {}
|
||||
self._dependencies = {}
|
||||
|
||||
for item in items:
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
# Add the mapping from nodeid to the test item
|
||||
self._nodeid_to_item[nodeid] = item
|
||||
# Add the mappings from all names to the node id
|
||||
name = get_name(item)
|
||||
self._name_to_nodeids[name].append(nodeid)
|
||||
# Create the object that will contain the results of this test
|
||||
self._results[nodeid] = TestResult(clean_nodeid(item.nodeid))
|
||||
|
||||
# Don't allow using unknown keys on the name_to_nodeids mapping
|
||||
self._name_to_nodeids.default_factory = None
|
||||
|
||||
for item in items:
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
# Process the dependencies of this test
|
||||
# This uses the mappings created in the previous loop,
|
||||
# and can thus not be merged into that loop
|
||||
self._dependencies[nodeid] = TestDependencies(item, self)
|
||||
|
||||
@property
|
||||
def name_to_nodeids(self) -> dict[str, list[str]]:
|
||||
"""A mapping from names to matching node id(s)."""
|
||||
assert self.items is not None
|
||||
return self._name_to_nodeids
|
||||
|
||||
@property
|
||||
def nodeid_to_item(self) -> dict[str, Function]:
|
||||
"""A mapping from node ids to test items."""
|
||||
assert self.items is not None
|
||||
return self._nodeid_to_item
|
||||
|
||||
@property
|
||||
def results(self) -> dict[str, TestResult]:
|
||||
"""The results of the tests."""
|
||||
assert self.items is not None
|
||||
return self._results
|
||||
|
||||
@property
|
||||
def dependencies(self) -> dict[str, TestDependencies]:
|
||||
"""The dependencies of the tests."""
|
||||
assert self.items is not None
|
||||
return self._dependencies
|
||||
|
||||
def print_name_map(self, verbose: bool = False) -> None:
|
||||
"""Print a human-readable version of the name -> test mapping."""
|
||||
print("Available dependency names:")
|
||||
for name, nodeids in sorted(self.name_to_nodeids.items(), key=lambda x: x[0]):
|
||||
if len(nodeids) == 1:
|
||||
if name == nodeids[0]:
|
||||
# This is just the base name, only print this when verbose
|
||||
if verbose:
|
||||
print(f" {name}")
|
||||
else:
|
||||
# Name refers to a single node id, so use the short format
|
||||
print(f" {name} -> {nodeids[0]}")
|
||||
else:
|
||||
# Name refers to multiple node ids, so use the long format
|
||||
print(f" {name} ->")
|
||||
for nodeid in sorted(nodeids):
|
||||
print(f" {nodeid}")
|
||||
|
||||
def print_processed_dependencies(self, colors: bool = False) -> None:
|
||||
"""Print a human-readable list of the processed dependencies."""
|
||||
missing = "MISSING"
|
||||
if colors:
|
||||
missing = f"{colorama.Fore.RED}{missing}{colorama.Fore.RESET}"
|
||||
colorama.init()
|
||||
try:
|
||||
print("Dependencies:")
|
||||
for nodeid, info in sorted(self.dependencies.items(), key=lambda x: x[0]):
|
||||
descriptions = []
|
||||
for dependency in info.dependencies:
|
||||
descriptions.append(dependency)
|
||||
for dependency in info.unresolved:
|
||||
descriptions.append(f"{dependency} ({missing})")
|
||||
if descriptions:
|
||||
print(f" {nodeid} depends on")
|
||||
for description in sorted(descriptions):
|
||||
print(f" {description}")
|
||||
finally:
|
||||
if colors:
|
||||
colorama.deinit()
|
||||
|
||||
@property
|
||||
def sorted_items(self) -> Generator:
|
||||
"""
|
||||
Get a sorted list of tests where all tests are sorted after their dependencies.
|
||||
"""
|
||||
# Build a directed graph for sorting
|
||||
build_skill_tree = os.getenv("BUILD_SKILL_TREE")
|
||||
BUILD_SKILL_TREE = (
|
||||
build_skill_tree.lower() == "true" if build_skill_tree else False
|
||||
)
|
||||
dag = networkx.DiGraph()
|
||||
|
||||
# Insert all items as nodes, to prevent items that have no dependencies
|
||||
# and are not dependencies themselves from being lost
|
||||
dag.add_nodes_from(self.items)
|
||||
|
||||
# Insert edges for all the dependencies
|
||||
for item in self.items:
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
for dependency in self.dependencies[nodeid].dependencies:
|
||||
dag.add_edge(self.nodeid_to_item[dependency], item)
|
||||
|
||||
labels = {}
|
||||
for item in self.items:
|
||||
assert item.cls and issubclass(item.cls, BaseChallenge)
|
||||
data = item.cls.info.model_dump()
|
||||
|
||||
node_name = get_name(item)
|
||||
data["name"] = node_name
|
||||
labels[item] = data
|
||||
|
||||
# only build the tree if it's specified in the env and is a whole run
|
||||
if BUILD_SKILL_TREE:
|
||||
# graph_spring_layout(dag, labels)
|
||||
graph_interactive_network(dag, labels, html_graph_path="")
|
||||
|
||||
# Sort based on the dependencies
|
||||
return networkx.topological_sort(dag)
|
||||
|
||||
def register_result(self, item: Item, result: Any) -> None:
|
||||
"""Register a result of a test."""
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
self.results[nodeid].register_result(result)
|
||||
|
||||
def get_failed(self, item: Item) -> Any:
|
||||
"""Get a list of unfulfilled dependencies for a test."""
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
failed = []
|
||||
for dependency in self.dependencies[nodeid].dependencies:
|
||||
result = self.results[dependency]
|
||||
if not result.success:
|
||||
failed.append(dependency)
|
||||
return failed
|
||||
|
||||
def get_missing(self, item: Item) -> Any:
|
||||
"""Get a list of missing dependencies for a test."""
|
||||
nodeid = clean_nodeid(item.nodeid)
|
||||
return self.dependencies[nodeid].unresolved
|
||||
@@ -1,86 +0,0 @@
""" Utility functions to process the identifiers of tests. """
import re
from typing import Iterator

from _pytest.mark.structures import Mark
from _pytest.nodes import Item

from .constants import MARKER_KWARG_ID, MARKER_NAME

REGEX_PARAMETERS = re.compile(r"\[.+\]$")


def clean_nodeid(nodeid: str) -> str:
    """
    Remove any superfluous ::() from a node id.

    >>> clean_nodeid('test_file.py::TestClass::()::test')
    'test_file.py::TestClass::test'
    >>> clean_nodeid('test_file.py::TestClass::test')
    'test_file.py::TestClass::test'
    >>> clean_nodeid('test_file.py::test')
    'test_file.py::test'
    """
    return nodeid.replace("::()::", "::")


def strip_nodeid_parameters(nodeid: str) -> str:
    """
    Strip parameters from a node id.

    >>> strip_nodeid_parameters('test_file.py::TestClass::test[foo]')
    'test_file.py::TestClass::test'
    >>> strip_nodeid_parameters('test_file.py::TestClass::test')
    'test_file.py::TestClass::test'
    """
    return REGEX_PARAMETERS.sub("", nodeid)


def get_absolute_nodeid(nodeid: str, scope: str) -> str:
    """
    Transform a possibly relative node id to an absolute one
    using the scope in which it is used.

    >>> scope = 'test_file.py::TestClass::test'
    >>> get_absolute_nodeid('test2', scope)
    'test_file.py::TestClass::test2'
    >>> get_absolute_nodeid('TestClass2::test2', scope)
    'test_file.py::TestClass2::test2'
    >>> get_absolute_nodeid('test_file2.py::TestClass2::test2', scope)
    'test_file2.py::TestClass2::test2'
    """
    parts = nodeid.split("::")
    # Completely relative (test_name): add the full current scope (file::class or file)
    if len(parts) == 1:
        base_nodeid = scope.rsplit("::", 1)[0]
        nodeid = f"{base_nodeid}::{nodeid}"
    # Contains some scope already (Class::test_name), so only add the current file scope
    elif "." not in parts[0]:
        base_nodeid = scope.split("::", 1)[0]
        nodeid = f"{base_nodeid}::{nodeid}"
    return clean_nodeid(nodeid)


def get_name(item: Item) -> str:
    """
    Get all names for a test.

    This will use the following methods to determine the name of the test:
        - If given, the custom name(s) passed to the keyword argument name on the marker
    """
    name = ""

    # Custom name
    markers = get_markers(item, MARKER_NAME)
    for marker in markers:
        if MARKER_KWARG_ID in marker.kwargs:
            name = marker.kwargs[MARKER_KWARG_ID]

    return name


def get_markers(item: Item, name: str) -> Iterator[Mark]:
    """Get all markers with the given name for a given item."""
    for marker in item.iter_markers():
        if marker.name == name:
            yield marker
@@ -1,84 +0,0 @@
import json
import logging
import os
from typing import Optional

import requests

from agbenchmark.__main__ import BENCHMARK_START_TIME
from agbenchmark.agent_interface import HELICONE_GRAPHQL_LOGS

logger = logging.getLogger(__name__)


def get_data_from_helicone(challenge: str) -> Optional[float]:
    # Define the endpoint of your GraphQL server
    url = "https://www.helicone.ai/api/graphql"

    # Set the headers, usually you'd need to set the content type
    # and possibly an authorization token
    headers = {"authorization": f"Bearer {os.environ.get('HELICONE_API_KEY')}"}

    # Define the query, variables, and operation name
    query = """
    query ExampleQuery($properties: [PropertyFilter!]){
      aggregatedHeliconeRequest(properties: $properties) {
        costUSD
      }
    }
    """

    variables = {
        "properties": [
            {
                "value": {"equals": os.environ.get("AGENT_NAME")},
                "name": "agent",
            },
            {
                "value": {"equals": BENCHMARK_START_TIME},
                "name": "benchmark_start_time",
            },
            {"value": {"equals": challenge}, "name": "challenge"},
        ]
    }
    if HELICONE_GRAPHQL_LOGS:
        logger.debug(f"Executing Helicone query:\n{query.strip()}")
        logger.debug(f"Query variables:\n{json.dumps(variables, indent=4)}")

    operation_name = "ExampleQuery"

    data = {}
    response = None

    try:
        response = requests.post(
            url,
            headers=headers,
            json={
                "query": query,
                "variables": variables,
                "operationName": operation_name,
            },
        )

        data = response.json()
    except requests.HTTPError as http_err:
        logger.error(f"Helicone returned an HTTP error: {http_err}")
        return None
    except json.JSONDecodeError:
        raw_response = response.text  # type: ignore
        logger.error(
            f"Helicone returned an invalid JSON response: '''{raw_response}'''"
        )
        return None
    except Exception as err:
        logger.error(f"Error while trying to get data from Helicone: {err}")
        return None

    if data is None or data.get("data") is None:
        logger.error("Invalid response received from Helicone: no data")
        logger.error(f"Offending response: {response}")
        return None
    return (
        data.get("data", {}).get("aggregatedHeliconeRequest", {}).get("costUSD", None)
    )
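A minimal sketch of how get_data_from_helicone() might be invoked, assuming the module above imports cleanly and the two environment variables it reads are set; the values and the challenge name below are placeholders. Worth noting: requests.post() only raises requests.HTTPError when raise_for_status() is called, so non-2xx responses from Helicone surface through the JSON and "no data" checks rather than the HTTPError branch.

# Sketch only: assumes get_data_from_helicone() from the file above is in scope
# and that the environment is configured the way the benchmark harness does it.
import os

os.environ["HELICONE_API_KEY"] = "<your Helicone API key>"  # placeholder
os.environ["AGENT_NAME"] = "my-agent"                       # placeholder

cost = get_data_from_helicone("TestWriteFile")  # challenge name is illustrative
if cost is None:
    print("No cost data returned (request failed or no matching requests).")
else:
    print(f"LLM cost for this challenge: ${cost:.4f} USD")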
@@ -1,74 +0,0 @@
from __future__ import annotations

import logging

from colorama import Fore, Style

SIMPLE_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(message)s"
DEBUG_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(filename)s:%(lineno)03d %(message)s"


def configure_logging(
    level: int = logging.INFO,
) -> None:
    """Configure the native logging module."""

    # Auto-adjust default log format based on log level
    log_format = DEBUG_LOG_FORMAT if level == logging.DEBUG else SIMPLE_LOG_FORMAT

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(FancyConsoleFormatter(log_format))

    # Configure the root logger
    logging.basicConfig(
        level=level,
        format=log_format,
        handlers=[console_handler],
    )


class FancyConsoleFormatter(logging.Formatter):
    """
    A custom logging formatter designed for console output.

    This formatter enhances the standard logging output with color coding. The color
    coding is based on the level of the log message, making it easier to distinguish
    between different types of messages in the console output.

    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
    """

    # level -> (level & text color, title color)
    LEVEL_COLOR_MAP = {
        logging.DEBUG: Fore.LIGHTBLACK_EX,
        logging.INFO: Fore.BLUE,
        logging.WARNING: Fore.YELLOW,
        logging.ERROR: Fore.RED,
        logging.CRITICAL: Fore.RED + Style.BRIGHT,
    }

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif not type(record.msg) is str:
            record.msg = str(record.msg)

        # Justify the level name to 5 characters minimum
        record.levelname = record.levelname.ljust(5)

        # Determine default color based on error level
        level_color = ""
        if record.levelno in self.LEVEL_COLOR_MAP:
            level_color = self.LEVEL_COLOR_MAP[record.levelno]
            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"

        # Determine color for message
        color = getattr(record, "color", level_color)
        color_is_specified = hasattr(record, "color")

        # Don't color INFO messages unless the color is explicitly specified.
        if color and (record.levelno != logging.INFO or color_is_specified):
            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"

        return super().format(record)
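A short usage sketch of the formatter above. The `getattr(record, "color", ...)` lookup in format() is what makes `extra={"color": ...}` work; the import path below is a placeholder, since this diff does not show where the module lives.

# Sketch only: "benchmark_logging" is a placeholder module name for the file above.
import logging

from colorama import Fore

from benchmark_logging import configure_logging

configure_logging(level=logging.DEBUG)
logger = logging.getLogger(__name__)

logger.info("plain INFO stays uncolored by default")
logger.info("explicitly colored INFO", extra={"color": Fore.GREEN})
logger.warning("WARNING renders yellow via LEVEL_COLOR_MAP")
logger.error("ERROR renders red")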
@@ -1,79 +0,0 @@
SCORING_MAP = {
    "percentage": (
        "assign a float score that will represent a percentage out of 100. "
        "Use decimal points to be even more accurate. "
        "0 represents the worst possible generation, "
        "while 100 represents the ideal generation"
    ),
    "scale": (
        "assign an integer score from a scale of 1-10. "
        "1 represents a really bad generation, while 10 represents an ideal generation"
    ),
    "binary": (
        "assign a binary score of either 0 or 1. "
        "0 represents a failure, while 1 represents a success"
    ),
}


REFERENCE_PROMPT = """Ignore previous directions. You are now an expert at evaluating how close machine generated responses are to human answers. You essentially act as a hyper advanced BLEU score.
In order to score the machine generated response you will {scoring}. Make sure to factor in the distance to the ideal response into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.

Here is the given task for you to evaluate:
{task}

Here is the ideal response you're comparing to based on the task:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

RUBRIC_PROMPT = """Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.
In order to score the generated texts you will {scoring}. Make sure to factor in rubric into your thinking, deliberation, and final result regarding scoring. Return nothing but a float score.

Here is the given task for you to evaluate:
{task}

Use the below rubric to guide your thinking about scoring:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

QUESTION_PROMPT = """Ignore previous directions. You are now an expert at evaluating machine generated responses to given tasks.
In order to score the generated texts you will {scoring}. Make sure to think about whether the generated response answers the question well in order to score accurately. Return nothing but a float score.

Here is the given task:
{task}

Here is a question that checks if the task was completed correctly:
{answer}

Here is the current machine generated response to the task that you need to evaluate:
{response}

"""  # noqa: E501

FEW_SHOT_EXAMPLES = """Here are some examples of how to score a machine generated response based on the above:
{examples}

"""  # noqa: E501

CUSTOM_PROMPT = """{custom}
{scoring}

"""

PROMPT_MAP = {
    "rubric": RUBRIC_PROMPT,
    "reference": REFERENCE_PROMPT,
    "question": QUESTION_PROMPT,
    "custom": CUSTOM_PROMPT,
}

END_PROMPT = """Remember to always end your response with nothing but a float score.
Float score:"""
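These templates are composed at runtime by the benchmark's LLM-based evaluator: a prompt from PROMPT_MAP is formatted with a scoring instruction from SCORING_MAP plus the task, answer, and response, and END_PROMPT is appended to force a bare float. The sketch below shows one plausible composition; the exact assembly in this repo may differ, and the task/answer/response values are invented.

# Sketch only: assumes SCORING_MAP, PROMPT_MAP, and END_PROMPT above are in scope;
# the example values are made up for illustration.
evaluation_type = "rubric"   # one of: "rubric", "reference", "question", "custom"
scoring_method = "scale"     # one of: "percentage", "scale", "binary"

prompt = PROMPT_MAP[evaluation_type].format(
    scoring=SCORING_MAP[scoring_method],
    task="Write a function that reverses a string.",
    answer="Full marks if the function handles empty strings and unicode.",
    response="def reverse(s): return s[::-1]",
)
prompt += END_PROMPT  # nudges the model to end with nothing but a float score
print(prompt)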
@@ -1,216 +0,0 @@
# radio charts, logs, helper functions for tests, anything else relevant.
import json
import logging
import os
import re
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Iterable, Optional, TypeVar, overload

import click
from dotenv import load_dotenv
from pydantic import BaseModel

from agbenchmark.reports.processing.report_types import Test
from agbenchmark.utils.data_types import DIFFICULTY_MAP, DifficultyLevel

load_dotenv()

AGENT_NAME = os.getenv("AGENT_NAME")

logger = logging.getLogger(__name__)

T = TypeVar("T")
E = TypeVar("E", bound=Enum)


def replace_backslash(value: Any) -> Any:
    if isinstance(value, str):
        return re.sub(
            r"\\+", "/", value
        )  # replace one or more backslashes with a forward slash
    elif isinstance(value, list):
        return [replace_backslash(i) for i in value]
    elif isinstance(value, dict):
        return {k: replace_backslash(v) for k, v in value.items()}
    else:
        return value


def get_test_path(json_file: str | Path) -> str:
    if isinstance(json_file, str):
        json_file = Path(json_file)

    # Find the index of "agbenchmark" in the path parts
    try:
        agbenchmark_index = json_file.parts.index("benchmark")
    except ValueError:
        raise ValueError("Invalid challenge location.")

    # Create the path from "agbenchmark" onwards
    challenge_location = Path(*json_file.parts[agbenchmark_index:])

    formatted_location = replace_backslash(str(challenge_location))
    if isinstance(formatted_location, str):
        return formatted_location
    else:
        return str(challenge_location)


def get_highest_success_difficulty(
    data: dict[str, Test], just_string: Optional[bool] = None
) -> str:
    highest_difficulty = None
    highest_difficulty_level = 0

    for test_name, test_data in data.items():
        try:
            if any(r.success for r in test_data.results):
                difficulty_str = test_data.difficulty
                if not difficulty_str:
                    continue

                try:
                    difficulty_enum = DifficultyLevel[difficulty_str.lower()]
                    difficulty_level = DIFFICULTY_MAP[difficulty_enum]

                    if difficulty_level > highest_difficulty_level:
                        highest_difficulty = difficulty_enum
                        highest_difficulty_level = difficulty_level
                except KeyError:
                    logger.warning(
                        f"Unexpected difficulty level '{difficulty_str}' "
                        f"in test '{test_name}'"
                    )
                    continue
        except Exception as e:
            logger.warning(
                "An unexpected error [1] occurred while analyzing report [2]."
                "Please notify a maintainer.\n"
                f"Report data [1]: {data}\n"
                f"Error [2]: {e}"
            )
            logger.warning(
                "Make sure you selected the right test, no reports were generated."
            )
            break

    if highest_difficulty is not None:
        highest_difficulty_str = highest_difficulty.name  # convert enum to string
    else:
        highest_difficulty_str = ""

    if highest_difficulty_level and not just_string:
        return f"{highest_difficulty_str}: {highest_difficulty_level}"
    elif highest_difficulty_str:
        return highest_difficulty_str
    return "No successful tests"


# def get_git_commit_sha(directory: Path) -> Optional[str]:
#     try:
#         repo = git.Repo(directory)
#         remote_url = repo.remotes.origin.url
#         if remote_url.endswith(".git"):
#             remote_url = remote_url[:-4]
#         git_commit_sha = f"{remote_url}/tree/{repo.head.commit.hexsha}"

#         # logger.debug(f"GIT_COMMIT_SHA: {git_commit_sha}")
#         return git_commit_sha
#     except Exception:
#         # logger.error(f"{directory} is not a git repository!")
#         return None


def write_pretty_json(data, json_file):
    sorted_data = deep_sort(data)
    json_graph = json.dumps(sorted_data, indent=4)
    with open(json_file, "w") as f:
        f.write(json_graph)
        f.write("\n")


def pretty_print_model(model: BaseModel, include_header: bool = True) -> None:
    indent = ""
    if include_header:
        # Try to find the ID and/or name attribute of the model
        id, name = None, None
        for attr, value in model.model_dump().items():
            if attr == "id" or attr.endswith("_id"):
                id = value
            if attr.endswith("name"):
                name = value
            if id and name:
                break
        identifiers = [v for v in [name, id] if v]
        click.echo(
            f"{model.__repr_name__()}{repr(identifiers) if identifiers else ''}:"
        )
        indent = " " * 2

    k_col_width = max(len(k) for k in model.model_dump().keys())
    for k, v in model.model_dump().items():
        v_fmt = repr(v)
        if v is None or v == "":
            v_fmt = click.style(v_fmt, fg="black")
        elif type(v) is bool:
            v_fmt = click.style(v_fmt, fg="green" if v else "red")
        elif type(v) is str and "\n" in v:
            v_fmt = f"\n{v}".replace(
                "\n", f"\n{indent} {click.style('|', fg='black')} "
            )
        if isinstance(v, Enum):
            v_fmt = click.style(v.value, fg="blue")
        elif type(v) is list and len(v) > 0 and isinstance(v[0], Enum):
            v_fmt = ", ".join(click.style(lv.value, fg="blue") for lv in v)
        click.echo(f"{indent}{k: <{k_col_width}} = {v_fmt}")


def deep_sort(obj):
    """
    Recursively sort the keys in JSON object
    """
    if isinstance(obj, dict):
        return {k: deep_sort(v) for k, v in sorted(obj.items())}
    if isinstance(obj, list):
        return [deep_sort(elem) for elem in obj]
    return obj


@overload
def sorted_by_enum_index(
    sortable: Iterable[E],
    enum: type[E],
    *,
    reverse: bool = False,
) -> list[E]:
    ...


@overload
def sorted_by_enum_index(
    sortable: Iterable[T],
    enum: type[Enum],
    *,
    key: Callable[[T], Enum | None],
    reverse: bool = False,
) -> list[T]:
    ...


def sorted_by_enum_index(
    sortable: Iterable[T],
    enum: type[Enum],
    *,
    key: Optional[Callable[[T], Enum | None]] = None,
    reverse: bool = False,
) -> list[T]:
    return sorted(
        sortable,
        key=lambda x: (
            enum._member_names_.index(e.name)  # type: ignore
            if (e := key(x) if key else x)
            else 420e3
        ),
        reverse=reverse,
    )
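sorted_by_enum_index() orders items by their enum member's declaration order rather than by value, and pushes items whose key resolves to None to the end via the 420e3 sentinel. A small sketch, assuming the function above is in scope; the Priority enum and the records below are invented for illustration.

# Sketch only: assumes sorted_by_enum_index() above is in scope.
from enum import Enum


class Priority(Enum):
    low = "low"
    medium = "medium"
    high = "high"


tickets = [
    {"title": "b", "priority": Priority.high},
    {"title": "a", "priority": Priority.low},
    {"title": "c", "priority": None},  # missing priority sorts last (420e3 sentinel)
]

ordered = sorted_by_enum_index(tickets, Priority, key=lambda t: t["priority"])
print([t["title"] for t in ordered])  # ['a', 'b', 'c'] (declaration order, None last)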
@@ -1,4 +0,0 @@
{
    "workspace": {"input": "auto_gpt_workspace", "output": "auto_gpt_workspace"},
    "host": "http://localhost:8000"
}
@@ -1,47 +0,0 @@
{
  "Auto-GPT": {
    "url": "https://github.com/Significant-Gravitas/AutoGPT",
    "branch": "master",
    "commit": "3a2d08fb415071cc94dd6fcee24cfbdd1fb487dd"
  },
  "gpt-engineer": {
    "url": "https://github.com/merwanehamadi/gpt-engineer.git",
    "branch": "benchmark-integration",
    "commit": "9bb81041ace9f09e8ea0e34e29f2e46bb9d46a36"
  },
  "mini-agi": {
    "url": "https://github.com/SilenNaihin/mini-agi.git",
    "branch": "benchmark-integration",
    "commit": "2fc70aa0032eec986dfb1020854a1b3b8aaf6780"
  },
  "smol-developer": {
    "url": "https://github.com/e2b-dev/smol-developer.git",
    "branch": "benchmarks",
    "commit": "a23d01369cea976e80b7889fdbf1096619471301"
  },
  "SuperAGI": {
    "url": "https://github.com/SilenNaihin/SuperAGI.git",
    "branch": "benchmark-integration",
    "commit": "48b2101374264b97dbdfc2c0bb0ae45e769e157d"
  },
  "babyagi": {
    "url": "https://github.com/SilenNaihin/babyagi.git",
    "branch": "benchmark-integration",
    "commit": "16f1b9519fea5543695203be0262a1b41c77cbba"
  },
  "beebot": {
    "url": "https://github.com/AutoPackAI/beebot.git",
    "branch": "main",
    "commit": "59d4e93c133612a0319d135bb0eb08bbcead9fa2"
  },
  "PolyGPT": {
    "url": "https://github.com/polywrap/PolyGPT.git",
    "branch": "nerfzael-use-local-wrap-library",
    "commit": "d621adf5f54cc0f9a6d191139fb67ac3d1436d7b"
  },
  "Auto-GPT-Turbo": {
    "url": "https://github.com/lc0rp/Auto-GPT-Turbo.git",
    "branch": "main",
    "commit": "8469e09ae204f2d5f41d489b217551544597ee14"
  }
}
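The map above pins each third-party agent to a repository, branch, and commit so benchmark runs are reproducible. As a sketch of how such a pin file could be consumed, the snippet below clones each agent and checks out its pinned commit; the file name and destination directory are placeholders, since this diff does not show how the repo actually reads it.

# Sketch only: "benchmarks.json" and the "agents/" directory are placeholders.
import json
import subprocess
from pathlib import Path

pins = json.loads(Path("benchmarks.json").read_text())

for agent, pin in pins.items():
    dest = Path("agents") / agent
    if not dest.exists():
        subprocess.run(
            ["git", "clone", "--branch", pin["branch"], pin["url"], str(dest)],
            check=True,
        )
    # Pin to the exact commit recorded above
    subprocess.run(["git", "-C", str(dest), "checkout", pin["commit"]], check=True)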
classic/benchmark/frontend/.gitignore (vendored): 42 lines removed
@@ -1,42 +0,0 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# database
/prisma/db.sqlite
/prisma/db.sqlite-journal

# next.js
/.next/
/out/
next-env.d.ts

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*

# local env files
# do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables
.env
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
@@ -1,7 +0,0 @@
# agbenchmark-frontend

Frontend for https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks

Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.

Save time and money while doing it through smart dependencies. Best part? It's all automated.
@@ -1,30 +0,0 @@
/** @type {import("eslint").Linter.Config} */
const config = {
  parser: "@typescript-eslint/parser",
  parserOptions: {
    project: true,
  },
  plugins: ["@typescript-eslint"],
  extends: [
    "next/core-web-vitals",
    "plugin:@typescript-eslint/recommended-type-checked",
    "plugin:@typescript-eslint/stylistic-type-checked",
  ],
  rules: {
    // These opinionated rules are enabled in stylistic-type-checked above.
    // Feel free to reconfigure them to your own preference.
    "@typescript-eslint/array-type": "off",
    "@typescript-eslint/consistent-type-definitions": "off",

    "@typescript-eslint/consistent-type-imports": [
      "warn",
      {
        prefer: "type-imports",
        fixStyle: "inline-type-imports",
      },
    ],
    "@typescript-eslint/no-unused-vars": ["warn", { argsIgnorePattern: "^_" }],
  },
};

module.exports = config;
@@ -1,22 +0,0 @@
/**
 * Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. This is especially useful
 * for Docker builds.
 */
await import("./src/env.mjs");

/** @type {import("next").NextConfig} */
const config = {
  reactStrictMode: true,

  /**
   * If you are using `appDir` then you must comment the below `i18n` config out.
   *
   * @see https://github.com/vercel/next.js/issues/41980
   */
  i18n: {
    locales: ["en"],
    defaultLocale: "en",
  },
};

export default config;
@@ -1,47 +0,0 @@
{
  "name": "my-t3-app",
  "version": "0.1.0",
  "private": true,
  "scripts": {
    "build": "next build",
    "dev": "next dev",
    "postinstall": "prisma generate",
    "lint": "next lint",
    "start": "next start"
  },
  "dependencies": {
    "@fortawesome/fontawesome-svg-core": "^6.4.2",
    "@fortawesome/free-solid-svg-icons": "^6.4.2",
    "@fortawesome/react-fontawesome": "^0.2.0",
    "@prisma/client": "^5.1.1",
    "@t3-oss/env-nextjs": "^0.3.1",
    "next": "^13.4.2",
    "react": "18.2.0",
    "react-dom": "18.2.0",
    "tailwind-styled-components": "^2.2.0",
    "vis-data": "^7.1.6",
    "vis-network": "^9.1.6",
    "zod": "^3.21.4"
  },
  "devDependencies": {
    "@types/eslint": "^8.37.0",
    "@types/node": "^18.16.0",
    "@types/prettier": "^2.7.2",
    "@types/react": "^18.2.6",
    "@types/react-dom": "^18.2.4",
    "@typescript-eslint/eslint-plugin": "6.0.0",
    "@typescript-eslint/parser": "6.0.0",
    "autoprefixer": "^10.4.14",
    "eslint": "^8.40.0",
    "eslint-config-next": "^13.4.2",
    "postcss": "^8.4.27",
    "prettier": "^2.8.8",
    "prettier-plugin-tailwindcss": "^0.2.8",
    "prisma": "^5.1.1",
    "tailwindcss": "^3.3.3",
    "typescript": "^5.0.4"
  },
  "ct3aMetadata": {
    "initVersion": "7.18.0"
  }
}
@@ -1,8 +0,0 @@
const config = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};

module.exports = config;
@@ -1,6 +0,0 @@
/** @type {import("prettier").Config} */
const config = {
  plugins: [require.resolve("prettier-plugin-tailwindcss")],
};

module.exports = config;
Binary file not shown.
(deleted image, 15 KiB)
File diff suppressed because one or more lines are too long
@@ -1,45 +0,0 @@
import React, { useState } from "react";
import tw from "tailwind-styled-components";

import RadarChart from "./dashboard/RadarChart";
import CategorySuccess from "./dashboard/CategorySuccess";
import CurrentEnv from "./dashboard/CurrentEnv";

interface DashboardProps {
  data: any;
}

const Dashboard: React.FC<DashboardProps> = ({ data }) => {
  return (
    <DashboardContainer>
      <CardWrapper>
        <RadarChart />
      </CardWrapper>
      <CardWrapper>
        <CategorySuccess />
      </CardWrapper>
      <CardWrapper>
        <CurrentEnv />
      </CardWrapper>
    </DashboardContainer>
  );
};

export default Dashboard;

const DashboardContainer = tw.div`
  w-full
  h-96
  flex
  justify-between
  items-center
`;

const CardWrapper = tw.div`
  w-[30%]
  h-72
  rounded-xl
  shadow-lg
  border
  p-4
`;
@@ -1,28 +0,0 @@
import React, { useState } from "react";
import tw from "tailwind-styled-components";

interface ReportsProps {
  data: any;
}

const Reports: React.FC<ReportsProps> = ({ data }) => {
  return (
    <ReportsContainer>
      <Table></Table>
    </ReportsContainer>
  );
};

export default Reports;

const ReportsContainer = tw.div`
  w-full
`;

const Table = tw.div`
  w-full
  border
  shadow-lg
  rounded-xl
  h-96
`;
Some files were not shown because too many files have changed in this diff.