Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-07 05:15:09 -05:00.

Compare commits: `make-old-w...ntindle/go` (21 commits)
| SHA1 |
|---|
| f4f81bc4fc |
| c5abc01f25 |
| 8b7053c1de |
| e00c1202ad |
| 8fddc9d71f |
| 3d1cd03fc8 |
| e7ebe42306 |
| e0fab7e34e |
| 29ee85c86f |
| 85b6520710 |
| bfa942e032 |
| 11256076d8 |
| 3ca2387631 |
| ed07f02738 |
| b121030c94 |
| c22c18374d |
| e40233a3ac |
| 3ae5eabf9d |
| a077ba9f03 |
| 5401d54eaa |
| 5ac89d7c0b |
.github/workflows/classic-autogpt-ci.yml (vendored, 73 lines changed):

```diff
@@ -6,15 +6,11 @@ on:
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
-      - 'classic/direct_benchmark/**'
       - 'classic/forge/**'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
-      - 'classic/direct_benchmark/**'
       - 'classic/forge/**'
 
 concurrency:
   group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -23,22 +19,47 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
+    working-directory: classic/original_autogpt
 
 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
 
     steps:
-      - name: Start MinIO service
+      # Quite slow on macOS (2~4 minutes to set up Docker)
+      # - name: Set up Docker (macOS)
+      #   if: runner.os == 'macOS'
+      #   uses: crazy-max/ghaction-setup-docker@v3
+
+      - name: Start MinIO service (Linux)
+        if: runner.os == 'Linux'
         working-directory: '.'
         run: |
           docker pull minio/minio:edge-cicd
           docker run -d -p 9000:9000 minio/minio:edge-cicd
 
+      - name: Start MinIO service (macOS)
+        if: runner.os == 'macOS'
+        working-directory: ${{ runner.temp }}
+        run: |
+          brew install minio/stable/minio
+          mkdir data
+          minio server ./data &
+
+      # No MinIO on Windows:
+      # - Windows doesn't support running Linux Docker containers
+      # - It doesn't seem possible to start background processes on Windows. They are
+      #   killed after the step returns.
+      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
 
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
@@ -50,23 +71,41 @@ jobs:
           git config --global user.name "Auto-GPT-Bot"
           git config --global user.email "github-bot@agpt.co"
 
-      - name: Set up Python 3.12
+      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
-          python-version: "3.12"
+          python-version: ${{ matrix.python-version }}
 
       - id: get_date
         name: Get date
         run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
 
-      - name: Install Poetry
-        run: curl -sSL https://install.python-poetry.org | python3 -
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
 
       - name: Install Python dependencies
         run: poetry install
@@ -77,12 +116,12 @@ jobs:
           --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
           --numprocesses=logical --durations=10 \
           --junitxml=junit.xml -o junit_family=legacy \
-          original_autogpt/tests/unit original_autogpt/tests/integration
+          tests/unit tests/integration
         env:
           CI: true
           PLAIN_OUTPUT: True
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          S3_ENDPOINT_URL: http://127.0.0.1:9000
+          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin
@@ -96,11 +135,11 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: autogpt-agent
+          flags: autogpt-agent,${{ runner.os }}
 
       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/logs/
+          path: classic/original_autogpt/logs/
```
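A note on the `runs-on` expression that recurs throughout these workflows: GitHub Actions expressions have no ternary operator, so the `condition && value-if-true || value-if-false` idiom stands in for one (it misfires only if the "true" value is itself falsy). A minimal sketch of the pattern, with a hypothetical `demo` job:

```yaml
# Hypothetical minimal workflow isolating the `cond && a || b` idiom used above.
name: ternary-demo
on: push

jobs:
  demo:
    strategy:
      matrix:
        platform-os: [ubuntu, macos, macos-arm64, windows]
    # `macos-arm64` is not a real runner label, so it is mapped to `macos-14`
    # (an Apple Silicon runner); every other entry becomes e.g. `ubuntu-latest`.
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
    steps:
      - run: echo "matrix entry ${{ matrix.platform-os }} landed on ${{ runner.os }}"
```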
.github/workflows/classic-autogpts-ci.yml (vendored, 36 lines changed):

```diff
@@ -11,6 +11,9 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
       - 'classic/run'
       - 'classic/cli.py'
       - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
     branches: [ master, dev, release-* ]
@@ -19,6 +22,9 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
       - 'classic/run'
       - 'classic/cli.py'
       - 'classic/setup.py'
       - '!**/*.md'
 
 defaults:
@@ -29,9 +35,13 @@ defaults:
 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        agent-name: [ original_autogpt ]
+      fail-fast: false
     timeout-minutes: 20
     env:
-      min-python-version: '3.12'
+      min-python-version: '3.10'
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -45,22 +55,22 @@ jobs:
           python-version: ${{ env.min-python-version }}
 
       - name: Install Poetry
+        working-directory: ./classic/${{ matrix.agent-name }}/
         run: |
           curl -sSL https://install.python-poetry.org | python -
 
       - name: Install dependencies
         run: poetry install
 
-      - name: Run smoke tests with direct-benchmark
+      - name: Run regression tests
         run: |
-          poetry run direct-benchmark run \
-            --strategies one_shot \
-            --models claude \
-            --tests ReadFile,WriteFile \
-            --json
+          ./run agent start ${{ matrix.agent-name }}
+          cd ${{ matrix.agent-name }}
+          poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
+          poetry run agbenchmark --test=WriteFile
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           AGENT_NAME: ${{ matrix.agent-name }}
           REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
           NONINTERACTIVE_MODE: "true"
           CI: true
           HELICONE_CACHE_ENABLED: false
           HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
           REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
           TELEMETRY_ENVIRONMENT: autogpt-ci
           TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
```
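For orientation, the reworked step above first serves the agent over the Agent Protocol (`./run agent start …`) and then runs `agbenchmark` against it from inside the agent's directory. A rough sketch of that shape as a standalone step; the port number and the curl-based readiness loop are illustrative assumptions, not taken from this workflow:

```yaml
# Hypothetical condensed variant of the smoke-test step; the port and the
# readiness probe are assumptions, not part of the actual workflow.
- name: Serve agent and run one benchmark challenge
  working-directory: classic
  run: |
    ./run agent start "$AGENT_NAME" &     # serve the Agent Protocol API in the background
    for _ in $(seq 1 30); do              # wait until the server starts answering
      curl -fsS "http://localhost:8000/ap/v1/agent/tasks" >/dev/null 2>&1 && break
      sleep 2
    done
    cd "$AGENT_NAME"
    poetry run agbenchmark --test=WriteFile
  env:
    AGENT_NAME: original_autogpt
```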
.github/workflows/classic-benchmark-ci.yml (vendored, 194 lines changed):

```diff
@@ -1,21 +1,17 @@
-name: Classic - Direct Benchmark CI
+name: Classic - AGBenchmark CI
 
 on:
   push:
     branches: [ master, dev, ci-test* ]
     paths:
-      - 'classic/direct_benchmark/**'
-      - 'classic/benchmark/agbenchmark/challenges/**'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
       - .github/workflows/classic-benchmark-ci.yml
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
-      - 'classic/direct_benchmark/**'
-      - 'classic/benchmark/agbenchmark/challenges/**'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
       - .github/workflows/classic-benchmark-ci.yml
 
 concurrency:
@@ -27,16 +23,23 @@ defaults:
     shell: bash
 
 env:
-  min-python-version: '3.12'
+  min-python-version: '3.10'
 
 jobs:
-  benchmark-tests:
-    runs-on: ubuntu-latest
+  test:
+    permissions:
+      contents: read
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
     defaults:
       run:
         shell: bash
-        working-directory: classic
+        working-directory: classic/benchmark
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -44,88 +47,71 @@ jobs:
           fetch-depth: 0
           submodules: true
 
-      - name: Set up Python ${{ env.min-python-version }}
+      - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ env.min-python-version }}
+          python-version: ${{ matrix.python-version }}
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
 
-      - name: Install Poetry
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
         run: |
           curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
 
-      - name: Install dependencies
+      - name: Install Python dependencies
         run: poetry install
 
-      - name: Run basic benchmark tests
+      - name: Run pytest with coverage
         run: |
-          echo "Testing ReadFile challenge with one_shot strategy..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --tests ReadFile \
-            --json
-
-          echo "Testing WriteFile challenge..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --tests WriteFile \
-            --json
+          poetry run pytest -vv \
+            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
+            --durations=10 \
+            --junitxml=junit.xml -o junit_family=legacy \
+            tests
         env:
           CI: true
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           NONINTERACTIVE_MODE: "true"
 
-      - name: Test category filtering
-        run: |
-          echo "Testing coding category..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --categories coding \
-            --tests ReadFile,WriteFile \
-            --json
-        env:
-          CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
-
-      - name: Test multiple strategies
-        run: |
-          echo "Testing multiple strategies..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot,plan_execute \
-            --models claude \
-            --tests ReadFile \
-            --parallel 2 \
-            --json
-        env:
-          CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
+      - name: Upload test results to Codecov
+        if: ${{ !cancelled() }} # Run even if tests fail
+        uses: codecov/test-results-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          flags: agbenchmark,${{ runner.os }}
 
-  # Run regression tests on maintain challenges
-  regression-tests:
+  self-test-with-agent:
     runs-on: ubuntu-latest
-    timeout-minutes: 45
-    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev'
-    defaults:
-      run:
-        shell: bash
-        working-directory: classic
+    strategy:
+      matrix:
+        agent-name: [forge]
+      fail-fast: false
+    timeout-minutes: 20
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -140,23 +126,51 @@ jobs:
 
       - name: Install Poetry
         run: |
-          curl -sSL https://install.python-poetry.org | python3 -
-
-      - name: Install dependencies
-        run: poetry install
+          curl -sSL https://install.python-poetry.org | python -
 
       - name: Run regression tests
+        working-directory: classic
         run: |
-          echo "Running regression tests (previously beaten challenges)..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --maintain \
-            --parallel 4 \
-            --json
+          ./run agent start ${{ matrix.agent-name }}
+          cd ${{ matrix.agent-name }}
+
+          set +e # Ignore non-zero exit codes and continue execution
+          echo "Running the following command: poetry run agbenchmark --maintain --mock"
+          poetry run agbenchmark --maintain --mock
+          EXIT_CODE=$?
+          set -e # Stop ignoring non-zero exit codes
+          # Check if the exit code was 5, and if so, exit with 0 instead
+          if [ $EXIT_CODE -eq 5 ]; then
+            echo "regression_tests.json is empty."
+          fi
+
+          echo "Running the following command: poetry run agbenchmark --mock"
+          poetry run agbenchmark --mock
+
+          echo "Running the following command: poetry run agbenchmark --mock --category=data"
+          poetry run agbenchmark --mock --category=data
+
+          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
+          poetry run agbenchmark --mock --category=coding
+
+          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+          # poetry run agbenchmark --test=WriteFile
+          cd ../benchmark
+          poetry install
+          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
+          export BUILD_SKILL_TREE=true
+
+          # poetry run agbenchmark --mock
+
+          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
+          # if [ ! -z "$CHANGED" ]; then
+          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+          #   echo "$CHANGED"
+          #   exit 1
+          # else
+          #   echo "No unstaged changes."
+          # fi
         env:
           CI: true
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           NONINTERACTIVE_MODE: "true"
           TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
           TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
```
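Worth noting about the `set +e` / `set -e` dance in the self-test job: `agbenchmark` is pytest-based, and pytest exits with code 5 when no tests are collected, which here just means the regression list is empty rather than a real failure. A distilled sketch of the pattern (unlike the step above, this variant also re-raises other non-zero exit codes explicitly):

```yaml
# Minimal sketch: tolerate "no tests collected" (pytest exit code 5)
# while still failing the step on any other error.
- name: Run regression suite, tolerating an empty test list
  run: |
    set +e                              # stop aborting on non-zero exit codes
    poetry run agbenchmark --maintain --mock
    EXIT_CODE=$?
    set -e                              # restore fail-fast behaviour
    if [ "$EXIT_CODE" -eq 5 ]; then
      echo "No regression tests collected; treating as success."
    elif [ "$EXIT_CODE" -ne 0 ]; then
      exit "$EXIT_CODE"                 # propagate real failures
    fi
```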
.github/workflows/classic-forge-ci.yml (vendored, 182 lines changed):

```diff
@@ -6,11 +6,13 @@ on:
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
 
 concurrency:
   group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -19,38 +21,115 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
+    working-directory: classic/forge
 
 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
 
     steps:
-      - name: Start MinIO service
+      # Quite slow on macOS (2~4 minutes to set up Docker)
+      # - name: Set up Docker (macOS)
+      #   if: runner.os == 'macOS'
+      #   uses: crazy-max/ghaction-setup-docker@v3
+
+      - name: Start MinIO service (Linux)
+        if: runner.os == 'Linux'
         working-directory: '.'
         run: |
           docker pull minio/minio:edge-cicd
           docker run -d -p 9000:9000 minio/minio:edge-cicd
 
+      - name: Start MinIO service (macOS)
+        if: runner.os == 'macOS'
+        working-directory: ${{ runner.temp }}
+        run: |
+          brew install minio/stable/minio
+          mkdir data
+          minio server ./data &
+
+      # No MinIO on Windows:
+      # - Windows doesn't support running Linux Docker containers
+      # - It doesn't seem possible to start background processes on Windows. They are
+      #   killed after the step returns.
+      #   See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
 
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
           submodules: true
 
-      - name: Set up Python 3.12
+      - name: Checkout cassettes
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        env:
+          PR_BASE: ${{ github.event.pull_request.base.ref }}
+          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+        run: |
+          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+          cassette_base_branch="${PR_BASE}"
+          cd tests/vcr_cassettes
+
+          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
+            cassette_base_branch="master"
+          fi
+
+          if git ls-remote --exit-code --heads origin $cassette_branch ; then
+            git fetch origin $cassette_branch
+            git fetch origin $cassette_base_branch
+
+            git checkout $cassette_branch
+
+            # Pick non-conflicting cassette updates from the base branch
+            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
+            echo "Using cassettes from mirror branch '$cassette_branch'," \
+              "synced to upstream branch '$cassette_base_branch'."
+          else
+            git checkout -b $cassette_branch
+            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
+              "Using cassettes from '$cassette_base_branch'."
+          fi
+
+      - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: ${{ matrix.python-version }}
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
 
-      - name: Install Poetry
-        run: curl -sSL https://install.python-poetry.org | python3 -
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
 
       - name: Install Python dependencies
         run: poetry install
@@ -61,15 +140,12 @@ jobs:
           --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
           --durations=10 \
           --junitxml=junit.xml -o junit_family=legacy \
-          forge/forge forge/tests
+          forge
         env:
           CI: true
           PLAIN_OUTPUT: True
-          # API keys - tests that need these will skip if not available
-          # Secrets are not available to fork PRs (GitHub security feature)
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          S3_ENDPOINT_URL: http://127.0.0.1:9000
+          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin
@@ -83,11 +159,85 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: forge
+          flags: forge,${{ runner.os }}
 
+      - id: setup_git_auth
+        name: Set up git token authentication
+        # Cassettes may be pushed even when tests fail
+        if: success() || failure()
+        run: |
+          config_key="http.${{ github.server_url }}/.extraheader"
+          if [ "${{ runner.os }}" = 'macOS' ]; then
+            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
+          else
+            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
+          fi
+
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          cd tests/vcr_cassettes
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          echo "config_key=$config_key" >> $GITHUB_OUTPUT
+
+      - id: push_cassettes
+        name: Push updated cassettes
+        # For pull requests, push updated cassettes even when tests fail
+        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
+        env:
+          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+        run: |
+          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
+            is_pull_request=true
+            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+          else
+            cassette_branch="${{ github.ref_name }}"
+          fi
+
+          cd tests/vcr_cassettes
+          # Commit & push changes to cassettes if any
+          if ! git diff --quiet; then
+            git add .
+            git commit -m "Auto-update cassettes"
+            git push origin HEAD:$cassette_branch
+            if [ ! $is_pull_request ]; then
+              cd ../..
+              git add tests/vcr_cassettes
+              git commit -m "Update cassette submodule"
+              git push origin HEAD:$cassette_branch
+            fi
+            echo "updated=true" >> $GITHUB_OUTPUT
+          else
+            echo "updated=false" >> $GITHUB_OUTPUT
+            echo "No cassette changes to commit"
+          fi
+
+      - name: Post Set up git token auth
+        if: steps.setup_git_auth.outcome == 'success'
+        run: |
+          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+
+      - name: Apply "behaviour change" label and comment on PR
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        run: |
+          PR_NUMBER="${{ github.event.pull_request.number }}"
+          TOKEN="${{ secrets.PAT_REVIEW }}"
+          REPO="${{ github.repository }}"
+
+          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
+            echo "Adding label and comment..."
+            echo $TOKEN | gh auth login --with-token
+            gh issue edit $PR_NUMBER --add-label "behaviour change"
+            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
+          fi
 
       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/logs/
+          path: classic/forge/logs/
```
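The cassette machinery above follows one scheme: VCR cassettes live in the `tests/vcr_cassettes` submodule, and every pull request gets its own mirror branch named `<author>-<branch>` there, so regenerated cassettes can be pushed without touching the base branch. A distilled sketch of just the branch-selection part:

```yaml
# Distilled from the "Checkout cassettes" step above: pick or create the
# per-PR cassette mirror branch "<author>-<branch>" in the submodule.
- name: Check out per-PR cassette branch (sketch)
  if: ${{ startsWith(github.event_name, 'pull_request') }}
  env:
    PR_AUTHOR: ${{ github.event.pull_request.user.login }}
    PR_BRANCH: ${{ github.event.pull_request.head.ref }}
  run: |
    cd tests/vcr_cassettes
    cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
    if git ls-remote --exit-code --heads origin "$cassette_branch"; then
      git fetch origin "$cassette_branch"
      git checkout "$cassette_branch"     # reuse the PR's existing cassettes
    else
      git checkout -b "$cassette_branch"  # first run for this PR: start fresh
    fi
```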
.github/workflows/classic-python-checks.yml (vendored, 67 lines changed):

```diff
@@ -7,9 +7,7 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/pyproject.toml'
-      - 'classic/poetry.lock'
+      - 'classic/benchmark/**'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
   pull_request:
@@ -18,9 +16,7 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/pyproject.toml'
-      - 'classic/poetry.lock'
+      - 'classic/benchmark/**'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
 
@@ -31,13 +27,44 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
 
 jobs:
+  get-changed-parts:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - id: changes-in
+        name: Determine affected subprojects
+        uses: dorny/paths-filter@v3
+        with:
+          filters: |
+            original_autogpt:
+              - classic/original_autogpt/autogpt/**
+              - classic/original_autogpt/tests/**
+              - classic/original_autogpt/poetry.lock
+            forge:
+              - classic/forge/forge/**
+              - classic/forge/tests/**
+              - classic/forge/poetry.lock
+            benchmark:
+              - classic/benchmark/agbenchmark/**
+              - classic/benchmark/tests/**
+              - classic/benchmark/poetry.lock
+    outputs:
+      changed-parts: ${{ steps.changes-in.outputs.changes }}
+
   lint:
+    needs: get-changed-parts
     runs-on: ubuntu-latest
     env:
-      min-python-version: "3.12"
+      min-python-version: "3.10"
 
+    strategy:
+      matrix:
+        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
+      fail-fast: false
+
     steps:
       - name: Checkout repository
@@ -54,31 +81,42 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
 
       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -
 
       # Install dependencies
 
       - name: Install Python dependencies
-        run: poetry install
+        run: poetry -C classic/${{ matrix.sub-package }} install
 
       # Lint
 
       - name: Lint (isort)
         run: poetry run isort --check .
+        working-directory: classic/${{ matrix.sub-package }}
 
       - name: Lint (Black)
         if: success() || failure()
         run: poetry run black --check .
+        working-directory: classic/${{ matrix.sub-package }}
 
       - name: Lint (Flake8)
         if: success() || failure()
         run: poetry run flake8 .
+        working-directory: classic/${{ matrix.sub-package }}
 
   types:
+    needs: get-changed-parts
     runs-on: ubuntu-latest
    env:
-      min-python-version: "3.12"
+      min-python-version: "3.10"
 
+    strategy:
+      matrix:
+        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
+      fail-fast: false
+
     steps:
       - name: Checkout repository
@@ -95,16 +133,19 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
 
       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -
 
       # Install dependencies
 
       - name: Install Python dependencies
-        run: poetry install
+        run: poetry -C classic/${{ matrix.sub-package }} install
 
       # Typecheck
 
       - name: Typecheck
         if: success() || failure()
         run: poetry run pyright
+        working-directory: classic/${{ matrix.sub-package }}
```
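The `get-changed-parts` job and the `fromJson(...)` matrices that consume it are the standard `dorny/paths-filter` fan-out pattern: the action's `changes` output is a JSON array of the filter names that matched, and downstream jobs parse it into a matrix so only the touched sub-projects get linted and type-checked. A minimal self-contained sketch (filter names illustrative):

```yaml
# Minimal sketch of the changed-parts fan-out used above.
jobs:
  detect:
    runs-on: ubuntu-latest
    outputs:
      parts: ${{ steps.filter.outputs.changes }}  # JSON array of matched filter names
    steps:
      - uses: actions/checkout@v4
      - id: filter
        uses: dorny/paths-filter@v3
        with:
          filters: |
            forge:
              - 'classic/forge/**'
            benchmark:
              - 'classic/benchmark/**'

  lint:
    needs: detect
    # Guard against an empty matrix, which would fail the job (this guard is
    # an assumption; the workflow above does not include it).
    if: needs.detect.outputs.parts != '[]'
    runs-on: ubuntu-latest
    strategy:
      matrix:
        sub-package: ${{ fromJson(needs.detect.outputs.parts) }}
    steps:
      - uses: actions/checkout@v4
      - run: echo "Linting classic/${{ matrix.sub-package }}"
```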
.github/workflows/platform-frontend-ci.yml (vendored, 16 lines changed):

```diff
@@ -27,11 +27,20 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       cache-key: ${{ steps.cache-key.outputs.key }}
+      components-changed: ${{ steps.filter.outputs.components }}
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
 
+      - name: Check for component changes
+        uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            components:
+              - 'autogpt_platform/frontend/src/components/**'
+
       - name: Set up Node.js
         uses: actions/setup-node@v4
         with:
@@ -90,8 +99,11 @@ jobs:
   chromatic:
     runs-on: ubuntu-latest
     needs: setup
-    # Only run on dev branch pushes or PRs targeting dev
-    if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
+    # Disabled: to re-enable, remove 'false &&' from the condition below
+    if: >-
+      false
+      && (github.ref == 'refs/heads/dev' || github.base_ref == 'dev')
+      && needs.setup.outputs.components-changed == 'true'
 
     steps:
       - name: Checkout repository
```
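The `false &&` prefix in the `chromatic` condition is a lightweight way to park a job without deleting it: the `if:` always evaluates to false, the job shows up as skipped, and re-enabling it is a one-line change. The idiom in isolation:

```yaml
# Sketch: a job parked with the `false &&` guard; drop the `false` and the
# following `&&` to re-enable it.
chromatic:
  runs-on: ubuntu-latest
  if: >-
    false
    && (github.ref == 'refs/heads/dev' || github.base_ref == 'dev')
  steps:
    - run: echo "never runs while the guard is in place"
```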
.gitignore (vendored, 13 lines changed):

```diff
@@ -3,7 +3,6 @@
 classic/original_autogpt/keys.py
 classic/original_autogpt/*.json
 auto_gpt_workspace/*
-.autogpt/
 *.mpeg
 .env
 # Root .env files
@@ -160,10 +159,6 @@ CURRENT_BULLETIN.md
 
 # AgBenchmark
 classic/benchmark/agbenchmark/reports/
-classic/reports/
-classic/direct_benchmark/reports/
-classic/.benchmark_workspaces/
-classic/direct_benchmark/.benchmark_workspaces/
 
 # Nodejs
 package-lock.json
@@ -182,13 +177,7 @@ autogpt_platform/backend/settings.py
 
 *.ign.*
 .test-contents
-**/.claude/settings.local.json
+.claude/settings.local.json
-CLAUDE.local.md
-/autogpt_platform/backend/logs
-
-# Test database
-test.db
-
 # Next.js
 .next
 .next
```
.pre-commit-config.yaml:

```diff
@@ -43,10 +43,29 @@ repos:
         pass_filenames: false
 
       - id: poetry-install
-        name: Check & Install dependencies - Classic
-        alias: poetry-install-classic
-        entry: poetry -C classic install
-        files: ^classic/poetry\.lock$
+        name: Check & Install dependencies - Classic - AutoGPT
+        alias: poetry-install-classic-autogpt
+        entry: poetry -C classic/original_autogpt install
+        # include forge source (since it's a path dependency)
+        files: ^classic/(original_autogpt|forge)/poetry\.lock$
         types: [file]
         language: system
         pass_filenames: false
 
+      - id: poetry-install
+        name: Check & Install dependencies - Classic - Forge
+        alias: poetry-install-classic-forge
+        entry: poetry -C classic/forge install
+        files: ^classic/forge/poetry\.lock$
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: poetry-install
+        name: Check & Install dependencies - Classic - Benchmark
+        alias: poetry-install-classic-benchmark
+        entry: poetry -C classic/benchmark install
+        files: ^classic/benchmark/poetry\.lock$
+        types: [file]
+        language: system
+        pass_filenames: false
@@ -97,10 +116,26 @@ repos:
         language: system
 
       - id: isort
-        name: Lint (isort) - Classic
-        alias: isort-classic
-        entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' --
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/
+        name: Lint (isort) - Classic - AutoGPT
+        alias: isort-classic-autogpt
+        entry: poetry -P classic/original_autogpt run isort -p autogpt
+        files: ^classic/original_autogpt/
         types: [file, python]
         language: system
 
+      - id: isort
+        name: Lint (isort) - Classic - Forge
+        alias: isort-classic-forge
+        entry: poetry -P classic/forge run isort -p forge
+        files: ^classic/forge/
+        types: [file, python]
+        language: system
+
+      - id: isort
+        name: Lint (isort) - Classic - Benchmark
+        alias: isort-classic-benchmark
+        entry: poetry -P classic/benchmark run isort -p agbenchmark
+        files: ^classic/benchmark/
+        types: [file, python]
+        language: system
@@ -114,13 +149,26 @@ repos:
 
   - repo: https://github.com/PyCQA/flake8
     rev: 7.0.0
-    # Use consolidated flake8 config at classic/.flake8
+    # To have flake8 load the config of the individual subprojects, we have to call
+    # them separately.
     hooks:
       - id: flake8
-        name: Lint (Flake8) - Classic
-        alias: flake8-classic
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/
-        args: [--config=classic/.flake8]
+        name: Lint (Flake8) - Classic - AutoGPT
+        alias: flake8-classic-autogpt
+        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
+        args: [--config=classic/original_autogpt/.flake8]
 
+      - id: flake8
+        name: Lint (Flake8) - Classic - Forge
+        alias: flake8-classic-forge
+        files: ^classic/forge/(forge|tests)/
+        args: [--config=classic/forge/.flake8]
+
+      - id: flake8
+        name: Lint (Flake8) - Classic - Benchmark
+        alias: flake8-classic-benchmark
+        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
+        args: [--config=classic/benchmark/.flake8]
 
   - repo: local
     hooks:
@@ -156,10 +204,29 @@ repos:
         pass_filenames: false
 
       - id: pyright
-        name: Typecheck - Classic
-        alias: pyright-classic
-        entry: poetry -C classic run pyright
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$
+        name: Typecheck - Classic - AutoGPT
+        alias: pyright-classic-autogpt
+        entry: poetry -C classic/original_autogpt run pyright
+        # include forge source (since it's a path dependency) but exclude *_test.py files:
+        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
         types: [file]
         language: system
         pass_filenames: false
 
+      - id: pyright
+        name: Typecheck - Classic - Forge
+        alias: pyright-classic-forge
+        entry: poetry -C classic/forge run pyright
+        files: ^classic/forge/(forge/|poetry\.lock$)
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: pyright
+        name: Typecheck - Classic - Benchmark
+        alias: pyright-classic-benchmark
+        entry: poetry -C classic/benchmark run pyright
+        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
+        types: [file]
+        language: system
+        pass_filenames: false
```
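These hooks trade one repo-wide Poetry environment for per-subproject environments, and the two Poetry flags they use differ in a way that matters for pre-commit: `-C`/`--directory` runs the command from the given directory, while `-P`/`--project` (used for the isort hooks) treats the directory as the project root but resolves command-line arguments against the caller's working directory, so the repo-root-relative file paths pre-commit passes still resolve. A stripped-down hook showing the shape (subproject chosen for illustration):

```yaml
# Stripped-down local pre-commit hook using the per-subproject Poetry pattern.
repos:
  - repo: local
    hooks:
      - id: isort-example
        name: Lint (isort) - example subproject
        # -P keeps pre-commit's repo-root-relative file arguments resolvable
        entry: poetry -P classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system
```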
```diff
@@ -152,6 +152,7 @@ REPLICATE_API_KEY=
 REVID_API_KEY=
 SCREENSHOTONE_API_KEY=
 UNREAL_SPEECH_API_KEY=
+ELEVENLABS_API_KEY=
 
 # Data & Search Services
 E2B_API_KEY=
```
autogpt_platform/backend/.gitignore (vendored, 3 lines changed):

```diff
@@ -19,3 +19,6 @@ load-tests/*.json
 load-tests/*.log
 load-tests/node_modules/*
 migrations/*/rollback*.sql
+
+# Workspace files
+workspaces/
```
Dockerfile:

```diff
@@ -62,10 +62,12 @@ ENV POETRY_HOME=/opt/poetry \
     DEBIAN_FRONTEND=noninteractive
 ENV PATH=/opt/poetry/bin:$PATH
 
-# Install Python without upgrading system-managed packages
+# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
 RUN apt-get update && apt-get install -y \
     python3.13 \
     python3-pip \
+    ffmpeg \
+    imagemagick \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy only necessary files from builder
```
```diff
@@ -11,7 +11,7 @@ class ChatConfig(BaseSettings):
 
     # OpenAI API Configuration
     model: str = Field(
-        default="anthropic/claude-opus-4.5", description="Default model to use"
+        default="anthropic/claude-opus-4.6", description="Default model to use"
     )
     title_model: str = Field(
         default="openai/gpt-4o-mini",
```
```diff
@@ -33,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
@@ -222,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:
@@ -618,6 +628,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
```
```diff
@@ -7,15 +7,7 @@ from typing import Any, NotRequired, TypedDict
 
 from backend.api.features.library import db as library_db
 from backend.api.features.store import db as store_db
-from backend.data.graph import (
-    Graph,
-    Link,
-    Node,
-    create_graph,
-    get_graph,
-    get_graph_all_versions,
-    get_store_listed_graphs,
-)
+from backend.data.graph import Graph, Link, Node, get_graph, get_store_listed_graphs
 from backend.util.exceptions import DatabaseError, NotFoundError
 
 from .service import (
@@ -28,8 +20,6 @@ from .service import (
 
 logger = logging.getLogger(__name__)
 
-AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565"
-
 
 class ExecutionSummary(TypedDict):
     """Summary of a single execution for quality assessment."""
@@ -669,45 +659,6 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph:
     )
 
 
-def _reassign_node_ids(graph: Graph) -> None:
-    """Reassign all node and link IDs to new UUIDs.
-
-    This is needed when creating a new version to avoid unique constraint violations.
-    """
-    id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
-
-    for node in graph.nodes:
-        node.id = id_map[node.id]
-
-    for link in graph.links:
-        link.id = str(uuid.uuid4())
-        if link.source_id in id_map:
-            link.source_id = id_map[link.source_id]
-        if link.sink_id in id_map:
-            link.sink_id = id_map[link.sink_id]
-
-
-def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None:
-    """Populate user_id in AgentExecutorBlock nodes.
-
-    The external agent generator creates AgentExecutorBlock nodes with empty user_id.
-    This function fills in the actual user_id so sub-agents run with correct permissions.
-
-    Args:
-        agent_json: Agent JSON dict (modified in place)
-        user_id: User ID to set
-    """
-    for node in agent_json.get("nodes", []):
-        if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID:
-            input_default = node.get("input_default") or {}
-            if not input_default.get("user_id"):
-                input_default["user_id"] = user_id
-                node["input_default"] = input_default
-                logger.debug(
-                    f"Set user_id for AgentExecutorBlock node {node.get('id')}"
-                )
-
-
 async def save_agent_to_library(
     agent_json: dict[str, Any], user_id: str, is_update: bool = False
 ) -> tuple[Graph, Any]:
@@ -721,35 +672,10 @@ async def save_agent_to_library(
     Returns:
         Tuple of (created Graph, LibraryAgent)
     """
-    # Populate user_id in AgentExecutorBlock nodes before conversion
-    _populate_agent_executor_user_ids(agent_json, user_id)
-
     graph = json_to_graph(agent_json)
 
     if is_update:
-        if graph.id:
-            existing_versions = await get_graph_all_versions(graph.id, user_id)
-            if existing_versions:
-                latest_version = max(v.version for v in existing_versions)
-                graph.version = latest_version + 1
-            _reassign_node_ids(graph)
-            logger.info(f"Updating agent {graph.id} to version {graph.version}")
-    else:
-        graph.id = str(uuid.uuid4())
-        graph.version = 1
-        _reassign_node_ids(graph)
-        logger.info(f"Creating new agent with ID {graph.id}")
-
-    created_graph = await create_graph(graph, user_id)
-
-    library_agents = await library_db.create_library_agent(
-        graph=created_graph,
-        user_id=user_id,
-        sensitive_action_safe_mode=True,
-        create_library_agents_for_sub_graphs=False,
-    )
-
-    return created_graph, library_agents[0]
+        return await library_db.update_graph_in_library(graph, user_id)
+    return await library_db.create_graph_in_library(graph, user_id)
 
 
 def graph_to_json(graph: Graph) -> dict[str, Any]:
```
```diff
@@ -206,9 +206,9 @@ async def search_agents(
         ]
     )
     no_results_msg = (
-        f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+        f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
         if source == "marketplace"
-        else f"No agents matching '{query}' found in your library."
+        else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
     )
     return NoResultsResponse(
         message=no_results_msg, session_id=session_id, suggestions=suggestions
@@ -224,10 +224,10 @@ async def search_agents(
     message = (
         "Now you have found some options for the user to choose from. "
         "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(
```
```diff
@@ -6,7 +6,6 @@ from typing import Any
 from backend.api.features.library import db as library_db
 from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
-from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
 from backend.data.model import (
     CredentialsFieldInfo,
@@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug(
         return None, None
 
     # Get the graph from store listing version
-    graph_meta = await store_db.get_available_graph(
-        store_agent.store_listing_version_id
-    )
-    graph = await graph_db.get_graph(
-        graph_id=graph_meta.id,
-        version=graph_meta.version,
-        user_id=None,  # Public access
-        include_subgraphs=True,
+    graph = await store_db.get_available_graph(
+        store_agent.store_listing_version_id, hide_nodes=False
     )
     return graph, store_agent
@@ -124,11 +117,11 @@ def build_missing_credentials_from_graph(
     preserving all supported credential types for each field.
     """
     matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
-    aggregated_fields = graph.aggregate_credentials_inputs()
+    aggregated_fields = graph.regular_credentials_inputs
 
     return {
         field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, (field_info, _node_fields) in aggregated_fields.items()
+        for field_key, (field_info, _, _) in aggregated_fields.items()
         if field_key not in matched_keys
     }
@@ -251,7 +244,7 @@ async def match_user_credentials_to_graph(
     missing_creds: list[str] = []
 
     # Get aggregated credentials requirements from the graph
-    aggregated_creds = graph.aggregate_credentials_inputs()
+    aggregated_creds = graph.regular_credentials_inputs
     logger.debug(
         f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
     )
@@ -269,7 +262,8 @@ async def match_user_credentials_to_graph(
     # provider is in the set of acceptable providers.
     for credential_field_name, (
         credential_requirements,
-        _node_fields,
+        _,
+        _,
     ) in aggregated_creds.items():
         # Find first matching credential by provider, type, and scopes
         matching_cred = next(
```
New test module (all 78 lines added, `@@ -0,0 +1,78 @@`):

```python
"""Tests for chat tools utility functions."""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.data.model import CredentialsFieldInfo


def _make_regular_field() -> CredentialsFieldInfo:
    return CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
        },
        by_alias=True,
    )


def test_build_missing_credentials_excludes_auto_creds():
    """
    build_missing_credentials_from_graph() should use regular_credentials_inputs
    and thus exclude auto_credentials from the "missing" set.
    """
    from backend.api.features.chat.tools.utils import (
        build_missing_credentials_from_graph,
    )

    regular_field = _make_regular_field()

    mock_graph = MagicMock()
    # regular_credentials_inputs should only return the non-auto field
    mock_graph.regular_credentials_inputs = {
        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
    }

    result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)

    # Should include the regular credential
    assert "github_api_key" in result
    # Should NOT include the auto_credential (not in regular_credentials_inputs)
    assert "google_oauth2" not in result


@pytest.mark.asyncio
async def test_match_user_credentials_excludes_auto_creds():
    """
    match_user_credentials_to_graph() should use regular_credentials_inputs
    and thus exclude auto_credentials from matching.
    """
    from backend.api.features.chat.tools.utils import match_user_credentials_to_graph

    regular_field = _make_regular_field()

    mock_graph = MagicMock()
    mock_graph.id = "test-graph"
    # regular_credentials_inputs returns only non-auto fields
    mock_graph.regular_credentials_inputs = {
        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
    }

    # Mock the credentials manager to return no credentials
    with patch(
        "backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
    ) as MockCredsMgr:
        mock_store = AsyncMock()
        mock_store.get_all_creds.return_value = []
        MockCredsMgr.return_value.store = mock_store

        matched, missing = await match_user_credentials_to_graph(
            user_id="test-user", graph=mock_graph
        )

        # No credentials available, so github should be missing
        assert len(matched) == 0
        assert len(missing) == 1
        assert "github_api_key" in missing[0]
```
```diff
@@ -19,7 +19,10 @@ from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
 from backend.data.model import CredentialsMetaInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
-from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
+from backend.integrations.webhooks.graph_lifecycle_hooks import (
+    on_graph_activate,
+    on_graph_deactivate,
+)
 from backend.util.clients import get_scheduler_client
 from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError
 from backend.util.json import SafeJson
@@ -371,7 +374,7 @@ async def get_library_agent_by_graph_id(
 
 
 async def add_generated_agent_image(
-    graph: graph_db.BaseGraph,
+    graph: graph_db.GraphBaseMeta,
     user_id: str,
     library_agent_id: str,
 ) -> Optional[prisma.models.LibraryAgent]:
@@ -537,6 +540,92 @@ async def update_agent_version_in_library(
     return library_model.LibraryAgent.from_db(lib)
 
 
+async def create_graph_in_library(
+    graph: graph_db.Graph,
+    user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+    """Create a new graph and add it to the user's library."""
+    graph.version = 1
+    graph_model = graph_db.make_graph_model(graph, user_id)
+    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=True)
+
+    created_graph = await graph_db.create_graph(graph_model, user_id)
+
+    library_agents = await create_library_agent(
+        graph=created_graph,
+        user_id=user_id,
+        sensitive_action_safe_mode=True,
+        create_library_agents_for_sub_graphs=False,
+    )
+
+    if created_graph.is_active:
+        created_graph = await on_graph_activate(created_graph, user_id=user_id)
+
+    return created_graph, library_agents[0]
+
+
+async def update_graph_in_library(
+    graph: graph_db.Graph,
+    user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+    """Create a new version of an existing graph and update the library entry."""
+    existing_versions = await graph_db.get_graph_all_versions(graph.id, user_id)
+    current_active_version = (
+        next((v for v in existing_versions if v.is_active), None)
+        if existing_versions
+        else None
+    )
+    graph.version = (
+        max(v.version for v in existing_versions) + 1 if existing_versions else 1
+    )
+
+    graph_model = graph_db.make_graph_model(graph, user_id)
+    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=False)
+
+    created_graph = await graph_db.create_graph(graph_model, user_id)
+
+    library_agent = await get_library_agent_by_graph_id(user_id, created_graph.id)
+    if not library_agent:
+        raise NotFoundError(f"Library agent not found for graph {created_graph.id}")
+
+    library_agent = await update_library_agent_version_and_settings(
+        user_id, created_graph
+    )
+
+    if created_graph.is_active:
+        created_graph = await on_graph_activate(created_graph, user_id=user_id)
+        await graph_db.set_graph_active_version(
+            graph_id=created_graph.id,
+            version=created_graph.version,
+            user_id=user_id,
+        )
+        if current_active_version:
+            await on_graph_deactivate(current_active_version, user_id=user_id)
+
+    return created_graph, library_agent
+
+
+async def update_library_agent_version_and_settings(
+    user_id: str, agent_graph: graph_db.GraphModel
+) -> library_model.LibraryAgent:
+    """Update library agent to point to new graph version and sync settings."""
+    library = await update_agent_version_in_library(
+        user_id, agent_graph.id, agent_graph.version
+    )
+    updated_settings = GraphSettings.from_graph(
+        graph=agent_graph,
+        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
+        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
+    )
+    if updated_settings != library.settings:
+        library = await update_library_agent(
+            library_agent_id=library.id,
+            user_id=user_id,
+            settings=updated_settings,
+        )
+    return library
+
+
 async def update_library_agent(
     library_agent_id: str,
     user_id: str,
@@ -1014,7 +1103,7 @@ async def create_preset_from_graph_execution(
         raise NotFoundError(
             f"Graph #{graph_execution.graph_id} not found or accessible"
         )
-    elif len(graph.aggregate_credentials_inputs()) > 0:
+    elif len(graph.regular_credentials_inputs) > 0:
         raise ValueError(
             f"Graph execution #{graph_exec_id} can't be turned into a preset "
             "because it was run before this feature existed "
```
@@ -1,7 +1,7 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Literal
|
||||
from typing import Any, Literal, overload
|
||||
|
||||
import fastapi
|
||||
import prisma.enums
|
||||
@@ -11,8 +11,8 @@ import prisma.types
|
||||
|
||||
from backend.data.db import transaction
|
||||
from backend.data.graph import (
|
||||
GraphMeta,
|
||||
GraphModel,
|
||||
GraphModelWithoutNodes,
|
||||
get_graph,
|
||||
get_graph_as_admin,
|
||||
get_sub_graphs,
|
||||
@@ -334,7 +334,22 @@ async def get_store_agent_details(
|
||||
raise DatabaseError("Failed to fetch agent details") from e
|
||||
|
||||
|
||||
async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
|
||||
@overload
|
||||
async def get_available_graph(
|
||||
store_listing_version_id: str, hide_nodes: Literal[False]
|
||||
) -> GraphModel: ...
|
||||
|
||||
|
||||
@overload
|
||||
async def get_available_graph(
|
||||
store_listing_version_id: str, hide_nodes: Literal[True] = True
|
||||
) -> GraphModelWithoutNodes: ...
|
||||
|
||||
|
||||
async def get_available_graph(
|
||||
store_listing_version_id: str,
|
||||
hide_nodes: bool = True,
|
||||
) -> GraphModelWithoutNodes | GraphModel:
|
||||
try:
|
||||
# Get avaialble, non-deleted store listing version
|
||||
store_listing_version = (
|
||||
@@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
|
||||
"isAvailable": True,
|
||||
"isDeleted": False,
|
||||
},
|
||||
include={"AgentGraph": {"include": {"Nodes": True}}},
|
||||
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
|
||||
detail=f"Store listing version {store_listing_version_id} not found",
|
||||
)
|
||||
|
||||
return GraphModel.from_db(store_listing_version.AgentGraph).meta()
|
||||
return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
|
||||
store_listing_version.AgentGraph
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting agent: {e}")
|
||||
|
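The `@overload` pair above lets a type checker narrow the return type from the literal value of `hide_nodes`, while a single runtime implementation handles both cases. A minimal self-contained sketch of the same pattern (names here are illustrative, not from the codebase):

from typing import Literal, overload

@overload
def load(raw: Literal[False]) -> dict: ...
@overload
def load(raw: Literal[True] = True) -> str: ...
def load(raw: bool = True) -> str | dict:
    # Single runtime implementation; the overloads exist only for typing.
    return "summary" if raw else {"full": True}

s: str = load()         # checker selects the Literal[True] overload
d: dict = load(False)   # checker selects the Literal[False] overload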
@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
    StyleType,
    UpscaleOption,
)
from backend.data.graph import BaseGraph
from backend.data.graph import GraphBaseMeta
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
    DIGITAL_ART = "digital art"


async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    if settings.config.use_agent_image_generation_v2:
        return await generate_agent_image_v2(graph=agent)
    else:
        return await generate_agent_image_v1(agent=agent)


async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    """
    Generate an image for an agent using Ideogram model.
    Returns:
@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
    description = f"{name} ({graph.description})" if graph.description else name

    prompt = (
        f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
        f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
        f"along with recognizable objects directly associated with the primary function of a {name}. "
        f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
        f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
        f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
        f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
        f"prioritizing clear visual storytelling and thematic clarity above all else."
        "Create a visually striking retro-futuristic vector pop art illustration "
        f'prominently featuring "{name}" in bold typography. The image clearly and '
        f"literally depicts a {description}, along with recognizable objects directly "
        f"associated with the primary function of a {name}. "
        f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
        f"clearly conveying the purpose of a {name}. "
        "Maintain vibrant, limited-palette colors, sharp vector lines, "
        "geometric shapes, flat illustration techniques, and solid colors "
        "without gradients or shading. Preserve a retro-futuristic aesthetic "
        "influenced by mid-century futurism and 1960s psychedelia, "
        "prioritizing clear visual storytelling and thematic clarity above all else."
    )

    custom_colors = [
@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
    return io.BytesIO(response.content)


async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    """
    Generate an image for an agent using Flux model via Replicate API.

    Args:
        agent (Graph): The agent to generate an image for
        agent (GraphBaseMeta | AgentGraph): The agent to generate an image for

    Returns:
        io.BytesIO: The generated image as bytes
@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
        raise ValueError("Missing Replicate API key in settings")

    # Construct prompt from agent details
    prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
    prompt = (
        "Create a visually engaging app store thumbnail for the AI agent "
        "that highlights what it does in a clear and captivating way:\n"
        f"- **Name**: {agent.name}\n"
        f"- **Description**: {agent.description}\n"
        f"Focus on showcasing its core functionality with an appealing design."
    )

    # Set up Replicate client
    client = ReplicateClient(api_token=settings.secrets.replicate_api_key)
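The prompt refactors above rely on Python's implicit concatenation of adjacent string literals inside parentheses, which keeps long f-strings within line-length limits without changing the resulting value. A tiny illustration (values are made up):

name = "Newsletter Bot"
prompt = (
    "Create a thumbnail for "
    f"{name}, "  # only the segments that interpolate need the f-prefix
    "rendered in flat vector style."
)
assert prompt == f"Create a thumbnail for {name}, rendered in flat vector style."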
@@ -278,7 +278,7 @@ async def get_agent(
)
async def get_graph_meta_by_store_listing_version_id(
    store_listing_version_id: str,
) -> backend.data.graph.GraphMeta:
) -> backend.data.graph.GraphModelWithoutNodes:
    """
    Get Agent Graph from Store Listing Version ID.
    """
@@ -101,7 +101,6 @@ from backend.util.timezone_utils import (
|
||||
from backend.util.virus_scanner import scan_content_safe
|
||||
|
||||
from .library import db as library_db
|
||||
from .library import model as library_model
|
||||
from .store.model import StoreAgentDetails
|
||||
|
||||
|
||||
@@ -823,18 +822,16 @@ async def update_graph(
|
||||
graph: graph_db.Graph,
|
||||
user_id: Annotated[str, Security(get_user_id)],
|
||||
) -> graph_db.GraphModel:
|
||||
# Sanity check
|
||||
if graph.id and graph.id != graph_id:
|
||||
raise HTTPException(400, detail="Graph ID does not match ID in URI")
|
||||
|
||||
# Determine new version
|
||||
existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
|
||||
if not existing_versions:
|
||||
raise HTTPException(404, detail=f"Graph #{graph_id} not found")
|
||||
latest_version_number = max(g.version for g in existing_versions)
|
||||
graph.version = latest_version_number + 1
|
||||
|
||||
graph.version = max(g.version for g in existing_versions) + 1
|
||||
current_active_version = next((v for v in existing_versions if v.is_active), None)
|
||||
|
||||
graph = graph_db.make_graph_model(graph, user_id)
|
||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
|
||||
graph.validate_graph(for_run=False)
|
||||
@@ -842,27 +839,23 @@ async def update_graph(
|
||||
new_graph_version = await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
if new_graph_version.is_active:
|
||||
# Keep the library agent up to date with the new active version
|
||||
await _update_library_agent_version_and_settings(user_id, new_graph_version)
|
||||
|
||||
# Handle activation of the new graph first to ensure continuity
|
||||
await library_db.update_library_agent_version_and_settings(
|
||||
user_id, new_graph_version
|
||||
)
|
||||
new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
|
||||
# Ensure new version is the only active version
|
||||
await graph_db.set_graph_active_version(
|
||||
graph_id=graph_id, version=new_graph_version.version, user_id=user_id
|
||||
)
|
||||
if current_active_version:
|
||||
# Handle deactivation of the previously active version
|
||||
await on_graph_deactivate(current_active_version, user_id=user_id)
|
||||
|
||||
# Fetch new graph version *with sub-graphs* (needed for credentials input schema)
|
||||
new_graph_version_with_subgraphs = await graph_db.get_graph(
|
||||
graph_id,
|
||||
new_graph_version.version,
|
||||
user_id=user_id,
|
||||
include_subgraphs=True,
|
||||
)
|
||||
assert new_graph_version_with_subgraphs # make type checker happy
|
||||
assert new_graph_version_with_subgraphs
|
||||
return new_graph_version_with_subgraphs
|
||||
|
||||
|
||||
@@ -900,33 +893,15 @@ async def set_graph_active_version(
|
||||
)
|
||||
|
||||
# Keep the library agent up to date with the new active version
|
||||
await _update_library_agent_version_and_settings(user_id, new_active_graph)
|
||||
await library_db.update_library_agent_version_and_settings(
|
||||
user_id, new_active_graph
|
||||
)
|
||||
|
||||
if current_active_graph and current_active_graph.version != new_active_version:
|
||||
# Handle deactivation of the previously active version
|
||||
await on_graph_deactivate(current_active_graph, user_id=user_id)
|
||||
|
||||
|
||||
async def _update_library_agent_version_and_settings(
|
||||
user_id: str, agent_graph: graph_db.GraphModel
|
||||
) -> library_model.LibraryAgent:
|
||||
library = await library_db.update_agent_version_in_library(
|
||||
user_id, agent_graph.id, agent_graph.version
|
||||
)
|
||||
updated_settings = GraphSettings.from_graph(
|
||||
graph=agent_graph,
|
||||
hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
|
||||
sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
|
||||
)
|
||||
if updated_settings != library.settings:
|
||||
library = await library_db.update_library_agent(
|
||||
library_agent_id=library.id,
|
||||
user_id=user_id,
|
||||
settings=updated_settings,
|
||||
)
|
||||
return library
|
||||
|
||||
|
||||
@v1_router.patch(
|
||||
path="/graphs/{graph_id}/settings",
|
||||
summary="Update graph settings",
|
||||
|
28
autogpt_platform/backend/backend/blocks/elevenlabs/_auth.py
Normal file
@@ -0,0 +1,28 @@
"""ElevenLabs integration blocks - test credentials and shared utilities."""

from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsMetaInput
from backend.integrations.providers import ProviderName

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="elevenlabs",
    api_key=SecretStr("mock-elevenlabs-api-key"),
    title="Mock ElevenLabs API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}

ElevenLabsCredentials = APIKeyCredentials
ElevenLabsCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ELEVENLABS], Literal["api_key"]
]
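The `ElevenLabsCredentialsInput` alias pins a credentials field to the ElevenLabs provider and the api_key credential type. A hedged sketch of how such an alias is typically wired into a block's input schema; the field name and the `CredentialsField` helper are assumptions for illustration, not taken from this diff:

from backend.data.model import CredentialsField  # assumed helper, for illustration

class Input(BlockSchemaInput):
    # A provider- and type-constrained credentials input, as the alias intends.
    credentials: ElevenLabsCredentialsInput = CredentialsField(
        description="ElevenLabs API key to use for speech synthesis",
    )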
77
autogpt_platform/backend/backend/blocks/encoder_block.py
Normal file
@@ -0,0 +1,77 @@
"""Text encoding block for converting special characters to escape sequences."""

import codecs

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField


class TextEncoderBlock(Block):
    """
    Encodes a string by converting special characters into escape sequences.

    This block is the inverse of TextDecoderBlock. It takes text containing
    special characters (like newlines, tabs, etc.) and converts them into
    their escape sequence representations (e.g., newline becomes \\n).
    """

    class Input(BlockSchemaInput):
        """Input schema for TextEncoderBlock."""

        text: str = SchemaField(
            description="A string containing special characters to be encoded",
            placeholder="Your text with newlines and quotes to encode",
        )

    class Output(BlockSchemaOutput):
        """Output schema for TextEncoderBlock."""

        encoded_text: str = SchemaField(
            description="The encoded text with special characters converted to escape sequences"
        )
        error: str = SchemaField(description="Error message if encoding fails")

    def __init__(self):
        super().__init__(
            id="5185f32e-4b65-4ecf-8fbb-873f003f09d6",
            description="Encodes a string by converting special characters into escape sequences",
            categories={BlockCategory.TEXT},
            input_schema=TextEncoderBlock.Input,
            output_schema=TextEncoderBlock.Output,
            test_input={
                "text": """Hello
World!
This is a "quoted" string."""
            },
            test_output=[
                (
                    "encoded_text",
                    """Hello\\nWorld!\\nThis is a "quoted" string.""",
                )
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """
        Encode the input text by converting special characters to escape sequences.

        Args:
            input_data: The input containing the text to encode.
            **kwargs: Additional keyword arguments (unused).

        Yields:
            The encoded text with escape sequences, or an error message if encoding fails.
        """
        try:
            encoded_text = codecs.encode(input_data.text, "unicode_escape").decode(
                "utf-8"
            )
            yield "encoded_text", encoded_text
        except Exception as e:
            yield "error", f"Encoding error: {str(e)}"
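The block leans on the `unicode_escape` codec, which escapes control characters and non-ASCII code points but leaves quotes alone. A quick standalone illustration of the codec and its inverse:

import codecs

raw = 'Hello\nWorld\t"quoted" 世界'
encoded = codecs.encode(raw, "unicode_escape").decode("utf-8")
print(encoded)  # Hello\nWorld\t"quoted" \u4e16\u754c

# Decoding reverses it (this is what a TextDecoderBlock counterpart would do):
assert codecs.decode(encoded, "unicode_escape") == raw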
@@ -115,6 +115,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
    CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
    CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
    CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
    CLAUDE_4_6_OPUS = "claude-opus-4-6"
    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
    # AI/ML API models
    AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
@@ -270,6 +271,9 @@ MODEL_METADATA = {
    LlmModel.CLAUDE_4_SONNET: ModelMetadata(
        "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
    ),  # claude-4-sonnet-20250514
    LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
        "anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
    ),  # claude-opus-4-6
    LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
        "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
    ),  # claude-opus-4-5-20251101
@@ -1,246 +0,0 @@
import os
import tempfile
from typing import Optional

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Load the clip
        if input_data.is_video:
            clip = VideoFileClip(media_abspath)
        else:
            clip = AudioFileClip(media_abspath)

        yield "duration", clip.duration


class LoopVideoBlock(Block):
    """
    Block for looping (repeating) a video clip until a given duration or number of loops.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`.
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. If omitted, defaults to no looping.",
            default=None,
            ge=0.0,
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).",
            default=None,
            ge=1,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        clip = VideoFileClip(input_abspath)

        # 3) Apply the loop effect
        looped_clip = clip
        if input_data.duration:
            # Loop until we reach the specified duration
            looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)])
        elif input_data.n_loops:
            looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)])
        else:
            raise ValueError("Either 'duration' or 'n_loops' must be provided.")

        assert isinstance(looped_clip, VideoFileClip)

        # 4) Save the looped output
        output_filename = MediaFileType(
            f"{node_exec_id}_looped_{os.path.basename(local_video_path)}"
        )
        output_abspath = get_exec_file_path(graph_exec_id, output_filename)

        looped_clip = looped_clip.with_audio(clip.audio)
        looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out


class AddAudioToVideoBlock(Block):
    """
    Block that adds (attaches) an audio track to an existing video.
    Optionally scale the volume of the new track.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id)
        video_abspath = os.path.join(abs_temp_dir, local_video_path)
        audio_abspath = os.path.join(abs_temp_dir, local_audio_path)

        # 2) Load video + audio with moviepy
        video_clip = VideoFileClip(video_abspath)
        audio_clip = AudioFileClip(audio_abspath)
        # Optionally scale volume
        if input_data.volume != 1.0:
            audio_clip = audio_clip.with_volume_scaled(input_data.volume)

        # 3) Attach the new audio track
        final_clip = video_clip.with_audio(audio_clip)

        # 4) Write to output file
        output_filename = MediaFileType(
            f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}"
        )
        output_abspath = os.path.join(abs_temp_dir, output_filename)
        final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
@@ -0,0 +1,77 @@
import pytest

from backend.blocks.encoder_block import TextEncoderBlock


@pytest.mark.asyncio
async def test_text_encoder_basic():
    """Test basic encoding of newlines and special characters."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello\nWorld")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == "Hello\\nWorld"


@pytest.mark.asyncio
async def test_text_encoder_multiple_escapes():
    """Test encoding of multiple escape sequences."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(
        TextEncoderBlock.Input(text="Line1\nLine2\tTabbed\rCarriage")
    ):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert "\\n" in result[0][1]
    assert "\\t" in result[0][1]
    assert "\\r" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_unicode():
    """Test that unicode characters are handled correctly."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello 世界\n")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    # Unicode characters should be escaped as \uXXXX sequences
    assert "\\n" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_empty_string():
    """Test encoding of an empty string."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == ""


@pytest.mark.asyncio
async def test_text_encoder_error_handling():
    """Test that encoding errors are handled gracefully."""
    from unittest.mock import patch

    block = TextEncoderBlock()
    result = []

    with patch("codecs.encode", side_effect=Exception("Mocked encoding error")):
        async for output in block.run(TextEncoderBlock.Input(text="test")):
            result.append(output)

    assert len(result) == 1
    assert result[0][0] == "error"
    assert "Mocked encoding error" in result[0][1]
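Each test repeats the same collect-outputs loop; a small helper could factor that out. A hedged sketch of such a refactor (not part of the diff, just a possible cleanup of the test boilerplate):

async def collect_outputs(block, input_data) -> list[tuple[str, object]]:
    """Drain a block's async output generator into a list for assertions."""
    return [output async for output in block.run(input_data)]

# Usage in a test:
#     result = await collect_outputs(TextEncoderBlock(), TextEncoderBlock.Input(text="hi"))
#     assert result == [("encoded_text", "hi")]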
37
autogpt_platform/backend/backend/blocks/video/__init__.py
Normal file
@@ -0,0 +1,37 @@
"""Video editing blocks for AutoGPT Platform.

This module provides blocks for:
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
- Clipping/trimming video segments
- Concatenating multiple videos
- Adding text overlays
- Adding AI-generated narration
- Getting media duration
- Looping videos
- Adding audio to videos

Dependencies:
- yt-dlp: For video downloading
- moviepy: For video editing operations
- elevenlabs: For AI narration (optional)
"""

from backend.blocks.video.add_audio import AddAudioToVideoBlock
from backend.blocks.video.clip import VideoClipBlock
from backend.blocks.video.concat import VideoConcatBlock
from backend.blocks.video.download import VideoDownloadBlock
from backend.blocks.video.duration import MediaDurationBlock
from backend.blocks.video.loop import LoopVideoBlock
from backend.blocks.video.narration import VideoNarrationBlock
from backend.blocks.video.text_overlay import VideoTextOverlayBlock

__all__ = [
    "AddAudioToVideoBlock",
    "LoopVideoBlock",
    "MediaDurationBlock",
    "VideoClipBlock",
    "VideoConcatBlock",
    "VideoDownloadBlock",
    "VideoNarrationBlock",
    "VideoTextOverlayBlock",
]
131
autogpt_platform/backend/backend/blocks/video/_utils.py
Normal file
@@ -0,0 +1,131 @@
"""Shared utilities for video blocks."""

from __future__ import annotations

import logging
import os
import re
import subprocess
from pathlib import Path

logger = logging.getLogger(__name__)

# Known operation tags added by video blocks
_VIDEO_OPS = (
    r"(?:clip|overlay|narrated|looped|concat|audio_attached|with_audio|narration)"
)

# Matches: {node_exec_id}_{operation}_ where node_exec_id contains a UUID
_BLOCK_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*"
    r"_" + _VIDEO_OPS + r"_"
)

# Matches: a lone {node_exec_id}_ prefix (no operation keyword, e.g. download output)
_UUID_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*_"
)


def extract_source_name(input_path: str, max_length: int = 50) -> str:
    """Extract the original source filename by stripping block-generated prefixes.

    Iteratively removes {node_exec_id}_{operation}_ prefixes that accumulate
    when chaining video blocks, recovering the original human-readable name.

    Safe for plain filenames (no UUID -> no stripping).
    Falls back to "video" if everything is stripped.
    """
    stem = Path(input_path).stem

    # Pass 1: strip {node_exec_id}_{operation}_ prefixes iteratively
    while _BLOCK_PREFIX_RE.match(stem):
        stem = _BLOCK_PREFIX_RE.sub("", stem, count=1)

    # Pass 2: strip a lone {node_exec_id}_ prefix (e.g. from download block)
    if _UUID_PREFIX_RE.match(stem):
        stem = _UUID_PREFIX_RE.sub("", stem, count=1)

    if not stem:
        return "video"

    return stem[:max_length]


def get_video_codecs(output_path: str) -> tuple[str, str]:
    """Get appropriate video and audio codecs based on output file extension.

    Args:
        output_path: Path to the output file (used to determine extension)

    Returns:
        Tuple of (video_codec, audio_codec)

    Codec mappings:
    - .mp4: H.264 + AAC (universal compatibility)
    - .webm: VP8 + Vorbis (web streaming)
    - .mkv: H.264 + AAC (container supports many codecs)
    - .mov: H.264 + AAC (Apple QuickTime, widely compatible)
    - .m4v: H.264 + AAC (Apple iTunes/devices)
    - .avi: MPEG-4 + MP3 (legacy Windows)
    """
    ext = os.path.splitext(output_path)[1].lower()

    codec_map: dict[str, tuple[str, str]] = {
        ".mp4": ("libx264", "aac"),
        ".webm": ("libvpx", "libvorbis"),
        ".mkv": ("libx264", "aac"),
        ".mov": ("libx264", "aac"),
        ".m4v": ("libx264", "aac"),
        ".avi": ("mpeg4", "libmp3lame"),
    }

    return codec_map.get(ext, ("libx264", "aac"))


def strip_chapters_inplace(video_path: str) -> None:
    """Strip chapter metadata from a media file in-place using ffmpeg.

    MoviePy 2.x crashes with IndexError when parsing files with embedded
    chapter metadata (https://github.com/Zulko/moviepy/issues/2419).
    This strips chapters without re-encoding.

    Args:
        video_path: Absolute path to the media file to strip chapters from.
    """
    base, ext = os.path.splitext(video_path)
    tmp_path = base + ".tmp" + ext
    try:
        result = subprocess.run(
            [
                "ffmpeg",
                "-y",
                "-i",
                video_path,
                "-map_chapters",
                "-1",
                "-codec",
                "copy",
                tmp_path,
            ],
            capture_output=True,
            text=True,
            timeout=300,
        )
        if result.returncode != 0:
            logger.warning(
                "ffmpeg chapter strip failed (rc=%d): %s",
                result.returncode,
                result.stderr,
            )
            return
        os.replace(tmp_path, video_path)
    except FileNotFoundError:
        logger.warning("ffmpeg not found; skipping chapter strip")
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
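To make the prefix-stripping behaviour concrete, here is a hedged usage sketch of `extract_source_name` and `get_video_codecs` with made-up filenames (the UUIDs below are arbitrary placeholders; this assumes the two functions defined above):

# Chained block output: two generated prefixes in front of the real name.
chained = (
    "abc-0a1b2c3d-4e5f-6789-abcd-ef0123456789_clip_"
    "xyz-0a1b2c3d-4e5f-6789-abcd-ef0123456789_looped_holiday_trip.mp4"
)
print(extract_source_name(chained))             # -> "holiday_trip" (expected)
print(extract_source_name("holiday_trip.mp4"))  # -> "holiday_trip" (no UUID, untouched)
print(get_video_codecs("out.webm"))             # -> ("libvpx", "libvorbis")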
113
autogpt_platform/backend/backend/blocks/video/add_audio.py
Normal file
@@ -0,0 +1,113 @@
"""AddAudioToVideoBlock - Attach an audio track to a video file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class AddAudioToVideoBlock(Block):
    """Add (attach) an audio track to an existing video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        video_abspath = get_exec_file_path(graph_exec_id, local_video_path)
        audio_abspath = get_exec_file_path(graph_exec_id, local_audio_path)

        # 2) Load video + audio with moviepy
        strip_chapters_inplace(video_abspath)
        strip_chapters_inplace(audio_abspath)
        video_clip = None
        audio_clip = None
        final_clip = None
        try:
            video_clip = VideoFileClip(video_abspath)
            audio_clip = AudioFileClip(audio_abspath)
            # Optionally scale volume
            if input_data.volume != 1.0:
                audio_clip = audio_clip.with_volume_scaled(input_data.volume)

            # 3) Attach the new audio track
            final_clip = video_clip.with_audio(audio_clip)

            # 4) Write to output file
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_with_audio_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)
            final_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if final_clip:
                final_clip.close()
            if audio_clip:
                audio_clip.close()
            if video_clip:
                video_clip.close()

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
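Compared with the deleted monolithic version, this block closes its clips in a `finally` so ffmpeg reader processes are released even when `write_videofile` raises. The same guarantee can also be expressed with `contextlib.closing`; a hedged, standalone sketch (file paths are placeholders):

from contextlib import closing

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

with closing(VideoFileClip("in.mp4")) as video, closing(AudioFileClip("in.mp3")) as audio:
    # Both clips are closed automatically, even if writing fails.
    video.with_audio(audio).write_videofile("out.mp4", codec="libx264", audio_codec="aac")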
167
autogpt_platform/backend/backend/blocks/video/clip.py
Normal file
@@ -0,0 +1,167 @@
"""VideoClipBlock - Extract a segment from a video file."""

from typing import Literal

from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        start_time: float = SchemaField(description="Start time in seconds", ge=0.0)
        end_time: float = SchemaField(description="End time in seconds", ge=0.0)
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Clipped video file (path or data URI)"
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="8f539119-e580-4d86-ad41-86fbcb22abb1",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "start_time": 0.0,
                "end_time": 10.0,
            },
            test_output=[("video_out", str), ("duration", float)],
            test_mock={
                "_clip_video": lambda *args: 10.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "clip_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _clip_video(
        self,
        video_abspath: str,
        output_abspath: str,
        start_time: float,
        end_time: float,
    ) -> float:
        """Extract a clip from a video. Extracted for testability."""
        clip = None
        subclip = None
        try:
            strip_chapters_inplace(video_abspath)
            clip = VideoFileClip(video_abspath)
            subclip = clip.subclipped(start_time, end_time)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            subclip.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )
            return subclip.duration
        finally:
            if subclip:
                subclip.close()
            if clip:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(
                f"{node_exec_id}_clip_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            duration = self._clip_video(
                video_abspath,
                output_abspath,
                input_data.start_time,
                input_data.end_time,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "duration", duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
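The file I/O and moviepy work are pulled into small named methods so the block's `test_mock` entries can substitute them by name during tests. A hedged illustration of that substitution pattern in plain Python (no platform test harness involved; names are made up):

class Clipper:
    def _clip_video(self, src: str, dst: str, start: float, end: float) -> float:
        raise NotImplementedError("real implementation calls moviepy")

    def run(self, src: str, start: float, end: float) -> float:
        return self._clip_video(src, "out.mp4", start, end)

clipper = Clipper()
# A test can swap the heavy method for a stub, much like test_mock does by name:
clipper._clip_video = lambda src, dst, start, end: end - start
assert clipper.run("in.mp4", 2.0, 12.0) == 10.0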
227
autogpt_platform/backend/backend/blocks/video/concat.py
Normal file
@@ -0,0 +1,227 @@
"""VideoConcatBlock - Concatenate multiple video clips into one."""

from typing import Literal

from moviepy import concatenate_videoclips
from moviepy.video.fx import CrossFadeIn, CrossFadeOut, FadeIn, FadeOut
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoConcatBlock(Block):
    """Merge multiple video clips into one continuous video."""

    class Input(BlockSchemaInput):
        videos: list[MediaFileType] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: Literal["none", "crossfade", "fade_black"] = SchemaField(
            description="Transition between clips", default="none"
        )
        transition_duration: int = SchemaField(
            description="Transition duration in seconds",
            default=1,
            ge=0,
            advanced=True,
        )
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Concatenated video file (path or data URI)"
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="9b0f531a-1118-487f-aeec-3fa63ea8900a",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "videos": ["/tmp/a.mp4", "/tmp/b.mp4"],
            },
            test_output=[
                ("video_out", str),
                ("total_duration", float),
            ],
            test_mock={
                "_concat_videos": lambda *args: 20.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "concat_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _concat_videos(
        self,
        video_abspaths: list[str],
        output_abspath: str,
        transition: str,
        transition_duration: int,
    ) -> float:
        """Concatenate videos. Extracted for testability.

        Returns:
            Total duration of the concatenated video.
        """
        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips
            for v in video_abspaths:
                strip_chapters_inplace(v)
                clips.append(VideoFileClip(v))

            # Validate transition_duration against shortest clip
            if transition in {"crossfade", "fade_black"} and transition_duration > 0:
                min_duration = min(c.duration for c in clips)
                if transition_duration >= min_duration:
                    raise BlockExecutionError(
                        message=(
                            f"transition_duration ({transition_duration}s) must be "
                            f"shorter than the shortest clip ({min_duration:.2f}s)"
                        ),
                        block_name=self.name,
                        block_id=str(self.id),
                    )

            if transition == "crossfade":
                for i, clip in enumerate(clips):
                    effects = []
                    if i > 0:
                        effects.append(CrossFadeIn(transition_duration))
                    if i < len(clips) - 1:
                        effects.append(CrossFadeOut(transition_duration))
                    if effects:
                        clip = clip.with_effects(effects)
                    faded_clips.append(clip)
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_duration,
                )
            elif transition == "fade_black":
                for clip in clips:
                    faded = clip.with_effects(
                        [FadeIn(transition_duration), FadeOut(transition_duration)]
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

            return final.duration
        finally:
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store all input videos locally
            video_abspaths = []
            for video in input_data.videos:
                local_path = await self._store_input_video(execution_context, video)
                video_abspaths.append(
                    get_exec_file_path(execution_context.graph_exec_id, local_path)
                )

            # Build output path
            source = (
                extract_source_name(video_abspaths[0]) if video_abspaths else "video"
            )
            output_filename = MediaFileType(
                f"{node_exec_id}_concat_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            total_duration = self._concat_videos(
                video_abspaths,
                output_abspath,
                input_data.transition,
                input_data.transition_duration,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "total_duration", total_duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
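The crossfade branch overlaps neighbouring clips by passing a negative `padding` to `concatenate_videoclips(..., method="compose")`, so each clip's fade-out plays under the next clip's fade-in. A back-of-envelope duration check for that overlap, as a small self-contained sketch:

def crossfade_total_duration(clip_lengths: list[float], transition: float) -> float:
    """Expected duration when consecutive clips overlap by `transition` seconds."""
    overlaps = (len(clip_lengths) - 1) * transition
    return sum(clip_lengths) - overlaps

# Two 10s clips with a 1s crossfade share one second of screen time:
assert crossfade_total_duration([10.0, 10.0], 1.0) == 19.0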
172
autogpt_platform/backend/backend/blocks/video/download.py
Normal file
@@ -0,0 +1,172 @@
"""VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)."""

import os
import typing
from typing import Literal

import yt_dlp

if typing.TYPE_CHECKING:
    from yt_dlp import _Params

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=...",
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference", default="720p"
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_file: MediaFileType = SchemaField(
            description="Downloaded video (path or data URI)"
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="c35daabb-cd60-493b-b9ad-51f1fe4b50c4",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disable until we can sandbox yt-dlp and handle security implications
            test_input={
                "url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
                "quality": "480p",
            },
            test_output=[
                ("video_file", str),
                ("duration", float),
                ("title", str),
                ("source_url", str),
            ],
            test_mock={
                "_download_video": lambda *args: (
                    "video.mp4",
                    212.0,
                    "Test Video",
                ),
                "_store_output_video": lambda *args, **kwargs: "video.mp4",
            },
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _get_format_string(self, quality: str) -> str:
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best",
        }
        return formats.get(quality, formats["720p"])

    def _download_video(
        self,
        url: str,
        quality: str,
        output_format: str,
        output_dir: str,
        node_exec_id: str,
    ) -> tuple[str, float, str]:
        """Download video. Extracted for testability."""
        output_template = os.path.join(
            output_dir, f"{node_exec_id}_%(title).50s.%(ext)s"
        )

        ydl_opts: "_Params" = {
            "format": f"{self._get_format_string(quality)}/best",
            "outtmpl": output_template,
            "merge_output_format": output_format,
            "quiet": True,
            "no_warnings": True,
        }

        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)
            video_path = ydl.prepare_filename(info)

        # Handle format conversion in filename
        if not video_path.endswith(f".{output_format}"):
            video_path = video_path.rsplit(".", 1)[0] + f".{output_format}"

        # Return just the filename, not the full path
        filename = os.path.basename(video_path)

        return (
            filename,
            info.get("duration") or 0.0,
            info.get("title") or "Unknown",
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Get the exec file directory
            output_dir = get_exec_file_path(execution_context.graph_exec_id, "")
            os.makedirs(output_dir, exist_ok=True)

            filename, duration, title = self._download_video(
                input_data.url,
                input_data.quality,
                input_data.output_format,
                output_dir,
                node_exec_id,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, MediaFileType(filename)
            )

            yield "video_file", video_out
            yield "duration", duration
            yield "title", title
            yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
77
autogpt_platform/backend/backend/blocks/video/duration.py
Normal file
@@ -0,0 +1,77 @@
"""MediaDurationBlock - Get the duration of a media file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):
    """Get the duration of a media file (video or audio)."""

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Strip chapters to avoid MoviePy crash, then load the clip
        strip_chapters_inplace(media_abspath)
        clip = None
        try:
            if input_data.is_video:
                clip = VideoFileClip(media_abspath)
            else:
                clip = AudioFileClip(media_abspath)

            duration = clip.duration
        finally:
            if clip:
                clip.close()

        yield "duration", duration
||||
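A minimal sketch of the same duration probe with MoviePy alone; the file path is hypothetical:

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("/tmp/sample.mp4")  # hypothetical local file
try:
    print(f"duration: {clip.duration:.2f}s")  # parsed from the container header
finally:
    clip.close()  # release the underlying ffmpeg reader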
115
autogpt_platform/backend/backend/blocks/video/loop.py
Normal file
@@ -0,0 +1,115 @@
"""LoopVideoBlock - Loop a video to a given duration or number of repeats."""

from typing import Optional

from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class LoopVideoBlock(Block):
    """Loop (repeat) a video clip until a given duration or number of loops."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. Either duration or n_loops must be provided.",
            default=None,
            ge=0.0,
            le=3600.0,  # Max 1 hour to prevent disk exhaustion
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. Either n_loops or duration must be provided.",
            default=None,
            ge=1,
            le=10,  # Max 10 loops to prevent disk exhaustion
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        strip_chapters_inplace(input_abspath)
        clip = None
        looped_clip = None
        try:
            clip = VideoFileClip(input_abspath)

            # 3) Apply the loop effect
            if input_data.duration:
                # Loop until we reach the specified duration
                looped_clip = clip.with_effects([Loop(duration=input_data.duration)])
            elif input_data.n_loops:
                looped_clip = clip.with_effects([Loop(n=input_data.n_loops)])
            else:
                raise ValueError("Either 'duration' or 'n_loops' must be provided.")

            assert isinstance(looped_clip, VideoFileClip)

            # 4) Save the looped output
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_looped_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)

            looped_clip = looped_clip.with_audio(clip.audio)
            looped_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if looped_clip:
                looped_clip.close()
            if clip:
                clip.close()

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
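The Loop effect used above, in isolation — a minimal MoviePy v2 sketch with hypothetical paths:

from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("/tmp/in.mp4")  # hypothetical input
try:
    # Loop(duration=30) repeats the clip until ~30s of output;
    # Loop(n=3) would instead play it exactly 3 times.
    looped = clip.with_effects([Loop(duration=30)])
    looped.write_videofile("/tmp/looped.mp4", codec="libx264", audio_codec="aac")
    looped.close()
finally:
    clip.close()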
267
autogpt_platform/backend/backend/blocks/video/narration.py
Normal file
@@ -0,0 +1,267 @@
"""VideoNarrationBlock - Generate AI voice narration and add to video."""

import os
from typing import Literal

from elevenlabs import ElevenLabs
from moviepy import CompositeAudioClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.elevenlabs._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ElevenLabsCredentials,
    ElevenLabsCredentialsInput,
)
from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsField, SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoNarrationBlock(Block):
    """Generate AI narration and add to video."""

    class Input(BlockSchemaInput):
        credentials: ElevenLabsCredentialsInput = CredentialsField(
            description="ElevenLabs API key for voice synthesis"
        )
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        script: str = SchemaField(description="Narration script text")
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID",
            default="21m00Tcm4TlvDq8ikWAM",  # Rachel
        )
        model_id: Literal[
            "eleven_multilingual_v2",
            "eleven_flash_v2_5",
            "eleven_turbo_v2_5",
            "eleven_turbo_v2",
        ] = SchemaField(
            description="ElevenLabs TTS model",
            default="eleven_multilingual_v2",
        )
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio. 'ducking' applies stronger attenuation than 'mix'.",
            default="ducking",
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True,
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with narration (path or data URI)"
        )
        audio_file: MediaFileType = SchemaField(
            description="Generated audio file (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="3d036b53-859c-4b17-9826-ca340f736e0e",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={
                "_generate_narration_audio": lambda *args: b"mock audio content",
                "_add_narration_to_video": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "narrated_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _generate_narration_audio(
        self, api_key: str, script: str, voice_id: str, model_id: str
    ) -> bytes:
        """Generate narration audio via ElevenLabs API."""
        client = ElevenLabs(api_key=api_key)
        audio_generator = client.text_to_speech.convert(
            voice_id=voice_id,
            text=script,
            model_id=model_id,
        )
        # The SDK returns a generator; collect all chunks
        return b"".join(audio_generator)

    def _add_narration_to_video(
        self,
        video_abspath: str,
        audio_abspath: str,
        output_abspath: str,
        mix_mode: str,
        narration_volume: float,
        original_volume: float,
    ) -> None:
        """Add narration audio to video. Extracted for testability."""
        video = None
        final = None
        narration_original = None
        narration_scaled = None
        original = None

        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)
            narration_original = AudioFileClip(audio_abspath)
            narration_scaled = narration_original.with_volume_scaled(narration_volume)
            narration = narration_scaled

            if mix_mode == "replace":
                final_audio = narration
            elif mix_mode == "mix":
                if video.audio:
                    original = video.audio.with_volume_scaled(original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - apply stronger attenuation
                if video.audio:
                    # Ducking uses a much lower volume for original audio
                    ducking_volume = original_volume * 0.3
                    original = video.audio.with_volume_scaled(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.with_audio(final_audio)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if original:
                original.close()
            if narration_scaled:
                narration_scaled.close()
            if narration_original:
                narration_original.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        credentials: ElevenLabsCredentials,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Generate narration audio via ElevenLabs
            audio_content = self._generate_narration_audio(
                credentials.api_key.get_secret_value(),
                input_data.script,
                input_data.voice_id,
                input_data.model_id,
            )

            # Save audio to exec file path
            audio_filename = MediaFileType(f"{node_exec_id}_narration.mp3")
            audio_abspath = get_exec_file_path(
                execution_context.graph_exec_id, audio_filename
            )
            os.makedirs(os.path.dirname(audio_abspath), exist_ok=True)
            with open(audio_abspath, "wb") as f:
                f.write(audio_content)

            # Add narration to video
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_narrated_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_narration_to_video(
                video_abspath,
                audio_abspath,
                output_abspath,
                input_data.mix_mode,
                input_data.narration_volume,
                input_data.original_volume,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )
            audio_out = await self._store_output_video(
                execution_context, audio_filename
            )

            yield "video_out", video_out
            yield "audio_file", audio_out

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
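The 'ducking' branch in isolation — a minimal MoviePy sketch using the same calls as the block (paths hypothetical):

from moviepy import CompositeAudioClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

video = VideoFileClip("/tmp/in.mp4")         # hypothetical input
narration = AudioFileClip("/tmp/voice.mp3")  # hypothetical TTS output

# Ducking: original bed at ~9% volume (0.3 default * 0.3 ducking factor)
# underneath full-volume narration.
bed = video.audio.with_volume_scaled(0.3 * 0.3)
mixed = CompositeAudioClip([bed, narration.with_volume_scaled(1.0)])
video.with_audio(mixed).write_videofile(
    "/tmp/out.mp4", codec="libx264", audio_codec="aac"
)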
231
autogpt_platform/backend/backend/blocks/video/text_overlay.py
Normal file
@@ -0,0 +1,231 @@
"""VideoTextOverlayBlock - Add text overlay to video."""

from typing import Literal

from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        text: str = SchemaField(description="Text to overlay on video")
        position: Literal[
            "top",
            "center",
            "bottom",
            "top-left",
            "top-right",
            "bottom-left",
            "bottom-right",
        ] = SchemaField(description="Position of text on screen", default="bottom")
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True,
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True,
        )
        font_size: int = SchemaField(
            description="Font size", default=48, ge=12, le=200, advanced=True
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)", default="white", advanced=True
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with text overlay (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="8ef14de6-cc90-430a-8cfa-3a003be92454",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disabled until we can lock down the ImageMagick security policy
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={
                "_add_text_overlay": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "overlay_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _add_text_overlay(
        self,
        video_abspath: str,
        output_abspath: str,
        text: str,
        position: str,
        start_time: float | None,
        end_time: float | None,
        font_size: int,
        font_color: str,
        bg_color: str | None,
    ) -> None:
        """Add text overlay to video. Extracted for testability."""
        video = None
        final = None
        txt_clip = None
        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)

            txt_clip = TextClip(
                text=text,
                font_size=font_size,
                color=font_color,
                bg_color=bg_color,
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.with_position(pos_map[position])

            # Set timing
            start = start_time or 0
            end = end_time or video.duration
            duration = max(0, end - start)
            txt_clip = txt_clip.with_start(start).with_end(end).with_duration(duration)

            final = CompositeVideoClip([video, txt_clip])
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range if both are provided
        if (
            input_data.start_time is not None
            and input_data.end_time is not None
            and input_data.end_time <= input_data.start_time
        ):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_overlay_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_text_overlay(
                video_abspath,
                output_abspath,
                input_data.text,
                input_data.position,
                input_data.start_time,
                input_data.end_time,
                input_data.font_size,
                input_data.font_color,
                input_data.bg_color,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
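The positioning and timing pattern above in isolation — a minimal MoviePy v2 sketch (paths hypothetical; TextClip rendering needs a usable font/ImageMagick backend, which is exactly why the block ships disabled):

from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

video = VideoFileClip("/tmp/in.mp4")  # hypothetical input
txt = (
    TextClip(text="Hello World", font_size=48, color="white")
    .with_position(("center", "bottom"))  # (horizontal, vertical) anchor
    .with_start(1.0)                      # appear at t=1s
    .with_duration(3.0)                   # stay visible for 3s
)
CompositeVideoClip([video, txt]).write_videofile("/tmp/out.mp4", codec="libx264")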
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
        credentials: WebshareProxyCredentials,
        **kwargs,
    ) -> BlockOutput:
        video_id = self.extract_video_id(input_data.youtube_url)
        yield "video_id", video_id
        try:
            video_id = self.extract_video_id(input_data.youtube_url)
            transcript = self.get_transcript(video_id, credentials)
            transcript_text = self.format_transcript(transcript=transcript)

        transcript = self.get_transcript(video_id, credentials)
        transcript_text = self.format_transcript(transcript=transcript)

        yield "transcript", transcript_text
            # Only yield after all operations succeed
            yield "video_id", video_id
            yield "transcript", transcript_text
        except Exception as e:
            yield "error", str(e)
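The reason the yields move inside the try: in a generator-based block, anything yielded before a later failure has already been delivered downstream. A self-contained illustration of the hazard:

def run():  # simplified stand-in for the block's generator
    yield "video_id", "abc123"  # consumer receives this immediately...
    raise RuntimeError("transcript fetch failed")  # ...before the failure hits

outputs = []
try:
    for out in run():
        outputs.append(out)
except RuntimeError:
    pass
print(outputs)  # [('video_id', 'abc123')] -- a partial, misleading result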
@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
                    f"is not of type {CredentialsMetaInput.__name__}"
                )

            credentials_fields[field_name].validate_credentials_field_schema(cls)
            CredentialsMetaInput.validate_credentials_field_schema(
                cls.get_field_schema(field_name), field_name
            )

        elif field_name in credentials_fields:
            raise KeyError(
@@ -317,6 +319,8 @@ class BlockSchema(BaseModel):
            "credentials_provider": [config.get("provider", "google")],
            "credentials_types": [config.get("type", "oauth2")],
            "credentials_scopes": config.get("scopes"),
            "is_auto_credential": True,
            "input_field_name": info["field_name"],
        }
        result[kwarg_name] = CredentialsFieldInfo.model_validate(
            auto_schema, by_alias=True
@@ -36,12 +36,14 @@ from backend.blocks.replicate.replicate_block import ReplicateModelBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
from backend.blocks.video.narration import VideoNarrationBlock
from backend.data.block import Block, BlockCost, BlockCostType
from backend.integrations.credentials_store import (
    aiml_api_credentials,
    anthropic_credentials,
    apollo_credentials,
    did_credentials,
    elevenlabs_credentials,
    enrichlayer_credentials,
    groq_credentials,
    ideogram_credentials,
@@ -78,6 +80,7 @@ MODEL_COST: dict[LlmModel, int] = {
    LlmModel.CLAUDE_4_1_OPUS: 21,
    LlmModel.CLAUDE_4_OPUS: 21,
    LlmModel.CLAUDE_4_SONNET: 5,
    LlmModel.CLAUDE_4_6_OPUS: 14,
    LlmModel.CLAUDE_4_5_HAIKU: 4,
    LlmModel.CLAUDE_4_5_OPUS: 14,
    LlmModel.CLAUDE_4_5_SONNET: 9,
@@ -639,4 +642,16 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
            },
        ),
    ],
    VideoNarrationBlock: [
        BlockCost(
            cost_amount=5,  # ElevenLabs TTS cost
            cost_filter={
                "credentials": {
                    "id": elevenlabs_credentials.id,
                    "provider": elevenlabs_credentials.provider,
                    "type": elevenlabs_credentials.type,
                }
            },
        )
    ],
}
@@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer):
    month1 = datetime.now(timezone.utc).replace(month=1, day=1)
    user_credit.time_now = lambda: month1

    # IMPORTANT: Set updatedAt to December of the previous year to ensure it's
    # in a different month than month1 (January). This fixes a timing bug
    # where, if the test runs in early February, 35 days ago would be January,
    # matching the mocked month1 and preventing the refill from triggering.
    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
    await UserBalance.prisma().update(
        where={"userId": DEFAULT_USER_ID},
        data={"updatedAt": dec_previous_year},
    )

    # First call in month 1 should trigger refill
    balance = await user_credit.get_credits(DEFAULT_USER_ID)
    assert balance == REFILL_VALUE  # Should get 1000 credits
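A quick check of the date arithmetic behind that comment (self-contained; the 35-day offset stands in for a roughly-month-old updatedAt):

from datetime import datetime, timedelta, timezone

# Test run on Feb 10: an updatedAt of "35 days ago" lands in January --
# the same month as the mocked month1 -- so a month-changed refill check
# would wrongly conclude no refill is due.
run_date = datetime(2025, 2, 10, tzinfo=timezone.utc)
stale = run_date - timedelta(days=35)
month1 = run_date.replace(month=1, day=1)
print(stale.month == month1.month)  # True -> refill suppressed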
@@ -3,7 +3,7 @@ import logging
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast

from prisma.enums import SubmissionStatus
from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
    AgentNodeLinkCreateInput,
    StoreListingVersionWhereInput,
)
from pydantic import BaseModel, BeforeValidator, Field, create_model
from pydantic import BaseModel, BeforeValidator, Field
from pydantic.fields import computed_field

from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
from backend.data.model import (
    CredentialsField,
    CredentialsFieldInfo,
    CredentialsMetaInput,
    is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
    AnyBlockSchema,
    Block,
    BlockInput,
    BlockSchema,
    BlockType,
    EmptySchema,
    get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):

class Node(BaseDbModel):
    block_id: str
    input_default: BlockInput = {}  # dict[input_name, default_value]
    metadata: dict[str, Any] = {}
    input_links: list[Link] = []
    output_links: list[Link] = []
    input_default: BlockInput = Field(  # dict[input_name, default_value]
        default_factory=dict
    )
    metadata: dict[str, Any] = Field(default_factory=dict)
    input_links: list[Link] = Field(default_factory=list)
    output_links: list[Link] = Field(default_factory=list)

    @property
    def credentials_optional(self) -> bool:
@@ -221,18 +221,33 @@ class NodeModel(Node):
        return result


class BaseGraph(BaseDbModel):
class GraphBaseMeta(BaseDbModel):
    """
    Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
    """

    version: int = 1
    is_active: bool = True
    name: str
    description: str
    instructions: str | None = None
    recommended_schedule_cron: str | None = None
    nodes: list[Node] = []
    links: list[Link] = []
    forked_from_id: str | None = None
    forked_from_version: int | None = None


class BaseGraph(GraphBaseMeta):
    """
    Graph with nodes, links, and computed I/O schema fields.

    Used to represent sub-graphs within a `Graph`. Contains the full graph
    structure including nodes and links, plus computed fields for schemas
    and trigger info. Does NOT include user_id or created_at (see GraphModel).
    """

    nodes: list[Node] = Field(default_factory=list)
    links: list[Link] = Field(default_factory=list)

    @computed_field
    @property
    def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,78 @@ class GraphTriggerInfo(BaseModel):


class Graph(BaseGraph):
    sub_graphs: list[BaseGraph] = []  # Flattened sub-graphs
    """Creatable graph model used in API create/update endpoints."""

    sub_graphs: list[BaseGraph] = Field(default_factory=list)  # Flattened sub-graphs


class GraphMeta(GraphBaseMeta):
    """
    Lightweight graph metadata model representing an existing graph from the database,
    for use in listings and summaries.

    Lacks `GraphModel`'s nodes, links, and expensive computed fields.
    Use for list endpoints where full graph data is not needed and performance matters.
    """

    id: str  # type: ignore
    version: int  # type: ignore
    user_id: str
    created_at: datetime

    @classmethod
    def from_db(cls, graph: "AgentGraph") -> Self:
        return cls(
            id=graph.id,
            version=graph.version,
            is_active=graph.isActive,
            name=graph.name or "",
            description=graph.description or "",
            instructions=graph.instructions,
            recommended_schedule_cron=graph.recommendedScheduleCron,
            forked_from_id=graph.forkedFromId,
            forked_from_version=graph.forkedFromVersion,
            user_id=graph.userId,
            created_at=graph.createdAt,
        )


class GraphModel(Graph, GraphMeta):
    """
    Full graph model representing an existing graph from the database.

    This is the primary model for working with persisted graphs. Includes all
    graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
    Provides computed fields (input_schema, output_schema, etc.) used during
    set-up (frontend) and execution (backend).

    Inherits from:
    - `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
    - `GraphMeta`: provides user_id, created_at for database records
    """

    nodes: list[NodeModel] = Field(default_factory=list)  # type: ignore

    @property
    def starting_nodes(self) -> list[NodeModel]:
        outbound_nodes = {link.sink_id for link in self.links}
        input_nodes = {
            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
        }
        return [
            node
            for node in self.nodes
            if node.id not in outbound_nodes or node.id in input_nodes
        ]

    @property
    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
        return cast(NodeModel, super().webhook_input_node)

    @computed_field
    @property
    def credentials_input_schema(self) -> dict[str, Any]:
        schema = self._credentials_input_schema.jsonschema()

        # Determine which credential fields are required based on credentials_optional metadata
        graph_credentials_inputs = self.aggregate_credentials_inputs()
        required_fields = []

        # Build a map of node_id -> node for quick lookup
        all_nodes = {node.id: node for node in self.nodes}
        for sub_graph in self.sub_graphs:
            for node in sub_graph.nodes:
                all_nodes[node.id] = node

        for field_key, (
            _field_info,
            node_field_pairs,
        ) in graph_credentials_inputs.items():
            # A field is required if ANY node using it has credentials_optional=False
            is_required = False
            for node_id, _field_name in node_field_pairs:
                node = all_nodes.get(node_id)
                if node and not node.credentials_optional:
                    is_required = True
                    break

            if is_required:
                required_fields.append(field_key)

        schema["required"] = required_fields
        return schema

    @property
    def _credentials_input_schema(self) -> type[BlockSchema]:
        graph_credentials_inputs = self.aggregate_credentials_inputs()
        graph_credentials_inputs = self.regular_credentials_inputs
        logger.debug(
            f"Combined credentials input fields for graph #{self.id} ({self.name}): "
            f"{graph_credentials_inputs}"
@@ -406,8 +455,8 @@ class Graph(BaseGraph):

        # Warn if same-provider credentials inputs can't be combined (= bad UX)
        graph_cred_fields = list(graph_credentials_inputs.values())
        for i, (field, keys) in enumerate(graph_cred_fields):
            for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
        for i, (field, keys, _) in enumerate(graph_cred_fields):
            for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
                if field.provider != other_field.provider:
                    continue
                if ProviderName.HTTP in field.provider:
@@ -423,31 +472,78 @@
                    f"keys: {keys} <> {other_keys}."
                )

        fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
            agg_field_key: (
                CredentialsMetaInput[
                    Literal[tuple(field_info.provider)],  # type: ignore
                    Literal[tuple(field_info.supported_types)],  # type: ignore
                ],
                CredentialsField(
                    required_scopes=set(field_info.required_scopes or []),
                    discriminator=field_info.discriminator,
                    discriminator_mapping=field_info.discriminator_mapping,
                    discriminator_values=field_info.discriminator_values,
                ),
            )
            for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
        }
        # Build JSON schema directly to avoid expensive create_model + validation overhead
        properties = {}
        required_fields = []

        return create_model(
            self.name.replace(" ", "") + "CredentialsInputSchema",
            __base__=BlockSchema,
            **fields,  # type: ignore
        )
        for agg_field_key, (
            field_info,
            _,
            is_required,
        ) in graph_credentials_inputs.items():
            providers = list(field_info.provider)
            cred_types = list(field_info.supported_types)

            field_schema: dict[str, Any] = {
                "credentials_provider": providers,
                "credentials_types": cred_types,
                "type": "object",
                "properties": {
                    "id": {"title": "Id", "type": "string"},
                    "title": {
                        "anyOf": [{"type": "string"}, {"type": "null"}],
                        "default": None,
                        "title": "Title",
                    },
                    "provider": {
                        "title": "Provider",
                        "type": "string",
                        **(
                            {"enum": providers}
                            if len(providers) > 1
                            else {"const": providers[0]}
                        ),
                    },
                    "type": {
                        "title": "Type",
                        "type": "string",
                        **(
                            {"enum": cred_types}
                            if len(cred_types) > 1
                            else {"const": cred_types[0]}
                        ),
                    },
                },
                "required": ["id", "provider", "type"],
            }

            # Add other (optional) field info items
            field_schema.update(
                field_info.model_dump(
                    by_alias=True,
                    exclude_defaults=True,
                    exclude={"provider", "supported_types"},  # already included above
                )
            )

            # Ensure field schema is well-formed
            CredentialsMetaInput.validate_credentials_field_schema(
                field_schema, agg_field_key
            )

            properties[agg_field_key] = field_schema
            if is_required:
                required_fields.append(agg_field_key)

        return {
            "type": "object",
            "properties": properties,
            "required": required_fields,
        }

    def aggregate_credentials_inputs(
        self,
    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
        """
        Returns:
            dict[aggregated_field_key, tuple(
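For orientation, the per-field dict built by the loop above comes out roughly like this for a hypothetical single-provider, single-type field (values illustrative):

field_schema = {
    "credentials_provider": ["github"],
    "credentials_types": ["api_key"],
    "type": "object",
    "properties": {
        "id": {"title": "Id", "type": "string"},
        "title": {
            "anyOf": [{"type": "string"}, {"type": "null"}],
            "default": None,
            "title": "Title",
        },
        # single entries collapse to "const" instead of "enum"
        "provider": {"title": "Provider", "type": "string", "const": "github"},
        "type": {"title": "Type", "type": "string", "const": "api_key"},
    },
    "required": ["id", "provider", "type"],
}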
@@ -455,13 +551,19 @@ class Graph(BaseGraph):
                (now includes discriminator_values from matching nodes)
                set[(node_id, field_name)]: Node credentials fields that are
                    compatible with this aggregated field spec
                bool: True if the field is required (any node has credentials_optional=False)
            )]
        """
        # First collect all credential field data with input defaults
        node_credential_data = []
        # Track (field_info, (node_id, field_name), is_required) for each credential field
        node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
        node_required_map: dict[str, bool] = {}  # node_id -> is_required

        for graph in [self] + self.sub_graphs:
            for node in graph.nodes:
                # Track if this node requires credentials (credentials_optional=False means required)
                node_required_map[node.id] = not node.credentials_optional

                for (
                    field_name,
                    field_info,
@@ -485,37 +587,43 @@
            )

        # Combine credential field info (this will merge discriminator_values automatically)
        return CredentialsFieldInfo.combine(*node_credential_data)
        combined = CredentialsFieldInfo.combine(*node_credential_data)


class GraphModel(Graph):
    user_id: str
    nodes: list[NodeModel] = []  # type: ignore

    created_at: datetime

    @property
    def starting_nodes(self) -> list[NodeModel]:
        outbound_nodes = {link.sink_id for link in self.links}
        input_nodes = {
            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
        # Add is_required flag to each aggregated field
        # A field is required if ANY node using it has credentials_optional=False
        return {
            key: (
                field_info,
                node_field_pairs,
                any(
                    node_required_map.get(node_id, True)
                    for node_id, _ in node_field_pairs
                ),
            )
            for key, (field_info, node_field_pairs) in combined.items()
        }
        return [
            node
            for node in self.nodes
            if node.id not in outbound_nodes or node.id in input_nodes
        ]

    @property
    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
        return cast(NodeModel, super().webhook_input_node)
    def regular_credentials_inputs(
        self,
    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
        """Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
        return {
            k: v
            for k, v in self.aggregate_credentials_inputs().items()
            if not v[0].is_auto_credential
        }

    def meta(self) -> "GraphMeta":
        """
        Returns a GraphMeta object with metadata about the graph.
        This is used to return metadata about the graph without exposing nodes and links.
        """
        return GraphMeta.from_graph(self)
    @property
    def auto_credentials_inputs(
        self,
    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
        """Credentials embedded in file fields (_credentials_id), resolved at execution time."""
        return {
            k: v
            for k, v in self.aggregate_credentials_inputs().items()
            if v[0].is_auto_credential
        }

    def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
        """
@@ -567,6 +675,16 @@ class GraphModel(Graph):
        ) and graph_id in graph_id_map:
            node.input_default["graph_id"] = graph_id_map[graph_id]

        # Clear auto-credentials references (e.g., _credentials_id in
        # GoogleDriveFile fields) so the new user must re-authenticate
        # with their own account
        for node in graph.nodes:
            if not node.input_default:
                continue
            for key, value in node.input_default.items():
                if isinstance(value, dict) and "_credentials_id" in value:
                    del value["_credentials_id"]

    def validate_graph(
        self,
        for_run: bool = False,
@@ -799,13 +917,14 @@ class GraphModel(Graph):
        if is_static_output_block(link.source_id):
            link.is_static = True  # Each value block output should be static.

    @staticmethod
    def from_db(
    @classmethod
    def from_db(  # type: ignore[reportIncompatibleMethodOverride]
        cls,
        graph: AgentGraph,
        for_export: bool = False,
        sub_graphs: list[AgentGraph] | None = None,
    ) -> "GraphModel":
        return GraphModel(
    ) -> Self:
        return cls(
            id=graph.id,
            user_id=graph.userId if not for_export else "",
            version=graph.version,
@@ -831,17 +950,28 @@ class GraphModel(Graph):
        ],
    )

    def hide_nodes(self) -> "GraphModelWithoutNodes":
        """
        Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
        (excluded from serialization). They are still present in the model instance
        so all computed fields (e.g. `credentials_input_schema`) still work.
        """
        return GraphModelWithoutNodes.model_validate(self, from_attributes=True)


class GraphMeta(Graph):
    user_id: str

    # Easy work-around to prevent exposing nodes and links in the API response
    nodes: list[NodeModel] = Field(default=[], exclude=True)  # type: ignore
    links: list[Link] = Field(default=[], exclude=True)
class GraphModelWithoutNodes(GraphModel):
    """
    GraphModel variant that excludes nodes, links, and sub-graphs from serialization.

    @staticmethod
    def from_graph(graph: GraphModel) -> "GraphMeta":
        return GraphMeta(**graph.model_dump())
    Used in contexts like the store where exposing internal graph structure
    is not desired. Inherits all computed fields from GraphModel but marks
    nodes and links as excluded from JSON output.
    """

    nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
    links: list[Link] = Field(default_factory=list, exclude=True)

    sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)


class GraphsPaginated(BaseModel):
@@ -912,21 +1042,11 @@ async def list_graphs_paginated(
        where=where_clause,
        distinct=["id"],
        order={"version": "desc"},
        include=AGENT_GRAPH_INCLUDE,
        skip=offset,
        take=page_size,
    )

    graph_models: list[GraphMeta] = []
    for graph in graphs:
        try:
            graph_meta = GraphModel.from_db(graph).meta()
            # Trigger serialization to validate that the graph is well formed
            graph_meta.model_dump()
            graph_models.append(graph_meta)
        except Exception as e:
            logger.error(f"Error processing graph {graph.id}: {e}")
            continue
    graph_models = [GraphMeta.from_db(graph) for graph in graphs]

    return GraphsPaginated(
        graphs=graph_models,
@@ -463,3 +463,328 @@ def test_node_credentials_optional_with_other_metadata():
    assert node.credentials_optional is True
    assert node.metadata["position"] == {"x": 100, "y": 200}
    assert node.metadata["customized_name"] == "My Custom Node"


# ============================================================================
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
def test_combine_preserves_is_auto_credential_flag():
    """
    CredentialsFieldInfo.combine() must propagate is_auto_credential and
    input_field_name to the combined result. Regression test for reviewer
    finding that combine() dropped these fields.
    """
    from backend.data.model import CredentialsFieldInfo

    auto_field = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["google"],
            "credentials_types": ["oauth2"],
            "credentials_scopes": ["drive.readonly"],
            "is_auto_credential": True,
            "input_field_name": "spreadsheet",
        },
        by_alias=True,
    )

    # combine() takes *args of (field_info, key) tuples
    combined = CredentialsFieldInfo.combine(
        (auto_field, ("node-1", "credentials")),
        (auto_field, ("node-2", "credentials")),
    )

    assert len(combined) == 1
    group_key = next(iter(combined))
    combined_info, combined_keys = combined[group_key]

    assert combined_info.is_auto_credential is True
    assert combined_info.input_field_name == "spreadsheet"
    assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}


def test_combine_preserves_regular_credential_defaults():
    """Regular credentials should have is_auto_credential=False after combine()."""
    from backend.data.model import CredentialsFieldInfo

    regular_field = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
        },
        by_alias=True,
    )

    combined = CredentialsFieldInfo.combine(
        (regular_field, ("node-1", "credentials")),
    )

    group_key = next(iter(combined))
    combined_info, _ = combined[group_key]

    assert combined_info.is_auto_credential is False
    assert combined_info.input_field_name is None
# ============================================================================


def test_reassign_ids_clears_credentials_id():
    """
    [SECRT-1772] _reassign_ids should clear _credentials_id from
    GoogleDriveFile-style input_default fields so forked agents
    don't retain the original creator's credential references.
    """
    from backend.data.graph import GraphModel

    node = Node(
        id="node-1",
        block_id=StoreValueBlock().id,
        input_default={
            "spreadsheet": {
                "_credentials_id": "original-cred-id",
                "id": "file-123",
                "name": "test.xlsx",
                "mimeType": "application/vnd.google-apps.spreadsheet",
                "url": "https://docs.google.com/spreadsheets/d/file-123",
            },
        },
    )

    graph = Graph(
        id="test-graph",
        name="Test",
        description="Test",
        nodes=[node],
        links=[],
    )

    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

    # _credentials_id key should be removed (not set to None) so that
    # _acquire_auto_credentials correctly errors instead of treating it as chained data
    assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]


def test_reassign_ids_preserves_non_credential_fields():
    """
    Regression guard: _reassign_ids should NOT modify non-credential fields
    like name, mimeType, id, url.
    """
    from backend.data.graph import GraphModel

    node = Node(
        id="node-1",
        block_id=StoreValueBlock().id,
        input_default={
            "spreadsheet": {
                "_credentials_id": "cred-abc",
                "id": "file-123",
                "name": "test.xlsx",
                "mimeType": "application/vnd.google-apps.spreadsheet",
                "url": "https://docs.google.com/spreadsheets/d/file-123",
            },
        },
    )

    graph = Graph(
        id="test-graph",
        name="Test",
        description="Test",
        nodes=[node],
        links=[],
    )

    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

    field = graph.nodes[0].input_default["spreadsheet"]
    assert field["id"] == "file-123"
    assert field["name"] == "test.xlsx"
    assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
    assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"


def test_reassign_ids_handles_no_credentials():
    """
    Regression guard: _reassign_ids should not error when input_default
    has no dict fields with _credentials_id.
    """
    from backend.data.graph import GraphModel

    node = Node(
        id="node-1",
        block_id=StoreValueBlock().id,
        input_default={
            "input": "some value",
            "another_input": 42,
        },
    )

    graph = Graph(
        id="test-graph",
        name="Test",
        description="Test",
        nodes=[node],
        links=[],
    )

    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

    # Should not error, fields unchanged
    assert graph.nodes[0].input_default["input"] == "some value"
    assert graph.nodes[0].input_default["another_input"] == 42


def test_reassign_ids_handles_multiple_credential_fields():
    """
    [SECRT-1772] When a node has multiple dict fields with _credentials_id,
    ALL of them should be cleared.
    """
    from backend.data.graph import GraphModel

    node = Node(
        id="node-1",
        block_id=StoreValueBlock().id,
        input_default={
            "spreadsheet": {
                "_credentials_id": "cred-1",
                "id": "file-1",
                "name": "file1.xlsx",
            },
            "doc_file": {
                "_credentials_id": "cred-2",
                "id": "file-2",
                "name": "file2.docx",
            },
            "plain_input": "not a dict",
        },
    )

    graph = Graph(
        id="test-graph",
        name="Test",
        description="Test",
        nodes=[node],
        links=[],
    )

    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

    assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
    assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
    assert graph.nodes[0].input_default["plain_input"] == "not a dict"
# ============================================================================
# Tests for discriminate() field propagation
def test_discriminate_preserves_is_auto_credential_flag():
    """
    CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
    input_field_name to the discriminated result. Regression test for
    discriminate() dropping these fields (same class of bug as combine()).
    """
    from backend.data.model import CredentialsFieldInfo

    auto_field = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["google", "openai"],
            "credentials_types": ["oauth2"],
            "credentials_scopes": ["drive.readonly"],
            "is_auto_credential": True,
            "input_field_name": "spreadsheet",
            "discriminator": "model",
            "discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
        },
        by_alias=True,
    )

    discriminated = auto_field.discriminate("gemini")

    assert discriminated.is_auto_credential is True
    assert discriminated.input_field_name == "spreadsheet"
    assert discriminated.provider == frozenset(["google"])


def test_discriminate_preserves_regular_credential_defaults():
    """Regular credentials should have is_auto_credential=False after discriminate()."""
    from backend.data.model import CredentialsFieldInfo

    regular_field = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["google", "openai"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
            "discriminator": "model",
            "discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
        },
        by_alias=True,
    )

    discriminated = regular_field.discriminate("gpt-4")

    assert discriminated.is_auto_credential is False
    assert discriminated.input_field_name is None
    assert discriminated.provider == frozenset(["openai"])


# ============================================================================
# Tests for credentials_input_schema excluding auto_credentials
def test_credentials_input_schema_excludes_auto_creds():
    """
    GraphModel.credentials_input_schema should exclude auto_credentials
    (is_auto_credential=True) from the schema. Auto_credentials are
    transparently resolved at execution time via file picker data.
    """
    from datetime import datetime, timezone
    from unittest.mock import PropertyMock, patch

    from backend.data.graph import GraphModel, NodeModel
    from backend.data.model import CredentialsFieldInfo

    regular_field_info = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
        },
        by_alias=True,
    )

    graph = GraphModel(
        id="test-graph",
        version=1,
        name="Test",
        description="Test",
        user_id="test-user",
        created_at=datetime.now(timezone.utc),
        nodes=[
            NodeModel(
                id="node-1",
                block_id=StoreValueBlock().id,
                input_default={},
                graph_id="test-graph",
                graph_version=1,
            ),
        ],
        links=[],
    )

    # Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
    regular_only = {
        "github_credentials": (
            regular_field_info,
            {("node-1", "credentials")},
            True,
        ),
    }

    with patch.object(
        type(graph),
        "regular_credentials_inputs",
        new_callable=PropertyMock,
        return_value=regular_only,
    ):
        schema = graph.credentials_input_schema
        field_names = set(schema.get("properties", {}).keys())
        # Should include regular credential but NOT auto_credential
        assert "github_credentials" in field_names
        assert "google_credentials" not in field_names
@@ -163,7 +163,6 @@ class User(BaseModel):
|
||||
if TYPE_CHECKING:
|
||||
from prisma.models import User as PrismaUser
|
||||
|
||||
from backend.data.block import BlockSchema
|
||||
|
||||
T = TypeVar("T")
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
|
||||
def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
|
||||
return get_args(cls.model_fields["type"].annotation)
|
||||
|
||||
@classmethod
|
||||
def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
|
||||
@staticmethod
|
||||
def validate_credentials_field_schema(
|
||||
field_schema: dict[str, Any], field_name: str
|
||||
):
|
||||
"""Validates the schema of a credentials input field"""
|
||||
field_name = next(
|
||||
name for name, type in model.get_credentials_fields().items() if type is cls
|
||||
)
|
||||
field_schema = model.jsonschema()["properties"][field_name]
|
||||
try:
|
||||
schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
|
||||
field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
|
||||
except ValidationError as e:
|
||||
if "Field required [type=missing" not in str(e):
|
||||
raise
|
||||
@@ -526,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
|
||||
f"{field_schema}"
|
||||
) from e
|
||||
|
||||
providers = cls.allowed_providers()
|
||||
providers = field_info.provider
|
||||
if (
|
||||
providers is not None
|
||||
and len(providers) > 1
|
||||
and not schema_extra.discriminator
|
||||
and not field_info.discriminator
|
||||
):
|
||||
raise TypeError(
|
||||
f"Multi-provider CredentialsField '{field_name}' "
|
||||
@@ -574,6 +571,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
||||
discriminator: Optional[str] = None
|
||||
discriminator_mapping: Optional[dict[str, CP]] = None
|
||||
discriminator_values: set[Any] = Field(default_factory=set)
|
||||
is_auto_credential: bool = False
|
||||
input_field_name: Optional[str] = None
|
||||
|
||||
@classmethod
|
||||
def combine(
|
||||
@@ -654,6 +653,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
||||
+ "_credentials"
|
||||
)
|
||||
|
||||
# Propagate is_auto_credential from the combined field.
|
||||
# All fields in a group should share the same is_auto_credential
|
||||
# value since auto and regular credentials serve different purposes.
|
||||
result[group_key] = (
|
||||
CredentialsFieldInfo[CP, CT](
|
||||
credentials_provider=combined.provider,
|
||||
@@ -662,6 +664,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
||||
discriminator=combined.discriminator,
|
||||
discriminator_mapping=combined.discriminator_mapping,
|
||||
discriminator_values=set(all_discriminator_values),
|
||||
is_auto_credential=combined.is_auto_credential,
|
||||
input_field_name=combined.input_field_name,
|
||||
),
|
||||
combined_keys,
|
||||
)
|
||||
@@ -687,6 +691,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
|
||||
discriminator=self.discriminator,
|
||||
discriminator_mapping=self.discriminator_mapping,
|
||||
discriminator_values=self.discriminator_values,
|
||||
is_auto_credential=self.is_auto_credential,
|
||||
input_field_name=self.input_field_name,
|
||||
)
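
Net effect of these hunks: the two new flags survive both combine() and discriminate(). The behavioral contract, restated as assertions against a field whose discriminator_mapping sends "gemini" to "google" (this mirrors the discriminate() tests at the top of this diff; illustrative, not new test code from the PR):

narrowed = field_info.discriminate("gemini")
assert narrowed.provider == frozenset(["google"])  # narrowed via discriminator_mapping
assert narrowed.is_auto_credential == field_info.is_auto_credential  # now propagated
assert narrowed.input_field_name == field_info.input_field_name  # now propagated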

@@ -172,6 +172,81 @@ def execute_graph(
T = TypeVar("T")


async def _acquire_auto_credentials(
    input_model: type[BlockSchema],
    input_data: dict[str, Any],
    creds_manager: "IntegrationCredentialsManager",
    user_id: str,
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
    """
    Resolve auto_credentials from GoogleDriveFileField-style inputs.

    Returns:
        (extra_exec_kwargs, locks): kwargs to inject into block execution, and
        credential locks to release after execution completes.
    """
    extra_exec_kwargs: dict[str, Any] = {}
    locks: list[AsyncRedisLock] = []

    # NOTE: If a block ever has multiple auto-credential fields, a ValueError
    # on a later field will strand locks acquired for earlier fields. They'll
    # auto-expire via Redis TTL, but add a try/except to release partial locks
    # if that becomes a real scenario.
    for kwarg_name, info in input_model.get_auto_credentials_fields().items():
        field_name = info["field_name"]
        field_data = input_data.get(field_name)

        if field_data and isinstance(field_data, dict):
            # Check if _credentials_id key exists in the field data
            if "_credentials_id" in field_data:
                cred_id = field_data["_credentials_id"]
                if cred_id:
                    # Credential ID provided - acquire credentials
                    provider = info.get("config", {}).get(
                        "provider", "external service"
                    )
                    file_name = field_data.get("name", "selected file")
                    try:
                        credentials, lock = await creds_manager.acquire(
                            user_id, cred_id
                        )
                        locks.append(lock)
                        extra_exec_kwargs[kwarg_name] = credentials
                    except ValueError:
                        raise ValueError(
                            f"{provider.capitalize()} credentials for "
                            f"'{file_name}' in field '{field_name}' are not "
                            f"available in your account. "
                            f"This can happen if the agent was created by another "
                            f"user or the credentials were deleted. "
                            f"Please open the agent in the builder and re-select "
                            f"the file to authenticate with your own account."
                        )
                # else: _credentials_id is explicitly None, skip (chained data)
            else:
                # _credentials_id key missing entirely - this is an error
                provider = info.get("config", {}).get("provider", "external service")
                file_name = field_data.get("name", "selected file")
                raise ValueError(
                    f"Authentication missing for '{file_name}' in field "
                    f"'{field_name}'. Please re-select the file to authenticate "
                    f"with {provider.capitalize()}."
                )
        elif field_data is None and field_name not in input_data:
            # Field not in input_data at all = connected from upstream block, skip
            pass
        else:
            # field_data is None/empty but key IS in input_data = user didn't select
            provider = info.get("config", {}).get("provider", "external service")
            raise ValueError(
                f"No file selected for '{field_name}'. "
                f"Please select a file to provide "
                f"{provider.capitalize()} authentication."
            )

    return extra_exec_kwargs, locks
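
The helper hands back locks that the caller must release once the block run finishes; the execute_node() hunk below threads them into its existing creds_locks list. A minimal caller sketch, assuming AsyncRedisLock exposes an awaitable release() (an assumption; only the tuple shape is taken from the function above, and run_block is a hypothetical stand-in for the real block execution call):

async def run_block_with_auto_credentials(input_model, input_data, creds_manager, user_id, run_block):
    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=input_model,
        input_data=input_data,
        creds_manager=creds_manager,
        user_id=user_id,
    )
    try:
        return await run_block(**extra_kwargs)
    finally:
        for lock in locks:
            await lock.release()  # assumed lock API; locks also expire via Redis TTL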

async def execute_node(
    node: Node,
    data: NodeExecutionEntry,
@@ -271,41 +346,14 @@ async def execute_node(
            extra_exec_kwargs[field_name] = credentials

    # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
    for kwarg_name, info in input_model.get_auto_credentials_fields().items():
        field_name = info["field_name"]
        field_data = input_data.get(field_name)
        if field_data and isinstance(field_data, dict):
            # Check if _credentials_id key exists in the field data
            if "_credentials_id" in field_data:
                cred_id = field_data["_credentials_id"]
                if cred_id:
                    # Credential ID provided - acquire credentials
                    provider = info.get("config", {}).get(
                        "provider", "external service"
                    )
                    file_name = field_data.get("name", "selected file")
                    try:
                        credentials, lock = await creds_manager.acquire(
                            user_id, cred_id
                        )
                        creds_locks.append(lock)
                        extra_exec_kwargs[kwarg_name] = credentials
                    except ValueError:
                        # Credential was deleted or doesn't exist
                        raise ValueError(
                            f"Authentication expired for '{file_name}' in field '{field_name}'. "
                            f"The saved {provider.capitalize()} credentials no longer exist. "
                            f"Please re-select the file to re-authenticate."
                        )
                # else: _credentials_id is explicitly None, skip credentials (for chained data)
            else:
                # _credentials_id key missing entirely - this is an error
                provider = info.get("config", {}).get("provider", "external service")
                file_name = field_data.get("name", "selected file")
                raise ValueError(
                    f"Authentication missing for '{file_name}' in field '{field_name}'. "
                    f"Please re-select the file to authenticate with {provider.capitalize()}."
                )
    auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
        input_model=input_model,
        input_data=input_data,
        creds_manager=creds_manager,
        user_id=user_id,
    )
    extra_exec_kwargs.update(auto_extra_kwargs)
    creds_locks.extend(auto_locks)

    output_size = 0

@@ -0,0 +1,320 @@
"""
Tests for auto_credentials handling in execute_node().

These test the _acquire_auto_credentials() helper function extracted from
execute_node() (manager.py lines 273-308).
"""

import pytest
from pytest_mock import MockerFixture


@pytest.fixture
def google_drive_file_data():
    return {
        "valid": {
            "_credentials_id": "cred-id-123",
            "id": "file-123",
            "name": "test.xlsx",
            "mimeType": "application/vnd.google-apps.spreadsheet",
        },
        "chained": {
            "_credentials_id": None,
            "id": "file-456",
            "name": "chained.xlsx",
            "mimeType": "application/vnd.google-apps.spreadsheet",
        },
        "missing_key": {
            "id": "file-789",
            "name": "bad.xlsx",
            "mimeType": "application/vnd.google-apps.spreadsheet",
        },
    }


@pytest.fixture
def mock_input_model(mocker: MockerFixture):
    """Create a mock input model with get_auto_credentials_fields() returning one field."""
    input_model = mocker.MagicMock()
    input_model.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {
                "provider": "google",
                "type": "oauth2",
                "scopes": ["https://www.googleapis.com/auth/drive.readonly"],
            },
        }
    }
    return input_model


@pytest.fixture
def mock_creds_manager(mocker: MockerFixture):
    manager = mocker.AsyncMock()
    mock_lock = mocker.AsyncMock()
    mock_creds = mocker.MagicMock()
    mock_creds.id = "cred-id-123"
    mock_creds.provider = "google"
    manager.acquire.return_value = (mock_creds, mock_lock)
    return manager, mock_creds, mock_lock


@pytest.mark.asyncio
async def test_auto_credentials_happy_path(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """When field_data has a valid _credentials_id, credentials should be acquired."""
    from backend.executor.manager import _acquire_auto_credentials

    manager, mock_creds, mock_lock = mock_creds_manager
    input_data = {"spreadsheet": google_drive_file_data["valid"]}

    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=mock_input_model,
        input_data=input_data,
        creds_manager=manager,
        user_id="user-1",
    )

    manager.acquire.assert_called_once_with("user-1", "cred-id-123")
    assert extra_kwargs["credentials"] == mock_creds
    assert mock_lock in locks


@pytest.mark.asyncio
async def test_auto_credentials_field_none_static_raises(
    mocker: MockerFixture,
    mock_input_model,
    mock_creds_manager,
):
    """
    [THE BUG FIX TEST — OPEN-2895]
    When field_data is None and the key IS in input_data (user didn't select a file),
    should raise ValueError instead of silently skipping.
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    # Key is present but value is None = user didn't select a file
    input_data = {"spreadsheet": None}

    with pytest.raises(ValueError, match="No file selected"):
        await _acquire_auto_credentials(
            input_model=mock_input_model,
            input_data=input_data,
            creds_manager=manager,
            user_id="user-1",
        )


@pytest.mark.asyncio
async def test_auto_credentials_field_absent_skips(
    mocker: MockerFixture,
    mock_input_model,
    mock_creds_manager,
):
    """
    When the field key is NOT in input_data at all (upstream connection),
    should skip without error.
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    # Key not present = connected from upstream block
    input_data = {}

    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=mock_input_model,
        input_data=input_data,
        creds_manager=manager,
        user_id="user-1",
    )

    manager.acquire.assert_not_called()
    assert "credentials" not in extra_kwargs
    assert locks == []


@pytest.mark.asyncio
async def test_auto_credentials_chained_cred_id_none(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """
    When _credentials_id is explicitly None (chained data from upstream),
    should skip credential acquisition.
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    input_data = {"spreadsheet": google_drive_file_data["chained"]}

    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=mock_input_model,
        input_data=input_data,
        creds_manager=manager,
        user_id="user-1",
    )

    manager.acquire.assert_not_called()
    assert "credentials" not in extra_kwargs


@pytest.mark.asyncio
async def test_auto_credentials_missing_cred_id_key_raises(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """
    When _credentials_id key is missing entirely from field_data dict,
    should raise ValueError.
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    input_data = {"spreadsheet": google_drive_file_data["missing_key"]}

    with pytest.raises(ValueError, match="Authentication missing"):
        await _acquire_auto_credentials(
            input_model=mock_input_model,
            input_data=input_data,
            creds_manager=manager,
            user_id="user-1",
        )


@pytest.mark.asyncio
async def test_auto_credentials_ownership_mismatch_error(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """
    [SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
    the error message should mention 'not available' (not 'expired').
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    manager.acquire.side_effect = ValueError(
        "Credentials #cred-id-123 for user #user-2 not found"
    )
    input_data = {"spreadsheet": google_drive_file_data["valid"]}

    with pytest.raises(ValueError, match="not available in your account"):
        await _acquire_auto_credentials(
            input_model=mock_input_model,
            input_data=input_data,
            creds_manager=manager,
            user_id="user-2",
        )


@pytest.mark.asyncio
async def test_auto_credentials_deleted_credential_error(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """
    [SECRT-1772] When acquire() raises ValueError (credential was deleted),
    the error message should mention 'not available' (not 'expired').
    """
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, _ = mock_creds_manager
    manager.acquire.side_effect = ValueError(
        "Credentials #cred-id-123 for user #user-1 not found"
    )
    input_data = {"spreadsheet": google_drive_file_data["valid"]}

    with pytest.raises(ValueError, match="not available in your account"):
        await _acquire_auto_credentials(
            input_model=mock_input_model,
            input_data=input_data,
            creds_manager=manager,
            user_id="user-1",
        )


@pytest.mark.asyncio
async def test_auto_credentials_lock_appended(
    mocker: MockerFixture,
    google_drive_file_data,
    mock_input_model,
    mock_creds_manager,
):
    """Lock from acquire() should be included in returned locks list."""
    from backend.executor.manager import _acquire_auto_credentials

    manager, _, mock_lock = mock_creds_manager
    input_data = {"spreadsheet": google_drive_file_data["valid"]}

    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=mock_input_model,
        input_data=input_data,
        creds_manager=manager,
        user_id="user-1",
    )

    assert len(locks) == 1
    assert locks[0] is mock_lock


@pytest.mark.asyncio
async def test_auto_credentials_multiple_fields(
    mocker: MockerFixture,
    mock_creds_manager,
):
    """When there are multiple auto_credentials fields, only valid ones should acquire."""
    from backend.executor.manager import _acquire_auto_credentials

    manager, mock_creds, mock_lock = mock_creds_manager

    input_model = mocker.MagicMock()
    input_model.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        },
        "credentials2": {
            "field_name": "doc_file",
            "config": {"provider": "google", "type": "oauth2"},
        },
    }

    input_data = {
        "spreadsheet": {
            "_credentials_id": "cred-id-123",
            "id": "file-1",
            "name": "file1.xlsx",
        },
        "doc_file": {
            "_credentials_id": None,
            "id": "file-2",
            "name": "chained.doc",
        },
    }

    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=input_model,
        input_data=input_data,
        creds_manager=manager,
        user_id="user-1",
    )

    # Only the first field should have acquired credentials
    manager.acquire.assert_called_once_with("user-1", "cred-id-123")
    assert "credentials" in extra_kwargs
    assert "credentials2" not in extra_kwargs
    assert len(locks) == 1

@@ -259,7 +259,8 @@ async def _validate_node_input_credentials(

        # Find any fields of type CredentialsMetaInput
        credentials_fields = block.input_schema.get_credentials_fields()
        if not credentials_fields:
        auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
        if not credentials_fields and not auto_credentials_fields:
            continue

        # Track if any credential field is missing for this node
@@ -339,6 +340,47 @@ async def _validate_node_input_credentials(
                ] = "Invalid credentials: type/provider mismatch"
                continue

        # Validate auto-credentials (GoogleDriveFileField-based)
        # These have _credentials_id embedded in the file field data
        if auto_credentials_fields:
            for _kwarg_name, info in auto_credentials_fields.items():
                field_name = info["field_name"]
                # Check input_default and nodes_input_masks for the field value
                field_value = node.input_default.get(field_name)
                if nodes_input_masks and node.id in nodes_input_masks:
                    field_value = nodes_input_masks[node.id].get(
                        field_name, field_value
                    )

                if field_value and isinstance(field_value, dict):
                    if "_credentials_id" not in field_value:
                        # Key removed (e.g., on fork) — needs re-auth
                        has_missing_credentials = True
                        credential_errors[node.id][field_name] = (
                            "Authentication missing for the selected file. "
                            "Please re-select the file to authenticate with "
                            "your own account."
                        )
                        continue
                    cred_id = field_value.get("_credentials_id")
                    if cred_id and isinstance(cred_id, str):
                        try:
                            creds_store = get_integration_credentials_store()
                            creds = await creds_store.get_creds_by_id(user_id, cred_id)
                        except Exception as e:
                            has_missing_credentials = True
                            credential_errors[node.id][
                                field_name
                            ] = f"Credentials not available: {e}"
                            continue
                        if not creds:
                            has_missing_credentials = True
                            credential_errors[node.id][field_name] = (
                                "The saved credentials are not available "
                                "for your account. Please re-select the file to "
                                "authenticate with your own account."
                            )

        # If node has optional credentials and any are missing, mark for skipping
        # But only if there are no other errors for this node
        if (
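
This validation pass and the executor helper shown earlier agree on the same four states of an auto-credential file field. Restated compactly as an illustrative helper (not code from the diff):

def classify_auto_credential_field(input_data: dict, field_name: str) -> str:
    if field_name not in input_data:
        return "absent: value wired from an upstream block, skip"
    value = input_data.get(field_name)
    if not value or not isinstance(value, dict):
        return "present but empty: user never selected a file, error"
    if "_credentials_id" not in value:
        return "key stripped (e.g. on fork): needs re-auth, error"
    if value["_credentials_id"] is None:
        return "explicit None: chained data, skip acquisition"
    return "credential id present: acquire and check ownership"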

@@ -370,10 +412,11 @@ def make_node_credentials_input_map(
    """
    result: dict[str, dict[str, JsonValue]] = {}

    # Get aggregated credentials fields for the graph
    graph_cred_inputs = graph.aggregate_credentials_inputs()
    # Only map regular credentials (not auto_credentials, which are resolved
    # at execution time from _credentials_id in file field data)
    graph_cred_inputs = graph.regular_credentials_inputs

    for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
    for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
        # Best-effort map: skip missing items
        if graph_input_name not in graph_credentials_input:
            continue
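
The loop now unpacks 3-tuples because regular_credentials_inputs carries one more element than the old aggregate_credentials_inputs() result. The shape, as exercised by the tests elsewhere in this diff (the third element is a boolean the tests pass as True; its meaning is not spelled out in these hunks):

graph_cred_inputs = {
    "github_credentials": (
        field_info,                   # CredentialsFieldInfo
        {("node-1", "credentials")},  # compatible (node_id, field_name) pairs
        True,                         # third element added in this change
    ),
}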

@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(

    # Verify both parent and child status updates
    assert mock_execution_db.update_graph_execution_stats.call_count >= 1


# ============================================================================
# Tests for auto_credentials validation in _validate_node_input_credentials
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
# ============================================================================


@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_valid(
    mocker: MockerFixture,
):
    """
    [SECRT-1772] When a node has auto_credentials with a valid _credentials_id
    that exists in the store, validation should pass without errors.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-auto-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "spreadsheet": {
            "_credentials_id": "valid-cred-id",
            "id": "file-123",
            "name": "test.xlsx",
        }
    }

    mock_block = mocker.MagicMock()
    # No regular credentials fields
    mock_block.input_schema.get_credentials_fields.return_value = {}
    # Has auto_credentials fields
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Mock the credentials store to return valid credentials
    mock_store = mocker.MagicMock()
    mock_creds = mocker.MagicMock()
    mock_creds.id = "valid-cred-id"
    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    assert mock_node.id not in errors
    assert mock_node.id not in nodes_to_skip


@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_missing(
    mocker: MockerFixture,
):
    """
    [SECRT-1772] When a node has auto_credentials with a _credentials_id
    that doesn't exist for the current user, validation should report an error.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-bad-auto-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "spreadsheet": {
            "_credentials_id": "other-users-cred-id",
            "id": "file-123",
            "name": "test.xlsx",
        }
    }

    mock_block = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {}
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Mock the credentials store to return None (cred not found for this user)
    mock_store = mocker.MagicMock()
    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="different-user",
        nodes_input_masks=None,
    )

    assert mock_node.id in errors
    assert "spreadsheet" in errors[mock_node.id]
    assert "not available" in errors[mock_node.id]["spreadsheet"].lower()


@pytest.mark.asyncio
async def test_validate_node_input_credentials_both_regular_and_auto(
    mocker: MockerFixture,
):
    """
    [SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
    should have both validated.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-both-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "credentials": {
            "id": "regular-cred-id",
            "provider": "github",
            "type": "api_key",
        },
        "spreadsheet": {
            "_credentials_id": "auto-cred-id",
            "id": "file-123",
            "name": "test.xlsx",
        },
    }

    mock_credentials_field_type = mocker.MagicMock()
    mock_credentials_meta = mocker.MagicMock()
    mock_credentials_meta.id = "regular-cred-id"
    mock_credentials_meta.provider = "github"
    mock_credentials_meta.type = "api_key"
    mock_credentials_field_type.model_validate.return_value = mock_credentials_meta

    mock_block = mocker.MagicMock()
    # Regular credentials field
    mock_block.input_schema.get_credentials_fields.return_value = {
        "credentials": mock_credentials_field_type,
    }
    # Auto-credentials field
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "auto_credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Mock the credentials store to return valid credentials for both
    mock_store = mocker.MagicMock()
    mock_regular_creds = mocker.MagicMock()
    mock_regular_creds.id = "regular-cred-id"
    mock_regular_creds.provider = "github"
    mock_regular_creds.type = "api_key"

    mock_auto_creds = mocker.MagicMock()
    mock_auto_creds.id = "auto-cred-id"

    def get_creds_side_effect(user_id, cred_id):
        if cred_id == "regular-cred-id":
            return mock_regular_creds
        elif cred_id == "auto-cred-id":
            return mock_auto_creds
        return None

    mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    # Both should validate successfully - no errors
    assert mock_node.id not in errors
    assert mock_node.id not in nodes_to_skip


@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
    mocker: MockerFixture,
):
    """
    When a node has auto_credentials but the field value has _credentials_id=None
    (e.g., from upstream connection), validation should skip it without error.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-chained-auto-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "spreadsheet": {
            "_credentials_id": None,
            "id": "file-123",
            "name": "test.xlsx",
        }
    }

    mock_block = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {}
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    # No error - chained data with None cred_id is valid
    assert mock_node.id not in errors


# ============================================================================
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
# ============================================================================


def test_credentials_field_info_auto_credential_tag():
    """
    [Path 4] CredentialsFieldInfo should support is_auto_credential and
    input_field_name fields for distinguishing auto from regular credentials.
    """
    from backend.data.model import CredentialsFieldInfo

    # Regular credential should have is_auto_credential=False by default
    regular = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
        },
        by_alias=True,
    )
    assert regular.is_auto_credential is False
    assert regular.input_field_name is None

    # Auto credential should have is_auto_credential=True
    auto = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["google"],
            "credentials_types": ["oauth2"],
            "is_auto_credential": True,
            "input_field_name": "spreadsheet",
        },
        by_alias=True,
    )
    assert auto.is_auto_credential is True
    assert auto.input_field_name == "spreadsheet"


def test_make_node_credentials_input_map_excludes_auto_creds(
    mocker: MockerFixture,
):
    """
    [Path 4] make_node_credentials_input_map should only include regular credentials,
    not auto_credentials (which are resolved at execution time).
    """
    from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
    from backend.executor.utils import make_node_credentials_input_map
    from backend.integrations.providers import ProviderName

    # Create a mock graph with aggregate_credentials_inputs that returns
    # both regular and auto credentials
    mock_graph = mocker.MagicMock()

    regular_field_info = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
        },
        by_alias=True,
    )

    # Mock regular_credentials_inputs property (auto_credentials are excluded)
    mock_graph.regular_credentials_inputs = {
        "github_creds": (regular_field_info, {("node-1", "credentials")}, True),
    }

    graph_credentials_input = {
        "github_creds": CredentialsMetaInput(
            id="cred-123",
            provider=ProviderName("github"),
            type="api_key",
        ),
    }

    result = make_node_credentials_input_map(mock_graph, graph_credentials_input)

    # Regular credentials should be mapped
    assert "node-1" in result
    assert "credentials" in result["node-1"]

    # Auto credentials should NOT appear in the result
    # (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
    for node_id, fields in result.items():
        for field_name, value in fields.items():
            # Verify no auto-credential phantom entries
            if isinstance(value, dict):
                assert "_credentials_id" not in value

@@ -224,6 +224,14 @@ openweathermap_credentials = APIKeyCredentials(
    expires_at=None,
)

elevenlabs_credentials = APIKeyCredentials(
    id="f4a8b6c2-3d1e-4f5a-9b8c-7d6e5f4a3b2c",
    provider="elevenlabs",
    api_key=SecretStr(settings.secrets.elevenlabs_api_key),
    title="Use Credits for ElevenLabs",
    expires_at=None,
)

DEFAULT_CREDENTIALS = [
    ollama_credentials,
    revid_credentials,
@@ -252,6 +260,7 @@ DEFAULT_CREDENTIALS = [
    v0_credentials,
    webshare_proxy_credentials,
    openweathermap_credentials,
    elevenlabs_credentials,
]

SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}
@@ -366,6 +375,8 @@ class IntegrationCredentialsStore:
            all_credentials.append(webshare_proxy_credentials)
        if settings.secrets.openweathermap_api_key:
            all_credentials.append(openweathermap_credentials)
        if settings.secrets.elevenlabs_api_key:
            all_credentials.append(elevenlabs_credentials)
        return all_credentials

    async def get_creds_by_id(

@@ -18,6 +18,7 @@ class ProviderName(str, Enum):
    DISCORD = "discord"
    D_ID = "d_id"
    E2B = "e2b"
    ELEVENLABS = "elevenlabs"
    FAL = "fal"
    GITHUB = "github"
    GOOGLE = "google"

@@ -8,6 +8,8 @@ from pathlib import Path
from typing import TYPE_CHECKING, Literal
from urllib.parse import urlparse

from pydantic import BaseModel

from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.request import Requests
from backend.util.settings import Config
@@ -17,6 +19,35 @@ from backend.util.virus_scanner import scan_content_safe
if TYPE_CHECKING:
    from backend.data.execution import ExecutionContext


class WorkspaceUri(BaseModel):
    """Parsed workspace:// URI."""

    file_ref: str  # File ID or path (e.g. "abc123" or "/path/to/file.txt")
    mime_type: str | None = None  # MIME type from fragment (e.g. "video/mp4")
    is_path: bool = False  # True if file_ref is a path (starts with "/")


def parse_workspace_uri(uri: str) -> WorkspaceUri:
    """Parse a workspace:// URI into its components.

    Examples:
        "workspace://abc123" → WorkspaceUri(file_ref="abc123", mime_type=None, is_path=False)
        "workspace://abc123#video/mp4" → WorkspaceUri(file_ref="abc123", mime_type="video/mp4", is_path=False)
        "workspace:///path/to/file.txt" → WorkspaceUri(file_ref="/path/to/file.txt", mime_type=None, is_path=True)
    """
    raw = uri.removeprefix("workspace://")
    mime_type: str | None = None
    if "#" in raw:
        raw, fragment = raw.split("#", 1)
        mime_type = fragment or None
    return WorkspaceUri(
        file_ref=raw,
        mime_type=mime_type,
        is_path=raw.startswith("/"),
    )
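
A quick round trip of the parser, mirroring the docstring examples (the path in the second call is made up for illustration):

ws = parse_workspace_uri("workspace://abc123#video/mp4")
assert (ws.file_ref, ws.mime_type, ws.is_path) == ("abc123", "video/mp4", False)

ws = parse_workspace_uri("workspace:///reports/q3.txt")
assert ws.is_path is True and ws.mime_type is None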

# Return format options for store_media_file
# - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc.
# - "for_external_api": Returns data URI (base64) - use when sending content to external APIs
@@ -183,22 +214,20 @@ async def store_media_file(
                "This file type is only available in CoPilot sessions."
            )

        # Parse workspace reference
        # workspace://abc123 - by file ID
        # workspace:///path/to/file.txt - by virtual path
        file_ref = file[12:]  # Remove "workspace://"
        # Parse workspace reference (strips #mimeType fragment from file ID)
        ws = parse_workspace_uri(file)

        if file_ref.startswith("/"):
            # Path reference
            workspace_content = await workspace_manager.read_file(file_ref)
            file_info = await workspace_manager.get_file_info_by_path(file_ref)
        if ws.is_path:
            # Path reference: workspace:///path/to/file.txt
            workspace_content = await workspace_manager.read_file(ws.file_ref)
            file_info = await workspace_manager.get_file_info_by_path(ws.file_ref)
            filename = sanitize_filename(
                file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
        else:
            # ID reference
            workspace_content = await workspace_manager.read_file_by_id(file_ref)
            file_info = await workspace_manager.get_file_info(file_ref)
            # ID reference: workspace://abc123 or workspace://abc123#video/mp4
            workspace_content = await workspace_manager.read_file_by_id(ws.file_ref)
            file_info = await workspace_manager.get_file_info(ws.file_ref)
            filename = sanitize_filename(
                file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
@@ -334,7 +363,21 @@ async def store_media_file(

        # Don't re-save if input was already from workspace
        if is_from_workspace:
            # Return original workspace reference
            # Return original workspace reference, ensuring MIME type fragment
            ws = parse_workspace_uri(file)
            if not ws.mime_type:
                # Add MIME type fragment if missing (older refs without it)
                try:
                    if ws.is_path:
                        info = await workspace_manager.get_file_info_by_path(
                            ws.file_ref
                        )
                    else:
                        info = await workspace_manager.get_file_info(ws.file_ref)
                    if info:
                        return MediaFileType(f"{file}#{info.mimeType}")
                except Exception:
                    pass
            return MediaFileType(file)

        # Save new content to workspace
@@ -346,7 +389,7 @@ async def store_media_file(
            filename=filename,
            overwrite=True,
        )
        return MediaFileType(f"workspace://{file_record.id}")
        return MediaFileType(f"workspace://{file_record.id}#{file_record.mimeType}")

    else:
        raise ValueError(f"Invalid return_format: {return_format}")

@@ -656,6 +656,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    e2b_api_key: str = Field(default="", description="E2B API key")
    nvidia_api_key: str = Field(default="", description="Nvidia API key")
    mem0_api_key: str = Field(default="", description="Mem0 API key")
    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")

    linear_client_id: str = Field(default="", description="Linear client ID")
    linear_client_secret: str = Field(default="", description="Linear client secret")

@@ -22,6 +22,7 @@ from backend.data.workspace import (
    soft_delete_workspace_file,
)
from backend.util.settings import Config
from backend.util.virus_scanner import scan_content_safe
from backend.util.workspace_storage import compute_file_checksum, get_workspace_storage

logger = logging.getLogger(__name__)
@@ -187,6 +188,9 @@ class WorkspaceManager:
                f"{Config().max_file_size_mb}MB limit"
            )

        # Virus scan content before persisting (defense in depth)
        await scan_content_safe(content, filename=filename)

        # Determine path with session scoping
        if path is None:
            path = f"/{filename}"

autogpt_platform/backend/poetry.lock (generated, 47 changed lines)
@@ -1169,6 +1169,29 @@ attrs = ">=21.3.0"
e2b = ">=1.5.4,<2.0.0"
httpx = ">=0.20.0,<1.0.0"

[[package]]
name = "elevenlabs"
version = "1.59.0"
description = ""
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
    {file = "elevenlabs-1.59.0-py3-none-any.whl", hash = "sha256:468145db81a0bc867708b4a8619699f75583e9481b395ec1339d0b443da771ed"},
    {file = "elevenlabs-1.59.0.tar.gz", hash = "sha256:16e735bd594e86d415dd445d249c8cc28b09996cfd627fbc10102c0a84698859"},
]

[package.dependencies]
httpx = ">=0.21.2"
pydantic = ">=1.9.2"
pydantic-core = ">=2.18.2,<3.0.0"
requests = ">=2.20"
typing_extensions = ">=4.0.0"
websockets = ">=11.0"

[package.extras]
pyaudio = ["pyaudio (>=0.2.14)"]

[[package]]
name = "email-validator"
version = "2.2.0"
@@ -7361,6 +7384,28 @@ files = [
defusedxml = ">=0.7.1,<0.8.0"
requests = "*"

[[package]]
name = "yt-dlp"
version = "2025.12.8"
description = "A feature-rich command-line audio/video downloader"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "yt_dlp-2025.12.8-py3-none-any.whl", hash = "sha256:36e2584342e409cfbfa0b5e61448a1c5189e345cf4564294456ee509e7d3e065"},
    {file = "yt_dlp-2025.12.8.tar.gz", hash = "sha256:b773c81bb6b71cb2c111cfb859f453c7a71cf2ef44eff234ff155877184c3e4f"},
]

[package.extras]
build = ["build", "hatchling (>=1.27.0)", "pip", "setuptools (>=71.0.2)", "wheel"]
curl-cffi = ["curl-cffi (>=0.5.10,<0.6.dev0 || >=0.10.dev0,<0.14) ; implementation_name == \"cpython\""]
default = ["brotli ; implementation_name == \"cpython\"", "brotlicffi ; implementation_name != \"cpython\"", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=2.0.2,<3)", "websockets (>=13.0)", "yt-dlp-ejs (==0.3.2)"]
dev = ["autopep8 (>=2.0,<3.0)", "pre-commit", "pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)", "ruff (>=0.14.0,<0.15.0)"]
pyinstaller = ["pyinstaller (>=6.17.0)"]
secretstorage = ["cffi", "secretstorage"]
static-analysis = ["autopep8 (>=2.0,<3.0)", "ruff (>=0.14.0,<0.15.0)"]
test = ["pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)"]

[[package]]
name = "zerobouncesdk"
version = "1.1.2"
@@ -7512,4 +7557,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf"
content-hash = "8239323f9ae6713224dffd1fe8ba8b449fe88b6c3c7a90940294a74f43a0387a"

@@ -20,6 +20,7 @@ click = "^8.2.0"
cryptography = "^45.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
fastapi = "^0.116.1"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
@@ -71,6 +72,7 @@ tweepy = "^4.16.0"
uvicorn = { extras = ["standard"], version = "^0.35.0" }
websockets = "^15.0"
youtube-transcript-api = "^1.2.1"
yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"

@@ -3,7 +3,6 @@
  "credentials_input_schema": {
    "properties": {},
    "required": [],
    "title": "TestGraphCredentialsInputSchema",
    "type": "object"
  },
  "description": "A test graph",

@@ -1,34 +1,14 @@
[
  {
    "credentials_input_schema": {
      "properties": {},
      "required": [],
      "title": "TestGraphCredentialsInputSchema",
      "type": "object"
    },
    "created_at": "2025-09-04T13:37:00",
    "description": "A test graph",
    "forked_from_id": null,
    "forked_from_version": null,
    "has_external_trigger": false,
    "has_human_in_the_loop": false,
    "has_sensitive_action": false,
    "id": "graph-123",
    "input_schema": {
      "properties": {},
      "required": [],
      "type": "object"
    },
    "instructions": null,
    "is_active": true,
    "name": "Test Graph",
    "output_schema": {
      "properties": {},
      "required": [],
      "type": "object"
    },
    "recommended_schedule_cron": null,
    "sub_graphs": [],
    "trigger_setup_info": null,
    "user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
    "version": 1
  }

@@ -1,5 +1,5 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
type Credentials = Record<string, Credential>;

type Props = {
  agent: GraphMeta | null;
  agent: GraphModel | null;
  siblingInputs?: Record<string, any>;
  onCredentialsChange: (
    credentials: Record<string, CredentialsMetaInput>,

@@ -1,9 +1,9 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";

export function getCredentialFields(
  agent: GraphMeta | null,
  agent: GraphModel | null,
): AgentCredentialsFields {
  if (!agent) return {};

@@ -3,10 +3,10 @@ import type {
  CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import type { InputValues } from "./types";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";

export function computeInitialAgentInputs(
  agent: GraphMeta | null,
  agent: GraphModel | null,
  existingInputs?: InputValues | null,
): InputValues {
  const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
}

type IsRunDisabledParams = {
  agent: GraphMeta | null;
  agent: GraphModel | null;
  isRunning: boolean;
  agentInputs: InputValues | null | undefined;
};

@@ -30,6 +30,8 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";

type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
  uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
    .filter((b) => b.uiType !== BlockUIType.AGENT)
    .sort((a, b) => a.name.localeCompare(b.name));

  // Agent blocks are created from GraphMeta which doesn't include schemas.
  // Schemas will be fetched on-demand when the block is actually added.
  const agentBlockList = flows
    .map((flow): _Block => {
      return {
@@ -116,8 +120,9 @@ export function BlocksControl({
          `Ver.${flow.version}` +
          (flow.description ? ` | ${flow.description}` : ""),
        categories: [{ category: "AGENT", description: "" }],
        inputSchema: flow.input_schema,
        outputSchema: flow.output_schema,
        // Empty schemas - will be populated when block is added
        inputSchema: { type: "object", properties: {} },
        outputSchema: { type: "object", properties: {} },
        staticOutput: false,
        uiType: BlockUIType.AGENT,
        costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
        hardcodedValues: {
          graph_id: flow.id,
          graph_version: flow.version,
          input_schema: flow.input_schema,
          output_schema: flow.output_schema,
          // Schemas will be fetched on-demand when block is added
        },
      };
    })
@@ -182,6 +186,37 @@ export function BlocksControl({
    setSelectedCategory(null);
  }, []);

  // Handler to add a block, fetching graph data on-demand for agent blocks
  const handleAddBlock = useCallback(
    async (block: _Block & { notAvailable: string | null }) => {
      if (block.notAvailable) return;

      // For agent blocks, fetch the full graph to get schemas
      if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
        const graphID = block.hardcodedValues.graph_id as string;
        const graphVersion = block.hardcodedValues.graph_version as number;
        const graphData = okData(
          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
        );

        if (graphData) {
          addBlock(block.id, block.name, {
            ...block.hardcodedValues,
            input_schema: graphData.input_schema,
            output_schema: graphData.output_schema,
          });
        } else {
          // Fallback: add without schemas (will be incomplete)
          console.error("Failed to fetch graph data for agent block");
          addBlock(block.id, block.name, block.hardcodedValues || {});
        }
      } else {
        addBlock(block.id, block.name, block.hardcodedValues || {});
      }
    },
    [addBlock],
  );

  // Extract unique categories from blocks
  const categories = useMemo(() => {
    return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
              }),
            );
          }}
          onClick={() =>
            !block.notAvailable &&
            addBlock(block.id, block.name, block?.hardcodedValues || {})
          }
          onClick={() => handleAddBlock(block)}
          title={block.notAvailable ?? undefined}
        >
          <div

@@ -1,6 +1,6 @@
import { beautifyString } from "@/lib/utils";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { useState } from "react";
import React, { useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import {
@@ -11,6 +11,12 @@ import {
  TableHeader,
  TableRow,
} from "../../../../../components/__legacy__/ui/table";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
  globalRegistry,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
import ExpandableOutputDialog from "./ExpandableOutputDialog";

@@ -26,6 +32,9 @@ export default function DataTable({
  data,
}: DataTableProps) {
  const { toast } = useToast();
  const enableEnhancedOutputHandling = useGetFlag(
    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
  );
  const [expandedDialog, setExpandedDialog] = useState<{
    isOpen: boolean;
    execId: string;
@@ -33,6 +42,15 @@ export default function DataTable({
    data: any[];
  } | null>(null);

  // Prepare renderers for each item when enhanced mode is enabled
  const getItemRenderer = useMemo(() => {
    if (!enableEnhancedOutputHandling) return null;
    return (item: unknown) => {
      const metadata: OutputMetadata = {};
      return globalRegistry.getRenderer(item, metadata);
    };
  }, [enableEnhancedOutputHandling]);

  const copyData = (pin: string, data: string) => {
    navigator.clipboard.writeText(data).then(() => {
      toast({
@@ -102,15 +120,31 @@ export default function DataTable({
                <Clipboard size={18} />
              </Button>
            </div>
            {value.map((item, index) => (
              <React.Fragment key={index}>
                <ContentRenderer
                  value={item}
                  truncateLongData={truncateLongData}
                />
                {index < value.length - 1 && ", "}
              </React.Fragment>
            ))}
            {value.map((item, index) => {
              const renderer = getItemRenderer?.(item);
              if (enableEnhancedOutputHandling && renderer) {
                const metadata: OutputMetadata = {};
                return (
                  <React.Fragment key={index}>
                    <OutputItem
                      value={item}
                      metadata={metadata}
                      renderer={renderer}
                    />
                    {index < value.length - 1 && ", "}
                  </React.Fragment>
                );
              }
              return (
                <React.Fragment key={index}>
                  <ContentRenderer
                    value={item}
                    truncateLongData={truncateLongData}
                  />
                  {index < value.length - 1 && ", "}
                </React.Fragment>
              );
            })}
          </div>
        </TableCell>
      </TableRow>

@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
import "./flow.css";
import {
  BlockIORootSchema,
  BlockUIType,
  formatEdgeID,
  GraphExecutionID,
  GraphID,
  GraphMeta,
  LibraryAgent,
  SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { Key, storage } from "@/services/storage/local-storage";
import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@ const FlowEditor: React.FC<{
    [getNode, updateNode, nodes],
  );

  /* Shared helper to create and add a node */
  const createAndAddNode = useCallback(
    async (
      blockID: string,
      blockName: string,
      hardcodedValues: Record<string, any>,
      position: { x: number; y: number },
    ): Promise<CustomNode | null> => {
      const nodeSchema = availableBlocks.find((node) => node.id === blockID);
      if (!nodeSchema) {
        console.error(`Schema not found for block ID: ${blockID}`);
        return null;
      }

      // For agent blocks, fetch the full graph to get schemas
      let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
      let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
      let finalHardcodedValues = hardcodedValues;

      if (blockID === SpecialBlockID.AGENT) {
        const graphID = hardcodedValues.graph_id as string;
        const graphVersion = hardcodedValues.graph_version as number;
        const graphData = okData(
          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
        );

        if (graphData) {
          inputSchema = graphData.input_schema as BlockIORootSchema;
          outputSchema = graphData.output_schema as BlockIORootSchema;
          finalHardcodedValues = {
            ...hardcodedValues,
            input_schema: graphData.input_schema,
            output_schema: graphData.output_schema,
          };
        } else {
          console.error("Failed to fetch graph data for agent block");
        }
      }

      const newNode: CustomNode = {
        id: nodeId.toString(),
        type: "custom",
        position,
        data: {
          blockType: blockName,
          blockCosts: nodeSchema.costs || [],
          title: `${blockName} ${nodeId}`,
          description: nodeSchema.description,
          categories: nodeSchema.categories,
          inputSchema: inputSchema,
          outputSchema: outputSchema,
          hardcodedValues: finalHardcodedValues,
          connections: [],
          isOutputOpen: false,
          block_id: blockID,
          isOutputStatic: nodeSchema.staticOutput,
          uiType: nodeSchema.uiType,
        },
      };

      addNodes(newNode);
      setNodeId((prevId) => prevId + 1);
      clearNodesStatusAndOutput();

      history.push({
        type: "ADD_NODE",
        payload: { node: { ...newNode, ...newNode.data } },
        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
        redo: () => addNodes(newNode),
      });

      return newNode;
    },
    [
      availableBlocks,
      nodeId,
      addNodes,
      deleteElements,
      clearNodesStatusAndOutput,
    ],
  );
const addNode = useCallback(
|
||||
(blockId: string, nodeType: string, hardcodedValues: any = {}) => {
|
||||
async (
|
||||
blockId: string,
|
||||
nodeType: string,
|
||||
hardcodedValues: Record<string, any> = {},
|
||||
) => {
|
||||
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
|
||||
if (!nodeSchema) {
|
||||
console.error(`Schema not found for block ID: ${blockId}`);
|
||||
@@ -707,73 +797,42 @@ const FlowEditor: React.FC<{
|
||||
// Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)
|
||||
|
||||
const { x, y } = getViewport();
|
||||
const viewportCoordinates =
|
||||
const position =
|
||||
nodeDimensions && Object.keys(nodeDimensions).length > 0
|
||||
? // we will get all the dimension of nodes, then store
|
||||
findNewlyAddedBlockCoordinates(
|
||||
? findNewlyAddedBlockCoordinates(
|
||||
nodeDimensions,
|
||||
nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
|
||||
60,
|
||||
1.0,
|
||||
)
|
||||
: // we will get all the dimension of nodes, then store
|
||||
{
|
||||
: {
|
||||
x: window.innerWidth / 2 - x,
|
||||
y: window.innerHeight / 2 - y,
|
||||
};
|
||||
|
||||
const newNode: CustomNode = {
|
||||
id: nodeId.toString(),
|
||||
type: "custom",
|
||||
position: viewportCoordinates, // Set the position to the calculated viewport center
|
||||
data: {
|
||||
blockType: nodeType,
|
||||
blockCosts: nodeSchema.costs,
|
||||
title: `${nodeType} ${nodeId}`,
|
||||
description: nodeSchema.description,
|
||||
categories: nodeSchema.categories,
|
||||
inputSchema: nodeSchema.inputSchema,
|
||||
outputSchema: nodeSchema.outputSchema,
|
||||
hardcodedValues: hardcodedValues,
|
||||
connections: [],
|
||||
isOutputOpen: false,
|
||||
block_id: blockId,
|
||||
isOutputStatic: nodeSchema.staticOutput,
|
||||
uiType: nodeSchema.uiType,
|
||||
},
|
||||
};
|
||||
|
||||
addNodes(newNode);
|
||||
setNodeId((prevId) => prevId + 1);
|
||||
clearNodesStatusAndOutput(); // Clear status and output when a new node is added
|
||||
const newNode = await createAndAddNode(
|
||||
blockId,
|
||||
nodeType,
|
||||
hardcodedValues,
|
||||
position,
|
||||
);
|
||||
if (!newNode) return;
|
||||
|
||||
setViewport(
|
||||
{
|
||||
// Rough estimate of the dimension of the node is: 500x400px.
|
||||
// Though we skip shifting the X, considering the block menu side-bar.
|
||||
x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
|
||||
y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
|
||||
x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
|
||||
y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
|
||||
zoom: 0.8,
|
||||
},
|
||||
{ duration: 500 },
|
||||
);
|
||||
|
||||
history.push({
|
||||
type: "ADD_NODE",
|
||||
payload: { node: { ...newNode, ...newNode.data } },
|
||||
undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
|
||||
redo: () => addNodes(newNode),
|
||||
});
|
||||
},
|
||||
[
|
||||
nodeId,
|
||||
getViewport,
|
||||
setViewport,
|
||||
availableBlocks,
|
||||
addNodes,
|
||||
nodeDimensions,
|
||||
deleteElements,
|
||||
clearNodesStatusAndOutput,
|
||||
createAndAddNode,
|
||||
],
|
||||
);
|
||||
|
||||
@@ -920,7 +979,7 @@ const FlowEditor: React.FC<{
|
||||
}, []);
|
||||
|
||||
const onDrop = useCallback(
|
||||
(event: React.DragEvent) => {
|
||||
async (event: React.DragEvent) => {
|
||||
event.preventDefault();
|
||||
|
||||
const blockData = event.dataTransfer.getData("application/reactflow");
|
||||
@@ -935,62 +994,17 @@ const FlowEditor: React.FC<{
|
||||
y: event.clientY,
|
||||
});
|
||||
|
||||
// Find the block schema
|
||||
const nodeSchema = availableBlocks.find((node) => node.id === blockId);
|
||||
if (!nodeSchema) {
|
||||
console.error(`Schema not found for block ID: ${blockId}`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Create the new node at the drop position
|
||||
const newNode: CustomNode = {
|
||||
id: nodeId.toString(),
|
||||
type: "custom",
|
||||
await createAndAddNode(
|
||||
blockId,
|
||||
blockName,
|
||||
hardcodedValues || {},
|
||||
position,
|
||||
data: {
|
||||
blockType: blockName,
|
||||
blockCosts: nodeSchema.costs || [],
|
||||
title: `${blockName} ${nodeId}`,
|
||||
description: nodeSchema.description,
|
||||
categories: nodeSchema.categories,
|
||||
inputSchema: nodeSchema.inputSchema,
|
||||
outputSchema: nodeSchema.outputSchema,
|
||||
hardcodedValues: hardcodedValues,
|
||||
connections: [],
|
||||
isOutputOpen: false,
|
||||
block_id: blockId,
|
||||
uiType: nodeSchema.uiType,
|
||||
},
|
||||
};
|
||||
|
||||
history.push({
|
||||
type: "ADD_NODE",
|
||||
payload: { node: { ...newNode, ...newNode.data } },
|
||||
undo: () => {
|
||||
deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
|
||||
},
|
||||
redo: () => {
|
||||
addNodes([newNode]);
|
||||
},
|
||||
});
|
||||
addNodes([newNode]);
|
||||
clearNodesStatusAndOutput();
|
||||
|
||||
setNodeId((prevId) => prevId + 1);
|
||||
);
|
||||
} catch (error) {
|
||||
console.error("Failed to drop block:", error);
|
||||
}
|
||||
},
|
||||
[
|
||||
nodeId,
|
||||
availableBlocks,
|
||||
nodes,
|
||||
edges,
|
||||
addNodes,
|
||||
screenToFlowPosition,
|
||||
deleteElements,
|
||||
clearNodesStatusAndOutput,
|
||||
],
|
||||
[screenToFlowPosition, createAndAddNode],
|
||||
);
|
||||
|
||||
const buildContextValue: BuilderContextType = useMemo(
|
||||
|
||||
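For agent blocks, the `createAndAddNode` helper above replaces the placeholder block schemas with the referenced graph's real `input_schema`/`output_schema` before the node is created. A hedged usage sketch — the IDs, name, and coordinates are illustrative, not taken from the codebase:

```ts
// Hypothetical call site for an agent block.
const node = await createAndAddNode(
  SpecialBlockID.AGENT,
  "My Sub-Agent",
  { graph_id: "graph-123", graph_version: 4 }, // triggers the schema fetch
  { x: 0, y: 0 },
);
if (node) {
  // node.data.inputSchema/outputSchema now mirror the sub-graph, and
  // hardcodedValues additionally carries input_schema/output_schema copies.
}
```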
@@ -1,8 +1,14 @@
import React, { useContext, useState } from "react";
import React, { useContext, useMemo, useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { Maximize2 } from "lucide-react";
import * as Separator from "@radix-ui/react-separator";
import { ContentRenderer } from "@/components/__legacy__/ui/render";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";

import { beautifyString } from "@/lib/utils";

@@ -21,6 +27,9 @@ export default function NodeOutputs({
data,
}: NodeOutputsProps) {
const builderContext = useContext(BuilderContext);
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);

const [expandedDialog, setExpandedDialog] = useState<{
isOpen: boolean;
@@ -37,6 +46,15 @@ export default function NodeOutputs({

const { getNodeTitle } = builderContext;

// Prepare renderers for each item when enhanced mode is enabled
const getItemRenderer = useMemo(() => {
if (!enableEnhancedOutputHandling) return null;
return (item: unknown) => {
const metadata: OutputMetadata = {};
return globalRegistry.getRenderer(item, metadata);
};
}, [enableEnhancedOutputHandling]);

const getBeautifiedPinName = (pin: string) => {
if (!pin.startsWith("tools_^_")) {
return beautifyString(pin);
@@ -87,15 +105,31 @@ export default function NodeOutputs({
<div className="mt-2">
<strong className="mr-2">Data:</strong>
<div className="mt-1">
{dataArray.slice(0, 10).map((item, index) => (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
))}
{dataArray.slice(0, 10).map((item, index) => {
const renderer = getItemRenderer?.(item);
if (enableEnhancedOutputHandling && renderer) {
const metadata: OutputMetadata = {};
return (
<React.Fragment key={index}>
<OutputItem
value={item}
metadata={metadata}
renderer={renderer}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
}
return (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
})}
{dataArray.length > 10 && (
<span style={{ color: "#888" }}>
<br />

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";

interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: GraphMeta;
graph: Graph;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
import {
BlockUIType,
CredentialsMetaInput,
GraphMeta,
Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";

interface RunnerUIWrapperProps {
graph: GraphMeta;
graph: Graph;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (

@@ -1,5 +1,5 @@
import { GraphInputSchema } from "@/lib/autogpt-server-api";
import { GraphMetaLike, IncompatibilityInfo } from "./types";
import { GraphLike, IncompatibilityInfo } from "./types";

// Helper type for schema properties - the generated types are too loose
type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
*/
export function createUpdatedAgentNodeInputs(
currentInputs: Record<string, unknown>,
latestSubGraphVersion: GraphMetaLike,
latestSubGraphVersion: GraphLike,
): Record<string, unknown> {
return {
...currentInputs,

@@ -1,7 +1,11 @@
import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
import type {
Graph as LegacyGraph,
GraphMeta as LegacyGraphMeta,
} from "@/lib/autogpt-server-api";
import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";

export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
hasUpdate: boolean;
currentVersion: number;
latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
incompatibilities: IncompatibilityInfo | null;
};

// Union type for GraphMeta that works with both legacy and new builder
// Union type for Graph (with schemas) that works with both legacy and new builder
export type GraphLike = LegacyGraph | GeneratedGraph;

// Union type for GraphMeta (without schemas) for version detection
export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;

export type IncompatibilityInfo = {

@@ -1,5 +1,11 @@
import { useMemo } from "react";
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
import type {
GraphInputSchema,
GraphOutputSchema,
} from "@/lib/autogpt-server-api";
import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { getEffectiveType } from "@/lib/utils";
import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
import {
@@ -11,26 +17,38 @@ import {
/**
* Checks if a newer version of a sub-agent is available and determines compatibility
*/
export function useSubAgentUpdate<T extends GraphMetaLike>(
export function useSubAgentUpdate(
nodeID: string,
graphID: string | undefined,
graphVersion: number | undefined,
currentInputSchema: GraphInputSchema | undefined,
currentOutputSchema: GraphOutputSchema | undefined,
connections: EdgeLike[],
availableGraphs: T[],
): SubAgentUpdateInfo<T> {
availableGraphs: GraphMetaLike[],
): SubAgentUpdateInfo<GraphModel> {
// Find the latest version of the same graph
const latestGraph = useMemo(() => {
const latestGraphInfo = useMemo(() => {
if (!graphID) return null;
return availableGraphs.find((graph) => graph.id === graphID) || null;
}, [graphID, availableGraphs]);

// Check if there's an update available
// Check if there's a newer version available
const hasUpdate = useMemo(() => {
if (!latestGraph || graphVersion === undefined) return false;
return latestGraph.version! > graphVersion;
}, [latestGraph, graphVersion]);
if (!latestGraphInfo || graphVersion === undefined) return false;
return latestGraphInfo.version! > graphVersion;
}, [latestGraphInfo, graphVersion]);

// Fetch full graph IF an update is detected
const { data: latestGraph } = useGetV1GetSpecificGraph(
graphID ?? "",
{ version: latestGraphInfo?.version },
{
query: {
enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
select: okData,
},
},
);

// Get connected input and output handles for this specific node
const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
return {
hasUpdate,
currentVersion: graphVersion || 0,
latestVersion: latestGraph?.version || 0,
latestGraph,
latestVersion: latestGraphInfo?.version || 0,
latestGraph: latestGraph || null,
isCompatible: compatibilityResult.isCompatible,
incompatibilities: compatibilityResult.incompatibilities,
};

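Since `GraphMeta` no longer carries schemas, the hook above works in two phases: scan the cheap `GraphMetaLike` list for a newer `version`, then let react-query's `enabled` flag gate the expensive full-graph fetch. A sketch of that gating pattern using the hook's own names (everything else illustrative):

```ts
// Phase 1: cheap version check against the already-loaded metadata list.
const meta = availableGraphs.find((g) => g.id === graphID) ?? null;
const hasUpdate =
  !!meta && graphVersion !== undefined && meta.version! > graphVersion;

// Phase 2: fetch the schema-bearing GraphModel only when needed.
const { data: fullGraph } = useGetV1GetSpecificGraph(
  graphID ?? "",
  { version: meta?.version },
  {
    query: {
      enabled: hasUpdate && !!graphID && !!meta?.version, // no request otherwise
      select: okData, // unwrap the response envelope
    },
  },
);
```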
@@ -18,7 +18,7 @@ interface GraphStore {
outputSchema: Record<string, any> | null,
) => void;

// Available graphs; used for sub-graph updates
// Available graphs; used for sub-graph updated version detection
availableSubGraphs: GraphMeta[];
setAvailableSubGraphs: (graphs: GraphMeta[]) => void;


@@ -10,8 +10,8 @@ import React, {
import {
CredentialsMetaInput,
CredentialsType,
Graph,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
LibraryAgentPresetID,
LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
className,
recommendedScheduleCron,
}: {
graph: GraphMeta;
graph: Graph;
agentActions?: ButtonAction[];
recommendedScheduleCron?: string | null;
doRun?: (

@@ -2,8 +2,8 @@
import React, { useCallback, useMemo } from "react";

import {
Graph,
GraphExecutionID,
GraphMeta,
Schedule,
ScheduleID,
} from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
onForcedRun,
doDeleteSchedule,
}: {
graph: GraphMeta;
graph: Graph;
schedule: Schedule;
agentActions: ButtonAction[];
onForcedRun: (runID: GraphExecutionID) => void;

@@ -5629,7 +5629,9 @@
"description": "Successful Response",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/GraphMeta" }
"schema": {
"$ref": "#/components/schemas/GraphModelWithoutNodes"
}
}
}
},
@@ -6495,18 +6497,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6514,11 +6504,22 @@
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
}
},
"type": "object",
"required": ["name", "description"],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BaseGraph-Output": {
"properties": {
@@ -6539,18 +6540,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -6559,6 +6548,16 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -6605,7 +6604,8 @@
"has_sensitive_action",
"trigger_setup_info"
],
"title": "BaseGraph"
"title": "BaseGraph",
"description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
},
"BlockCategoryResponse": {
"properties": {
@@ -7399,18 +7399,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7419,16 +7407,26 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"nodes": {
"items": { "$ref": "#/components/schemas/Node" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Input" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
}
},
"type": "object",
"required": ["name", "description"],
"title": "Graph"
"title": "Graph",
"description": "Creatable graph model used in API create/update endpoints."
},
"GraphExecution": {
"properties": {
@@ -7778,6 +7776,52 @@
"description": "Response schema for paginated graph executions."
},
"GraphMeta": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version" },
"is_active": {
"type": "boolean",
"title": "Is Active",
"default": true
},
"name": { "type": "string", "title": "Name" },
"description": { "type": "string", "title": "Description" },
"instructions": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Instructions"
},
"recommended_schedule_cron": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
},
"forked_from_version": {
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
}
},
"type": "object",
"required": [
"id",
"version",
"name",
"description",
"user_id",
"created_at"
],
"title": "GraphMeta",
"description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
},
"GraphModel": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7804,13 +7848,27 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
"format": "date-time",
"title": "Created At"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes"
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
"title": "Sub Graphs"
},
"user_id": { "type": "string", "title": "User Id" },
"input_schema": {
"additionalProperties": true,
"type": "object",
@@ -7857,6 +7915,7 @@
"name",
"description",
"user_id",
"created_at",
"input_schema",
"output_schema",
"has_external_trigger",
@@ -7865,9 +7924,10 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphMeta"
"title": "GraphModel",
"description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
},
"GraphModel": {
"GraphModelWithoutNodes": {
"properties": {
"id": { "type": "string", "title": "Id" },
"version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7886,18 +7946,6 @@
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Recommended Schedule Cron"
},
"nodes": {
"items": { "$ref": "#/components/schemas/NodeModel" },
"type": "array",
"title": "Nodes",
"default": []
},
"links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Links",
"default": []
},
"forked_from_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Forked From Id"
@@ -7906,12 +7954,6 @@
"anyOf": [{ "type": "integer" }, { "type": "null" }],
"title": "Forked From Version"
},
"sub_graphs": {
"items": { "$ref": "#/components/schemas/BaseGraph-Output" },
"type": "array",
"title": "Sub Graphs",
"default": []
},
"user_id": { "type": "string", "title": "User Id" },
"created_at": {
"type": "string",
@@ -7973,7 +8015,8 @@
"trigger_setup_info",
"credentials_input_schema"
],
"title": "GraphModel"
"title": "GraphModelWithoutNodes",
"description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
},
"GraphSettings": {
"properties": {
@@ -8613,26 +8656,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
}
},
"type": "object",
@@ -8712,26 +8751,22 @@
"input_default": {
"additionalProperties": true,
"type": "object",
"title": "Input Default",
"default": {}
"title": "Input Default"
},
"metadata": {
"additionalProperties": true,
"type": "object",
"title": "Metadata",
"default": {}
"title": "Metadata"
},
"input_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Input Links",
"default": []
"title": "Input Links"
},
"output_links": {
"items": { "$ref": "#/components/schemas/Link" },
"type": "array",
"title": "Output Links",
"default": []
"title": "Output Links"
},
"graph_id": { "type": "string", "title": "Graph Id" },
"graph_version": { "type": "integer", "title": "Graph Version" },

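Taken together, these schema changes split graph representations into a lightweight listing model and full models that carry structure and computed schemas. A rough TypeScript sketch of the hierarchy as the descriptions above state it (not the generated types):

```ts
// Cheap listing model: no nodes/links, no computed schemas.
interface GraphMetaSketch {
  id: string;
  version: number;
  name: string;
  description: string;
  user_id: string;
  created_at: string; // date-time
}

// Full persisted model: structure plus computed I/O schemas.
interface GraphModelSketch extends GraphMetaSketch {
  nodes: unknown[]; // NodeModel[]
  links: unknown[]; // Link[]
  sub_graphs: unknown[]; // BaseGraph[]
  input_schema: Record<string, unknown>;
  output_schema: Record<string, unknown>;
  credentials_input_schema: Record<string, unknown>;
}

// GraphModelWithoutNodes keeps the computed fields but excludes
// nodes/links/sub_graphs from serialization (e.g. for the store).
```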
@@ -22,7 +22,7 @@ const isValidVideoUrl = (url: string): boolean => {
if (url.startsWith("data:video")) {
return true;
}
const videoExtensions = /\.(mp4|webm|ogg)$/i;
const videoExtensions = /\.(mp4|webm|ogg|mov|avi|mkv|m4v)$/i;
const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$/;
const cleanedUrl = url.split("?")[0];
return (
@@ -44,11 +44,29 @@ const isValidAudioUrl = (url: string): boolean => {
if (url.startsWith("data:audio")) {
return true;
}
const audioExtensions = /\.(mp3|wav)$/i;
const audioExtensions = /\.(mp3|wav|ogg|m4a|aac|flac)$/i;
const cleanedUrl = url.split("?")[0];
return isValidMediaUri(url) && audioExtensions.test(cleanedUrl);
};

const getVideoMimeType = (url: string): string => {
if (url.startsWith("data:video/")) {
const match = url.match(/^data:(video\/[^;]+)/);
return match?.[1] || "video/mp4";
}
const extension = url.split("?")[0].split(".").pop()?.toLowerCase();
const mimeMap: Record<string, string> = {
mp4: "video/mp4",
webm: "video/webm",
ogg: "video/ogg",
mov: "video/quicktime",
avi: "video/x-msvideo",
mkv: "video/x-matroska",
m4v: "video/mp4",
};
return mimeMap[extension || ""] || "video/mp4";
};

const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
const videoId = getYouTubeVideoId(videoUrl);
return (
@@ -63,7 +81,7 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
></iframe>
) : (
<video controls width="100%" height="315">
<source src={videoUrl} type="video/mp4" />
<source src={videoUrl} type={getVideoMimeType(videoUrl)} />
Your browser does not support the video tag.
</video>
)}

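`getVideoMimeType` strips any query string before reading the extension, so signed URLs still resolve to the right `<source type>`; a few illustrative inputs (URLs made up):

```ts
// Hypothetical inputs — results follow the helper above.
getVideoMimeType("https://cdn.example.com/clip.mov?sig=abc"); // "video/quicktime"
getVideoMimeType("data:video/webm;base64,AAAA"); // "video/webm"
getVideoMimeType("https://example.com/clip.xyz"); // "video/mp4" (fallback)
```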
@@ -102,18 +102,6 @@ export function ChatMessage({
}
}

function handleClarificationAnswers(answers: Record<string, string>) {
if (onSendMessage) {
const contextMessage = Object.entries(answers)
.map(([keyword, answer]) => `${keyword}: ${answer}`)
.join("\n");

onSendMessage(
`I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
);
}
}

const handleCopy = useCallback(
async function handleCopy() {
if (message.type !== "message") return;
@@ -162,6 +150,22 @@ export function ChatMessage({
.slice(index + 1)
.some((m) => m.type === "message" && m.role === "user");

const handleClarificationAnswers = (answers: Record<string, string>) => {
if (onSendMessage) {
// Iterate over questions (preserves original order) instead of answers
const contextMessage = message.questions
.map((q) => {
const answer = answers[q.keyword] || "";
return `> ${q.question}\n\n${answer}`;
})
.join("\n\n");

onSendMessage(
`**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
);
}
};

return (
<ClarificationQuestionsWidget
questions={message.questions}
@@ -346,6 +350,7 @@ export function ChatMessage({
toolId={message.toolId}
toolName={message.toolName}
result={message.result}
onSendMessage={onSendMessage}
/>
</div>
);

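Iterating over `message.questions` rather than `Object.entries(answers)` preserves the original question order and quotes each question above its answer. For two questions, the message sent would look like this (values illustrative):

```ts
// Hypothetical composed message for two answered questions:
const sent = `**Here are my answers:**

> What should the agent be called?

Newsletter Bot

> How often should it run?

Daily

Please proceed with creating the agent.`;
```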
@@ -3,7 +3,7 @@
import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
import { cn } from "@/lib/utils";
import { EyeSlash } from "@phosphor-icons/react";
import React from "react";
import React, { useState } from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";

@@ -48,7 +48,9 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
*/
function resolveWorkspaceUrl(src: string): string {
if (src.startsWith("workspace://")) {
const fileId = src.replace("workspace://", "");
// Strip MIME type fragment if present (e.g., workspace://abc123#video/mp4 → abc123)
const withoutPrefix = src.replace("workspace://", "");
const fileId = withoutPrefix.split("#")[0];
// Use the generated API URL helper to get the correct path
const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
// Route through the Next.js proxy (same pattern as customMutator for client-side)
@@ -65,13 +67,49 @@ function isWorkspaceImage(src: string | undefined): boolean {
return src?.includes("/workspace/files/") ?? false;
}

/**
* Renders a workspace video with controls and an optional "AI cannot see" badge.
*/
function WorkspaceVideo({
src,
aiCannotSee,
}: {
src: string;
aiCannotSee: boolean;
}) {
return (
<span className="relative my-2 inline-block">
<video
controls
className="h-auto max-w-full rounded-md border border-zinc-200"
preload="metadata"
>
<source src={src} />
Your browser does not support the video tag.
</video>
{aiCannotSee && (
<span
className="absolute bottom-2 right-2 flex items-center gap-1 rounded bg-black/70 px-2 py-1 text-xs text-white"
title="The AI cannot see this video"
>
<EyeSlash size={14} />
<span>AI cannot see this video</span>
</span>
)}
</span>
);
}

/**
* Custom image component that shows an indicator when the AI cannot see the image.
* Also handles the "video:" alt-text prefix convention to render <video> elements.
* For workspace files with unknown types, falls back to <video> if <img> fails.
* Note: src is already transformed by urlTransform, so workspace:// is now /api/workspace/...
*/
function MarkdownImage(props: Record<string, unknown>) {
const src = props.src as string | undefined;
const alt = props.alt as string | undefined;
const [imgFailed, setImgFailed] = useState(false);

const aiCannotSee = isWorkspaceImage(src);

@@ -84,6 +122,18 @@ function MarkdownImage(props: Record<string, unknown>) {
);
}

// Detect video: prefix in alt text (set by formatOutputValue in helpers.ts)
if (alt?.startsWith("video:")) {
return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
}

// If the <img> failed to load and this is a workspace file, try as video.
// This handles generic output keys like "file_out" where the MIME type
// isn't known from the key name alone.
if (imgFailed && aiCannotSee) {
return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
}

return (
<span className="relative my-2 inline-block">
{/* eslint-disable-next-line @next/next/no-img-element */}
@@ -92,6 +142,9 @@ function MarkdownImage(props: Record<string, unknown>) {
alt={alt || "Image"}
className="h-auto max-w-full rounded-md border border-zinc-200"
loading="lazy"
onError={() => {
if (aiCannotSee) setImgFailed(true);
}}
/>
{aiCannotSee && (
<span

@@ -73,6 +73,7 @@ export function MessageList({
key={index}
message={message}
prevMessage={messages[index - 1]}
onSendMessage={onSendMessage}
/>
);
}

@@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers";
export interface LastToolResponseProps {
message: ChatMessageData;
prevMessage: ChatMessageData | undefined;
onSendMessage?: (content: string) => void;
}

export function LastToolResponse({
message,
prevMessage,
onSendMessage,
}: LastToolResponseProps) {
if (message.type !== "tool_response") return null;

@@ -21,6 +23,7 @@ export function LastToolResponse({
toolId={message.toolId}
toolName={message.toolName}
result={message.result}
onSendMessage={onSendMessage}
/>
</div>
);

@@ -1,6 +1,8 @@
import { Progress } from "@/components/atoms/Progress/Progress";
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";

export interface ThinkingMessageProps {
className?: string;
@@ -11,6 +13,7 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
const timerRef = useRef<NodeJS.Timeout | null>(null);
const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
const progress = useAsymptoticProgress(showCoffeeMessage);

useEffect(() => {
if (timerRef.current === null) {
@@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
<AIChatBubble>
<div className="transition-all duration-500 ease-in-out">
{showCoffeeMessage ? (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee ☕️
</span>
<div className="flex flex-col items-center gap-3">
<div className="flex w-full max-w-[280px] flex-col gap-1.5">
<div className="flex items-center justify-between text-xs text-neutral-500">
<span>Working on it...</span>
<span>{Math.round(progress)}%</span>
</div>
<Progress value={progress} className="h-2 w-full" />
</div>
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee ☕️
</span>
</div>
) : showSlowLoader ? (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
Taking a bit more time...

@@ -0,0 +1,50 @@
import { useEffect, useRef, useState } from "react";

/**
* Hook that returns a progress value that starts fast and slows down,
* asymptotically approaching but never reaching the max value.
*
* Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
* This creates the "game loading bar" effect where:
* - 50% is reached at halfLifeSeconds
* - 75% is reached at 2 * halfLifeSeconds
* - 87.5% is reached at 3 * halfLifeSeconds
* - and so on...
*
* @param isActive - Whether the progress should be animating
* @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
* @param maxProgress - Maximum progress value to approach (default: 100)
* @param intervalMs - Update interval in milliseconds (default: 100)
* @returns Current progress value (0-maxProgress)
*/
export function useAsymptoticProgress(
isActive: boolean,
halfLifeSeconds = 30,
maxProgress = 100,
intervalMs = 100,
) {
const [progress, setProgress] = useState(0);
const elapsedTimeRef = useRef(0);

useEffect(() => {
if (!isActive) {
setProgress(0);
elapsedTimeRef.current = 0;
return;
}

const interval = setInterval(() => {
elapsedTimeRef.current += intervalMs / 1000;
// Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
// At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
const newProgress =
maxProgress *
(1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
setProgress(newProgress);
}, intervalMs);

return () => clearInterval(interval);
}, [isActive, halfLifeSeconds, maxProgress, intervalMs]);

return progress;
}
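With the default 30-second half-life, the hook's formula yields a bar that moves fast early and then crawls toward, but never reaches, 100%. A standalone check of the same formula (plain TypeScript, independent of React):

```ts
// progress(t) = max * (1 - 0.5^(t / halfLife))
function asymptoticProgress(seconds: number, halfLife = 30, max = 100): number {
  return max * (1 - Math.pow(0.5, seconds / halfLife));
}

asymptoticProgress(30); // 50
asymptoticProgress(60); // 75
asymptoticProgress(90); // 87.5
asymptoticProgress(300); // ≈ 99.9 — approaches but never hits 100
```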
@@ -0,0 +1,128 @@
"use client";

import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
CheckCircleIcon,
PencilLineIcon,
PlayIcon,
} from "@phosphor-icons/react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";

interface Props {
agentName: string;
libraryAgentId: string;
onSendMessage?: (content: string) => void;
}

export function AgentCreatedPrompt({
agentName,
libraryAgentId,
onSendMessage,
}: Props) {
// Fetch library agent eagerly so modal is ready when user clicks
const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent(
libraryAgentId,
{
query: {
enabled: !!libraryAgentId,
},
},
);

const libraryAgent =
libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null;

function handleRunWithPlaceholders() {
onSendMessage?.(
`Run the agent "${agentName}" with placeholder/example values so I can test it.`,
);
}

function handleRunCreated(execution: GraphExecutionMeta) {
onSendMessage?.(
`I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`,
);
}

function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
const scheduleInfo = schedule.cron
? `with cron schedule "${schedule.cron}"`
: "to run on the specified schedule";
onSendMessage?.(
`I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`,
);
}

return (
<AIChatBubble>
<div className="flex flex-col gap-4">
<div className="flex items-center gap-2">
<div className="flex h-8 w-8 items-center justify-center rounded-full bg-green-100">
<CheckCircleIcon
size={18}
weight="fill"
className="text-green-600"
/>
</div>
<div>
<Text variant="body-medium" className="text-neutral-900">
Agent Created Successfully
</Text>
<Text variant="small" className="text-neutral-500">
"{agentName}" is ready to test
</Text>
</div>
</div>

<div className="flex flex-col gap-2">
<Text variant="small-medium" className="text-neutral-700">
Ready to test?
</Text>
<div className="flex flex-wrap gap-2">
<Button
variant="outline"
size="small"
onClick={handleRunWithPlaceholders}
className="gap-2"
>
<PlayIcon size={16} />
Run with example values
</Button>
{libraryAgent ? (
<RunAgentModal
triggerSlot={
<Button variant="outline" size="small" className="gap-2">
<PencilLineIcon size={16} />
Run with my inputs
</Button>
}
agent={libraryAgent}
onRunCreated={handleRunCreated}
onScheduleCreated={handleScheduleCreated}
/>
) : (
<Button
variant="outline"
size="small"
loading={isLoading}
disabled
className="gap-2"
>
<PencilLineIcon size={16} />
Run with my inputs
</Button>
)}
</div>
<Text variant="small" className="text-neutral-500">
or just ask me
</Text>
</div>
</div>
</AIChatBubble>
);
}
@@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import type { ToolResult } from "@/types/chat";
import { WarningCircleIcon } from "@phosphor-icons/react";
import { AgentCreatedPrompt } from "./AgentCreatedPrompt";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import {
formatToolResponse,
getErrorMessage,
isAgentSavedResponse,
isErrorResponse,
} from "./helpers";

@@ -16,6 +18,7 @@ export interface ToolResponseMessageProps {
result?: ToolResult;
success?: boolean;
className?: string;
onSendMessage?: (content: string) => void;
}

export function ToolResponseMessage({
@@ -24,6 +27,7 @@ export function ToolResponseMessage({
result,
success: _success,
className,
onSendMessage,
}: ToolResponseMessageProps) {
if (isErrorResponse(result)) {
const errorMessage = getErrorMessage(result);
@@ -43,6 +47,18 @@ export function ToolResponseMessage({
);
}

// Check for agent_saved response - show special prompt
const agentSavedData = isAgentSavedResponse(result);
if (agentSavedData.isSaved) {
return (
<AgentCreatedPrompt
agentName={agentSavedData.agentName}
libraryAgentId={agentSavedData.libraryAgentId}
onSendMessage={onSendMessage}
/>
);
}

const formattedText = formatToolResponse(result, toolName);

return (

@@ -6,6 +6,43 @@ function stripInternalReasoning(content: string): string {
.trim();
}

export interface AgentSavedData {
isSaved: boolean;
agentName: string;
agentId: string;
libraryAgentId: string;
libraryAgentLink: string;
}

export function isAgentSavedResponse(result: unknown): AgentSavedData {
if (typeof result !== "object" || result === null) {
return {
isSaved: false,
agentName: "",
agentId: "",
libraryAgentId: "",
libraryAgentLink: "",
};
}
const response = result as Record<string, unknown>;
if (response.type === "agent_saved") {
return {
isSaved: true,
agentName: (response.agent_name as string) || "Agent",
agentId: (response.agent_id as string) || "",
libraryAgentId: (response.library_agent_id as string) || "",
libraryAgentLink: (response.library_agent_link as string) || "",
};
}
return {
isSaved: false,
agentName: "",
agentId: "",
libraryAgentId: "",
libraryAgentLink: "",
};
}

export function isErrorResponse(result: unknown): boolean {
if (typeof result === "string") {
const lower = result.toLowerCase();
@@ -39,69 +76,101 @@ export function getErrorMessage(result: unknown): string {

/**
* Check if a value is a workspace file reference.
* Format: workspace://{fileId} or workspace://{fileId}#{mimeType}
*/
function isWorkspaceRef(value: unknown): value is string {
return typeof value === "string" && value.startsWith("workspace://");
}

/**
* Check if a workspace reference appears to be an image based on common patterns.
* Since workspace refs don't have extensions, we check the context or assume image
* for certain block types.
*
* TODO: Replace keyword matching with MIME type encoded in workspace ref.
* e.g., workspace://abc123#image/png or workspace://abc123#video/mp4
* This would let frontend render correctly without fragile keyword matching.
* Extract MIME type from a workspace reference fragment.
* e.g., "workspace://abc123#video/mp4" → "video/mp4"
* Returns undefined if no fragment is present.
*/
function isLikelyImageRef(value: string, outputKey?: string): boolean {
if (!isWorkspaceRef(value)) return false;

// Check output key name for video-related hints (these are NOT images)
const videoKeywords = ["video", "mp4", "mov", "avi", "webm", "movie", "clip"];
if (outputKey) {
const lowerKey = outputKey.toLowerCase();
if (videoKeywords.some((kw) => lowerKey.includes(kw))) {
return false;
}
}

// Check output key name for image-related hints
const imageKeywords = [
"image",
"img",
"photo",
"picture",
"thumbnail",
"avatar",
"icon",
"screenshot",
];
if (outputKey) {
const lowerKey = outputKey.toLowerCase();
if (imageKeywords.some((kw) => lowerKey.includes(kw))) {
return true;
}
}

// Default to treating workspace refs as potential images
// since that's the most common case for generated content
return true;
function getWorkspaceMimeType(value: string): string | undefined {
const hashIndex = value.indexOf("#");
if (hashIndex === -1) return undefined;
return value.slice(hashIndex + 1) || undefined;
}

/**
* Format a single output value, converting workspace refs to markdown images.
* Determine the media category of a workspace ref or data URI.
* Uses the MIME type fragment on workspace refs when available,
* falls back to output key keyword matching for older refs without it.
*/
function formatOutputValue(value: unknown, outputKey?: string): string {
if (isWorkspaceRef(value) && isLikelyImageRef(value, outputKey)) {
// Format as markdown image
return ``;
function getMediaCategory(
value: string,
outputKey?: string,
): "video" | "image" | "audio" | "unknown" {
// Data URIs carry their own MIME type
if (value.startsWith("data:video/")) return "video";
if (value.startsWith("data:image/")) return "image";
if (value.startsWith("data:audio/")) return "audio";

// Workspace refs: prefer MIME type fragment
if (isWorkspaceRef(value)) {
const mime = getWorkspaceMimeType(value);
if (mime) {
if (mime.startsWith("video/")) return "video";
if (mime.startsWith("image/")) return "image";
if (mime.startsWith("audio/")) return "audio";
return "unknown";
}

// Fallback: keyword matching on output key for older refs without fragment
if (outputKey) {
const lowerKey = outputKey.toLowerCase();

const videoKeywords = [
"video",
"mp4",
"mov",
"avi",
"webm",
"movie",
"clip",
];
if (videoKeywords.some((kw) => lowerKey.includes(kw))) return "video";

const imageKeywords = [
"image",
"img",
"photo",
"picture",
"thumbnail",
"avatar",
"icon",
"screenshot",
];
if (imageKeywords.some((kw) => lowerKey.includes(kw))) return "image";
}

// Default to image for backward compatibility
return "image";
}

return "unknown";
}

/**
* Format a single output value, converting workspace refs to markdown images/videos.
* Videos use a "video:" alt-text prefix so the MarkdownContent renderer can
* distinguish them from images and render a <video> element.
*/
function formatOutputValue(value: unknown, outputKey?: string): string {
if (typeof value === "string") {
// Check for data URIs (images)
if (value.startsWith("data:image/")) {
const category = getMediaCategory(value, outputKey);

if (category === "video") {
// Format with "video:" prefix so MarkdownContent renders <video>
return ``;
}

if (category === "image") {
return ``;
}

// For audio, unknown workspace refs, data URIs, etc. - return as-is
return value;
}


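The `#mimeType` fragment makes media detection deterministic where the old keyword heuristic had to guess; the keyword path survives only as a fallback for refs without a fragment. Some illustrative inputs (file IDs made up):

```ts
// Hypothetical refs — results follow getWorkspaceMimeType/getMediaCategory above.
getWorkspaceMimeType("workspace://abc123#video/mp4"); // "video/mp4"
getWorkspaceMimeType("workspace://abc123"); // undefined (no fragment)

getMediaCategory("workspace://abc123#audio/mpeg"); // "audio"
getMediaCategory("workspace://abc123", "thumbnail_out"); // "image" (keyword fallback)
getMediaCategory("workspace://abc123"); // "image" (legacy default)
```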
@@ -26,6 +26,7 @@ export const providerIcons: Partial<
|
||||
nvidia: fallbackIcon,
|
||||
discord: FaDiscord,
|
||||
d_id: fallbackIcon,
|
||||
elevenlabs: fallbackIcon,
|
||||
google_maps: FaGoogle,
|
||||
jina: fallbackIcon,
|
||||
ideogram: fallbackIcon,
|
||||
|
||||
@@ -4,7 +4,9 @@ import { loadScript } from "@/services/scripts/scripts";
|
||||
export async function loadGoogleAPIPicker(): Promise<void> {
|
||||
validateWindow();
|
||||
|
||||
await loadScript("https://apis.google.com/js/api.js");
|
||||
await loadScript("https://apis.google.com/js/api.js", {
|
||||
referrerPolicy: "no-referrer-when-downgrade",
|
||||
});
|
||||
|
||||
const googleAPI = window.gapi;
|
||||
if (!googleAPI) {
|
||||
@@ -27,7 +29,9 @@ export async function loadGoogleIdentityServices(): Promise<void> {
|
||||
throw new Error("Google Identity Services cannot load on server");
|
||||
}
|
||||
|
||||
await loadScript("https://accounts.google.com/gsi/client");
|
||||
await loadScript("https://accounts.google.com/gsi/client", {
|
||||
referrerPolicy: "no-referrer-when-downgrade",
|
||||
});
|
||||
|
||||
const google = window.google;
|
||||
if (!google?.accounts?.oauth2) {
|
||||
|
||||
@@ -47,7 +47,7 @@ export function Navbar() {
|
||||
|
||||
const actualLoggedInLinks = [
|
||||
{ name: "Home", href: homeHref },
|
||||
...(isChatEnabled === true ? [{ name: "Tasks", href: "/library" }] : []),
|
||||
...(isChatEnabled === true ? [{ name: "Agents", href: "/library" }] : []),
|
||||
...loggedInLinks,
|
||||
];
|
||||
|
||||
|
||||
@@ -362,25 +362,14 @@ export type GraphMeta = {
  user_id: UserID;
  version: number;
  is_active: boolean;
  created_at: Date;
  name: string;
  description: string;
  instructions?: string | null;
  recommended_schedule_cron: string | null;
  forked_from_id?: GraphID | null;
  forked_from_version?: number | null;
  input_schema: GraphInputSchema;
  output_schema: GraphOutputSchema;
  credentials_input_schema: CredentialsInputSchema;
} & (
  | {
      has_external_trigger: true;
      trigger_setup_info: GraphTriggerInfo;
    }
  | {
      has_external_trigger: false;
      trigger_setup_info: null;
    }
);
};

export type GraphID = Brand<string, "GraphID">;

@@ -447,11 +436,22 @@ export type GraphTriggerInfo = {

/* Mirror of backend/data/graph.py:Graph */
export type Graph = GraphMeta & {
  created_at: Date;
  nodes: Node[];
  links: Link[];
  sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
};
  input_schema: GraphInputSchema;
  output_schema: GraphOutputSchema;
  credentials_input_schema: CredentialsInputSchema;
} & (
  | {
      has_external_trigger: true;
      trigger_setup_info: GraphTriggerInfo;
    }
  | {
      has_external_trigger: false;
      trigger_setup_info: null;
    }
);

export type GraphUpdateable = Omit<
  Graph,
@@ -1,15 +1,12 @@
[flake8]
max-line-length = 88
extend-ignore = E203
exclude =
    .tox,
    __pycache__,
    *.pyc,
    .env,
    venv*,
    .venv,
    reports,
    dist,
    data,
    .benchmark_workspaces,
    .autogpt,
    .env
    venv*/*,
    .venv/*,
    reports/*,
    dist/*,
    data/*,
@@ -1,291 +0,0 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

AutoGPT Classic is an experimental, **unsupported** project demonstrating autonomous GPT-4 operation. Dependencies will not be updated, and the codebase contains known vulnerabilities. This is preserved for educational/historical purposes.

## Repository Structure

```
classic/
├── pyproject.toml          # Single consolidated Poetry project
├── poetry.lock             # Single lock file
├── forge/
│   └── forge/              # Core agent framework package
├── original_autogpt/
│   └── autogpt/            # AutoGPT agent package
├── direct_benchmark/
│   └── direct_benchmark/   # Benchmark harness package
└── benchmark/              # Challenge definitions (data, not code)
```

All packages are managed by a single `pyproject.toml` at the classic/ root.

## Common Commands

### Setup & Install
```bash
# Install everything from classic/ directory
cd classic
poetry install
```

### Running Agents
```bash
# Run forge agent
poetry run python -m forge

# Run original autogpt server
poetry run serve --debug

# Run autogpt CLI
poetry run autogpt
```

Agents run on `http://localhost:8000` by default.

### Benchmarking
```bash
# Run benchmarks
poetry run direct-benchmark run

# Run specific strategies and models
poetry run direct-benchmark run \
  --strategies one_shot,rewoo \
  --models claude \
  --parallel 4

# Run a single test
poetry run direct-benchmark run --tests ReadFile

# List available commands
poetry run direct-benchmark --help
```

### Testing
```bash
poetry run pytest                          # All tests
poetry run pytest forge/tests/             # Forge tests only
poetry run pytest original_autogpt/tests/  # AutoGPT tests only
poetry run pytest -k test_name             # Single test by name
poetry run pytest path/to/test.py          # Specific test file
poetry run pytest --cov                    # With coverage
```

### Linting & Formatting

Run from the classic/ directory:

```bash
# Format everything (recommended to run together)
poetry run black . && poetry run isort .

# Check formatting (CI-style, no changes)
poetry run black --check . && poetry run isort --check-only .

# Lint
poetry run flake8   # Style linting

# Type check
poetry run pyright  # Type checking (some errors are expected in infrastructure code)
```

Note: Always run linters over the entire directory, not specific files, for best results.

## Architecture

### Forge (Core Framework)
The `forge` package is the foundation that other components depend on:
- `forge/agent/` - Agent implementation and protocols
- `forge/llm/` - Multi-provider LLM integrations (OpenAI, Anthropic, Groq, LiteLLM)
- `forge/components/` - Reusable agent components
- `forge/file_storage/` - File system abstraction
- `forge/config/` - Configuration management

### Original AutoGPT
- `original_autogpt/autogpt/app/` - CLI application entry points
- `original_autogpt/autogpt/agents/` - Agent implementations
- `original_autogpt/autogpt/agent_factory/` - Agent creation logic

### Direct Benchmark
Benchmark harness for testing agent performance:
- `direct_benchmark/direct_benchmark/` - CLI and harness code
- `benchmark/agbenchmark/challenges/` - Test cases organized by category (code, retrieval, data, etc.)
- Reports generated in `direct_benchmark/reports/`

### Package Structure
All three packages are included in a single Poetry project. Imports are fully qualified:
- `from forge.agent.base import BaseAgent`
- `from autogpt.agents.agent import Agent`
- `from direct_benchmark.harness import BenchmarkHarness`

## Code Style

- Python 3.12 target
- Line length: 88 characters (Black default)
- Black for formatting, isort for imports (profile="black")
- Type hints with Pyright checking

## Testing Patterns

- Async support via pytest-asyncio
- Fixtures defined in `conftest.py` files provide: `tmp_project_root`, `storage`, `config`, `llm_provider`, `agent`
- Tests requiring API keys (OPENAI_API_KEY, ANTHROPIC_API_KEY) will skip if not set

## Environment Setup

Copy `.env.example` to `.env` in the relevant directory and add your API keys:
```bash
cp .env.example .env
# Edit .env with your OPENAI_API_KEY, etc.
```

## Workspaces

Agents operate within a **workspace** - a directory containing all agent data and files. The workspace root defaults to the current working directory.

### Workspace Structure

```
{workspace}/
├── .autogpt/
│   ├── autogpt.yaml          # Workspace-level permissions
│   ├── ap_server.db          # Agent Protocol database (server mode)
│   └── agents/
│       └── AutoGPT-{agent_id}/
│           ├── state.json        # Agent profile, directives, action history
│           ├── permissions.yaml  # Agent-specific permission overrides
│           └── workspace/        # Agent's sandboxed working directory
```

### Key Concepts

- **Multiple agents** can coexist in the same workspace (each gets its own subdirectory; see the sketch after this list)
- **File access** is sandboxed to the agent's `workspace/` directory by default
- **State persistence** - agent state saves to `state.json` and survives across sessions
- **Storage backends** - supports local filesystem, S3, and GCS (via `FILE_STORAGE_BACKEND` env var)
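
The layout above is regular enough to walk programmatically. A minimal illustrative sketch (not part of the codebase; it assumes only the `AutoGPT-{agent_id}` directory naming shown in the tree):

```python
from pathlib import Path


def list_agents(workspace: Path) -> list[str]:
    """Return agent IDs found under {workspace}/.autogpt/agents/."""
    agents_dir = workspace / ".autogpt" / "agents"
    if not agents_dir.is_dir():
        return []
    return [
        d.name.removeprefix("AutoGPT-")  # assumed "AutoGPT-{agent_id}" convention
        for d in sorted(agents_dir.iterdir())
        if d.is_dir() and d.name.startswith("AutoGPT-")
    ]


# The workspace root defaults to the current working directory:
print(list_agents(Path.cwd()))
```
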

### Specifying a Workspace

```bash
# Default: uses current directory
cd /path/to/my/project && poetry run autogpt

# Or specify explicitly via CLI (if supported)
poetry run autogpt --workspace /path/to/workspace
```

## Settings Location

Configuration uses a **layered system** with three levels (in order of precedence):

### 1. Environment Variables (Global)

Loaded from `.env` file in the working directory:

```bash
# Required
OPENAI_API_KEY=sk-...

# Optional LLM settings
SMART_LLM=gpt-4o        # Model for complex reasoning
FAST_LLM=gpt-4o-mini    # Model for simple tasks
EMBEDDING_MODEL=text-embedding-3-small

# Optional search providers (for web search component)
TAVILY_API_KEY=tvly-...
SERPER_API_KEY=...
GOOGLE_API_KEY=...
GOOGLE_CUSTOM_SEARCH_ENGINE_ID=...

# Optional infrastructure
LOG_LEVEL=DEBUG                     # DEBUG, INFO, WARNING, ERROR
DATABASE_STRING=sqlite:///agent.db  # Agent Protocol database
PORT=8000                           # Server port
FILE_STORAGE_BACKEND=local          # local, s3, or gcs
```

### 2. Workspace Settings (`{workspace}/.autogpt/autogpt.yaml`)

Workspace-wide permissions that apply to **all agents** in this workspace:

```yaml
allow:
  - read_file({workspace}/**)
  - write_to_file({workspace}/**)
  - list_folder({workspace}/**)
  - web_search(*)

deny:
  - read_file(**.env)
  - read_file(**.env.*)
  - read_file(**.key)
  - read_file(**.pem)
  - execute_shell(rm -rf:*)
  - execute_shell(sudo:*)
```

Auto-generated with sensible defaults if missing.

### 3. Agent Settings (`{workspace}/.autogpt/agents/{id}/permissions.yaml`)

Agent-specific permission overrides:

```yaml
allow:
  - execute_python(*)
  - web_search(*)

deny:
  - execute_shell(*)
```

## Permissions

The permission system uses **pattern matching** with a **first-match-wins** evaluation order.

### Permission Check Order

1. Agent deny list → **Block**
2. Workspace deny list → **Block**
3. Agent allow list → **Allow**
4. Workspace allow list → **Allow**
5. Session denied list → **Block** (commands denied during this session)
6. **Prompt user** → Interactive approval (if in interactive mode)

### Pattern Syntax

Format: `command_name(glob_pattern)`

| Pattern | Description |
|---------|-------------|
| `read_file({workspace}/**)` | Read any file in workspace (recursive) |
| `write_to_file({workspace}/*.txt)` | Write only .txt files in workspace root |
| `execute_shell(python:**)` | Execute Python commands only |
| `execute_shell(git:*)` | Execute any git command |
| `web_search(*)` | Allow all web searches |

Special tokens (used by the matcher sketched after this list):
- `{workspace}` - Replaced with actual workspace path
- `**` - Matches any path including `/`
- `*` - Matches any characters except `/`
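
To make first-match-wins concrete, here is a minimal, hypothetical checker (not the real implementation; it approximates the glob semantics with `fnmatch`, which does not distinguish `*` from `**`):

```python
import fnmatch


def _matches(call: str, rule: str, workspace: str) -> bool:
    """Match a "command(args)" call against a "command(glob)" rule."""
    rule = rule.replace("{workspace}", workspace)
    rule_cmd, _, rule_glob = rule.partition("(")
    call_cmd, _, call_args = call.partition("(")
    return rule_cmd == call_cmd and fnmatch.fnmatch(
        call_args.rstrip(")"), rule_glob.rstrip(")")
    )


def check(call: str, agent: dict, ws: dict, workspace: str = "/ws") -> str:
    """Walk the layered deny/allow lists in the order listed above."""
    layers = [
        (agent.get("deny", []), "block"),   # 1. agent deny
        (ws.get("deny", []), "block"),      # 2. workspace deny
        (agent.get("allow", []), "allow"),  # 3. agent allow
        (ws.get("allow", []), "allow"),     # 4. workspace allow
    ]
    for rules, verdict in layers:
        if any(_matches(call, rule, workspace) for rule in rules):
            return verdict
    return "prompt"  # 6. fall through to interactive approval


agent = {"deny": ["execute_shell(*)"]}
ws = {"allow": ["read_file({workspace}/**)"]}
assert check("execute_shell(ls)", agent, ws) == "block"
assert check("read_file(/ws/notes.txt)", agent, ws) == "allow"
assert check("web_search(cats)", agent, ws) == "prompt"
```

(The session denied list, step 5, is omitted here for brevity.)
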

### Interactive Approval Scopes

When prompted for permission, users can choose:

| Scope | Effect |
|-------|--------|
| **Once** | Allow this one time only (not saved) |
| **Agent** | Always allow for this agent (saves to agent `permissions.yaml`) |
| **Workspace** | Always allow for all agents (saves to `autogpt.yaml`) |
| **Deny** | Deny this command (saves to appropriate deny list) |

### Default Security

Out of the box, the following are **denied by default**:
- Reading sensitive files (`.env`, `.key`, `.pem`)
- Destructive shell commands (`rm -rf`, `sudo`)
- Operations outside the workspace directory
@@ -2,7 +2,7 @@
 ARG BUILD_TYPE=dev

 # Use an official Python base image from the Docker Hub
-FROM python:3.12-slim AS autogpt-base
+FROM python:3.10-slim AS autogpt-base

 # Install browsers
 RUN apt-get update && apt-get install -y \

@@ -34,6 +34,9 @@ COPY original_autogpt/pyproject.toml original_autogpt/poetry.lock ./
 # Include forge so it can be used as a path dependency
 COPY forge/ ../forge

+# Include frontend
+COPY frontend/ ../frontend
+
 # Set the entrypoint
 ENTRYPOINT ["poetry", "run", "autogpt"]
 CMD []
@@ -4,7 +4,7 @@ AutoGPT Classic was an experimental project to demonstrate autonomous GPT-4 oper

## Project Status

-**This project is unsupported, and dependencies will not be updated.** It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform).
+⚠️ **This project is unsupported, and dependencies will not be updated. It was an experiment that has concluded its initial research phase. If you want to use AutoGPT, you should use the [AutoGPT Platform](/autogpt_platform)**

For those interested in autonomous AI agents, we recommend exploring more actively maintained alternatives or referring to this codebase for educational purposes only.

@@ -16,171 +16,37 @@ AutoGPT Classic was one of the first implementations of autonomous AI agents - A
- Learn from the results and adjust its approach
- Chain multiple actions together to achieve an objective

## Key Features

- 🔄 Autonomous task chaining
- 🛠 Tool and API integration capabilities
- 💾 Memory management for context retention
- 🔍 Web browsing and information gathering
- 📝 File operations and content creation
- 🔄 Self-prompting and task breakdown

## Structure

```
classic/
├── pyproject.toml       # Single consolidated Poetry project
├── poetry.lock          # Single lock file
├── forge/               # Core autonomous agent framework
├── original_autogpt/    # Original implementation
├── direct_benchmark/    # Benchmark harness
└── benchmark/           # Challenge definitions (data)
```
The project is organized into several key components:
- `/benchmark` - Performance testing tools
- `/forge` - Core autonomous agent framework
- `/frontend` - User interface components
- `/original_autogpt` - Original implementation

## Getting Started

### Prerequisites

- Python 3.12+
- [Poetry](https://python-poetry.org/docs/#installation)

### Installation
While this project is no longer actively maintained, you can still explore the codebase:

1. Clone the repository:
```bash
# Clone the repository
git clone https://github.com/Significant-Gravitas/AutoGPT.git
cd classic

# Install everything
poetry install
```

### Configuration

Configuration uses a layered system:

1. **Environment variables** (`.env` file)
2. **Workspace settings** (`.autogpt/autogpt.yaml`)
3. **Agent settings** (`.autogpt/agents/{id}/permissions.yaml`)

Copy the example environment file and add your API keys:

```bash
cp .env.example .env
```

Key environment variables:
```bash
# Required
OPENAI_API_KEY=sk-...

# Optional LLM settings
SMART_LLM=gpt-4o        # Model for complex reasoning
FAST_LLM=gpt-4o-mini    # Model for simple tasks

# Optional search providers
TAVILY_API_KEY=tvly-...
SERPER_API_KEY=...

# Optional infrastructure
LOG_LEVEL=DEBUG
PORT=8000
FILE_STORAGE_BACKEND=local  # local, s3, or gcs
```

### Running

All commands run from the `classic/` directory:

```bash
# Run forge agent
poetry run python -m forge

# Run original autogpt server
poetry run serve --debug

# Run autogpt CLI
poetry run autogpt
```

Agents run on `http://localhost:8000` by default.

### Benchmarking

```bash
poetry run direct-benchmark run
```

### Testing

```bash
poetry run pytest                          # All tests
poetry run pytest forge/tests/             # Forge tests only
poetry run pytest original_autogpt/tests/  # AutoGPT tests only
```

## Workspaces

Agents operate within a **workspace** directory that contains all agent data and files:

```
{workspace}/
├── .autogpt/
│   ├── autogpt.yaml          # Workspace-level permissions
│   ├── ap_server.db          # Agent Protocol database (server mode)
│   └── agents/
│       └── AutoGPT-{agent_id}/
│           ├── state.json        # Agent profile, directives, history
│           ├── permissions.yaml  # Agent-specific permissions
│           └── workspace/        # Agent's sandboxed working directory
```

- The workspace defaults to the current working directory
- Multiple agents can coexist in the same workspace
- Agent file access is sandboxed to their `workspace/` subdirectory
- State persists across sessions via `state.json`

## Permissions

AutoGPT uses a **layered permission system** with pattern matching:

### Permission Files

| File | Scope | Location |
|------|-------|----------|
| `autogpt.yaml` | All agents in workspace | `.autogpt/autogpt.yaml` |
| `permissions.yaml` | Single agent | `.autogpt/agents/{id}/permissions.yaml` |

### Permission Format

```yaml
allow:
  - read_file({workspace}/**)      # Read any file in workspace
  - write_to_file({workspace}/**)  # Write any file in workspace
  - web_search(*)                  # All web searches

deny:
  - read_file(**.env)        # Block .env files
  - execute_shell(sudo:*)    # Block sudo commands
```

### Check Order (First Match Wins)

1. Agent deny → Block
2. Workspace deny → Block
3. Agent allow → Allow
4. Workspace allow → Allow
5. Prompt user → Interactive approval

### Interactive Approval

When prompted, users can approve commands with different scopes:
- **Once** - Allow this one time only
- **Agent** - Always allow for this agent
- **Workspace** - Always allow for all agents
- **Deny** - Block this command

### Default Security

Denied by default:
- Sensitive files (`.env`, `.key`, `.pem`)
- Destructive commands (`rm -rf`, `sudo`)
- Operations outside the workspace

## Security Notice

This codebase has **known vulnerabilities** and issues with its dependencies. It will not be updated to new dependencies. Use for educational purposes only.
2. Review the documentation:
- For reference, see the [documentation](https://docs.agpt.co). You can browse at the same point in time as this commit so the docs don't change.
- Check `CLI-USAGE.md` for command-line interface details
- Refer to `TROUBLESHOOTING.md` for common issues

## License

@@ -189,3 +55,27 @@ This project segment is licensed under the MIT License - see the [LICENSE](LICEN
## Documentation

Please refer to the [documentation](https://docs.agpt.co) for more detailed information about the project's architecture and concepts.
You can browse at the same point in time as this commit so the docs don't change.

## Historical Impact

AutoGPT Classic played a significant role in advancing the field of autonomous AI agents:
- Demonstrated practical implementation of AI autonomy
- Inspired numerous derivative projects and research
- Contributed to the development of AI agent architectures
- Helped identify key challenges in AI autonomy

## Security Notice

If you're studying this codebase, please understand this has KNOWN vulnerabilities and issues with its dependencies. It will not be updated to new dependencies.

## Community & Support

While active development has concluded:
- The codebase remains available for study and reference
- Historical discussions can be found in project issues
- Related research and developments continue in the broader AI agent community

## Acknowledgments

Thanks to all contributors who participated in this experimental project and helped advance the field of autonomous AI agents.
27 classic/direct_benchmark/.gitignore vendored
@@ -1,27 +0,0 @@
# Benchmark outputs
reports/
.benchmark_workspaces/

# Python
__pycache__/
*.py[cod]
*$py.class
*.egg-info/
.eggs/
dist/
build/

# Environment
.env
.venv/
venv/

# IDE
.idea/
.vscode/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db
@@ -1,297 +0,0 @@
# CLAUDE.md - Direct Benchmark Harness

This file provides guidance to Claude Code when working with the direct benchmark harness.

## Overview

The Direct Benchmark Harness is a high-performance testing framework for AutoGPT that directly instantiates agents without HTTP server overhead. It enables parallel execution of multiple strategy/model configurations.

## Quick Reference

All commands run from the `classic/` directory (parent of this directory):

```bash
# Install (one-time setup)
cd classic
poetry install

# Run benchmarks
poetry run direct-benchmark run

# Run specific strategies and models
poetry run direct-benchmark run \
  --strategies one_shot,rewoo \
  --models claude,openai \
  --parallel 4

# Run a single test
poetry run direct-benchmark run \
  --strategies one_shot \
  --tests ReadFile

# List available challenges
poetry run direct-benchmark list-challenges

# List model presets
poetry run direct-benchmark list-models

# List strategies
poetry run direct-benchmark list-strategies
```

## CLI Options

### Run Command

| Option | Short | Description |
|--------|-------|-------------|
| `--strategies` | `-s` | Comma-separated strategies (one_shot, rewoo, plan_execute, reflexion, tree_of_thoughts) |
| `--models` | `-m` | Comma-separated model presets (claude, openai, etc.) |
| `--categories` | `-c` | Filter by challenge categories |
| `--skip-category` | `-S` | Exclude categories |
| `--tests` | `-t` | Filter by test names |
| `--attempts` | `-N` | Number of times to run each challenge |
| `--parallel` | `-p` | Maximum parallel runs (default: 4) |
| `--timeout` | | Per-challenge timeout in seconds (default: 300) |
| `--cutoff` | | Alias for --timeout |
| `--no-cutoff` | `--nc` | Disable time limit |
| `--max-steps` | | Maximum steps per challenge (default: 50) |
| `--maintain` | | Run only regression tests |
| `--improve` | | Run only non-regression tests |
| `--explore` | | Run only never-beaten challenges |
| `--no-dep` | | Ignore challenge dependencies |
| `--workspace` | | Workspace root directory |
| `--challenges-dir` | | Path to challenges directory |
| `--reports-dir` | | Path to reports directory |
| `--keep-answers` | | Keep answer files for debugging |
| `--quiet` | `-q` | Minimal output |
| `--verbose` | `-v` | Detailed per-challenge output |
| `--json` | | JSON output for CI/scripting |
| `--ci` | | CI mode: no live display, shows completion blocks (auto-enabled when CI env var is set or not a TTY) |
| `--fresh` | | Clear all saved state and start fresh (don't resume) |
| `--retry-failures` | | Re-run only the challenges that failed in the previous run |
| `--reset-strategy` | | Reset saved results for a specific strategy (can repeat) |
| `--reset-model` | | Reset saved results for a specific model (can repeat) |
| `--reset-challenge` | | Reset saved results for a specific challenge (can repeat) |
| `--debug` | | Enable debug output |

### State Management Commands
```bash
# Show current state
poetry run direct-benchmark state show

# Clear all state
poetry run direct-benchmark state clear

# Reset specific strategy/model/challenge
poetry run direct-benchmark state reset --strategy reflexion
poetry run direct-benchmark state reset --model claude-thinking-25k
poetry run direct-benchmark state reset --challenge ThreeSum
```

## Available Strategies

- `one_shot` - Single-pass reasoning (default)
- `rewoo` - Reasoning with observations
- `plan_execute` - Plan then execute
- `reflexion` - Self-reflection loop
- `tree_of_thoughts` - Multiple reasoning paths

## Available Model Presets

### Claude
- `claude` - sonnet-4 smart, haiku fast
- `claude-smart` - sonnet-4 for both
- `claude-fast` - haiku for both
- `claude-opus` - opus smart, sonnet fast
- `claude-opus-only` - opus for both

### Claude with Extended Thinking
- `claude-thinking-10k` - 10k thinking tokens
- `claude-thinking-25k` - 25k thinking tokens
- `claude-thinking-50k` - 50k thinking tokens
- `claude-opus-thinking` - opus with 25k thinking
- `claude-opus-thinking-50k` - opus with 50k thinking

### OpenAI
- `openai` - gpt-4o smart, gpt-4o-mini fast
- `openai-smart` - gpt-4o for both
- `openai-fast` - gpt-4o-mini for both
- `gpt5` - gpt-5 smart, gpt-4o fast
- `gpt5-only` - gpt-5 for both

### OpenAI Reasoning Models
- `o1`, `o1-mini` - o1 variants
- `o1-low`, `o1-medium`, `o1-high` - o1 with reasoning effort
- `o3-low`, `o3-medium`, `o3-high` - o3 with reasoning effort
- `gpt5-low`, `gpt5-medium`, `gpt5-high` - gpt-5 with reasoning effort

## Directory Structure

```
direct_benchmark/
├── pyproject.toml          # Poetry config
├── README.md               # User documentation
├── CLAUDE.md               # This file
├── .gitignore
└── direct_benchmark/
    ├── __init__.py
    ├── __main__.py          # CLI entry point
    ├── models.py            # Pydantic models, presets
    ├── harness.py           # Main orchestrator
    ├── runner.py            # AgentRunner (single agent lifecycle)
    ├── parallel.py          # ParallelExecutor (concurrent runs)
    ├── challenge_loader.py  # Load challenges from JSON
    ├── evaluator.py         # Evaluate outputs vs ground truth
    ├── report.py            # Report generation
    └── ui.py                # Rich UI components
```

## Architecture

### Execution Flow

```
CLI args → HarnessConfig
    ↓
BenchmarkHarness.run()
    ↓
ChallengeLoader.load_all() → list[Challenge]
    ↓
ParallelExecutor.execute_matrix(configs × challenges × attempts)
    ↓
[Parallel with semaphore limiting to N concurrent]
    ↓
AgentRunner.run_challenge():
  1. Create temp workspace
  2. Copy input artifacts to agent workspace
  3. Create AppConfig with strategy/model
  4. create_agent() - direct instantiation
  5. Run agent loop until finish/timeout
  6. Collect output files
    ↓
Evaluator.evaluate() - check against ground truth
    ↓
ReportGenerator - write reports
```
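
The parallel stage above boils down to an asyncio semaphore. A minimal sketch (hypothetical names; the real logic lives in `ParallelExecutor.execute_matrix()` and also handles attempts, progress callbacks, and error reporting):

```python
import asyncio


async def execute_matrix(configs, challenges, run_one, parallel: int = 4):
    """Run every (config, challenge) pair, at most `parallel` at a time.

    `run_one(config, challenge)` stands in for AgentRunner.run_challenge().
    Multiple attempts per challenge are omitted for brevity.
    """
    semaphore = asyncio.Semaphore(parallel)

    async def bounded(config, challenge):
        async with semaphore:  # the "semaphore limiting to N concurrent" step
            return await run_one(config, challenge)

    tasks = [
        asyncio.create_task(bounded(cfg, ch))
        for cfg in configs
        for ch in challenges
    ]
    return await asyncio.gather(*tasks)
```
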

### Key Components

**AgentRunner** (`runner.py`)
- Manages single agent lifecycle for one challenge
- Creates isolated temp workspace per run
- Copies input artifacts to `{workspace}/.autogpt/agents/{agent_id}/workspace/`
- Instantiates agent directly via `create_agent()`
- Runs agent loop: `propose_action()` → `execute()` until finish/timeout

**ParallelExecutor** (`parallel.py`)
- Manages concurrent execution with asyncio semaphore
- Supports multiple attempts per challenge
- Reports progress via callbacks

**Evaluator** (`evaluator.py`) - see the sketch after this list
- String matching (should_contain/should_not_contain)
- Python script execution
- Pytest execution
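
A minimal sketch of the string-matching mode (illustrative only; the real evaluator also runs Python scripts and pytest against the collected output files):

```python
def evaluate_strings(output, should_contain=(), should_not_contain=()):
    """Pass iff every should_contain item appears and no should_not_contain item does."""
    return all(s in output for s in should_contain) and not any(
        s in output for s in should_not_contain
    )


assert evaluate_strings("result: 42", should_contain=["42"], should_not_contain=["error"])
```
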

**ReportGenerator** (`report.py`)
- Per-config `report.json` files (compatible with agbenchmark format)
- Comparison reports across all configs

## Report Format

Reports are generated in `./reports/` with format:
```
reports/
├── {timestamp}_{strategy}_{model}/
│   └── report.json
└── strategy_comparison_{timestamp}.json
```
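
Since each `report.json` carries a `tests` → `results` → `success` structure (the same fields the failure-analysis tool later in this changeset reads), a pass rate can be computed with a few lines. A short illustrative sketch, assuming that structure:

```python
import json
from pathlib import Path


def success_rate(report_path: Path) -> float:
    """Percentage of tests whose first result succeeded."""
    data = json.loads(report_path.read_text())
    results = [t["results"][0] for t in data["tests"].values() if t.get("results")]
    if not results:
        return 0.0
    return 100 * sum(1 for r in results if r.get("success")) / len(results)
```
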

## Dependencies

- `autogpt-forge` - Core agent framework
- `autogpt` - Original AutoGPT agent
- `click` - CLI framework
- `pydantic` - Data models
- `rich` - Terminal UI

## Key Differences from agbenchmark

| agbenchmark | direct_benchmark |
|-------------|-----------------|
| `subprocess.Popen` + HTTP server | Direct `create_agent()` |
| HTTP/REST via Agent Protocol | Direct `propose_action()`/`execute()` |
| Sequential (one config at a time) | Parallel via asyncio semaphore |
| Port-based isolation | Workspace-based isolation |
| `agbenchmark run` CLI | Direct JSON parsing |

## Common Tasks

### Run Full Benchmark Suite
```bash
poetry run direct-benchmark run \
  --strategies one_shot,rewoo,plan_execute \
  --models claude \
  --parallel 8
```

### Compare Strategies
```bash
poetry run direct-benchmark run \
  --strategies one_shot,rewoo,plan_execute,reflexion \
  --models claude \
  --tests ReadFile,WriteFile,ThreeSum
```

### Debug a Failing Test
```bash
poetry run direct-benchmark run \
  --strategies one_shot \
  --tests FailingTest \
  --keep-answers \
  --verbose
```

### Resume / Incremental Runs
The benchmark automatically saves progress and resumes from where it left off.
State is saved to `.benchmark_state.json` in the reports directory.

```bash
# Run benchmarks - will resume from last run automatically
poetry run direct-benchmark run \
  --strategies one_shot,reflexion \
  --models claude

# Start fresh (clear all saved state)
poetry run direct-benchmark run --fresh \
  --strategies one_shot,reflexion \
  --models claude

# Reset specific strategy and re-run
poetry run direct-benchmark run \
  --reset-strategy reflexion \
  --strategies one_shot,reflexion \
  --models claude

# Reset specific model and re-run
poetry run direct-benchmark run \
  --reset-model claude-thinking-25k \
  --strategies one_shot \
  --models claude,claude-thinking-25k

# Retry only the failures from the last run
poetry run direct-benchmark run --retry-failures \
  --strategies one_shot,reflexion \
  --models claude
```

### CI/Scripting Mode
```bash
# JSON output (parseable)
poetry run direct-benchmark run --json

# CI mode - shows completion blocks without Live display
# Auto-enabled when CI=true env var is set or stdout is not a TTY
poetry run direct-benchmark run --ci
```
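
The auto-enable rule can be expressed directly (a sketch; `ci_flag` is a hypothetical stand-in for the parsed `--ci` option):

```python
import os
import sys


def should_use_ci_mode(ci_flag: bool) -> bool:
    """--ci, a set CI environment variable, or a non-TTY stdout enables CI mode."""
    return ci_flag or bool(os.environ.get("CI")) or not sys.stdout.isatty()
```
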

@@ -1,154 +0,0 @@
# Direct Benchmark Harness

High-performance benchmark harness for AutoGPT that directly instantiates agents without HTTP server overhead, enabling parallel execution of multiple configurations.

## Features

- **Direct Agent Instantiation**: No HTTP server, no Agent Protocol overhead
- **Parallel Execution**: Run multiple strategy/model combinations concurrently
- **Multiple Attempts**: Run each challenge multiple times for statistical reliability
- **Rich UI**: Live progress display with Rich library
- **Multiple Output Modes**: Default (rich), quiet, verbose, JSON for CI
- **Full CLI Compatibility**: All flags from the original agbenchmark supported

## Installation

All commands run from the `classic/` directory (parent of this directory):

```bash
cd classic
poetry install
```

## Usage

```bash
# Run benchmarks with default settings
poetry run direct-benchmark run

# Run specific strategies and models
poetry run direct-benchmark run \
  --strategies one_shot,rewoo \
  --models claude,openai \
  --parallel 4

# Run a single test
poetry run direct-benchmark run \
  --strategies one_shot \
  --tests ReadFile

# Run multiple attempts per challenge
poetry run direct-benchmark run \
  --strategies one_shot \
  --attempts 3

# Run only regression tests (previously beaten)
poetry run direct-benchmark run --maintain

# Run only non-regression tests (not consistently beaten)
poetry run direct-benchmark run --improve

# Run only never-beaten challenges
poetry run direct-benchmark run --explore

# List available challenges
poetry run direct-benchmark list-challenges

# List model presets
poetry run direct-benchmark list-models

# List strategies
poetry run direct-benchmark list-strategies
```

## CLI Options

### Challenge Selection
- `--strategies, -s`: Comma-separated strategies (one_shot, rewoo, plan_execute, reflexion, tree_of_thoughts)
- `--models, -m`: Comma-separated model presets (claude, openai, etc.)
- `--categories, -c`: Filter by challenge categories
- `--skip-category, -S`: Exclude categories
- `--tests, -t`: Filter by test names

### Execution Control
- `--attempts, -N`: Number of times to run each challenge
- `--parallel, -p`: Maximum parallel runs (default: 4)
- `--timeout`: Per-challenge timeout in seconds (default: 300)
- `--cutoff`: Alias for --timeout
- `--no-cutoff, --nc`: Disable time limit
- `--max-steps`: Maximum steps per challenge (default: 50)

### Challenge Filtering Modes
- `--maintain`: Run only regression tests (previously beaten consistently)
- `--improve`: Run only non-regression tests (not consistently beaten)
- `--explore`: Run only challenges that have never been beaten
- `--no-dep`: Run all challenges regardless of dependency success/failure

### Output & Debug
- `--quiet, -q`: Minimal output
- `--verbose, -v`: Detailed per-challenge output
- `--json`: JSON output for CI/scripting
- `--debug`: Enable debug output
- `--keep-answers`: Keep answer files for debugging

### Paths
- `--workspace`: Workspace root directory
- `--challenges-dir`: Path to challenges directory
- `--reports-dir`: Path to reports directory

## Available Strategies

| Strategy | Description |
|----------|-------------|
| `one_shot` | Single-pass reasoning (default, most reliable) |
| `rewoo` | Reasoning with observations |
| `plan_execute` | Plan then execute |
| `reflexion` | Self-reflection loop |
| `tree_of_thoughts` | Multiple reasoning paths |

## Available Model Presets

### Claude
- `claude`: sonnet-4 smart, haiku fast (default)
- `claude-smart`: sonnet-4 for both
- `claude-fast`: haiku for both
- `claude-opus`: opus smart, sonnet fast
- `claude-opus-only`: opus for both

### Claude with Extended Thinking
- `claude-thinking-10k`: 10k thinking tokens
- `claude-thinking-25k`: 25k thinking tokens
- `claude-thinking-50k`: 50k thinking tokens
- `claude-opus-thinking`: opus with 25k thinking
- `claude-opus-thinking-50k`: opus with 50k thinking

### OpenAI
- `openai`: gpt-4o smart, gpt-4o-mini fast
- `openai-smart`: gpt-4o for both
- `openai-fast`: gpt-4o-mini for both
- `gpt5`: gpt-5 smart, gpt-4o fast
- `gpt5-only`: gpt-5 for both

### OpenAI Reasoning Models
- `o1`, `o1-mini`: o1 variants
- `o1-low`, `o1-medium`, `o1-high`: o1 with reasoning effort
- `o3-low`, `o3-medium`, `o3-high`: o3 with reasoning effort

## Reports

Reports are generated in `./reports/` with format:
```
reports/
├── {timestamp}_{strategy}_{model}/
│   └── report.json
└── strategy_comparison_{timestamp}.json
```

## Key Differences from agbenchmark

| agbenchmark | direct_benchmark |
|-------------|------------------|
| `subprocess.Popen` + HTTP server | Direct `create_agent()` |
| HTTP/REST via Agent Protocol | Direct `propose_action()`/`execute()` |
| Sequential (one config at a time) | Parallel via asyncio semaphore |
| Port-based isolation | Workspace-based isolation |
@@ -1,842 +0,0 @@
#!/usr/bin/env python3
"""
Strategy Failure Analysis Tool

Analyzes why prompt strategies fail on benchmark tests, identifies patterns,
and provides actionable insights for improvement.

Usage:
    # Full analysis with LLM summaries (default)
    poetry run python agbenchmark_config/analyze_failures.py

    # Disable LLM analysis (just print raw pattern data)
    poetry run python agbenchmark_config/analyze_failures.py --no-analysis

    # Focus on specific strategy
    poetry run python agbenchmark_config/analyze_failures.py --strategy rewoo

    # Compare one test across strategies (interactive)
    poetry run python agbenchmark_config/analyze_failures.py --test Battleship

    # Interactive drill-down mode
    poetry run python agbenchmark_config/analyze_failures.py --interactive

    # Export to markdown
    poetry run python agbenchmark_config/analyze_failures.py --markdown
"""

import argparse
import json
import sys
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Optional

# Type hints for optional rich imports
Console: Any = None
Markdown: Any = None
Panel: Any = None
Progress: Any = None
SpinnerColumn: Any = None
TextColumn: Any = None
Confirm: Any = None
Prompt: Any = None
Table: Any = None
Text: Any = None
Tree: Any = None

try:
    from rich.console import Console
    from rich.markdown import Markdown  # noqa: F401
    from rich.panel import Panel
    from rich.progress import Progress, SpinnerColumn, TextColumn
    from rich.prompt import Confirm, Prompt  # noqa: F401
    from rich.table import Table
    from rich.text import Text
    from rich.tree import Tree

    RICH_AVAILABLE = True
except ImportError:
    RICH_AVAILABLE = False


class FailurePattern(Enum):
    """Categories of failure patterns."""

    OVER_PLANNING = "over_planning"  # Too many planning steps, not enough execution
    TOOL_LOOP = "tool_loop"  # Repeating same tool without progress
    MISSING_CRITICAL = "missing_critical"  # Didn't complete key action
    TIMEOUT = "timeout"  # Hit step limit before completion
    ERROR_UNRECOVERED = "error_unrecovered"  # Hit error and couldn't recover
    WRONG_APPROACH = "wrong_approach"  # Fundamentally wrong solution
    UNKNOWN = "unknown"


@dataclass
class StepInfo:
    """Information about a single execution step."""

    step_num: int
    tool_name: str
    tool_args: dict
    tool_result: Optional[dict]
    thoughts: dict
    cumulative_cost: float
    output: str


@dataclass
class TestResult:
    """Analysis of a single test execution."""

    test_name: str
    strategy: str
    task: str
    success: bool
    fail_reason: Optional[str]
    reached_cutoff: bool
    n_steps: int
    steps: list[StepInfo]
    total_cost: float
    run_time: str
    tool_distribution: Counter = field(default_factory=Counter)
    patterns_detected: list[FailurePattern] = field(default_factory=list)


@dataclass
class StrategyAnalysis:
    """Analysis results for a strategy."""

    strategy_name: str
    total_tests: int
    passed: int
    failed: int
    success_rate: float
    total_cost: float
    avg_steps: float
    failed_tests: list[TestResult]
    pattern_distribution: Counter = field(default_factory=Counter)


class FailureAnalyzer:
    """Main analysis engine."""

    def __init__(self, reports_dir: Path, use_llm: bool = True):
        self.reports_dir = reports_dir
        self.use_llm = use_llm
        self._console_instance = Console() if RICH_AVAILABLE else None
        self.strategies: dict[str, StrategyAnalysis] = {}
        self.test_comparison: dict[str, dict[str, TestResult]] = defaultdict(dict)
        self._llm_provider = None

    @property
    def console(self) -> Any:
        """Get console instance (only call when RICH_AVAILABLE is True)."""
        assert self._console_instance is not None
        return self._console_instance

    def _print(self, *args: Any, **kwargs: Any) -> None:
        """Print with Rich if available, otherwise standard print."""
        if self._console_instance:
            self._console_instance.print(*args, **kwargs)
        else:
            print(*args, **kwargs)

    def find_reports(self) -> list[tuple[str, Path]]:
        """Find all strategy-specific reports."""
        reports = []
        for report_dir in self.reports_dir.iterdir():
            if not report_dir.is_dir():
                continue
            report_file = report_dir / "report.json"
            if not report_file.exists():
                continue

            # Extract strategy from directory name
            name = report_dir.name
            strategy = None
            for s in [
                "one_shot",
                "rewoo",
                "plan_execute",
                "reflexion",
                "tree_of_thoughts",
            ]:
                if s in name:
                    strategy = s
                    break

            if strategy:
                reports.append((strategy, report_file))

        return sorted(reports, key=lambda x: x[1].stat().st_mtime, reverse=True)

    def parse_report(self, strategy: str, report_path: Path) -> StrategyAnalysis:
        """Parse a benchmark report file."""
        with open(report_path) as f:
            data = json.load(f)

        tests_data = data.get("tests", {})
        failed_tests = []
        total_cost = 0.0
        total_steps = 0
        passed = 0
        failed = 0

        for test_name, test_data in tests_data.items():
            results = test_data.get("results", [])
            if not results:
                continue

            result = results[0]
            success = result.get("success", False)
            n_steps = result.get("n_steps", 0)
            cost = result.get("cost", 0)

            total_steps += n_steps
            total_cost += cost or 0

            if success:
                passed += 1
            else:
                failed += 1
                test_result = self._parse_test_result(
                    test_name, strategy, test_data, result
                )
                failed_tests.append(test_result)
                self.test_comparison[test_name][strategy] = test_result

        total_tests = passed + failed
        return StrategyAnalysis(
            strategy_name=strategy,
            total_tests=total_tests,
            passed=passed,
            failed=failed,
            success_rate=(passed / total_tests * 100) if total_tests > 0 else 0,
            total_cost=total_cost,
            avg_steps=total_steps / total_tests if total_tests > 0 else 0,
            failed_tests=failed_tests,
        )

    def _parse_test_result(
        self, test_name: str, strategy: str, test_data: dict, result: dict
    ) -> TestResult:
        """Parse a single test result."""
        steps_data = result.get("steps", [])
        steps = []
        tool_distribution = Counter()

        for i, step in enumerate(steps_data):
            ao = step.get("additional_output") or {}
            use_tool = ao.get("use_tool") or {}
            last_action = ao.get("last_action") or {}
            thoughts = ao.get("thoughts") or {}

            tool_name = use_tool.get("name", "none")
            tool_distribution[tool_name] += 1

            step_info = StepInfo(
                step_num=i + 1,
                tool_name=tool_name,
                tool_args=use_tool.get("arguments", {}),
                tool_result=last_action.get("result") if last_action else None,
                thoughts=thoughts,
                cumulative_cost=ao.get("task_cumulative_cost", 0),
                output=step.get("output", ""),
            )
            steps.append(step_info)

        test_result = TestResult(
            test_name=test_name,
            strategy=strategy,
            task=test_data.get("task", ""),
            success=False,
            fail_reason=result.get("fail_reason"),
            reached_cutoff=result.get("reached_cutoff", False),
            n_steps=result.get("n_steps", 0),
            steps=steps,
            total_cost=result.get("cost", 0),
            run_time=result.get("run_time", ""),
            tool_distribution=tool_distribution,
        )

        # Detect patterns
        test_result.patterns_detected = self._detect_patterns(test_result)
        return test_result

    def _detect_patterns(self, test: TestResult) -> list[FailurePattern]:
        """Detect failure patterns in a test result."""
        patterns = []

        # Pattern 1: Over-planning
        planning_tools = {"todo_write", "todo_read", "think", "plan"}
        execution_tools = {
            "write_file",
            "execute_python",
            "execute_shell",
            "read_file",
        }

        planning_count = sum(test.tool_distribution.get(t, 0) for t in planning_tools)
        _execution_count = sum(  # noqa: F841
            test.tool_distribution.get(t, 0) for t in execution_tools
        )

        if test.n_steps > 0:
            planning_ratio = planning_count / test.n_steps
            if planning_ratio > 0.5 and test.n_steps > 1:
                patterns.append(FailurePattern.OVER_PLANNING)

        # Pattern 2: Tool loops (same tool used 3+ times consecutively)
        if len(test.steps) >= 3:
            for i in range(len(test.steps) - 2):
                if (
                    test.steps[i].tool_name
                    == test.steps[i + 1].tool_name
                    == test.steps[i + 2].tool_name
                ):
                    patterns.append(FailurePattern.TOOL_LOOP)
                    break

        # Pattern 3: Missing critical action
        # If task mentions "write" or "create" but no write_file was used
        task_lower = test.task.lower()
        if any(word in task_lower for word in ["write", "create", "generate", "build"]):
            if test.tool_distribution.get("write_file", 0) == 0:
                patterns.append(FailurePattern.MISSING_CRITICAL)

        # Pattern 4: Timeout
        if test.reached_cutoff:
            patterns.append(FailurePattern.TIMEOUT)

        # Pattern 5: Error unrecovered
        error_count = 0
        for step in test.steps:
            if step.tool_result and step.tool_result.get("status") == "error":
                error_count += 1
        if error_count > 0 and error_count == len(test.steps) - 1:
            patterns.append(FailurePattern.ERROR_UNRECOVERED)

        if not patterns:
            patterns.append(FailurePattern.UNKNOWN)

        return patterns

    def analyze_all(self) -> None:
        """Analyze all available reports."""
        reports = self.find_reports()

        # Keep only most recent report per strategy
        latest_reports = {}
        for strategy, path in reports:
            if strategy not in latest_reports:
                latest_reports[strategy] = path

        if RICH_AVAILABLE:
            with Progress(
                SpinnerColumn(),
                TextColumn("[progress.description]{task.description}"),
                console=self.console,
            ) as progress:
                task = progress.add_task(
                    "Analyzing reports...", total=len(latest_reports)
                )
                for strategy, path in latest_reports.items():
                    progress.update(task, description=f"Analyzing {strategy}...")
                    self.strategies[strategy] = self.parse_report(strategy, path)
                    progress.advance(task)
        else:
            for strategy, path in latest_reports.items():
                print(f"Analyzing {strategy}...")
                self.strategies[strategy] = self.parse_report(strategy, path)

    def _get_llm_provider(self) -> Any:
        """Lazy-load the LLM provider."""
        if self._llm_provider is None:
            try:
                # Add parent paths to find forge
                sys.path.insert(0, str(Path(__file__).parent.parent.parent / "forge"))
                from forge.llm.providers import MultiProvider

                self._llm_provider = MultiProvider()
            except ImportError as e:
                self._print(
                    f"[yellow]Warning: Could not load LLM provider: {e}[/yellow]"
                    if RICH_AVAILABLE
                    else f"Warning: Could not load LLM provider: {e}"
                )
                self._llm_provider = False
        return self._llm_provider if self._llm_provider else None

    async def _get_llm_analysis(self, test: TestResult) -> Optional[str]:
        """Get LLM-powered analysis of a failure.

        Note: This is a placeholder for future LLM-powered analysis.
        Currently disabled to avoid dependency issues.
        """
        # LLM analysis disabled for now - patterns provide sufficient insights
        return None

    def print_summary(self) -> None:
        """Print overall summary."""
        if RICH_AVAILABLE:
            table = Table(title="Strategy Comparison Summary")
            table.add_column("Strategy", style="cyan")
            table.add_column("Tests", justify="right")
            table.add_column("Passed", justify="right", style="green")
            table.add_column("Failed", justify="right", style="red")
            table.add_column("Success %", justify="right")
            table.add_column("Avg Steps", justify="right")
            table.add_column("Cost", justify="right")

            for name, analysis in sorted(
                self.strategies.items(), key=lambda x: x[1].success_rate, reverse=True
            ):
                table.add_row(
                    name,
                    str(analysis.total_tests),
                    str(analysis.passed),
                    str(analysis.failed),
                    f"{analysis.success_rate:.1f}%",
                    f"{analysis.avg_steps:.1f}",
                    f"${analysis.total_cost:.4f}",
                )

            self.console.print(table)
        else:
            print("\n=== Strategy Comparison Summary ===")
            hdr = (
                f"{'Strategy':<20} {'Tests':>6} {'Passed':>7} "
                f"{'Failed':>7} {'Success%':>10} {'AvgSteps':>9} {'Cost':>10}"
            )
            print(hdr)
            print("-" * 80)
            for name, analysis in sorted(
                self.strategies.items(), key=lambda x: x[1].success_rate, reverse=True
            ):
                row = (
                    f"{name:<20} {analysis.total_tests:>6} "
                    f"{analysis.passed:>7} {analysis.failed:>7} "
                    f"{analysis.success_rate:>9.1f}% {analysis.avg_steps:>9.1f} "
                    f"${analysis.total_cost:>9.4f}"
                )
                print(row)

    def print_pattern_analysis(self) -> None:
        """Print failure pattern analysis."""
        all_patterns = Counter()
        for analysis in self.strategies.values():
            for test in analysis.failed_tests:
                for pattern in test.patterns_detected:
                    all_patterns[pattern] += 1

        self._print("\n")
        if RICH_AVAILABLE:
            table = Table(title="Failure Pattern Distribution")
            table.add_column("Pattern", style="yellow")
            table.add_column("Count", justify="right")
            table.add_column("Description")

            pattern_descriptions = {
                FailurePattern.OVER_PLANNING: "Too much planning, not enough action",
                FailurePattern.TOOL_LOOP: "Repeats same tool 3+ times consecutively",
                FailurePattern.MISSING_CRITICAL: "Never performed key action",
                FailurePattern.TIMEOUT: "Hit step limit before completing task",
                FailurePattern.ERROR_UNRECOVERED: "Hit errors and couldn't recover",
                FailurePattern.WRONG_APPROACH: "Took fundamentally wrong approach",
                FailurePattern.UNKNOWN: "Pattern not categorized",
            }

            for pattern, count in all_patterns.most_common():
                table.add_row(
                    pattern.value, str(count), pattern_descriptions.get(pattern, "")
                )

            self.console.print(table)
        else:
            print("\n=== Failure Pattern Distribution ===")
            for pattern, count in all_patterns.most_common():
                print(f"  {pattern.value}: {count}")

    def print_failed_tests(self, strategy: Optional[str] = None) -> None:
        """Print detailed failure analysis."""
        strategies_to_show = (
            [self.strategies[strategy]] if strategy else self.strategies.values()
        )

        for analysis in strategies_to_show:
            self._print("\n")
            if RICH_AVAILABLE:
                msg = (
                    f"[bold]{analysis.strategy_name}[/bold] - "
                    f"{analysis.failed} failures out of {analysis.total_tests} tests"
                )
                self.console.print(Panel(msg, title="Strategy Analysis"))
            else:
                print(f"\n=== {analysis.strategy_name} ===")
                print(f"Failures: {analysis.failed}/{analysis.total_tests}")

            for test in analysis.failed_tests:
                self._print_test_failure(test)

    def _print_test_failure(self, test: TestResult) -> None:
        """Print a single test failure."""
        if RICH_AVAILABLE:
            tree = Tree(f"[red]{test.test_name}[/red]")
            tree.add(f"[dim]Task:[/dim] {test.task[:80]}...")
            tree.add(f"[dim]Steps:[/dim] {test.n_steps}")
            tree.add(f"[dim]Cost:[/dim] ${test.total_cost:.4f}")
            patterns = ", ".join(p.value for p in test.patterns_detected)
            tree.add(f"[dim]Patterns:[/dim] {patterns}")

            tools = tree.add("[dim]Tool sequence:[/dim]")
            tool_seq = [s.tool_name for s in test.steps[:10]]
            tools.add(" -> ".join(tool_seq) + ("..." if len(test.steps) > 10 else ""))

            if test.fail_reason:
                reason = tree.add("[dim]Fail reason:[/dim]")
                reason.add(Text(test.fail_reason[:200], style="red"))

            self.console.print(tree)
        else:
            print(f"\n  {test.test_name}")
            print(f"  Task: {test.task[:80]}...")
            print(f"  Steps: {test.n_steps}, Cost: ${test.total_cost:.4f}")
            print(f"  Patterns: {', '.join(p.value for p in test.patterns_detected)}")
            tool_seq = [s.tool_name for s in test.steps[:10]]
            print(f"  Tools: {' -> '.join(tool_seq)}")
            if test.fail_reason:
                print(f"  Fail reason: {test.fail_reason[:200]}")

    def compare_test(self, test_name: str) -> None:
        """Compare a single test across all strategies."""
        if test_name not in self.test_comparison:
            self._print(
                f"[red]Test '{test_name}' not found in failed tests[/red]"
                if RICH_AVAILABLE
                else f"Test '{test_name}' not found in failed tests"
            )
            return

        results = self.test_comparison[test_name]
        self._print("\n")
        if RICH_AVAILABLE:
            self.console.print(Panel(f"[bold]Comparing: {test_name}[/bold]"))
        else:
            print(f"\n=== Comparing: {test_name} ===")

        for strategy, test in sorted(results.items()):
            self._print("\n")
            if RICH_AVAILABLE:
                self.console.print(f"[cyan]--- {strategy} ---[/cyan]")
            else:
                print(f"\n--- {strategy} ---")
            self._print_test_failure(test)

    def interactive_mode(self) -> None:
        """Run interactive exploration mode."""
        if not RICH_AVAILABLE:
            print("Interactive mode requires the 'rich' library.")
            print("Install with: pip install rich")
            return

        while True:
            self.console.print("\n[bold]Interactive Failure Analysis[/bold]")
            self.console.print("Commands:")
            self.console.print(" [cyan]summary[/cyan] - Show overall summary")
            self.console.print(" [cyan]patterns[/cyan] - Show pattern analysis")
            self.console.print(
                " [cyan]strategy <name>[/cyan] - Show failures for a strategy"
            )
            self.console.print(
                " [cyan]test <name>[/cyan] - Compare test across strategies"
            )
            self.console.print(
                " [cyan]step <strategy> <test> <n>[/cyan] - Show step details"
            )
            self.console.print(" [cyan]list tests[/cyan] - List all failed tests")
            self.console.print(" [cyan]list strategies[/cyan] - List strategies")
            self.console.print(" [cyan]quit[/cyan] - Exit")

            cmd = Prompt.ask("\n[bold]>>[/bold]").strip().lower()

            if cmd == "quit" or cmd == "q":
                break
            elif cmd == "summary":
                self.print_summary()
            elif cmd == "patterns":
                self.print_pattern_analysis()
            elif cmd.startswith("strategy "):
                strategy = cmd.split(" ", 1)[1]
                if strategy in self.strategies:
                    self.print_failed_tests(strategy)
                else:
                    self.console.print(f"[red]Unknown strategy: {strategy}[/red]")
            elif cmd.startswith("test "):
                test_name = cmd.split(" ", 1)[1]
                self.compare_test(test_name)
            elif cmd.startswith("step "):
                parts = cmd.split()
                if len(parts) >= 4:
                    strategy = parts[1]
                    test_name = parts[2]
                    step_num = int(parts[3])
                    self._show_step_detail(strategy, test_name, step_num)
                else:
                    self.console.print(
                        "[red]Usage: step <strategy> <test> <step_num>[/red]"
                    )
            elif cmd == "list tests":
                self._list_tests()
            elif cmd == "list strategies":
                self.console.print(", ".join(self.strategies.keys()))
            else:
                self.console.print(f"[red]Unknown command: {cmd}[/red]")

    def _list_tests(self) -> None:
        """List all failed tests."""
        all_tests = set()
        for analysis in self.strategies.values():
            for test in analysis.failed_tests:
                all_tests.add(test.test_name)

        if RICH_AVAILABLE:
            table = Table(title="Failed Tests Across Strategies")
            table.add_column("Test", style="cyan")
            for strategy in self.strategies.keys():
                table.add_column(strategy, justify="center")

            for test_name in sorted(all_tests):
                row = [test_name]
                for strategy in self.strategies.keys():
                    if (
                        test_name in self.test_comparison
                        and strategy in self.test_comparison[test_name]
                    ):
                        row.append("[red]FAIL[/red]")
                    else:
                        row.append("[green]PASS[/green]")
                table.add_row(*row)

            self.console.print(table)
        else:
            print("\n=== Failed Tests ===")
            for test_name in sorted(all_tests):
                print(f"  {test_name}")

    def _show_step_detail(self, strategy: str, test_name: str, step_num: int) -> None:
        """Show detailed information about a specific step."""
        if strategy not in self.strategies:
            self._print(
                f"[red]Unknown strategy: {strategy}[/red]"
                if RICH_AVAILABLE
                else f"Unknown strategy: {strategy}"
            )
            return

        test = None
        for t in self.strategies[strategy].failed_tests:
            if t.test_name == test_name:
                test = t
|
||||
break
|
||||
|
||||
if not test:
|
||||
self._print(
|
||||
f"[red]Test '{test_name}' not found in {strategy}[/red]"
|
||||
if RICH_AVAILABLE
|
||||
else f"Test '{test_name}' not found in {strategy}"
|
||||
)
|
||||
return
|
||||
|
||||
if step_num < 1 or step_num > len(test.steps):
|
||||
self._print(
|
||||
f"[red]Step {step_num} out of range (1-{len(test.steps)})[/red]"
|
||||
if RICH_AVAILABLE
|
||||
else f"Step {step_num} out of range (1-{len(test.steps)})"
|
||||
)
|
||||
return
|
||||
|
||||
step = test.steps[step_num - 1]
|
||||
|
||||
if RICH_AVAILABLE:
|
||||
self.console.print(Panel(f"[bold]Step {step_num} Details[/bold]"))
|
||||
self.console.print(f"[cyan]Tool:[/cyan] {step.tool_name}")
|
||||
self.console.print(
|
||||
f"[cyan]Arguments:[/cyan] {json.dumps(step.tool_args, indent=2)}"
|
||||
)
|
||||
|
||||
if step.thoughts:
|
||||
self.console.print("\n[cyan]Thoughts:[/cyan]")
|
||||
for key, value in step.thoughts.items():
|
||||
self.console.print(f" [dim]{key}:[/dim] {value}")
|
||||
|
||||
if step.tool_result:
|
||||
result_str = json.dumps(step.tool_result, indent=2)[:500]
|
||||
self.console.print(f"\n[cyan]Result:[/cyan] {result_str}")
|
||||
|
||||
self.console.print(
|
||||
f"\n[cyan]Cumulative Cost:[/cyan] ${step.cumulative_cost:.4f}"
|
||||
)
|
||||
else:
|
||||
print(f"\n=== Step {step_num} Details ===")
|
||||
print(f"Tool: {step.tool_name}")
|
||||
print(f"Arguments: {json.dumps(step.tool_args, indent=2)}")
|
||||
if step.thoughts:
|
||||
print("\nThoughts:")
|
||||
for key, value in step.thoughts.items():
|
||||
print(f" {key}: {value}")
|
||||
if step.tool_result:
|
||||
print(f"\nResult: {json.dumps(step.tool_result, indent=2)[:500]}")
|
||||
print(f"\nCumulative Cost: ${step.cumulative_cost:.4f}")
|
||||
|
||||
def export_markdown(self, output_path: Optional[Path] = None) -> str:
|
||||
"""Export analysis to markdown format."""
|
||||
lines = []
|
||||
lines.append("# Benchmark Failure Analysis Report")
|
||||
lines.append(f"\nGenerated: {datetime.now().isoformat()}\n")
|
||||
|
||||
# Summary table
|
||||
lines.append("## Strategy Comparison\n")
|
||||
lines.append(
|
||||
"| Strategy | Tests | Passed | Failed | Success % | Avg Steps | Cost |"
|
||||
)
|
||||
lines.append(
|
||||
"|----------|-------|--------|--------|-----------|-----------|------|"
|
||||
)
|
||||
for name, analysis in sorted(
|
||||
self.strategies.items(), key=lambda x: x[1].success_rate, reverse=True
|
||||
):
|
||||
row = (
|
||||
f"| {name} | {analysis.total_tests} | {analysis.passed} "
|
||||
f"| {analysis.failed} | {analysis.success_rate:.1f}% "
|
||||
f"| {analysis.avg_steps:.1f} | ${analysis.total_cost:.4f} |"
|
||||
)
|
||||
lines.append(row)
|
||||
|
||||
# Pattern analysis
|
||||
lines.append("\n## Failure Patterns\n")
|
||||
all_patterns = Counter()
|
||||
for analysis in self.strategies.values():
|
||||
for test in analysis.failed_tests:
|
||||
for pattern in test.patterns_detected:
|
||||
all_patterns[pattern] += 1
|
||||
|
||||
for pattern, count in all_patterns.most_common():
|
||||
lines.append(f"- **{pattern.value}**: {count} occurrences")
|
||||
|
||||
# Failed tests by strategy
|
||||
lines.append("\n## Failed Tests by Strategy\n")
|
||||
for name, analysis in self.strategies.items():
|
||||
if not analysis.failed_tests:
|
||||
continue
|
||||
lines.append(f"\n### {name}\n")
|
||||
for test in analysis.failed_tests:
|
||||
lines.append(f"#### {test.test_name}\n")
|
||||
lines.append(f"- **Task**: {test.task[:100]}...")
|
||||
lines.append(f"- **Steps**: {test.n_steps}")
|
||||
patterns = ", ".join(p.value for p in test.patterns_detected)
|
||||
lines.append(f"- **Patterns**: {patterns}")
|
||||
tools = " -> ".join(s.tool_name for s in test.steps[:8])
|
||||
lines.append(f"- **Tool sequence**: {tools}")
|
||||
if test.fail_reason:
|
||||
lines.append(f"- **Fail reason**: {test.fail_reason[:150]}...")
|
||||
lines.append("")
|
||||
|
||||
content = "\n".join(lines)
|
||||
|
||||
if output_path:
|
||||
output_path.write_text(content)
|
||||
self._print(
|
||||
f"Markdown report saved to: {output_path}"
|
||||
if not RICH_AVAILABLE
|
||||
else f"[green]Markdown report saved to: {output_path}[/green]"
|
||||
)
|
||||
|
||||
return content
|
||||
|
||||
|
||||
async def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Analyze benchmark failures across prompt strategies"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-analysis",
|
||||
action="store_true",
|
||||
help="Disable LLM-powered analysis",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--strategy",
|
||||
type=str,
|
||||
help="Focus on a specific strategy",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--test",
|
||||
type=str,
|
||||
help="Compare a specific test across strategies",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--interactive",
|
||||
"-i",
|
||||
action="store_true",
|
||||
help="Run in interactive mode",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--markdown",
|
||||
type=str,
|
||||
nargs="?",
|
||||
const="failure_analysis.md",
|
||||
help="Export to markdown (optionally specify output file)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--reports-dir",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Path to reports directory",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Find reports directory
|
||||
if args.reports_dir:
|
||||
reports_dir = Path(args.reports_dir)
|
||||
else:
|
||||
# Try to find it relative to this script
|
||||
script_dir = Path(__file__).parent
|
||||
reports_dir = script_dir / "reports"
|
||||
if not reports_dir.exists():
|
||||
reports_dir = Path.cwd() / "agbenchmark_config" / "reports"
|
||||
|
||||
if not reports_dir.exists():
|
||||
print(f"Reports directory not found: {reports_dir}")
|
||||
sys.exit(1)
|
||||
|
||||
analyzer = FailureAnalyzer(reports_dir, use_llm=not args.no_analysis)
|
||||
analyzer.analyze_all()
|
||||
|
||||
if not analyzer.strategies:
|
||||
print("No strategy reports found.")
|
||||
sys.exit(1)
|
||||
|
||||
if args.interactive:
|
||||
analyzer.interactive_mode()
|
||||
elif args.test:
|
||||
analyzer.compare_test(args.test)
|
||||
elif args.strategy:
|
||||
analyzer.print_failed_tests(args.strategy)
|
||||
else:
|
||||
analyzer.print_summary()
|
||||
analyzer.print_pattern_analysis()
|
||||
analyzer.print_failed_tests()
|
||||
|
||||
if args.markdown:
|
||||
output_path = Path(args.markdown)
|
||||
analyzer.export_markdown(output_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import asyncio
|
||||
|
||||
asyncio.run(main())
|
||||
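
# Example invocations (a sketch: the script filename and the "one-shot"
# strategy name are assumptions; the flags come from the argparse setup above):
#
#   python analyze_failures.py                # summary, patterns, and failures
#   python analyze_failures.py --interactive  # explore failures in a REPL
#   python analyze_failures.py --strategy one-shot --markdown report.md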
@@ -1,162 +0,0 @@
#!/usr/bin/env python3

import json
import logging
import re
import sys
from collections import defaultdict
from pathlib import Path

from tabulate import tabulate

info = "-v" in sys.argv
debug = "-vv" in sys.argv
granular = "--granular" in sys.argv

logging.basicConfig(
    level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING
)
logger = logging.getLogger(__name__)

# Get a list of all JSON files in the directory
reports_dir = Path(__file__).parent / "reports"
if not reports_dir.exists():
    print(f"No reports directory found at {reports_dir}")
    sys.exit(1)

report_files = [
    report_file
    for dir in reports_dir.iterdir()
    if re.match(r"^\d{8}T\d{6}_", dir.name)
    and (report_file := dir / "report.json").is_file()
]

labels = list[str]()
runs_per_label = defaultdict[str, int](lambda: 0)
suite_names = list[str]()
test_names = list[str]()

# Create a dictionary to store grouped success values by suffix and test
grouped_success_values = defaultdict[str, list[str]](list[str])

# Loop through each JSON file to collect suffixes and success values
for report_file in sorted(report_files):
    with open(report_file) as f:
        logger.info(f"Loading {report_file}...")

        data = json.load(f)
        if "tests" in data:
            test_tree = data["tests"]
            # Handle old format (agent_git_commit_sha) and new (config_name)
            if "config" in data and "config_name" in data["config"]:
                label = data["config"]["config_name"]
            elif "agent_git_commit_sha" in data and "/" in data["agent_git_commit_sha"]:
                label = data["agent_git_commit_sha"].rsplit("/", 1)[1][
                    :7
                ]  # commit hash
            else:
                label = report_file.parent.name.split("_", 1)[1]
        else:
            # Benchmark run still in progress
            test_tree = data
            label = report_file.parent.name.split("_", 1)[1]
            logger.info(f"Run '{label}' seems to be in progress")

        runs_per_label[label] += 1

    def process_test(test_name: str, test_data: dict):
        result_group = grouped_success_values[f"{label}|{test_name}"]

        if "tests" in test_data:
            logger.debug(f"{test_name} is a test suite")

            # Test suite
            suite_attempted = any(
                test["metrics"]["attempted"] for test in test_data["tests"].values()
            )
            logger.debug(f"suite_attempted: {suite_attempted}")
            if not suite_attempted:
                return

            if test_name not in test_names:
                test_names.append(test_name)

            if test_data["metrics"]["percentage"] == 0:
                result_indicator = "❌"
            else:
                highest_difficulty = test_data["metrics"]["highest_difficulty"]
                result_indicator = {
                    "interface": "🔌",
                    "novice": "🌑",
                    "basic": "🌒",
                    "intermediate": "🌓",
                    "advanced": "🌔",
                    "hard": "🌕",
                }[highest_difficulty]

            logger.debug(f"result group: {result_group}")
            logger.debug(f"runs_per_label: {runs_per_label[label]}")
            if len(result_group) + 1 < runs_per_label[label]:
                result_group.extend(
                    ["❔"] * (runs_per_label[label] - len(result_group) - 1)
                )
            result_group.append(result_indicator)
            logger.debug(f"result group (after): {result_group}")

            if granular:
                for test_name, test in test_data["tests"].items():
                    process_test(test_name, test)
            return

        test_metrics = test_data["metrics"]
        result_indicator = "❔"

        if "attempted" not in test_metrics:
            return
        elif test_metrics["attempted"]:
            if test_name not in test_names:
                test_names.append(test_name)

            # Handle old format (success: bool) and new (success_percentage)
            if "success" in test_metrics:
                success_value = test_metrics["success"]
            elif "success_percentage" in test_metrics:
                success_value = test_metrics["success_percentage"] >= 100.0
            else:
                success_value = False
            result_indicator = {True: "✅", False: "❌"}[success_value]

        if len(result_group) + 1 < runs_per_label[label]:
            result_group.extend(
                [" "] * (runs_per_label[label] - len(result_group) - 1)
            )
        result_group.append(result_indicator)

    for test_name, suite in test_tree.items():
        try:
            process_test(test_name, suite)
        except KeyError:
            print(f"{test_name}.metrics: {suite['metrics']}")
            raise

    if label not in labels:
        labels.append(label)

# Create headers
headers = ["Test Name"] + list(labels)

# Prepare data for tabulation
table_data = list[list[str]]()
for test_name in test_names:
    row = [test_name]
    for label in labels:
        results = grouped_success_values.get(f"{label}|{test_name}", ["❔"])
        if len(results) < runs_per_label[label]:
            results.extend(["❔"] * (runs_per_label[label] - len(results)))
        if len(results) > 1 and all(r == "❔" for r in results):
            results.clear()
        row.append(" ".join(results))
    table_data.append(row)

# Print tabulated data
print(tabulate(table_data, headers=headers, tablefmt="grid"))
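
# Illustrative output shape (hypothetical labels and results; tabulate's
# "grid" format draws the +---+ borders):
#
#   +---------------+-----------+-----------+
#   | Test Name     | abc1234   | def5678   |
#   +===============+===========+===========+
#   | TestWriteFile | ✅ ✅     | ❌ ✅     |
#   +---------------+-----------+-----------+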
@@ -1,85 +0,0 @@
# Challenges Data Schema of Benchmark

## General challenges

Input:

- **name** (str): Name of the challenge.
- **category** (str[]): Category of the challenge such as 'basic', 'retrieval', 'comprehension', etc. _This is not currently used; it may be needed in the future._
- **task** (str): The task that the agent needs to solve.
- **dependencies** (str[]): The dependencies that the challenge needs to run. Needs to be the full node to the test function.
- **ground** (dict): The ground truth.
  - **answer** (str): The raw text of the ground truth answer.
  - **should_contain** (list): The exact strings that are required in the final answer.
  - **should_not_contain** (list): The exact strings that should not be in the final answer.
  - **files** (list): Files that are used for retrieval. Can specify a file here, or an extension.
- **mock** (dict): Mock response for testing.
  - **mock_func** (str): Function to mock the agent's response. This is used for testing purposes.
  - **mock_task** (str): Task to provide for the mock function.
- **info** (dict): Additional info about the challenge.
  - **difficulty** (str): The difficulty of this query.
  - **description** (str): Description of the challenge.
  - **side_effects** (str[]): Describes the effects of the challenge.

Example:

```json
{
  "category": ["basic"],
  "task": "Print the capital of America to a .txt file",
  "dependencies": ["TestWriteFile"], // the class name of the test
  "ground": {
    "answer": "Washington",
    "should_contain": ["Washington"],
    "should_not_contain": ["New York", "Los Angeles", "San Francisco"],
    "files": [".txt"],
    "eval": {
      "type": "llm" or "file" or "python",
      "scoring": "percentage" or "scale" or "binary", // only if the type is llm
      "template": "rubric" or "reference" or "custom" // only if the type is llm
    }
  },
  "info": {
    "difficulty": "basic",
    "description": "Tests the writing to file",
    "side_effects": ["tests if there is in fact an LLM attached"]
  }
}
```

## Evals

This is the method of evaluation for a challenge.

### file

This is the default method of evaluation. It compares the files specified in the "files" field to the "should_contain" and "should_not_contain" ground truths.

### python

This runs a Python function from the specified "files" and captures its print output, which is scored using the "should_contain" and "should_not_contain" ground truths.

### llm

This uses a language model to evaluate the answer.

- There are 3 different templates: "rubric", "reference", and "custom". "rubric" will evaluate based on a rubric you provide in the "answer" field. "reference" will evaluate based on the ideal reference response in "answer". "custom" will not use any predefined scoring method; the prompt will be whatever you put in "answer".
- The "scoring" field determines how to score the answer. "percentage" will assign a percentage out of 100. "scale" will score the answer 1-10. "binary" will score the answer based on whether the answer is correct or not.
- You can still use the "should_contain" and "should_not_contain" fields to directly match the answer alongside the llm eval.
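For instance, a rubric-scored llm eval could be declared like this (an illustrative sketch; the field values follow the schema above rather than any specific challenge):

```json
"ground": {
  "answer": "The file should name the capital of America and nothing else.",
  "should_contain": ["Washington"],
  "files": [".txt"],
  "eval": {
    "type": "llm",
    "scoring": "binary",
    "template": "rubric"
  }
}
```
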
## Add files to challenges:

### artifacts_in

This folder contains all the files you want the agent to have in its workspace BEFORE the challenge starts.

### artifacts_out

This folder contains all the files you would like the agent to generate. This folder is used to mock the agent.
This allows you to run `agbenchmark --test=TestExample --mock` and make sure the challenge actually works.

### custom_python

This folder contains files that will be copied into the agent's workspace and run after the challenge is completed.
For example, we can place a test.py in it and run that file in the workspace to easily import and test code generated by the agent.
See the TestBasicCodeGeneration challenge for an example; a sketch of such a file follows.
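
A minimal sketch of a custom_python test file (hypothetical: it assumes the agent was asked to produce a `code.py` exposing a `multiply_int` function):

```python
# Hypothetical custom_python/test.py. Assumes the agent generated a code.py
# with a multiply_int(num) function in the same workspace directory.
from code import multiply_int


def test_multiply_int(num: int, expected_result: int) -> None:
    result = multiply_int(num)
    print(result)  # printed output is matched against should_contain
    assert result == expected_result


if __name__ == "__main__":
    # With should_contain: ["8"], this passes if multiply_int doubles its input.
    test_multiply_int(4, 8)
```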
@@ -1,13 +0,0 @@
# This is the official challenge library for https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks

The goal of this repo is to provide easy challenge creation for test-driven development with the Auto-GPT-Benchmarks package. This is essentially a library for crafting challenges using a DSL (JSON files, in this case).

This is the up-to-date dependency graph: https://sapphire-denys-23.tiiny.site/

### How to use

Make sure you have the package installed with `pip install agbenchmark`.

If you would just like to use the default challenges, don't worry about this repo. Just install the package and you will have access to the default challenges.

To add new challenges as you develop, add this repo as a submodule to your `project/agbenchmark` folder. Any new challenges you add within the submodule will get registered automatically.
@@ -1,56 +0,0 @@
import glob
import json
import logging
from pathlib import Path

from .base import BaseChallenge, ChallengeInfo
from .builtin import OPTIONAL_CATEGORIES

logger = logging.getLogger(__name__)


def get_challenge_from_source_uri(source_uri: str) -> type[BaseChallenge]:
    from .builtin import BuiltinChallenge
    from .webarena import WebArenaChallenge

    provider_prefix = source_uri.split("/", 1)[0]

    if provider_prefix == BuiltinChallenge.SOURCE_URI_PREFIX:
        return BuiltinChallenge.from_source_uri(source_uri)

    if provider_prefix == WebArenaChallenge.SOURCE_URI_PREFIX:
        return WebArenaChallenge.from_source_uri(source_uri)

    raise ValueError(f"Cannot resolve source_uri '{source_uri}'")
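
# Note: judging from the parsing above, a source_uri is assumed to take the
# form "<provider prefix>/<challenge id>"; the concrete prefix values are
# defined by BuiltinChallenge.SOURCE_URI_PREFIX and
# WebArenaChallenge.SOURCE_URI_PREFIX in their respective modules.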

def get_unique_categories() -> set[str]:
    """
    Reads all challenge spec files and returns a set of all their categories.
    """
    categories = set()

    challenges_dir = Path(__file__).parent
    glob_path = f"{challenges_dir}/**/data.json"

    for data_file in glob.glob(glob_path, recursive=True):
        with open(data_file, "r") as f:
            try:
                challenge_data = json.load(f)
                categories.update(challenge_data.get("category", []))
            except json.JSONDecodeError:
                logger.error(f"Error: {data_file} is not a valid JSON file.")
                continue
            except IOError:
                logger.error(f"IOError: file could not be read: {data_file}")
                continue

    return categories


__all__ = [
    "BaseChallenge",
    "ChallengeInfo",
    "get_unique_categories",
    "OPTIONAL_CATEGORIES",
]
@@ -1 +0,0 @@
Hello World!