Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-13 00:05:02 -05:00

Compare commits: pwuts/spee ... make-old-w (94 commits)
Commits (SHA1):
b0754958f0, e8c50b96d1, 30e854569a, 301d7cbada, d437e756e9, 9622ba8cbb, 053b92e72c, 24b38f2cdd,
1480183c47, 711f0da63c, d95aef7665, cb166dd6fb, 3d31f62bf1, b8b6c9de23, 4f6055f494, 695a185fa1,
113e87a23c, d09f1532a4, ac7de17eb4, f56abcef4f, 6210b3259d, 60f506add9, b3f35953ed, d8d87f2853,
791e1d8982, 0040636948, c671af851f, 7dd181f4b0, 114856cef1, 68b9bd0c51, ff076b1f15, 57fbab500b,
6faabef24d, a67d475a69, 326554d89a, 5e22a1888a, a4d7b0142f, 7d6375f59c, aeec0ce509, b32bfcaac5,
5373a6eb6e, 98cde46ccb, bd10da10d9, 60fdee1345, 6f2783468c, c1031b286d, b849eafb7f, 572c3f5e0d,
89003a585d, 0e65785228, f07dff1cdd, 00e02a4696, 634bff8277, d591f36c7b, a347bed0b1, 4eeb6ee2b0,
7db962b9f9, 9108b21541, ffe9325296, 0a616d9267, ab95077e5b, e477150979, 804430e243, acb320d32d,
32f68d5999, 49f56b4e8d, bead811e73, 013f728ebf, cda9572acd, e0784f8f6b, 3040f39136, 515504c604,
18edeaeaf4, 44182aff9c, 864c5a7846, 699fffb1a8, f0641c2d26, 94b6f74c95, 46aabab3ea, 0a65df5102,
6fbd208fe3, 8fc174ca87, cacc89790f, b9113bee02, 3f65da03e7, 9e96d11b2d, 4c264b7ae9, 0adbc0bd05,
8f3291bc92, 7a20de880d, ef8a6d2528, fd66be2aaa, ae2cc97dc4, ea521eed26
.claude/settings.json (new file, 10 lines)

@@ -0,0 +1,10 @@
+{
+  "permissions": {
+    "allowedTools": [
+      "Read", "Grep", "Glob",
+      "Bash(ls:*)", "Bash(cat:*)", "Bash(grep:*)", "Bash(find:*)",
+      "Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)", "Bash(git worktree:*)",
+      "Bash(tmux:*)", "Bash(sleep:*)", "Bash(branchlet:*)"
+    ]
+  }
+}
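The allowlist above limits the assistant to read-only inspection tools plus Bash invocations matching specific command prefixes. As a rough, hypothetical illustration only (the exact pattern-matching semantics belong to Claude Code and are not spelled out in this diff), commands like these would fall under the listed Bash(...) entries, while anything unlisted, e.g. git push or rm, would still need explicit approval:

# Hypothetical examples of commands covered by the allowlist above
ls -la autogpt_platform/              # Bash(ls:*)
git status --short                    # Bash(git status:*)
git diff HEAD~1 -- classic/           # Bash(git diff:*)
grep -rn "direct-benchmark" .github/  # Bash(grep:*)
tmux list-sessions                    # Bash(tmux:*)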
@@ -5,13 +5,42 @@
 !docs/

 # Platform - Libs
-!autogpt_platform/autogpt_libs/
+!autogpt_platform/autogpt_libs/autogpt_libs/
+!autogpt_platform/autogpt_libs/pyproject.toml
+!autogpt_platform/autogpt_libs/poetry.lock
+!autogpt_platform/autogpt_libs/README.md

 # Platform - Backend
-!autogpt_platform/backend/
+!autogpt_platform/backend/backend/
+!autogpt_platform/backend/test/e2e_test_data.py
+!autogpt_platform/backend/migrations/
+!autogpt_platform/backend/schema.prisma
+!autogpt_platform/backend/pyproject.toml
+!autogpt_platform/backend/poetry.lock
+!autogpt_platform/backend/README.md
+!autogpt_platform/backend/.env
+!autogpt_platform/backend/gen_prisma_types_stub.py

+# Platform - Market
+!autogpt_platform/market/market/
+!autogpt_platform/market/scripts.py
+!autogpt_platform/market/schema.prisma
+!autogpt_platform/market/pyproject.toml
+!autogpt_platform/market/poetry.lock
+!autogpt_platform/market/README.md
+
 # Platform - Frontend
-!autogpt_platform/frontend/
+!autogpt_platform/frontend/src/
+!autogpt_platform/frontend/public/
+!autogpt_platform/frontend/scripts/
+!autogpt_platform/frontend/package.json
+!autogpt_platform/frontend/pnpm-lock.yaml
+!autogpt_platform/frontend/tsconfig.json
+!autogpt_platform/frontend/README.md
+## config
+!autogpt_platform/frontend/*.config.*
+!autogpt_platform/frontend/.env.*
+!autogpt_platform/frontend/.env

 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
@@ -35,38 +64,6 @@
 # Classic - Frontend
 !classic/frontend/build/web/

-# Explicitly re-ignore unwanted files from whitelisted directories
-# Note: These patterns MUST come after the whitelist rules to take effect
-# Hidden files and directories (but keep frontend .env files needed for build)
-**/.*
-!autogpt_platform/frontend/.env
-!autogpt_platform/frontend/.env.default
-!autogpt_platform/frontend/.env.production
-
-# Python artifacts
-**/__pycache__/
-**/*.pyc
-**/*.pyo
-**/.venv/
-**/.ruff_cache/
-**/.pytest_cache/
-**/.coverage
-**/htmlcov/
-
-# Node artifacts
-**/node_modules/
-**/.next/
-**/storybook-static/
-**/playwright-report/
-**/test-results/
-
-# Build artifacts
-**/dist/
-**/build/
-!autogpt_platform/frontend/src/**/build/
-**/target/
-
-# Logs and temp files
-**/*.log
-**/*.tmp
+# Explicitly re-ignore some folders
+.*
+**/__pycache__
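Both the old and new re-ignore blocks rely on ordering: in a whitelist-style ignore file, later patterns override earlier `!` entries, which is why the re-ignore rules sit at the end (as the removed comment noted). Assuming this hunk belongs to a .dockerignore (its file header is missing in this view), one throwaway way to verify what actually ends up in the Docker build context after these rules is to build a scratch image that copies the context and lists it. Illustrative only, not part of this change; the Dockerfile name is hypothetical:

# Write a one-off Dockerfile that copies the build context and lists it
cat > /tmp/context-check.Dockerfile <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx -maxdepth 2 | sort
EOF
# Build from the repo root; the RUN step prints the files that survived the ignore rules
docker build -f /tmp/context-check.Dockerfile --no-cache --progress=plain .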
.github/workflows/classic-autogpt-ci.yml (vendored, 74 changed lines)

@@ -6,11 +6,15 @@ on:
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/forge/**'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/forge/**'

 concurrency:
   group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -19,47 +23,22 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic/original_autogpt
+    working-directory: classic

 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.10"]
-        platform-os: [ubuntu, macos, macos-arm64, windows]
-    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
+    runs-on: ubuntu-latest

     steps:
-      # Quite slow on macOS (2~4 minutes to set up Docker)
-      # - name: Set up Docker (macOS)
-      #   if: runner.os == 'macOS'
-      #   uses: crazy-max/ghaction-setup-docker@v3
-
-      - name: Start MinIO service (Linux)
-        if: runner.os == 'Linux'
+      - name: Start MinIO service
         working-directory: '.'
         run: |
          docker pull minio/minio:edge-cicd
          docker run -d -p 9000:9000 minio/minio:edge-cicd

-      - name: Start MinIO service (macOS)
-        if: runner.os == 'macOS'
-        working-directory: ${{ runner.temp }}
-        run: |
-          brew install minio/stable/minio
-          mkdir data
-          minio server ./data &
-
-      # No MinIO on Windows:
-      # - Windows doesn't support running Linux Docker containers
-      # - It doesn't seem possible to start background processes on Windows. They are
-      #   killed after the step returns.
-      # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
-
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
@@ -71,41 +50,23 @@ jobs:
           git config --global user.name "Auto-GPT-Bot"
           git config --global user.email "github-bot@agpt.co"

-      - name: Set up Python ${{ matrix.python-version }}
+      - name: Set up Python 3.12
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: "3.12"

       - id: get_date
         name: Get date
         run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

       - name: Set up Python dependency cache
-        # On Windows, unpacking cached dependencies takes longer than just installing them
-        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}

-      - name: Install Poetry (Unix)
-        if: runner.os != 'Windows'
-        run: |
-          curl -sSL https://install.python-poetry.org | python3 -
-
-          if [ "${{ runner.os }}" = "macOS" ]; then
-            PATH="$HOME/.local/bin:$PATH"
-            echo "$HOME/.local/bin" >> $GITHUB_PATH
-          fi
-
-      - name: Install Poetry (Windows)
-        if: runner.os == 'Windows'
-        shell: pwsh
-        run: |
-          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
-
-          $env:PATH += ";$env:APPDATA\Python\Scripts"
-          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
+      - name: Install Poetry
+        run: curl -sSL https://install.python-poetry.org | python3 -

       - name: Install Python dependencies
         run: poetry install
@@ -116,12 +77,13 @@ jobs:
           --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
           --numprocesses=logical --durations=10 \
           --junitxml=junit.xml -o junit_family=legacy \
-          tests/unit tests/integration
+          original_autogpt/tests/unit original_autogpt/tests/integration
         env:
           CI: true
           PLAIN_OUTPUT: True
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          S3_ENDPOINT_URL: http://127.0.0.1:9000
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin

@@ -135,11 +97,11 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: autogpt-agent,${{ runner.os }}
+          flags: autogpt-agent

       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/original_autogpt/logs/
+          path: classic/logs/
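For local debugging, the MinIO-backed S3 setup this job relies on can be reproduced with the same image, port, and throwaway credentials the workflow exports; the pytest arguments below are taken from the job's test step (a sketch, not an officially documented workflow):

# Start MinIO exactly as the Linux CI step does
docker run -d -p 9000:9000 minio/minio:edge-cicd
export S3_ENDPOINT_URL=http://127.0.0.1:9000
export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin

# Run the same test selection from the consolidated classic/ workspace
cd classic && poetry install
poetry run pytest -vv original_autogpt/tests/unit original_autogpt/tests/integration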
.github/workflows/classic-autogpts-ci.yml (vendored, 36 changed lines)

@@ -11,9 +11,6 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
-      - 'classic/run'
-      - 'classic/cli.py'
-      - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
     branches: [ master, dev, release-* ]
@@ -22,9 +19,6 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
-      - 'classic/run'
-      - 'classic/cli.py'
-      - 'classic/setup.py'
       - '!**/*.md'

 defaults:
@@ -35,13 +29,9 @@ defaults:
 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        agent-name: [ original_autogpt ]
-      fail-fast: false
     timeout-minutes: 20
     env:
-      min-python-version: '3.10'
+      min-python-version: '3.12'
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -55,22 +45,22 @@ jobs:
           python-version: ${{ env.min-python-version }}

       - name: Install Poetry
-        working-directory: ./classic/${{ matrix.agent-name }}/
         run: |
           curl -sSL https://install.python-poetry.org | python -

-      - name: Run regression tests
+      - name: Install dependencies
+        run: poetry install
+
+      - name: Run smoke tests with direct-benchmark
         run: |
-          ./run agent start ${{ matrix.agent-name }}
-          cd ${{ matrix.agent-name }}
-          poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
-          poetry run agbenchmark --test=WriteFile
+          poetry run direct-benchmark run \
+            --strategies one_shot \
+            --models claude \
+            --tests ReadFile,WriteFile \
+            --json
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          AGENT_NAME: ${{ matrix.agent-name }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
-          HELICONE_CACHE_ENABLED: false
-          HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
-          REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
-          TELEMETRY_ENVIRONMENT: autogpt-ci
-          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
+          NONINTERACTIVE_MODE: "true"
+          CI: true
.github/workflows/classic-benchmark-ci.yml (vendored, 194 changed lines)

@@ -1,17 +1,21 @@
-name: Classic - AGBenchmark CI
+name: Classic - Direct Benchmark CI

 on:
   push:
     branches: [ master, dev, ci-test* ]
     paths:
-      - 'classic/benchmark/**'
-      - '!classic/benchmark/reports/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/benchmark/agbenchmark/challenges/**'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
       - .github/workflows/classic-benchmark-ci.yml
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
-      - 'classic/benchmark/**'
-      - '!classic/benchmark/reports/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/benchmark/agbenchmark/challenges/**'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
       - .github/workflows/classic-benchmark-ci.yml

 concurrency:
@@ -23,23 +27,16 @@ defaults:
     shell: bash

 env:
-  min-python-version: '3.10'
+  min-python-version: '3.12'

 jobs:
-  test:
-    permissions:
-      contents: read
+  benchmark-tests:
+    runs-on: ubuntu-latest
     timeout-minutes: 30
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.10"]
-        platform-os: [ubuntu, macos, macos-arm64, windows]
-    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
     defaults:
       run:
         shell: bash
-        working-directory: classic/benchmark
+        working-directory: classic
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -47,71 +44,88 @@ jobs:
           fetch-depth: 0
           submodules: true

-      - name: Set up Python ${{ matrix.python-version }}
+      - name: Set up Python ${{ env.min-python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ env.min-python-version }}

       - name: Set up Python dependency cache
-        # On Windows, unpacking cached dependencies takes longer than just installing them
-        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}

-      - name: Install Poetry (Unix)
-        if: runner.os != 'Windows'
+      - name: Install Poetry
         run: |
           curl -sSL https://install.python-poetry.org | python3 -

-          if [ "${{ runner.os }}" = "macOS" ]; then
-            PATH="$HOME/.local/bin:$PATH"
-            echo "$HOME/.local/bin" >> $GITHUB_PATH
-          fi
-
-      - name: Install Poetry (Windows)
-        if: runner.os == 'Windows'
-        shell: pwsh
-        run: |
-          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
-
-          $env:PATH += ";$env:APPDATA\Python\Scripts"
-          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
-
-      - name: Install Python dependencies
+      - name: Install dependencies
         run: poetry install

-      - name: Run pytest with coverage
+      - name: Run basic benchmark tests
         run: |
-          poetry run pytest -vv \
-            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
-            --durations=10 \
-            --junitxml=junit.xml -o junit_family=legacy \
-            tests
+          echo "Testing ReadFile challenge with one_shot strategy..."
+          poetry run direct-benchmark run \
+            --fresh \
+            --strategies one_shot \
+            --models claude \
+            --tests ReadFile \
+            --json
+
+          echo "Testing WriteFile challenge..."
+          poetry run direct-benchmark run \
+            --fresh \
+            --strategies one_shot \
+            --models claude \
+            --tests WriteFile \
+            --json
         env:
           CI: true
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          NONINTERACTIVE_MODE: "true"

-      - name: Upload test results to Codecov
-        if: ${{ !cancelled() }} # Run even if tests fail
-        uses: codecov/test-results-action@v1
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
+      - name: Test category filtering
+        run: |
+          echo "Testing coding category..."
+          poetry run direct-benchmark run \
+            --fresh \
+            --strategies one_shot \
+            --models claude \
+            --categories coding \
+            --tests ReadFile,WriteFile \
+            --json
+        env:
+          CI: true
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          NONINTERACTIVE_MODE: "true"

-      - name: Upload coverage reports to Codecov
-        uses: codecov/codecov-action@v5
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          flags: agbenchmark,${{ runner.os }}
+      - name: Test multiple strategies
+        run: |
+          echo "Testing multiple strategies..."
+          poetry run direct-benchmark run \
+            --fresh \
+            --strategies one_shot,plan_execute \
+            --models claude \
+            --tests ReadFile \
+            --parallel 2 \
+            --json
+        env:
+          CI: true
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          NONINTERACTIVE_MODE: "true"

-  self-test-with-agent:
+  # Run regression tests on maintain challenges
+  regression-tests:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        agent-name: [forge]
-      fail-fast: false
-    timeout-minutes: 20
+    timeout-minutes: 45
+    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev'
+    defaults:
+      run:
+        shell: bash
+        working-directory: classic
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -126,51 +140,23 @@ jobs:

       - name: Install Poetry
         run: |
-          curl -sSL https://install.python-poetry.org | python -
+          curl -sSL https://install.python-poetry.org | python3 -

+      - name: Install dependencies
+        run: poetry install
+
       - name: Run regression tests
-        working-directory: classic
         run: |
-          ./run agent start ${{ matrix.agent-name }}
-          cd ${{ matrix.agent-name }}
-
-          set +e # Ignore non-zero exit codes and continue execution
-          echo "Running the following command: poetry run agbenchmark --maintain --mock"
-          poetry run agbenchmark --maintain --mock
-          EXIT_CODE=$?
-          set -e # Stop ignoring non-zero exit codes
-          # Check if the exit code was 5, and if so, exit with 0 instead
-          if [ $EXIT_CODE -eq 5 ]; then
-            echo "regression_tests.json is empty."
-          fi
-
-          echo "Running the following command: poetry run agbenchmark --mock"
-          poetry run agbenchmark --mock
-
-          echo "Running the following command: poetry run agbenchmark --mock --category=data"
-          poetry run agbenchmark --mock --category=data
-
-          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
-          poetry run agbenchmark --mock --category=coding
-
-          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
-          # poetry run agbenchmark --test=WriteFile
-          cd ../benchmark
-          poetry install
-          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
-          export BUILD_SKILL_TREE=true
-
-          # poetry run agbenchmark --mock
-
-          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
-          # if [ ! -z "$CHANGED" ]; then
-          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
-          #   echo "$CHANGED"
-          #   exit 1
-          # else
-          #   echo "No unstaged changes."
-          # fi
+          echo "Running regression tests (previously beaten challenges)..."
+          poetry run direct-benchmark run \
+            --fresh \
+            --strategies one_shot \
+            --models claude \
+            --maintain \
+            --parallel 4 \
+            --json
         env:
+          CI: true
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
-          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
+          NONINTERACTIVE_MODE: "true"
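Assuming the direct-benchmark CLI behaves as it is invoked in the steps above, a minimal local smoke run mirroring the "Run basic benchmark tests" step would look roughly like this (the API key value is a placeholder):

# Environment matching the workflow's env block
export ANTHROPIC_API_KEY=...     # required for --models claude
export NONINTERACTIVE_MODE=true
export CI=true

cd classic && poetry install
# Same flags as the CI step: fresh run, one challenge, JSON output
poetry run direct-benchmark run --fresh --strategies one_shot --models claude --tests ReadFile --json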
.github/workflows/classic-forge-ci.yml (vendored, 185 changed lines)

@@ -6,13 +6,11 @@ on:
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
-      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
-      - '!classic/forge/tests/vcr_cassettes'

 concurrency:
   group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -21,131 +19,60 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic/forge
+    working-directory: classic

 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.10"]
-        platform-os: [ubuntu, macos, macos-arm64, windows]
-    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
+    runs-on: ubuntu-latest

     steps:
-      # Quite slow on macOS (2~4 minutes to set up Docker)
-      # - name: Set up Docker (macOS)
-      #   if: runner.os == 'macOS'
-      #   uses: crazy-max/ghaction-setup-docker@v3
-
-      - name: Start MinIO service (Linux)
-        if: runner.os == 'Linux'
+      - name: Start MinIO service
         working-directory: '.'
         run: |
           docker pull minio/minio:edge-cicd
           docker run -d -p 9000:9000 minio/minio:edge-cicd

-      - name: Start MinIO service (macOS)
-        if: runner.os == 'macOS'
-        working-directory: ${{ runner.temp }}
-        run: |
-          brew install minio/stable/minio
-          mkdir data
-          minio server ./data &
-
-      # No MinIO on Windows:
-      # - Windows doesn't support running Linux Docker containers
-      # - It doesn't seem possible to start background processes on Windows. They are
-      #   killed after the step returns.
-      # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
-
       - name: Checkout repository
         uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          submodules: true
-
-      - name: Checkout cassettes
-        if: ${{ startsWith(github.event_name, 'pull_request') }}
-        env:
-          PR_BASE: ${{ github.event.pull_request.base.ref }}
-          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
-          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
-        run: |
-          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
-          cassette_base_branch="${PR_BASE}"
-          cd tests/vcr_cassettes
-
-          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
-            cassette_base_branch="master"
-          fi
-
-          if git ls-remote --exit-code --heads origin $cassette_branch ; then
-            git fetch origin $cassette_branch
-            git fetch origin $cassette_base_branch
-
-            git checkout $cassette_branch
-
-            # Pick non-conflicting cassette updates from the base branch
-            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
-            echo "Using cassettes from mirror branch '$cassette_branch'," \
-              "synced to upstream branch '$cassette_base_branch'."
-          else
-            git checkout -b $cassette_branch
-            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
-              "Using cassettes from '$cassette_base_branch'."
-          fi
-
-      - name: Set up Python ${{ matrix.python-version }}
+      - name: Set up Python 3.12
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: "3.12"

       - name: Set up Python dependency cache
-        # On Windows, unpacking cached dependencies takes longer than just installing them
-        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}

-      - name: Install Poetry (Unix)
-        if: runner.os != 'Windows'
-        run: |
-          curl -sSL https://install.python-poetry.org | python3 -
-
-          if [ "${{ runner.os }}" = "macOS" ]; then
-            PATH="$HOME/.local/bin:$PATH"
-            echo "$HOME/.local/bin" >> $GITHUB_PATH
-          fi
-
-      - name: Install Poetry (Windows)
-        if: runner.os == 'Windows'
-        shell: pwsh
-        run: |
-          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
-
-          $env:PATH += ";$env:APPDATA\Python\Scripts"
-          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
+      - name: Install Poetry
+        run: curl -sSL https://install.python-poetry.org | python3 -

       - name: Install Python dependencies
         run: poetry install

+      - name: Install Playwright browsers
+        run: poetry run playwright install chromium
+
       - name: Run pytest with coverage
         run: |
           poetry run pytest -vv \
             --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
             --durations=10 \
             --junitxml=junit.xml -o junit_family=legacy \
-            forge
+            forge/forge forge/tests
         env:
           CI: true
           PLAIN_OUTPUT: True
+          # API keys - tests that need these will skip if not available
+          # Secrets are not available to fork PRs (GitHub security feature)
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          S3_ENDPOINT_URL: http://127.0.0.1:9000
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin

@@ -159,85 +86,11 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: forge,${{ runner.os }}
+          flags: forge

-      - id: setup_git_auth
-        name: Set up git token authentication
-        # Cassettes may be pushed even when tests fail
-        if: success() || failure()
-        run: |
-          config_key="http.${{ github.server_url }}/.extraheader"
-          if [ "${{ runner.os }}" = 'macOS' ]; then
-            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
-          else
-            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
-          fi
-
-          git config "$config_key" \
-            "Authorization: Basic $base64_pat"
-
-          cd tests/vcr_cassettes
-          git config "$config_key" \
-            "Authorization: Basic $base64_pat"
-
-          echo "config_key=$config_key" >> $GITHUB_OUTPUT
-
-      - id: push_cassettes
-        name: Push updated cassettes
-        # For pull requests, push updated cassettes even when tests fail
-        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
-        env:
-          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
-          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
-        run: |
-          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
-            is_pull_request=true
-            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
-          else
-            cassette_branch="${{ github.ref_name }}"
-          fi
-
-          cd tests/vcr_cassettes
-          # Commit & push changes to cassettes if any
-          if ! git diff --quiet; then
-            git add .
-            git commit -m "Auto-update cassettes"
-            git push origin HEAD:$cassette_branch
-            if [ ! $is_pull_request ]; then
-              cd ../..
-              git add tests/vcr_cassettes
-              git commit -m "Update cassette submodule"
-              git push origin HEAD:$cassette_branch
-            fi
-            echo "updated=true" >> $GITHUB_OUTPUT
-          else
-            echo "updated=false" >> $GITHUB_OUTPUT
-            echo "No cassette changes to commit"
-          fi
-
-      - name: Post Set up git token auth
-        if: steps.setup_git_auth.outcome == 'success'
-        run: |
-          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
-          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
-
-      - name: Apply "behaviour change" label and comment on PR
-        if: ${{ startsWith(github.event_name, 'pull_request') }}
-        run: |
-          PR_NUMBER="${{ github.event.pull_request.number }}"
-          TOKEN="${{ secrets.PAT_REVIEW }}"
-          REPO="${{ github.repository }}"
-
-          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
-            echo "Adding label and comment..."
-            echo $TOKEN | gh auth login --with-token
-            gh issue edit $PR_NUMBER --add-label "behaviour change"
-            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
-          fi
-
       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/forge/logs/
+          path: classic/logs/
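A local equivalent of the simplified forge test job, using the same commands as the steps above, would be roughly the following sketch; MinIO and the dummy S3 credentials are only needed by the S3-dependent tests:

cd classic && poetry install
poetry run playwright install chromium

# Optional: S3-backed tests expect a local MinIO, as in the CI step
docker run -d -p 9000:9000 minio/minio:edge-cicd
S3_ENDPOINT_URL=http://127.0.0.1:9000 AWS_ACCESS_KEY_ID=minioadmin AWS_SECRET_ACCESS_KEY=minioadmin \
  poetry run pytest -vv forge/forge forge/tests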
.github/workflows/classic-frontend-ci.yml (vendored, 60 lines, file deleted)

@@ -1,60 +0,0 @@
-name: Classic - Frontend CI/CD
-
-on:
-  push:
-    branches:
-      - master
-      - dev
-      - 'ci-test*' # This will match any branch that starts with "ci-test"
-    paths:
-      - 'classic/frontend/**'
-      - '.github/workflows/classic-frontend-ci.yml'
-  pull_request:
-    paths:
-      - 'classic/frontend/**'
-      - '.github/workflows/classic-frontend-ci.yml'
-
-jobs:
-  build:
-    permissions:
-      contents: write
-      pull-requests: write
-    runs-on: ubuntu-latest
-    env:
-      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
-
-    steps:
-      - name: Checkout Repo
-        uses: actions/checkout@v4
-
-      - name: Setup Flutter
-        uses: subosito/flutter-action@v2
-        with:
-          flutter-version: '3.13.2'
-
-      - name: Build Flutter to Web
-        run: |
-          cd classic/frontend
-          flutter build web --base-href /app/
-
-      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
-      #   if: github.event_name == 'push'
-      #   run: |
-      #     git config --local user.email "action@github.com"
-      #     git config --local user.name "GitHub Action"
-      #     git add classic/frontend/build/web
-      #     git checkout -B ${{ env.BUILD_BRANCH }}
-      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
-      #     git push -f origin ${{ env.BUILD_BRANCH }}
-
-      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
-        if: github.event_name == 'push'
-        uses: peter-evans/create-pull-request@v8
-        with:
-          add-paths: classic/frontend/build/web
-          base: ${{ github.ref_name }}
-          branch: ${{ env.BUILD_BRANCH }}
-          delete-branch: true
-          title: "Update frontend build in `${{ github.ref_name }}`"
-          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
-          commit-message: "Update frontend build based on commit ${{ github.sha }}"
.github/workflows/classic-python-checks.yml (vendored, 67 changed lines)

@@ -7,7 +7,9 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/benchmark/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/pyproject.toml'
+      - 'classic/poetry.lock'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
   pull_request:
@@ -16,7 +18,9 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/benchmark/**'
+      - 'classic/direct_benchmark/**'
+      - 'classic/pyproject.toml'
+      - 'classic/poetry.lock'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'

@@ -27,44 +31,13 @@ concurrency:
 defaults:
   run:
     shell: bash
+    working-directory: classic

 jobs:
-  get-changed-parts:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - id: changes-in
-        name: Determine affected subprojects
-        uses: dorny/paths-filter@v3
-        with:
-          filters: |
-            original_autogpt:
-              - classic/original_autogpt/autogpt/**
-              - classic/original_autogpt/tests/**
-              - classic/original_autogpt/poetry.lock
-            forge:
-              - classic/forge/forge/**
-              - classic/forge/tests/**
-              - classic/forge/poetry.lock
-            benchmark:
-              - classic/benchmark/agbenchmark/**
-              - classic/benchmark/tests/**
-              - classic/benchmark/poetry.lock
-    outputs:
-      changed-parts: ${{ steps.changes-in.outputs.changes }}
-
   lint:
-    needs: get-changed-parts
     runs-on: ubuntu-latest
     env:
-      min-python-version: "3.10"
+      min-python-version: "3.12"

-    strategy:
-      matrix:
-        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
-      fail-fast: false
-
     steps:
       - name: Checkout repository
@@ -81,42 +54,31 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}

       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -

-      # Install dependencies
-
       - name: Install Python dependencies
-        run: poetry -C classic/${{ matrix.sub-package }} install
+        run: poetry install

       # Lint

       - name: Lint (isort)
         run: poetry run isort --check .
-        working-directory: classic/${{ matrix.sub-package }}

       - name: Lint (Black)
         if: success() || failure()
         run: poetry run black --check .
-        working-directory: classic/${{ matrix.sub-package }}

       - name: Lint (Flake8)
         if: success() || failure()
         run: poetry run flake8 .
-        working-directory: classic/${{ matrix.sub-package }}

   types:
-    needs: get-changed-parts
     runs-on: ubuntu-latest
     env:
-      min-python-version: "3.10"
+      min-python-version: "3.12"

-    strategy:
-      matrix:
-        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
-      fail-fast: false
-
     steps:
       - name: Checkout repository
@@ -133,19 +95,16 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}

       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -

-      # Install dependencies
-
       - name: Install Python dependencies
-        run: poetry -C classic/${{ matrix.sub-package }} install
+        run: poetry install

       # Typecheck

       - name: Typecheck
         if: success() || failure()
         run: poetry run pyright
-        working-directory: classic/${{ matrix.sub-package }}
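Because lint and typecheck now run from the consolidated classic/ workspace against a single poetry.lock instead of per-sub-package matrices, the CI steps above correspond roughly to this local sequence (a sketch using the same tools the workflow invokes):

cd classic
poetry install
poetry run isort --check .    # import ordering
poetry run black --check .    # formatting
poetry run flake8 .           # lint
poetry run pyright            # type check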
.github/workflows/platform-frontend-ci.yml (vendored, 249 changed lines)

@@ -26,6 +26,7 @@ jobs:
   setup:
     runs-on: ubuntu-latest
     outputs:
+      cache-key: ${{ steps.cache-key.outputs.key }}
       components-changed: ${{ steps.filter.outputs.components }}

     steps:
@@ -40,17 +41,28 @@ jobs:
             components:
               - 'autogpt_platform/frontend/src/components/**'

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

-      - name: Install dependencies to populate cache
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Generate cache key
+        id: cache-key
+        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
+
+      - name: Cache dependencies
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ steps.cache-key.outputs.key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-
+
+      - name: Install dependencies
         run: pnpm install --frozen-lockfile

 lint:
@@ -61,15 +73,22 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v6

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -92,15 +111,22 @@ jobs:
         with:
           fetch-depth: 0

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -115,8 +141,10 @@ jobs:
           exitOnceUploaded: true

   e2e_test:
-    name: end-to-end tests
     runs-on: big-boi
+    needs: setup
+    strategy:
+      fail-fast: false

     steps:
       - name: Checkout repository
@@ -124,11 +152,19 @@ jobs:
         with:
           submodules: recursive

-      - name: Set up Platform - Copy default supabase .env
+      - name: Set up Node.js
+        uses: actions/setup-node@v6
+        with:
+          node-version: "22.18.0"
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Copy default supabase .env
         run: |
           cp ../.env.default ../.env

-      - name: Set up Platform - Copy backend .env and set OpenAI API key
+      - name: Copy backend .env and set OpenAI API key
         run: |
           cp ../backend/.env.default ../backend/.env
           echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -136,125 +172,77 @@ jobs:
           # Used by E2E test data script to generate embeddings for approved store agents
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

-      - name: Set up Platform - Set up Docker Buildx
+      - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
-        with:
-          driver: docker-container
-          driver-opts: network=host

-      - name: Set up Platform - Expose GHA cache to docker buildx CLI
-        uses: crazy-max/ghaction-github-runtime@v3
-
-      - name: Set up Platform - Build Docker images (with cache)
-        working-directory: autogpt_platform
-        run: |
-          pip install pyyaml
-
-          # Resolve extends and generate a flat compose file that bake can understand
-          docker compose -f docker-compose.yml config > docker-compose.resolved.yml
-
-          # Add cache configuration to the resolved compose file
-          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
-            --source docker-compose.resolved.yml \
-            --cache-from "type=gha" \
-            --cache-to "type=gha,mode=max" \
-            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
-            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
-            --git-ref "${{ github.ref }}"
-
-          # Build with bake using the resolved compose file (now includes cache config)
-          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
-        env:
-          NEXT_PUBLIC_PW_TEST: true
-
-      - name: Set up tests - Cache E2E test data
-        id: e2e-data-cache
+      - name: Cache Docker layers
         uses: actions/cache@v5
         with:
-          path: /tmp/e2e_test_data.sql
+          path: /tmp/.buildx-cache
-          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
+          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-frontend-test-

-      - name: Set up Platform - Start Supabase DB + Auth
-        run: |
-          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
-          echo "Waiting for database to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
-          echo "Waiting for auth service to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
-
-      - name: Set up Platform - Run migrations
-        run: |
-          echo "Running migrations..."
-          docker compose -f ../docker-compose.resolved.yml run --rm migrate
-          echo "✅ Migrations completed"
-        env:
-          NEXT_PUBLIC_PW_TEST: true
-
-      - name: Set up tests - Load cached E2E test data
-        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
-        run: |
-          echo "✅ Found cached E2E test data, restoring..."
-          {
-            echo "SET session_replication_role = 'replica';"
-            cat /tmp/e2e_test_data.sql
-            echo "SET session_replication_role = 'origin';"
-          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
-          # Refresh materialized views after restore
-          docker compose -f ../docker-compose.resolved.yml exec -T db \
-            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
-
-          echo "✅ E2E test data restored from cache"
-
-      - name: Set up Platform - Start (all other services)
-        run: |
-          docker compose -f ../docker-compose.resolved.yml up -d --no-build
-          echo "Waiting for rest_server to be ready..."
-          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
-        env:
-          NEXT_PUBLIC_PW_TEST: true
+      - name: Run docker compose
+        run: |
+          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
+        env:
+          DOCKER_BUILDKIT: 1
+          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
+          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+      - name: Move cache
+        run: |
+          rm -rf /tmp/.buildx-cache
+          if [ -d "/tmp/.buildx-cache-new" ]; then
+            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+          fi
+
+      - name: Wait for services to be ready
+        run: |
+          echo "Waiting for rest_server to be ready..."
+          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
+          echo "Waiting for database to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

-      - name: Set up tests - Create E2E test data
-        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
+      - name: Create E2E test data
         run: |
           echo "Creating E2E test data..."
-          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
-          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
-            echo "❌ E2E test data creation failed!"
-            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
-            exit 1
-          }
-
-          # Dump auth.users + platform schema for cache (two separate dumps)
-          echo "Dumping database for cache..."
-          {
-            docker compose -f ../docker-compose.resolved.yml exec -T db \
-              pg_dump -U postgres --data-only --column-inserts \
-              --table='auth.users' postgres
-            docker compose -f ../docker-compose.resolved.yml exec -T db \
-              pg_dump -U postgres --data-only --column-inserts \
-              --schema=platform \
-              --exclude-table='platform._prisma_migrations' \
-              --exclude-table='platform.apscheduler_jobs' \
-              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
-              postgres
-          } > /tmp/e2e_test_data.sql
-
-          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
+          # First try to run the script from inside the container
+          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
+            echo "✅ Found e2e_test_data.py in container, running it..."
+            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
+              echo "❌ E2E test data creation failed!"
+              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+              exit 1
+            }
+          else
+            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
+            # Copy the script into the container and run it
+            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
+              echo "❌ Failed to copy script to container"
+              exit 1
+            }
+            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
+              echo "❌ E2E test data creation failed!"
+              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+              exit 1
+            }
+          fi
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
|
||||||
|
|
||||||
- name: Set up tests - Enable corepack
|
|
||||||
run: corepack enable
|
|
||||||
|
|
||||||
- name: Set up tests - Set up Node
|
|
||||||
uses: actions/setup-node@v6
|
|
||||||
with:
|
with:
|
||||||
node-version: "22.18.0"
|
path: ~/.pnpm-store
|
||||||
cache: "pnpm"
|
key: ${{ needs.setup.outputs.cache-key }}
|
||||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||||
|
${{ runner.os }}-pnpm-
|
||||||
|
|
||||||
- name: Set up tests - Install dependencies
|
- name: Install dependencies
|
||||||
run: pnpm install --frozen-lockfile
|
run: pnpm install --frozen-lockfile
|
||||||
|
|
||||||
- name: Set up tests - Install browser 'chromium'
|
- name: Install Browser 'chromium'
|
||||||
run: pnpm playwright install --with-deps chromium
|
run: pnpm playwright install --with-deps chromium
|
||||||
|
|
||||||
- name: Run Playwright tests
|
- name: Run Playwright tests
|
||||||
@@ -281,7 +269,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Print Final Docker Compose logs
|
- name: Print Final Docker Compose logs
|
||||||
if: always()
|
if: always()
|
||||||
run: docker compose -f ../docker-compose.resolved.yml logs
|
run: docker compose -f ../docker-compose.yml logs
|
||||||
|
|
||||||
integration_test:
|
integration_test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -293,15 +281,22 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
- name: Enable corepack
|
- name: Set up Node.js
|
||||||
run: corepack enable
|
|
||||||
|
|
||||||
- name: Set up Node
|
|
||||||
uses: actions/setup-node@v6
|
uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: "22.18.0"
|
node-version: "22.18.0"
|
||||||
cache: "pnpm"
|
|
||||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
- name: Enable corepack
|
||||||
|
run: corepack enable
|
||||||
|
|
||||||
|
- name: Restore dependencies cache
|
||||||
|
uses: actions/cache@v5
|
||||||
|
with:
|
||||||
|
path: ~/.pnpm-store
|
||||||
|
key: ${{ needs.setup.outputs.cache-key }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||||
|
${{ runner.os }}-pnpm-
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: pnpm install --frozen-lockfile
|
run: pnpm install --frozen-lockfile
|
||||||
|
|||||||
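Note: one side of the workflow diff above restores a cached data-only dump by toggling session_replication_role around the load, which lets rows insert in any order regardless of foreign keys. A minimal Python sketch of that restore idea, assuming a dump at /tmp/e2e_test_data.sql and the compose db service used above (illustrative only, not part of the workflow):

import subprocess

def restore_e2e_dump(dump_path: str = "/tmp/e2e_test_data.sql") -> None:
    """Pipe the cached data-only dump into psql inside the db container."""
    with open(dump_path) as f:
        dump_sql = f.read()
    # 'replica' suppresses triggers and FK checks so rows can load in any order;
    # 'origin' restores normal behaviour afterwards.
    script = (
        "SET session_replication_role = 'replica';\n"
        + dump_sql
        + "\nSET session_replication_role = 'origin';\n"
    )
    subprocess.run(
        ["docker", "compose", "-f", "../docker-compose.resolved.yml",
         "exec", "-T", "db", "psql", "-U", "postgres", "-d", "postgres", "-b"],
        input=script,
        text=True,
        check=True,
    )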
@@ -1,195 +0,0 @@
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""

import argparse

import yaml

DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []

    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)

        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)

        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name

        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []

        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue

        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123

        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")

            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")

            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")

        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")

            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)

        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f"  - {svc}")
        print(f"    image: {svc_config.get('image', 'N/A')}")
        print(f"    cache_from: {cache_from_list}")
        print(f"    cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()
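The helper script above (removed in this compare) derives its buildx cache scopes from a per-component hash, the current git ref, and a default-branch fallback. A condensed sketch of that scope logic, with names taken from the script and assuming the GHA cache backend (type=gha):

def gha_cache_from(component: str, target: str, component_hash: str,
                   git_ref: str, default_branch: str = "dev") -> list[str]:
    """Mirror the cache_from list the script builds for one service."""
    ref = git_ref.replace("refs/heads/", "").replace("/", "-")
    scopes = []
    if component_hash:
        scopes.append(f"platform-{component}-{target}-{component_hash}")  # exact hash match
    if ref:
        scopes.append(f"platform-{component}-{target}-{ref}")  # same-branch fallback
    if ref and ref != default_branch:
        scopes.append(f"platform-{component}-{target}-{default_branch}")  # dev fallback
    return [f"type=gha,scope={s}" for s in scopes] or ["type=gha"]

# e.g. gha_cache_from("backend", "server", "abc123", "refs/heads/my/feature")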

.gitignore (vendored, 11 changes)
@@ -3,6 +3,7 @@
 classic/original_autogpt/keys.py
 classic/original_autogpt/*.json
 auto_gpt_workspace/*
+.autogpt/
 *.mpeg
 .env
 # Root .env files
@@ -159,6 +160,10 @@ CURRENT_BULLETIN.md

 # AgBenchmark
 classic/benchmark/agbenchmark/reports/
+classic/reports/
+classic/direct_benchmark/reports/
+classic/.benchmark_workspaces/
+classic/direct_benchmark/.benchmark_workspaces/

 # Nodejs
 package-lock.json
@@ -177,7 +182,11 @@ autogpt_platform/backend/settings.py

 *.ign.*
 .test-contents
+**/.claude/settings.local.json
 .claude/settings.local.json
 CLAUDE.local.md
 /autogpt_platform/backend/logs
-.next
+
+# Test database
+test.db
+
+.next

.gitmodules (vendored, 3 changes)
@@ -1,3 +0,0 @@
-[submodule "classic/forge/tests/vcr_cassettes"]
-	path = classic/forge/tests/vcr_cassettes
-	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
@@ -43,29 +43,10 @@ repos:
       pass_filenames: false

     - id: poetry-install
-      name: Check & Install dependencies - Classic - AutoGPT
-      alias: poetry-install-classic-autogpt
-      entry: poetry -C classic/original_autogpt install
-      # include forge source (since it's a path dependency)
-      files: ^classic/(original_autogpt|forge)/poetry\.lock$
-      types: [file]
-      language: system
-      pass_filenames: false
-
-    - id: poetry-install
-      name: Check & Install dependencies - Classic - Forge
-      alias: poetry-install-classic-forge
-      entry: poetry -C classic/forge install
-      files: ^classic/forge/poetry\.lock$
-      types: [file]
-      language: system
-      pass_filenames: false
-
-    - id: poetry-install
-      name: Check & Install dependencies - Classic - Benchmark
-      alias: poetry-install-classic-benchmark
-      entry: poetry -C classic/benchmark install
-      files: ^classic/benchmark/poetry\.lock$
+      name: Check & Install dependencies - Classic
+      alias: poetry-install-classic
+      entry: poetry -C classic install
+      files: ^classic/poetry\.lock$
       types: [file]
       language: system
       pass_filenames: false
@@ -116,26 +97,10 @@ repos:
       language: system

     - id: isort
-      name: Lint (isort) - Classic - AutoGPT
-      alias: isort-classic-autogpt
-      entry: poetry -P classic/original_autogpt run isort -p autogpt
-      files: ^classic/original_autogpt/
-      types: [file, python]
-      language: system
-
-    - id: isort
-      name: Lint (isort) - Classic - Forge
-      alias: isort-classic-forge
-      entry: poetry -P classic/forge run isort -p forge
-      files: ^classic/forge/
-      types: [file, python]
-      language: system
-
-    - id: isort
-      name: Lint (isort) - Classic - Benchmark
-      alias: isort-classic-benchmark
-      entry: poetry -P classic/benchmark run isort -p agbenchmark
-      files: ^classic/benchmark/
+      name: Lint (isort) - Classic
+      alias: isort-classic
+      entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' --
+      files: ^classic/(original_autogpt|forge|direct_benchmark)/
       types: [file, python]
       language: system

@@ -149,26 +114,13 @@ repos:

   - repo: https://github.com/PyCQA/flake8
     rev: 7.0.0
-    # To have flake8 load the config of the individual subprojects, we have to call
-    # them separately.
+    # Use consolidated flake8 config at classic/.flake8
     hooks:
       - id: flake8
-        name: Lint (Flake8) - Classic - AutoGPT
-        alias: flake8-classic-autogpt
-        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
-        args: [--config=classic/original_autogpt/.flake8]
-
-      - id: flake8
-        name: Lint (Flake8) - Classic - Forge
-        alias: flake8-classic-forge
-        files: ^classic/forge/(forge|tests)/
-        args: [--config=classic/forge/.flake8]
-
-      - id: flake8
-        name: Lint (Flake8) - Classic - Benchmark
-        alias: flake8-classic-benchmark
-        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
-        args: [--config=classic/benchmark/.flake8]
+        name: Lint (Flake8) - Classic
+        alias: flake8-classic
+        files: ^classic/(original_autogpt|forge|direct_benchmark)/
+        args: [--config=classic/.flake8]

   - repo: local
     hooks:
@@ -204,29 +156,10 @@ repos:
       pass_filenames: false

     - id: pyright
-      name: Typecheck - Classic - AutoGPT
-      alias: pyright-classic-autogpt
-      entry: poetry -C classic/original_autogpt run pyright
-      # include forge source (since it's a path dependency) but exclude *_test.py files:
-      files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
-      types: [file]
-      language: system
-      pass_filenames: false
-
-    - id: pyright
-      name: Typecheck - Classic - Forge
-      alias: pyright-classic-forge
-      entry: poetry -C classic/forge run pyright
-      files: ^classic/forge/(forge/|poetry\.lock$)
-      types: [file]
-      language: system
-      pass_filenames: false
-
-    - id: pyright
-      name: Typecheck - Classic - Benchmark
-      alias: pyright-classic-benchmark
-      entry: poetry -C classic/benchmark run pyright
-      files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
+      name: Typecheck - Classic
+      alias: pyright-classic
+      entry: poetry -C classic run pyright
+      files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$
       types: [file]
       language: system
       pass_filenames: false
@@ -1,5 +1,3 @@
-# ============================ DEPENDENCY BUILDER ============================ #
-
 FROM debian:13-slim AS builder

 # Set environment variables
@@ -53,9 +51,7 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub

-# ============================== BACKEND SERVER ============================== #
-
-FROM debian:13-slim AS server
+FROM debian:13-slim AS server_dependencies

 WORKDIR /app
@@ -67,14 +63,15 @@ ENV POETRY_HOME=/opt/poetry \
 ENV PATH=/opt/poetry/bin:$PATH

 # Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
-# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
-RUN apt-get update && apt-get install -y --no-install-recommends \
+RUN apt-get update && apt-get install -y \
     python3.13 \
     python3-pip \
     ffmpeg \
     imagemagick \
     && rm -rf /var/lib/apt/lists/*

+# Copy only necessary files from builder
+COPY --from=builder /app /app
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
 # Copy Node.js installation for Prisma
@@ -84,54 +81,30 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
 COPY --from=builder /usr/bin/npx /usr/bin/npx
 COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

-WORKDIR /app/autogpt_platform/backend
-
-# Copy only the .venv from builder (not the entire /app directory)
-# The .venv includes the generated Prisma client
-COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
 ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

-# Copy dependency files + autogpt_libs (path dependency)
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
-
-# Copy backend code + docs (for Copilot docs search)
-COPY autogpt_platform/backend ./
+RUN mkdir -p /app/autogpt_platform/autogpt_libs
+RUN mkdir -p /app/autogpt_platform/backend
+
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
+
+WORKDIR /app/autogpt_platform/backend
+
+FROM server_dependencies AS migrate
+
+# Migration stage only needs schema and migrations - much lighter than full backend
+COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
+COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
+COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
+
+FROM server_dependencies AS server
+
+COPY autogpt_platform/backend /app/autogpt_platform/backend
 COPY docs /app/docs
 RUN poetry install --no-ansi --only-root

 ENV PORT=8000

 CMD ["poetry", "run", "rest"]
-
-# =============================== DB MIGRATOR =============================== #
-
-# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
-FROM debian:13-slim AS migrate
-
-WORKDIR /app/autogpt_platform/backend
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    python3.13 \
-    python3-pip \
-    ca-certificates \
-    && rm -rf /var/lib/apt/lists/*
-
-# Copy Node.js from builder (needed for Prisma CLI)
-COPY --from=builder /usr/bin/node /usr/bin/node
-COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
-COPY --from=builder /usr/bin/npm /usr/bin/npm
-
-# Copy Prisma binaries
-COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
-
-# Install prisma-client-py directly (much smaller than copying full venv)
-RUN pip3 install prisma>=0.15.0 --break-system-packages
-
-COPY autogpt_platform/backend/schema.prisma ./
-COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
-COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
-COPY autogpt_platform/backend/migrations ./migrations
@@ -10,7 +10,7 @@ from typing_extensions import TypedDict

 import backend.api.features.store.cache as store_cache
 import backend.api.features.store.model as store_model
-import backend.data.block
+import backend.blocks
 from backend.api.external.middleware import require_permission
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
@@ -67,7 +67,7 @@ async def get_user_info(
     dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
 )
 async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
-    blocks = [block() for block in backend.data.block.get_blocks().values()]
+    blocks = [block() for block in backend.blocks.get_blocks().values()]
     return [b.to_dict() for b in blocks if not b.disabled]

@@ -83,7 +83,7 @@ async def execute_graph_block(
         require_permission(APIKeyPermission.EXECUTE_BLOCK)
     ),
 ) -> CompletedBlockOutput:
-    obj = backend.data.block.get_block(block_id)
+    obj = backend.blocks.get_block(block_id)
     if not obj:
         raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
     if obj.disabled:
@@ -10,10 +10,15 @@ import backend.api.features.library.db as library_db
 import backend.api.features.library.model as library_model
 import backend.api.features.store.db as store_db
 import backend.api.features.store.model as store_model
-import backend.data.block
 from backend.blocks import load_all_blocks
+from backend.blocks._base import (
+    AnyBlockSchema,
+    BlockCategory,
+    BlockInfo,
+    BlockSchema,
+    BlockType,
+)
 from backend.blocks.llm import LlmModel
-from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
 from backend.data.db import query_raw_with_schema
 from backend.integrations.providers import ProviderName
 from backend.util.cache import cached
@@ -22,7 +27,7 @@ from backend.util.models import Pagination
 from .model import (
     BlockCategoryResponse,
     BlockResponse,
-    BlockType,
+    BlockTypeFilter,
     CountResponse,
     FilterType,
     Provider,
@@ -88,7 +93,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
 def get_blocks(
     *,
     category: str | None = None,
-    type: BlockType | None = None,
+    type: BlockTypeFilter | None = None,
     provider: ProviderName | None = None,
     page: int = 1,
     page_size: int = 50,
@@ -669,9 +674,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
     for block_type in load_all_blocks().values():
         block: AnyBlockSchema = block_type()
         if block.disabled or block.block_type in (
-            backend.data.block.BlockType.INPUT,
-            backend.data.block.BlockType.OUTPUT,
-            backend.data.block.BlockType.AGENT,
+            BlockType.INPUT,
+            BlockType.OUTPUT,
+            BlockType.AGENT,
         ):
             continue
         # Find the execution count for this block
@@ -4,7 +4,7 @@ from pydantic import BaseModel

 import backend.api.features.library.model as library_model
 import backend.api.features.store.model as store_model
-from backend.data.block import BlockInfo
+from backend.blocks._base import BlockInfo
 from backend.integrations.providers import ProviderName
 from backend.util.models import Pagination

@@ -15,7 +15,7 @@ FilterType = Literal[
     "my_agents",
 ]

-BlockType = Literal["all", "input", "action", "output"]
+BlockTypeFilter = Literal["all", "input", "action", "output"]


 class SearchEntry(BaseModel):
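The rename above separates the API-level filter literal from the runtime BlockType enum. A tiny self-contained sketch of the filter type as it appears in the diff (the usage below is illustrative only):

from typing import Literal

BlockTypeFilter = Literal["all", "input", "action", "output"]

def describe(filter_: BlockTypeFilter) -> str:
    # A request handler would receive one of these values as a query parameter.
    return f"listing {filter_} blocks"

print(describe("input"))  # listing input blocks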
@@ -88,7 +88,7 @@ async def get_block_categories(
 )
 async def get_blocks(
     category: Annotated[str | None, fastapi.Query()] = None,
-    type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
+    type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None,
     provider: Annotated[ProviderName | None, fastapi.Query()] = None,
     page: Annotated[int, fastapi.Query()] = 1,
     page_size: Annotated[int, fastapi.Query()] = 50,
@@ -0,0 +1,154 @@
"""Dummy Agent Generator for testing.

Returns mock responses matching the format expected from the external service.
Enable via AGENTGENERATOR_USE_DUMMY=true in settings.

WARNING: This is for testing only. Do not use in production.
"""

import asyncio
import logging
import uuid
from typing import Any

logger = logging.getLogger(__name__)

# Dummy decomposition result (instructions type)
DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
    "type": "instructions",
    "steps": [
        {
            "description": "Get input from user",
            "action": "input",
            "block_name": "AgentInputBlock",
        },
        {
            "description": "Process the input",
            "action": "process",
            "block_name": "TextFormatterBlock",
        },
        {
            "description": "Return output to user",
            "action": "output",
            "block_name": "AgentOutputBlock",
        },
    ],
}

# Block IDs from backend/blocks/io.py
AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"


def _generate_dummy_agent_json() -> dict[str, Any]:
    """Generate a minimal valid agent JSON for testing."""
    input_node_id = str(uuid.uuid4())
    output_node_id = str(uuid.uuid4())

    return {
        "id": str(uuid.uuid4()),
        "version": 1,
        "is_active": True,
        "name": "Dummy Test Agent",
        "description": "A dummy agent generated for testing purposes",
        "nodes": [
            {
                "id": input_node_id,
                "block_id": AGENT_INPUT_BLOCK_ID,
                "input_default": {
                    "name": "input",
                    "title": "Input",
                    "description": "Enter your input",
                    "placeholder_values": [],
                },
                "metadata": {"position": {"x": 0, "y": 0}},
            },
            {
                "id": output_node_id,
                "block_id": AGENT_OUTPUT_BLOCK_ID,
                "input_default": {
                    "name": "output",
                    "title": "Output",
                    "description": "Agent output",
                    "format": "{output}",
                },
                "metadata": {"position": {"x": 400, "y": 0}},
            },
        ],
        "links": [
            {
                "id": str(uuid.uuid4()),
                "source_id": input_node_id,
                "sink_id": output_node_id,
                "source_name": "result",
                "sink_name": "value",
                "is_static": False,
            },
        ],
    }


async def decompose_goal_dummy(
    description: str,
    context: str = "",
    library_agents: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Return dummy decomposition result."""
    logger.info("Using dummy agent generator for decompose_goal")
    return DUMMY_DECOMPOSITION_RESULT.copy()


async def generate_agent_dummy(
    instructions: dict[str, Any],
    library_agents: list[dict[str, Any]] | None = None,
    operation_id: str | None = None,
    task_id: str | None = None,
) -> dict[str, Any]:
    """Return dummy agent JSON after a simulated delay."""
    logger.info("Using dummy agent generator for generate_agent (30s delay)")
    await asyncio.sleep(30)
    return _generate_dummy_agent_json()


async def generate_agent_patch_dummy(
    update_request: str,
    current_agent: dict[str, Any],
    library_agents: list[dict[str, Any]] | None = None,
    operation_id: str | None = None,
    task_id: str | None = None,
) -> dict[str, Any]:
    """Return dummy patched agent (returns the current agent with updated description)."""
    logger.info("Using dummy agent generator for generate_agent_patch")
    patched = current_agent.copy()
    patched["description"] = (
        f"{current_agent.get('description', '')} (updated: {update_request})"
    )
    return patched


async def customize_template_dummy(
    template_agent: dict[str, Any],
    modification_request: str,
    context: str = "",
) -> dict[str, Any]:
    """Return dummy customized template (returns template with updated description)."""
    logger.info("Using dummy agent generator for customize_template")
    customized = template_agent.copy()
    customized["description"] = (
        f"{template_agent.get('description', '')} (customized: {modification_request})"
    )
    return customized


async def get_blocks_dummy() -> list[dict[str, Any]]:
    """Return dummy blocks list."""
    logger.info("Using dummy agent generator for get_blocks")
    return [
        {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
        {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
    ]


async def health_check_dummy() -> bool:
    """Always returns healthy for dummy service."""
    return True
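A hedged usage sketch for the dummy generator above: with AGENTGENERATOR_USE_DUMMY=true the client module routes its calls to these stubs, and they can also be exercised directly. Only the function names and return shapes come from the diff; the import path is an assumption.

import asyncio

# Import path assumed; the new module is a sibling "dummy" module of the
# Agent Generator client package shown in the next hunk.
from backend.api.features.chat.agent_generator.dummy import (
    decompose_goal_dummy,
    generate_agent_patch_dummy,
)

async def main() -> None:
    plan = await decompose_goal_dummy("Summarize my inbox")
    print(plan["type"], [step["block_name"] for step in plan["steps"]])
    patched = await generate_agent_patch_dummy(
        "rename the output", {"description": "Dummy Test Agent"}
    )
    print(patched["description"])

asyncio.run(main())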
@@ -12,8 +12,19 @@ import httpx

 from backend.util.settings import Settings

+from .dummy import (
+    customize_template_dummy,
+    decompose_goal_dummy,
+    generate_agent_dummy,
+    generate_agent_patch_dummy,
+    get_blocks_dummy,
+    health_check_dummy,
+)
+
 logger = logging.getLogger(__name__)

+_dummy_mode_warned = False
+

 def _create_error_response(
     error_message: str,
@@ -90,10 +101,26 @@ def _get_settings() -> Settings:
     return _settings


-def is_external_service_configured() -> bool:
-    """Check if external Agent Generator service is configured."""
+def _is_dummy_mode() -> bool:
+    """Check if dummy mode is enabled for testing."""
+    global _dummy_mode_warned
     settings = _get_settings()
-    return bool(settings.config.agentgenerator_host)
+    is_dummy = bool(settings.config.agentgenerator_use_dummy)
+    if is_dummy and not _dummy_mode_warned:
+        logger.warning(
+            "Agent Generator running in DUMMY MODE - returning mock responses. "
+            "Do not use in production!"
+        )
+        _dummy_mode_warned = True
+    return is_dummy
+
+
+def is_external_service_configured() -> bool:
+    """Check if external Agent Generator service is configured (or dummy mode)."""
+    settings = _get_settings()
+    return bool(settings.config.agentgenerator_host) or bool(
+        settings.config.agentgenerator_use_dummy
+    )


 def _get_base_url() -> str:
@@ -137,6 +164,9 @@ async def decompose_goal_external(
     - {"type": "error", "error": "...", "error_type": "..."} on error
     Or None on unexpected error
     """
+    if _is_dummy_mode():
+        return await decompose_goal_dummy(description, context, library_agents)
+
     client = _get_client()

     if context:
@@ -226,6 +256,11 @@ async def generate_agent_external(
     Returns:
         Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
     """
+    if _is_dummy_mode():
+        return await generate_agent_dummy(
+            instructions, library_agents, operation_id, task_id
+        )
+
     client = _get_client()

     # Build request payload
@@ -297,6 +332,11 @@ async def generate_agent_patch_external(
     Returns:
         Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
     """
+    if _is_dummy_mode():
+        return await generate_agent_patch_dummy(
+            update_request, current_agent, library_agents, operation_id, task_id
+        )
+
     client = _get_client()

     # Build request payload
@@ -383,6 +423,11 @@ async def customize_template_external(
     Returns:
         Customized agent JSON, clarifying questions dict, or error dict on error
     """
+    if _is_dummy_mode():
+        return await customize_template_dummy(
+            template_agent, modification_request, context
+        )
+
     client = _get_client()

     request = modification_request
@@ -445,6 +490,9 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
     Returns:
         List of block info dicts or None on error
     """
+    if _is_dummy_mode():
+        return await get_blocks_dummy()
+
     client = _get_client()

     try:
@@ -478,6 +526,9 @@ async def health_check() -> bool:
     if not is_external_service_configured():
         return False

+    if _is_dummy_mode():
+        return await health_check_dummy()
+
     client = _get_client()

     try:
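The configuration check above now treats dummy mode as "configured", so the generator can run end to end without an external host. A self-contained sketch of that behaviour; the field names mirror the settings keys referenced in the diff, while the Settings plumbing is simplified for illustration:

from dataclasses import dataclass

@dataclass
class AgentGeneratorSettings:
    agentgenerator_host: str = ""
    agentgenerator_use_dummy: bool = False

def is_external_service_configured(cfg: AgentGeneratorSettings) -> bool:
    # Either a real host or dummy mode counts as a usable generator backend.
    return bool(cfg.agentgenerator_host) or bool(cfg.agentgenerator_use_dummy)

assert is_external_service_configured(AgentGeneratorSettings(agentgenerator_use_dummy=True))
assert not is_external_service_configured(AgentGeneratorSettings())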
@@ -13,7 +13,8 @@ from backend.api.features.chat.tools.models import (
     NoResultsResponse,
 )
 from backend.api.features.store.hybrid_search import unified_hybrid_search
-from backend.data.block import BlockType, get_block
+from backend.blocks import get_block
+from backend.blocks._base import BlockType

 logger = logging.getLogger(__name__)
@@ -10,7 +10,7 @@ from backend.api.features.chat.tools.find_block import (
     FindBlockTool,
 )
 from backend.api.features.chat.tools.models import BlockListResponse
-from backend.data.block import BlockType
+from backend.blocks._base import BlockType

 from ._test_data import make_session
@@ -12,7 +12,8 @@ from backend.api.features.chat.tools.find_block import (
     COPILOT_EXCLUDED_BLOCK_IDS,
     COPILOT_EXCLUDED_BLOCK_TYPES,
 )
-from backend.data.block import AnyBlockSchema, get_block
+from backend.blocks import get_block
+from backend.blocks._base import AnyBlockSchema
 from backend.data.execution import ExecutionContext
 from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
 from backend.data.workspace import get_or_create_workspace
@@ -6,7 +6,7 @@ import pytest

 from backend.api.features.chat.tools.models import ErrorResponse
 from backend.api.features.chat.tools.run_block import RunBlockTool
-from backend.data.block import BlockType
+from backend.blocks._base import BlockType

 from ._test_data import make_session
@@ -12,12 +12,11 @@ import backend.api.features.store.image_gen as store_image_gen
 import backend.api.features.store.media as store_media
 import backend.data.graph as graph_db
 import backend.data.integrations as integrations_db
-from backend.data.block import BlockInput
 from backend.data.db import transaction
 from backend.data.execution import get_graph_execution
 from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
-from backend.data.model import CredentialsMetaInput
+from backend.data.model import CredentialsMetaInput, GraphInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.integrations.webhooks.graph_lifecycle_hooks import (
     on_graph_activate,
@@ -1130,7 +1129,7 @@ async def create_preset_from_graph_execution(
 async def update_preset(
     user_id: str,
     preset_id: str,
-    inputs: Optional[BlockInput] = None,
+    inputs: Optional[GraphInput] = None,
     credentials: Optional[dict[str, CredentialsMetaInput]] = None,
     name: Optional[str] = None,
     description: Optional[str] = None,
@@ -6,9 +6,12 @@ import prisma.enums
 import prisma.models
 import pydantic

-from backend.data.block import BlockInput
 from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
-from backend.data.model import CredentialsMetaInput, is_credentials_field_name
+from backend.data.model import (
+    CredentialsMetaInput,
+    GraphInput,
+    is_credentials_field_name,
+)
 from backend.util.json import loads as json_loads
 from backend.util.models import Pagination

@@ -323,7 +326,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
     graph_id: str
     graph_version: int

-    inputs: BlockInput
+    inputs: GraphInput
     credentials: dict[str, CredentialsMetaInput]

     name: str
@@ -352,7 +355,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
     Request model used when updating a preset for a library agent.
     """

-    inputs: Optional[BlockInput] = None
+    inputs: Optional[GraphInput] = None
     credentials: Optional[dict[str, CredentialsMetaInput]] = None

     name: Optional[str] = None
@@ -395,7 +398,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
             "Webhook must be included in AgentPreset query when webhookId is set"
         )

-    input_data: BlockInput = {}
+    input_data: GraphInput = {}
     input_credentials: dict[str, CredentialsMetaInput] = {}

     for preset_input in preset.InputPresets:
@@ -5,8 +5,8 @@ from typing import Optional
 import aiohttp
 from fastapi import HTTPException

+from backend.blocks import get_block
 from backend.data import graph as graph_db
-from backend.data.block import get_block
 from backend.util.settings import Settings

 from .models import ApiResponse, ChatRequest, GraphData
@@ -152,7 +152,7 @@ class BlockHandler(ContentHandler):

     async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
         """Fetch blocks without embeddings."""
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks

         # Get all available blocks
         all_blocks = get_blocks()
@@ -249,7 +249,7 @@ class BlockHandler(ContentHandler):

     async def get_stats(self) -> dict[str, int]:
         """Get statistics about block embedding coverage."""
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks

         all_blocks = get_blocks()
@@ -93,7 +93,7 @@ async def test_block_handler_get_missing_items(mocker):
     mock_existing = []

     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -135,7 +135,7 @@ async def test_block_handler_get_stats(mocker):
     mock_embedded = [{"count": 2}]

     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -327,7 +327,7 @@ async def test_block_handler_handles_missing_attributes():
     mock_blocks = {"block-minimal": mock_block_class}

     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -360,7 +360,7 @@ async def test_block_handler_skips_failed_blocks():
     mock_blocks = {"good-block": good_block, "bad-block": bad_block}

     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
         )
         current_ids = {row["id"] for row in valid_agents}
     elif content_type == ContentType.BLOCK:
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks

         current_ids = set(get_blocks().keys())
     elif content_type == ContentType.DOCUMENTATION:
@@ -7,15 +7,6 @@ from replicate.client import Client as ReplicateClient
 from replicate.exceptions import ReplicateError
 from replicate.helpers import FileOutput

-from backend.blocks.ideogram import (
-    AspectRatio,
-    ColorPalettePreset,
-    IdeogramModelBlock,
-    IdeogramModelName,
-    MagicPromptOption,
-    StyleType,
-    UpscaleOption,
-)
 from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials
@@ -50,6 +41,16 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
     if not ideogram_credentials.api_key:
         raise ValueError("Missing Ideogram API key")

+    from backend.blocks.ideogram import (
+        AspectRatio,
+        ColorPalettePreset,
+        IdeogramModelBlock,
+        IdeogramModelName,
+        MagicPromptOption,
+        StyleType,
+        UpscaleOption,
+    )
+
     name = graph.name
     description = f"{name} ({graph.description})" if graph.description else name
@@ -40,10 +40,11 @@ from backend.api.model (
     UpdateTimezoneRequest,
     UploadFileResponse,
 )
+from backend.blocks import get_block, get_blocks
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
 from backend.data.auth import api_key as api_key_db
-from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
+from backend.data.block import BlockInput, CompletedBlockOutput
 from backend.data.credit import (
     AutoTopUpConfig,
     RefundRequest,
@@ -3,22 +3,19 @@ import logging
 import os
 import re
 from pathlib import Path
-from typing import TYPE_CHECKING, TypeVar
+from typing import Sequence, Type, TypeVar

+from backend.blocks._base import AnyBlockSchema, BlockType
 from backend.util.cache import cached

 logger = logging.getLogger(__name__)

-
-if TYPE_CHECKING:
-    from backend.data.block import Block
-
 T = TypeVar("T")


 @cached(ttl_seconds=3600)
-def load_all_blocks() -> dict[str, type["Block"]]:
-    from backend.data.block import Block
+def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
+    from backend.blocks._base import Block
     from backend.util.settings import Config

     # Check if example blocks should be loaded from settings
@@ -50,8 +47,8 @@ def load_all_blocks() -> dict[str, type["Block"]]:
         importlib.import_module(f".{module}", package=__name__)

     # Load all Block instances from the available modules
-    available_blocks: dict[str, type["Block"]] = {}
-    for block_cls in all_subclasses(Block):
+    available_blocks: dict[str, type["AnyBlockSchema"]] = {}
+    for block_cls in _all_subclasses(Block):
         class_name = block_cls.__name__

         if class_name.endswith("Base"):
@@ -64,7 +61,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
                 "please name the class with 'Base' at the end"
             )

-        block = block_cls.create()
+        block = block_cls()  # pyright: ignore[reportAbstractUsage]

         if not isinstance(block.id, str) or len(block.id) != 36:
             raise ValueError(
@@ -105,7 +102,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
         available_blocks[block.id] = block_cls

     # Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets
-    from backend.data.block import is_block_auth_configured
+    from ._utils import is_block_auth_configured

     filtered_blocks = {}
     for block_id, block_cls in available_blocks.items():
@@ -115,11 +112,48 @@ def load_all_blocks() -> dict[str, type["Block"]]:
     return filtered_blocks


-__all__ = ["load_all_blocks"]
-
-
-def all_subclasses(cls: type[T]) -> list[type[T]]:
+def _all_subclasses(cls: type[T]) -> list[type[T]]:
     subclasses = cls.__subclasses__()
     for subclass in subclasses:
-        subclasses += all_subclasses(subclass)
+        subclasses += _all_subclasses(subclass)
     return subclasses
+
+
+# ============== Block access helper functions ============== #
+
+
+def get_blocks() -> dict[str, Type["AnyBlockSchema"]]:
+    return load_all_blocks()
+
+
+# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281
+def get_block(block_id: str) -> "AnyBlockSchema | None":
+    cls = get_blocks().get(block_id)
+    return cls() if cls else None
+
+
+@cached(ttl_seconds=3600)
+def get_webhook_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL)
+    ]
+
+
+@cached(ttl_seconds=3600)
+def get_io_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
+    ]
+
+
+@cached(ttl_seconds=3600)
+def get_human_in_the_loop_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type == BlockType.HUMAN_IN_THE_LOOP
+    ]
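(Note: a minimal usage sketch of the relocated helpers, assuming the new backend.blocks import path introduced above; the block ID shown is a placeholder.)

    from backend.blocks import get_block, get_blocks

    # All registered, auth-configured blocks, keyed by block ID.
    blocks = get_blocks()
    print(f"{len(blocks)} blocks loaded")

    # Look up a single block by ID; an instance is returned, or None if unknown.
    block = get_block("00000000-0000-0000-0000-000000000000")  # placeholder ID
    if block is not None:
        print(block.name, block.block_type)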
autogpt_platform/backend/backend/blocks/_base.py (new file, 739 lines)

import inspect
import logging
from abc import ABC, abstractmethod
from enum import Enum
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Generic,
    Optional,
    Type,
    TypeAlias,
    TypeVar,
    cast,
    get_origin,
)

import jsonref
import jsonschema
from pydantic import BaseModel

from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry
from backend.data.model import (
    Credentials,
    CredentialsFieldInfo,
    CredentialsMetaInput,
    SchemaField,
    is_credentials_field_name,
)
from backend.integrations.providers import ProviderName
from backend.util import json
from backend.util.exceptions import (
    BlockError,
    BlockExecutionError,
    BlockInputError,
    BlockOutputError,
    BlockUnknownError,
)
from backend.util.settings import Config

logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from backend.data.execution import ExecutionContext
    from backend.data.model import ContributorDetails, NodeExecutionStats

    from ..data.graph import Link

app_config = Config()


BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]]


class BlockType(Enum):
    STANDARD = "Standard"
    INPUT = "Input"
    OUTPUT = "Output"
    NOTE = "Note"
    WEBHOOK = "Webhook"
    WEBHOOK_MANUAL = "Webhook (manual)"
    AGENT = "Agent"
    AI = "AI"
    AYRSHARE = "Ayrshare"
    HUMAN_IN_THE_LOOP = "Human In The Loop"


class BlockCategory(Enum):
    AI = "Block that leverages AI to perform a task."
    SOCIAL = "Block that interacts with social media platforms."
    TEXT = "Block that processes text data."
    SEARCH = "Block that searches or extracts information from the internet."
    BASIC = "Block that performs basic operations."
    INPUT = "Block that interacts with input of the graph."
    OUTPUT = "Block that interacts with output of the graph."
    LOGIC = "Programming logic to control the flow of your agent"
    COMMUNICATION = "Block that interacts with communication platforms."
    DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
    DATA = "Block that interacts with structured data."
    HARDWARE = "Block that interacts with hardware."
    AGENT = "Block that interacts with other agents."
    CRM = "Block that interacts with CRM services."
    SAFETY = (
        "Block that provides AI safety mechanisms such as detecting harmful content"
    )
    PRODUCTIVITY = "Block that helps with productivity"
    ISSUE_TRACKING = "Block that helps with issue tracking"
    MULTIMEDIA = "Block that interacts with multimedia content"
    MARKETING = "Block that helps with marketing"

    def dict(self) -> dict[str, str]:
        return {"category": self.name, "description": self.value}


class BlockCostType(str, Enum):
    RUN = "run"  # cost X credits per run
    BYTE = "byte"  # cost X credits per byte
    SECOND = "second"  # cost X credits per second


class BlockCost(BaseModel):
    cost_amount: int
    cost_filter: BlockInput
    cost_type: BlockCostType

    def __init__(
        self,
        cost_amount: int,
        cost_type: BlockCostType = BlockCostType.RUN,
        cost_filter: Optional[BlockInput] = None,
        **data: Any,
    ) -> None:
        super().__init__(
            cost_amount=cost_amount,
            cost_filter=cost_filter or {},
            cost_type=cost_type,
            **data,
        )


class BlockInfo(BaseModel):
    id: str
    name: str
    inputSchema: dict[str, Any]
    outputSchema: dict[str, Any]
    costs: list[BlockCost]
    description: str
    categories: list[dict[str, str]]
    contributors: list[dict[str, Any]]
    staticOutput: bool
    uiType: str


class BlockSchema(BaseModel):
    cached_jsonschema: ClassVar[dict[str, Any]]

    @classmethod
    def jsonschema(cls) -> dict[str, Any]:
        if cls.cached_jsonschema:
            return cls.cached_jsonschema

        model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)

        def ref_to_dict(obj):
            if isinstance(obj, dict):
                # OpenAPI <3.1 does not support sibling fields that has a $ref key
                # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
                keys = {"allOf", "anyOf", "oneOf"}
                one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
                if one_key:
                    obj.update(obj[one_key][0])

                return {
                    key: ref_to_dict(value)
                    for key, value in obj.items()
                    if not key.startswith("$") and key != one_key
                }
            elif isinstance(obj, list):
                return [ref_to_dict(item) for item in obj]

            return obj

        cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))

        return cls.cached_jsonschema

    @classmethod
    def validate_data(cls, data: BlockInput) -> str | None:
        return json.validate_with_jsonschema(
            schema=cls.jsonschema(),
            data={k: v for k, v in data.items() if v is not None},
        )

    @classmethod
    def get_mismatch_error(cls, data: BlockInput) -> str | None:
        return cls.validate_data(data)

    @classmethod
    def get_field_schema(cls, field_name: str) -> dict[str, Any]:
        model_schema = cls.jsonschema().get("properties", {})
        if not model_schema:
            raise ValueError(f"Invalid model schema {cls}")

        property_schema = model_schema.get(field_name)
        if not property_schema:
            raise ValueError(f"Invalid property name {field_name}")

        return property_schema

    @classmethod
    def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
        """
        Validate the data against a specific property (one of the input/output name).
        Returns the validation error message if the data does not match the schema.
        """
        try:
            property_schema = cls.get_field_schema(field_name)
            jsonschema.validate(json.to_dict(data), property_schema)
            return None
        except jsonschema.ValidationError as e:
            return str(e)

    @classmethod
    def get_fields(cls) -> set[str]:
        return set(cls.model_fields.keys())

    @classmethod
    def get_required_fields(cls) -> set[str]:
        return {
            field
            for field, field_info in cls.model_fields.items()
            if field_info.is_required()
        }

    @classmethod
    def __pydantic_init_subclass__(cls, **kwargs):
        """Validates the schema definition. Rules:
        - Fields with annotation `CredentialsMetaInput` MUST be
          named `credentials` or `*_credentials`
        - Fields named `credentials` or `*_credentials` MUST be
          of type `CredentialsMetaInput`
        """
        super().__pydantic_init_subclass__(**kwargs)

        # Reset cached JSON schema to prevent inheriting it from parent class
        cls.cached_jsonschema = {}

        credentials_fields = cls.get_credentials_fields()

        for field_name in cls.get_fields():
            if is_credentials_field_name(field_name):
                if field_name not in credentials_fields:
                    raise TypeError(
                        f"Credentials field '{field_name}' on {cls.__qualname__} "
                        f"is not of type {CredentialsMetaInput.__name__}"
                    )

                CredentialsMetaInput.validate_credentials_field_schema(
                    cls.get_field_schema(field_name), field_name
                )

            elif field_name in credentials_fields:
                raise KeyError(
                    f"Credentials field '{field_name}' on {cls.__qualname__} "
                    "has invalid name: must be 'credentials' or *_credentials"
                )

    @classmethod
    def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]:
        return {
            field_name: info.annotation
            for field_name, info in cls.model_fields.items()
            if (
                inspect.isclass(info.annotation)
                and issubclass(
                    get_origin(info.annotation) or info.annotation,
                    CredentialsMetaInput,
                )
            )
        }

    @classmethod
    def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
        """
        Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).

        Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}

        Raises:
            ValueError: If multiple fields have the same kwarg_name, as this would
                cause silent overwriting and only the last field would be processed.
        """
        result: dict[str, dict[str, Any]] = {}
        schema = cls.jsonschema()
        properties = schema.get("properties", {})

        for field_name, field_schema in properties.items():
            auto_creds = field_schema.get("auto_credentials")
            if auto_creds:
                kwarg_name = auto_creds.get("kwarg_name", "credentials")
                if kwarg_name in result:
                    raise ValueError(
                        f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
                        f"in fields '{result[kwarg_name]['field_name']}' and "
                        f"'{field_name}' on {cls.__qualname__}"
                    )
                result[kwarg_name] = {
                    "field_name": field_name,
                    "config": auto_creds,
                }
        return result

    @classmethod
    def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
        result = {}

        # Regular credentials fields
        for field_name in cls.get_credentials_fields().keys():
            result[field_name] = CredentialsFieldInfo.model_validate(
                cls.get_field_schema(field_name), by_alias=True
            )

        # Auto-generated credentials fields (from GoogleDriveFileInput etc.)
        for kwarg_name, info in cls.get_auto_credentials_fields().items():
            config = info["config"]
            # Build a schema-like dict that CredentialsFieldInfo can parse
            auto_schema = {
                "credentials_provider": [config.get("provider", "google")],
                "credentials_types": [config.get("type", "oauth2")],
                "credentials_scopes": config.get("scopes"),
            }
            result[kwarg_name] = CredentialsFieldInfo.model_validate(
                auto_schema, by_alias=True
            )

        return result

    @classmethod
    def get_input_defaults(cls, data: BlockInput) -> BlockInput:
        return data  # Return as is, by default.

    @classmethod
    def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
        input_fields_from_nodes = {link.sink_name for link in links}
        return input_fields_from_nodes - set(data)

    @classmethod
    def get_missing_input(cls, data: BlockInput) -> set[str]:
        return cls.get_required_fields() - set(data)


class BlockSchemaInput(BlockSchema):
    """
    Base schema class for block inputs.
    All block input schemas should extend this class for consistency.
    """

    pass


class BlockSchemaOutput(BlockSchema):
    """
    Base schema class for block outputs that includes a standard error field.
    All block output schemas should extend this class to ensure consistent error handling.
    """

    error: str = SchemaField(
        description="Error message if the operation failed", default=""
    )


BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput)
BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput)


class EmptyInputSchema(BlockSchemaInput):
    pass


class EmptyOutputSchema(BlockSchemaOutput):
    pass


# For backward compatibility - will be deprecated
EmptySchema = EmptyOutputSchema


# --8<-- [start:BlockWebhookConfig]
class BlockManualWebhookConfig(BaseModel):
    """
    Configuration model for webhook-triggered blocks on which
    the user has to manually set up the webhook at the provider.
    """

    provider: ProviderName
    """The service provider that the webhook connects to"""

    webhook_type: str
    """
    Identifier for the webhook type. E.g. GitHub has repo and organization level hooks.

    Only for use in the corresponding `WebhooksManager`.
    """

    event_filter_input: str = ""
    """
    Name of the block's event filter input.
    Leave empty if the corresponding webhook doesn't have distinct event/payload types.
    """

    event_format: str = "{event}"
    """
    Template string for the event(s) that a block instance subscribes to.
    Applied individually to each event selected in the event filter input.

    Example: `"pull_request.{event}"` -> `"pull_request.opened"`
    """


class BlockWebhookConfig(BlockManualWebhookConfig):
    """
    Configuration model for webhook-triggered blocks for which
    the webhook can be automatically set up through the provider's API.
    """

    resource_format: str
    """
    Template string for the resource that a block instance subscribes to.
    Fields will be filled from the block's inputs (except `payload`).

    Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)

    Only for use in the corresponding `WebhooksManager`.
    """
    # --8<-- [end:BlockWebhookConfig]


class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
    def __init__(
        self,
        id: str = "",
        description: str = "",
        contributors: list["ContributorDetails"] = [],
        categories: set[BlockCategory] | None = None,
        input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
        output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
        test_input: BlockInput | list[BlockInput] | None = None,
        test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
        test_mock: dict[str, Any] | None = None,
        test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
        disabled: bool = False,
        static_output: bool = False,
        block_type: BlockType = BlockType.STANDARD,
        webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
        is_sensitive_action: bool = False,
    ):
        """
        Initialize the block with the given schema.

        Args:
            id: The unique identifier for the block, this value will be persisted in the
                DB. So it should be a unique and constant across the application run.
                Use the UUID format for the ID.
            description: The description of the block, explaining what the block does.
            contributors: The list of contributors who contributed to the block.
            input_schema: The schema, defined as a Pydantic model, for the input data.
            output_schema: The schema, defined as a Pydantic model, for the output data.
            test_input: The list or single sample input data for the block, for testing.
            test_output: The list or single expected output if the test_input is run.
            test_mock: function names on the block implementation to mock on test run.
            disabled: If the block is disabled, it will not be available for execution.
            static_output: Whether the output links of the block are static by default.
        """
        from backend.data.model import NodeExecutionStats

        self.id = id
        self.input_schema = input_schema
        self.output_schema = output_schema
        self.test_input = test_input
        self.test_output = test_output
        self.test_mock = test_mock
        self.test_credentials = test_credentials
        self.description = description
        self.categories = categories or set()
        self.contributors = contributors or set()
        self.disabled = disabled
        self.static_output = static_output
        self.block_type = block_type
        self.webhook_config = webhook_config
        self.is_sensitive_action = is_sensitive_action
        self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()

        if self.webhook_config:
            if isinstance(self.webhook_config, BlockWebhookConfig):
                # Enforce presence of credentials field on auto-setup webhook blocks
                if not (cred_fields := self.input_schema.get_credentials_fields()):
                    raise TypeError(
                        "credentials field is required on auto-setup webhook blocks"
                    )
                # Disallow multiple credentials inputs on webhook blocks
                elif len(cred_fields) > 1:
                    raise ValueError(
                        "Multiple credentials inputs not supported on webhook blocks"
                    )

                self.block_type = BlockType.WEBHOOK
            else:
                self.block_type = BlockType.WEBHOOK_MANUAL

            # Enforce shape of webhook event filter, if present
            if self.webhook_config.event_filter_input:
                event_filter_field = self.input_schema.model_fields[
                    self.webhook_config.event_filter_input
                ]
                if not (
                    isinstance(event_filter_field.annotation, type)
                    and issubclass(event_filter_field.annotation, BaseModel)
                    and all(
                        field.annotation is bool
                        for field in event_filter_field.annotation.model_fields.values()
                    )
                ):
                    raise NotImplementedError(
                        f"{self.name} has an invalid webhook event selector: "
                        "field must be a BaseModel and all its fields must be boolean"
                    )

            # Enforce presence of 'payload' input
            if "payload" not in self.input_schema.model_fields:
                raise TypeError(
                    f"{self.name} is webhook-triggered but has no 'payload' input"
                )

            # Disable webhook-triggered block if webhook functionality not available
            if not app_config.platform_base_url:
                self.disabled = True

    @abstractmethod
    async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
        """
        Run the block with the given input data.
        Args:
            input_data: The input data with the structure of input_schema.

        Kwargs: Currently 14/02/2025 these include
            graph_id: The ID of the graph.
            node_id: The ID of the node.
            graph_exec_id: The ID of the graph execution.
            node_exec_id: The ID of the node execution.
            user_id: The ID of the user.

        Returns:
            A Generator that yields (output_name, output_data).
            output_name: One of the output name defined in Block's output_schema.
            output_data: The data for the output_name, matching the defined schema.
        """
        # --- satisfy the type checker, never executed -------------
        if False:  # noqa: SIM115
            yield "name", "value"  # pyright: ignore[reportMissingYield]
        raise NotImplementedError(f"{self.name} does not implement the run method.")

    async def run_once(
        self, input_data: BlockSchemaInputType, output: str, **kwargs
    ) -> Any:
        async for item in self.run(input_data, **kwargs):
            name, data = item
            if name == output:
                return data
        raise ValueError(f"{self.name} did not produce any output for {output}")

    def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats":
        self.execution_stats += stats
        return self.execution_stats

    @property
    def name(self):
        return self.__class__.__name__

    def to_dict(self):
        return {
            "id": self.id,
            "name": self.name,
            "inputSchema": self.input_schema.jsonschema(),
            "outputSchema": self.output_schema.jsonschema(),
            "description": self.description,
            "categories": [category.dict() for category in self.categories],
            "contributors": [
                contributor.model_dump() for contributor in self.contributors
            ],
            "staticOutput": self.static_output,
            "uiType": self.block_type.value,
        }

    def get_info(self) -> BlockInfo:
        from backend.data.credit import get_block_cost

        return BlockInfo(
            id=self.id,
            name=self.name,
            inputSchema=self.input_schema.jsonschema(),
            outputSchema=self.output_schema.jsonschema(),
            costs=get_block_cost(self),
            description=self.description,
            categories=[category.dict() for category in self.categories],
            contributors=[
                contributor.model_dump() for contributor in self.contributors
            ],
            staticOutput=self.static_output,
            uiType=self.block_type.value,
        )

    async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
        try:
            async for output_name, output_data in self._execute(input_data, **kwargs):
                yield output_name, output_data
        except Exception as ex:
            if isinstance(ex, BlockError):
                raise ex
            else:
                raise (
                    BlockExecutionError
                    if isinstance(ex, ValueError)
                    else BlockUnknownError
                )(
                    message=str(ex),
                    block_name=self.name,
                    block_id=self.id,
                ) from ex

    async def is_block_exec_need_review(
        self,
        input_data: BlockInput,
        *,
        user_id: str,
        node_id: str,
        node_exec_id: str,
        graph_exec_id: str,
        graph_id: str,
        graph_version: int,
        execution_context: "ExecutionContext",
        **kwargs,
    ) -> tuple[bool, BlockInput]:
        """
        Check if this block execution needs human review and handle the review process.

        Returns:
            Tuple of (should_pause, input_data_to_use)
            - should_pause: True if execution should be paused for review
            - input_data_to_use: The input data to use (may be modified by reviewer)
        """
        if not (
            self.is_sensitive_action and execution_context.sensitive_action_safe_mode
        ):
            return False, input_data

        from backend.blocks.helpers.review import HITLReviewHelper

        # Handle the review request and get decision
        decision = await HITLReviewHelper.handle_review_decision(
            input_data=input_data,
            user_id=user_id,
            node_id=node_id,
            node_exec_id=node_exec_id,
            graph_exec_id=graph_exec_id,
            graph_id=graph_id,
            graph_version=graph_version,
            block_name=self.name,
            editable=True,
        )

        if decision is None:
            # We're awaiting review - pause execution
            return True, input_data

        if not decision.should_proceed:
            # Review was rejected, raise an error to stop execution
            raise BlockExecutionError(
                message=f"Block execution rejected by reviewer: {decision.message}",
                block_name=self.name,
                block_id=self.id,
            )

        # Review was approved - use the potentially modified data
        # ReviewResult.data must be a dict for block inputs
        reviewed_data = decision.review_result.data
        if not isinstance(reviewed_data, dict):
            raise BlockExecutionError(
                message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
                block_name=self.name,
                block_id=self.id,
            )
        return False, reviewed_data

    async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
        # Check for review requirement only if running within a graph execution context
        # Direct block execution (e.g., from chat) skips the review process
        has_graph_context = all(
            key in kwargs
            for key in (
                "node_exec_id",
                "graph_exec_id",
                "graph_id",
                "execution_context",
            )
        )
        if has_graph_context:
            should_pause, input_data = await self.is_block_exec_need_review(
                input_data, **kwargs
            )
            if should_pause:
                return

        # Validate the input data (original or reviewer-modified) once
        if error := self.input_schema.validate_data(input_data):
            raise BlockInputError(
                message=f"Unable to execute block with invalid input data: {error}",
                block_name=self.name,
                block_id=self.id,
            )

        # Use the validated input data
        async for output_name, output_data in self.run(
            self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
            **kwargs,
        ):
            if output_name == "error":
                raise BlockExecutionError(
                    message=output_data, block_name=self.name, block_id=self.id
                )
            if self.block_type == BlockType.STANDARD and (
                error := self.output_schema.validate_field(output_name, output_data)
            ):
                raise BlockOutputError(
                    message=f"Block produced an invalid output data: {error}",
                    block_name=self.name,
                    block_id=self.id,
                )
            yield output_name, output_data

    def is_triggered_by_event_type(
        self, trigger_config: dict[str, Any], event_type: str
    ) -> bool:
        if not self.webhook_config:
            raise TypeError("This method can't be used on non-trigger blocks")
        if not self.webhook_config.event_filter_input:
            return True
        event_filter = trigger_config.get(self.webhook_config.event_filter_input)
        if not event_filter:
            raise ValueError("Event filter is not configured on trigger")
        return event_type in [
            self.webhook_config.event_format.format(event=k)
            for k in event_filter
            if event_filter[k] is True
        ]


# Type alias for any block with standard input/output schemas
AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]
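(Note: a minimal sketch of a block defined against the backend.blocks._base API above; the class name, UUID, and field names are illustrative placeholders, not part of this change.)

    from backend.blocks._base import (
        Block,
        BlockCategory,
        BlockOutput,
        BlockSchemaInput,
        BlockSchemaOutput,
    )
    from backend.data.model import SchemaField


    class EchoBlock(Block):
        class Input(BlockSchemaInput):
            text: str = SchemaField(description="Text to echo back")

        class Output(BlockSchemaOutput):
            text: str = SchemaField(description="The echoed text")

        def __init__(self):
            super().__init__(
                id="11111111-1111-1111-1111-111111111111",  # placeholder UUID (36 chars)
                description="Echoes its input text",
                categories={BlockCategory.BASIC},
                input_schema=EchoBlock.Input,
                output_schema=EchoBlock.Output,
            )

        async def run(self, input_data: Input, **kwargs) -> BlockOutput:
            # Yield (output_name, output_data) pairs, as required by Block.run().
            yield "text", input_data.text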
autogpt_platform/backend/backend/blocks/_utils.py (new file, 122 lines)

import logging
import os

from backend.integrations.providers import ProviderName

from ._base import AnyBlockSchema

logger = logging.getLogger(__name__)


def is_block_auth_configured(
    block_cls: type[AnyBlockSchema],
) -> bool:
    """
    Check if a block has a valid authentication method configured at runtime.

    For example if a block is an OAuth-only block and there env vars are not set,
    do not show it in the UI.

    """
    from backend.sdk.registry import AutoRegistry

    # Create an instance to access input_schema
    try:
        block = block_cls()
    except Exception as e:
        # If we can't create a block instance, assume it's not OAuth-only
        logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
        return True
    logger.debug(
        f"Checking if block {block_cls.__name__} has a valid provider configured"
    )

    # Get all credential inputs from input schema
    credential_inputs = block.input_schema.get_credentials_fields_info()
    required_inputs = block.input_schema.get_required_fields()
    if not credential_inputs:
        logger.debug(
            f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
        )
        return True

    # Check credential inputs
    if len(required_inputs.intersection(credential_inputs.keys())) == 0:
        logger.debug(
            f"Block {block_cls.__name__} has only optional credential inputs"
            " - will work without credentials configured"
        )

    # Check if the credential inputs for this block are correctly configured
    for field_name, field_info in credential_inputs.items():
        provider_names = field_info.provider
        if not provider_names:
            logger.warning(
                f"Block {block_cls.__name__} "
                f"has credential input '{field_name}' with no provider options"
                " - Disabling"
            )
            return False

        # If a field has multiple possible providers, each one needs to be usable to
        # prevent breaking the UX
        for _provider_name in provider_names:
            provider_name = _provider_name.value
            if provider_name in ProviderName.__members__.values():
                logger.debug(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"provider '{provider_name}' is part of the legacy provider system"
                    " - Treating as valid"
                )
                break

            provider = AutoRegistry.get_provider(provider_name)
            if not provider:
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"refers to unknown provider '{provider_name}' - Disabling"
                )
                return False

            # Check the provider's supported auth types
            if field_info.supported_types != provider.supported_auth_types:
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"has mismatched supported auth types (field <> Provider): "
                    f"{field_info.supported_types} != {provider.supported_auth_types}"
                )

            if not (supported_auth_types := provider.supported_auth_types):
                # No auth methods are been configured for this provider
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"provider '{provider_name}' "
                    "has no authentication methods configured - Disabling"
                )
                return False

            # Check if provider supports OAuth
            if "oauth2" in supported_auth_types:
                # Check if OAuth environment variables are set
                if (oauth_config := provider.oauth_config) and bool(
                    os.getenv(oauth_config.client_id_env_var)
                    and os.getenv(oauth_config.client_secret_env_var)
                ):
                    logger.debug(
                        f"Block {block_cls.__name__} credential input '{field_name}' "
                        f"provider '{provider_name}' is configured for OAuth"
                    )
                else:
                    logger.error(
                        f"Block {block_cls.__name__} credential input '{field_name}' "
                        f"provider '{provider_name}' "
                        "is missing OAuth client ID or secret - Disabling"
                    )
                    return False

            logger.debug(
                f"Block {block_cls.__name__} credential input '{field_name}' is valid; "
                f"supported credential types: {', '.join(field_info.supported_types)}"
            )

    return True
@@ -1,7 +1,7 @@
 import logging
-from typing import Any, Optional
+from typing import TYPE_CHECKING, Any, Optional

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockInput,
@@ -9,13 +9,15 @@ from backend.data.block import (
     BlockSchema,
     BlockSchemaInput,
     BlockType,
-    get_block,
 )
 from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
 from backend.data.model import NodeExecutionStats, SchemaField
 from backend.util.json import validate_with_jsonschema
 from backend.util.retry import func_retry

+if TYPE_CHECKING:
+    from backend.executor.utils import LogMetadata
+
 _logger = logging.getLogger(__name__)


@@ -124,9 +126,10 @@ class AgentExecutorBlock(Block):
         graph_version: int,
         graph_exec_id: str,
         user_id: str,
-        logger,
+        logger: "LogMetadata",
     ) -> BlockOutput:

+        from backend.blocks import get_block
         from backend.data.execution import ExecutionEventType
         from backend.executor import utils as execution_utils

@@ -198,7 +201,7 @@ class AgentExecutorBlock(Block):
         self,
         graph_exec_id: str,
         user_id: str,
-        logger,
+        logger: "LogMetadata",
     ) -> None:
         from backend.executor import utils as execution_utils

@@ -1,5 +1,11 @@
 from typing import Any

+from backend.blocks._base import (
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.llm import (
     DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
@@ -11,12 +17,6 @@ from backend.blocks.llm import (
     LLMResponse,
     llm_call,
 )
-from backend.data.block import (
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField


@@ -6,7 +6,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -5,7 +5,12 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput

-from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.execution import ExecutionContext
 from backend.data.model import (
     APIKeyCredentials,
@@ -6,7 +6,7 @@ from typing import Literal
 from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -6,7 +6,7 @@ from typing import Literal

 from pydantic import SecretStr

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,3 +1,10 @@
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -10,13 +17,6 @@ from backend.blocks.apollo.models import (
     PrimaryPhone,
     SearchOrganizationsRequest,
 )
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import CredentialsField, SchemaField


@@ -1,5 +1,12 @@
 import asyncio

+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -14,13 +21,6 @@ from backend.blocks.apollo.models import (
     SearchPeopleRequest,
     SenorityLevels,
 )
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import CredentialsField, SchemaField


@@ -1,3 +1,10 @@
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -6,13 +13,6 @@ from backend.blocks.apollo._auth import (
     ApolloCredentialsInput,
 )
 from backend.blocks.apollo.models import Contact, EnrichPersonRequest
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import CredentialsField, SchemaField


@@ -3,7 +3,7 @@ from typing import Optional

 from pydantic import BaseModel, Field

-from backend.data.block import BlockSchemaInput
+from backend.blocks._base import BlockSchemaInput
 from backend.data.model import SchemaField, UserIntegrations
 from backend.integrations.ayrshare import AyrshareClient
 from backend.util.clients import get_database_manager_async_client
@@ -1,7 +1,7 @@
 import enum
 from typing import Any

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -2,7 +2,7 @@ import os
 import re
 from typing import Type

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,7 +1,7 @@
 from enum import Enum
 from typing import Any

-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,12 +1,12 @@
|
|||||||
import json
|
import json
|
||||||
import shlex
|
import shlex
|
||||||
import uuid
|
import uuid
|
||||||
from typing import Literal, Optional
|
from typing import TYPE_CHECKING, Literal, Optional
|
||||||
|
|
||||||
from e2b import AsyncSandbox as BaseAsyncSandbox
|
from e2b import AsyncSandbox as BaseAsyncSandbox
|
||||||
from pydantic import BaseModel, SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -20,6 +20,13 @@ from backend.data.model import (
|
|||||||
SchemaField,
|
SchemaField,
|
||||||
)
|
)
|
||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
|
from backend.util.sandbox_files import (
|
||||||
|
SandboxFileOutput,
|
||||||
|
extract_and_store_sandbox_files,
|
||||||
|
)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from backend.executor.utils import ExecutionContext
|
||||||
|
|
||||||
|
|
||||||
class ClaudeCodeExecutionError(Exception):
|
class ClaudeCodeExecutionError(Exception):
|
||||||
@@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block):
|
|||||||
advanced=True,
|
advanced=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
class FileOutput(BaseModel):
|
|
||||||
"""A file extracted from the sandbox."""
|
|
||||||
|
|
||||||
path: str
|
|
||||||
relative_path: str # Path relative to working directory (for GitHub, etc.)
|
|
||||||
name: str
|
|
||||||
content: str
|
|
||||||
|
|
||||||
class Output(BlockSchemaOutput):
|
class Output(BlockSchemaOutput):
|
||||||
response: str = SchemaField(
|
response: str = SchemaField(
|
||||||
description="The output/response from Claude Code execution"
|
description="The output/response from Claude Code execution"
|
||||||
)
|
)
|
||||||
files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
|
files: list[SandboxFileOutput] = SchemaField(
|
||||||
description=(
|
description=(
|
||||||
"List of text files created/modified by Claude Code during this execution. "
|
"List of text files created/modified by Claude Code during this execution. "
|
||||||
"Each file has 'path', 'relative_path', 'name', and 'content' fields."
|
"Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
|
||||||
|
"workspace_ref contains a workspace:// URI if the file was stored to workspace."
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
conversation_history: str = SchemaField(
|
conversation_history: str = SchemaField(
|
||||||
@@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block):
|
|||||||
"relative_path": "index.html",
|
"relative_path": "index.html",
|
||||||
"name": "index.html",
|
"name": "index.html",
|
||||||
"content": "<html>Hello World</html>",
|
"content": "<html>Hello World</html>",
|
||||||
|
"workspace_ref": None,
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
),
|
),
|
||||||
@@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block):
|
|||||||
"execute_claude_code": lambda *args, **kwargs: (
|
"execute_claude_code": lambda *args, **kwargs: (
|
||||||
"Created index.html with hello world content", # response
|
"Created index.html with hello world content", # response
|
||||||
[
|
[
|
||||||
ClaudeCodeBlock.FileOutput(
|
SandboxFileOutput(
|
||||||
path="/home/user/index.html",
|
path="/home/user/index.html",
|
||||||
relative_path="index.html",
|
relative_path="index.html",
|
||||||
name="index.html",
|
name="index.html",
|
||||||
content="<html>Hello World</html>",
|
content="<html>Hello World</html>",
|
||||||
|
workspace_ref=None,
|
||||||
)
|
)
|
||||||
], # files
|
], # files
|
||||||
"User: Create a hello world HTML file\n"
|
"User: Create a hello world HTML file\n"
|
||||||
@@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block):
|
|||||||
existing_sandbox_id: str,
|
existing_sandbox_id: str,
|
||||||
conversation_history: str,
|
conversation_history: str,
|
||||||
dispose_sandbox: bool,
|
dispose_sandbox: bool,
|
||||||
) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
|
execution_context: "ExecutionContext",
|
||||||
|
) -> tuple[str, list[SandboxFileOutput], str, str, str]:
|
||||||
"""
|
"""
|
||||||
Execute Claude Code in an E2B sandbox.
|
Execute Claude Code in an E2B sandbox.
|
||||||
|
|
||||||
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block):
|
|||||||
else:
|
else:
|
||||||
new_conversation_history = turn_entry
|
new_conversation_history = turn_entry
|
||||||
|
|
||||||
# Extract files created/modified during this run
|
# Extract files created/modified during this run and store to workspace
|
||||||
files = await self._extract_files(
|
sandbox_files = await extract_and_store_sandbox_files(
|
||||||
sandbox, working_directory, start_timestamp
|
sandbox=sandbox,
|
||||||
|
working_directory=working_directory,
|
||||||
|
execution_context=execution_context,
|
||||||
|
since_timestamp=start_timestamp,
|
||||||
|
text_only=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
return (
|
return (
|
||||||
response,
|
response,
|
||||||
files,
|
sandbox_files, # Already SandboxFileOutput objects
|
||||||
new_conversation_history,
|
new_conversation_history,
|
||||||
current_session_id,
|
current_session_id,
|
||||||
sandbox_id,
|
sandbox_id,
|
||||||
@@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block):
            if dispose_sandbox and sandbox:
                await sandbox.kill()
 
-    async def _extract_files(
-        self,
-        sandbox: BaseAsyncSandbox,
-        working_directory: str,
-        since_timestamp: str | None = None,
-    ) -> list["ClaudeCodeBlock.FileOutput"]:
-        """
-        Extract text files created/modified during this Claude Code execution.
-
-        Args:
-            sandbox: The E2B sandbox instance
-            working_directory: Directory to search for files
-            since_timestamp: ISO timestamp - only return files modified after this time
-
-        Returns:
-            List of FileOutput objects with path, relative_path, name, and content
-        """
-        files: list[ClaudeCodeBlock.FileOutput] = []
-
-        # Text file extensions we can safely read as text
-        text_extensions = {
-            ".txt",
-            ".md",
-            ".html",
-            ".htm",
-            ".css",
-            ".js",
-            ".ts",
-            ".jsx",
-            ".tsx",
-            ".json",
-            ".xml",
-            ".yaml",
-            ".yml",
-            ".toml",
-            ".ini",
-            ".cfg",
-            ".conf",
-            ".py",
-            ".rb",
-            ".php",
-            ".java",
-            ".c",
-            ".cpp",
-            ".h",
-            ".hpp",
-            ".cs",
-            ".go",
-            ".rs",
-            ".swift",
-            ".kt",
-            ".scala",
-            ".sh",
-            ".bash",
-            ".zsh",
-            ".sql",
-            ".graphql",
-            ".env",
-            ".gitignore",
-            ".dockerfile",
-            "Dockerfile",
-            ".vue",
-            ".svelte",
-            ".astro",
-            ".mdx",
-            ".rst",
-            ".tex",
-            ".csv",
-            ".log",
-        }
-
-        try:
-            # List files recursively using find command
-            # Exclude node_modules and .git directories, but allow hidden files
-            # like .env and .gitignore (they're filtered by text_extensions later)
-            # Filter by timestamp to only get files created/modified during this run
-            safe_working_dir = shlex.quote(working_directory)
-            timestamp_filter = ""
-            if since_timestamp:
-                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
-            find_result = await sandbox.commands.run(
-                f"find {safe_working_dir} -type f "
-                f"{timestamp_filter}"
-                f"-not -path '*/node_modules/*' "
-                f"-not -path '*/.git/*' "
-                f"2>/dev/null"
-            )
-
-            if find_result.stdout:
-                for file_path in find_result.stdout.strip().split("\n"):
-                    if not file_path:
-                        continue
-
-                    # Check if it's a text file we can read
-                    is_text = any(
-                        file_path.endswith(ext) for ext in text_extensions
-                    ) or file_path.endswith("Dockerfile")
-
-                    if is_text:
-                        try:
-                            content = await sandbox.files.read(file_path)
-                            # Handle bytes or string
-                            if isinstance(content, bytes):
-                                content = content.decode("utf-8", errors="replace")
-
-                            # Extract filename from path
-                            file_name = file_path.split("/")[-1]
-
-                            # Calculate relative path by stripping working directory
-                            relative_path = file_path
-                            if file_path.startswith(working_directory):
-                                relative_path = file_path[len(working_directory) :]
-                            # Remove leading slash if present
-                            if relative_path.startswith("/"):
-                                relative_path = relative_path[1:]
-
-                            files.append(
-                                ClaudeCodeBlock.FileOutput(
-                                    path=file_path,
-                                    relative_path=relative_path,
-                                    name=file_name,
-                                    content=content,
-                                )
-                            )
-                        except Exception:
-                            # Skip files that can't be read
-                            pass
-
-        except Exception:
-            # If file extraction fails, return empty results
-            pass
-
-        return files
-
     def _escape_prompt(self, prompt: str) -> str:
         """Escape the prompt for safe shell execution."""
         # Use single quotes and escape any single quotes in the prompt
@@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block):
        *,
        e2b_credentials: APIKeyCredentials,
        anthropic_credentials: APIKeyCredentials,
+        execution_context: "ExecutionContext",
        **kwargs,
    ) -> BlockOutput:
        try:
@@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block):
                existing_sandbox_id=input_data.sandbox_id,
                conversation_history=input_data.conversation_history,
                dispose_sandbox=input_data.dispose_sandbox,
+                execution_context=execution_context,
            )
 
            yield "response", response
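
The hunks above swap ClaudeCodeBlock's private file-extraction helper for the shared extract_and_store_sandbox_files utility and add a workspace_ref field to every returned file. Below is a minimal sketch of a downstream consumer of the block's "files" output, assuming only the field names visible in the test data above (path, relative_path, name, content, workspace_ref); the summarize_files helper and the surrounding code are illustrative and not part of the diff:

# Illustrative sketch only: summarize the `files` output of ClaudeCodeBlock.
# The dict keys mirror the test data in the diff above; nothing else is assumed.
def summarize_files(files: list[dict]) -> list[str]:
    summaries = []
    for f in files:
        # workspace_ref holds a workspace:// URI when the file was stored, else None
        location = f.get("workspace_ref") or f["path"]
        summaries.append(f"{f['name']} ({len(f['content'])} chars) -> {location}")
    return summaries


example_files = [
    {
        "path": "/home/user/index.html",
        "relative_path": "index.html",
        "name": "index.html",
        "content": "<html>Hello World</html>",
        "workspace_ref": None,
    }
]
print(summarize_files(example_files))
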
@@ -1,12 +1,12 @@
 from enum import Enum
-from typing import Any, Literal, Optional
+from typing import TYPE_CHECKING, Any, Literal, Optional
 
 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult
 from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
 from pydantic import BaseModel, Field, JsonValue, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -20,6 +20,13 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
+from backend.util.sandbox_files import (
+    SandboxFileOutput,
+    extract_and_store_sandbox_files,
+)
+
+if TYPE_CHECKING:
+    from backend.executor.utils import ExecutionContext
 
 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult):
 class BaseE2BExecutorMixin:
     """Shared implementation methods for E2B executor blocks."""
 
+    # Default working directory in E2B sandboxes
+    WORKING_DIR = "/home/user"
+
     async def execute_code(
         self,
         api_key: str,
@@ -95,14 +105,21 @@ class BaseE2BExecutorMixin:
        timeout: Optional[int] = None,
        sandbox_id: Optional[str] = None,
        dispose_sandbox: bool = False,
+        execution_context: Optional["ExecutionContext"] = None,
+        extract_files: bool = False,
    ):
        """
        Unified code execution method that handles all three use cases:
        1. Create new sandbox and execute (ExecuteCodeBlock)
        2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
        3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
+
+        Args:
+            extract_files: If True and execution_context provided, extract files
+                created/modified during execution and store to workspace.
        """  # noqa
        sandbox = None
+        files: list[SandboxFileOutput] = []
        try:
            if sandbox_id:
                # Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -118,6 +135,12 @@ class BaseE2BExecutorMixin:
                for cmd in setup_commands:
                    await sandbox.commands.run(cmd)
 
+            # Capture timestamp before execution to scope file extraction
+            start_timestamp = None
+            if extract_files:
+                ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
+                start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
+
            # Execute the code
            execution = await sandbox.run_code(
                code,
@@ -133,7 +156,24 @@ class BaseE2BExecutorMixin:
            stdout_logs = "".join(execution.logs.stdout)
            stderr_logs = "".join(execution.logs.stderr)
 
-            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
+            # Extract files created/modified during this execution
+            if extract_files and execution_context:
+                files = await extract_and_store_sandbox_files(
+                    sandbox=sandbox,
+                    working_directory=self.WORKING_DIR,
+                    execution_context=execution_context,
+                    since_timestamp=start_timestamp,
+                    text_only=False,  # Include binary files too
+                )
+
+            return (
+                results,
+                text_output,
+                stdout_logs,
+                stderr_logs,
+                sandbox.sandbox_id,
+                files,
+            )
        finally:
            # Dispose of sandbox if requested to reduce usage costs
            if dispose_sandbox and sandbox:
@@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
            description="Standard output logs from execution"
        )
        stderr_logs: str = SchemaField(description="Standard error logs from execution")
+        files: list[SandboxFileOutput] = SchemaField(
+            description=(
+                "Files created or modified during execution. "
+                "Each file has path, name, content, and workspace_ref (if stored)."
+            ),
+        )
 
    def __init__(self):
        super().__init__(
@@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                ("results", []),
                ("response", "Hello World"),
                ("stdout_logs", "Hello World\n"),
+                ("files", []),
            ],
            test_mock={
-                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
+                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: (  # noqa
                    [],  # results
                    "Hello World",  # text_output
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    "sandbox_id",  # sandbox_id
+                    [],  # files
                ),
            },
        )
 
    async def run(
-        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        execution_context: "ExecutionContext",
+        **kwargs,
    ) -> BlockOutput:
        try:
-            results, text_output, stdout, stderr, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _, files = await self.execute_code(
                api_key=credentials.api_key.get_secret_value(),
                code=input_data.code,
                language=input_data.language,
@@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                setup_commands=input_data.setup_commands,
                timeout=input_data.timeout,
                dispose_sandbox=input_data.dispose_sandbox,
+                execution_context=execution_context,
+                extract_files=True,
            )
 
            # Determine result object shape & filter out empty formats
@@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                yield "stdout_logs", stdout
            if stderr:
                yield "stderr_logs", stderr
+            # Always yield files (empty list if none)
+            yield "files", [f.model_dump() for f in files]
        except Exception as e:
            yield "error", str(e)
 
@@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    "sandbox_id",  # sandbox_id
+                    [],  # files
                ),
            },
        )
@@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        try:
-            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
+            _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
                api_key=credentials.api_key.get_secret_value(),
                code=input_data.setup_code,
                language=input_data.language,
@@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    sandbox_id,  # sandbox_id
+                    [],  # files
                ),
            },
        )
@@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        try:
-            results, text_output, stdout, stderr, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _, _ = await self.execute_code(
                api_key=credentials.api_key.get_secret_value(),
                code=input_data.step_code,
                language=input_data.language,
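
With the changes above, execute_code now returns a six-element tuple whose last item is the list of extracted files, so every caller unpacks one extra value even when it discards it, as InstantiateCodeSandboxBlock and ExecuteCodeStepBlock do. Below is a minimal sketch of that unpacking pattern with a stub coroutine in place of the real mixin method; only the tuple shape is taken from the diff, the stub itself is illustrative:

import asyncio


# Stub standing in for BaseE2BExecutorMixin.execute_code; only the result shape matters here.
async def execute_code_stub(extract_files: bool = False):
    files = [{"name": "index.html", "workspace_ref": None}] if extract_files else []
    return [], "Hello World", "Hello World\n", "", "sandbox_id", files


async def main():
    # ExecuteCodeBlock keeps the trailing files list...
    results, text, stdout, stderr, _, files = await execute_code_stub(extract_files=True)
    # ...while the other executor blocks discard it with a throwaway `_`.
    _, text_only, _, _, sandbox_id, _ = await execute_code_stub()
    print(text, sandbox_id, len(files))


asyncio.run(main())
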
@@ -1,6 +1,6 @@
 import re
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
 from openai.types.responses import Response as OpenAIResponse
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,6 +1,6 @@
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockManualWebhookConfig,

@@ -1,4 +1,4 @@
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,6 +1,6 @@
 from typing import Any, List
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,6 +1,6 @@
 import codecs
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -8,7 +8,7 @@ from typing import Any, Literal, cast
 import discord
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@
 Discord OAuth-based blocks.
 """
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,7 +7,7 @@ from typing import Literal
 
 from pydantic import BaseModel, ConfigDict, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@
 
 import codecs
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information.
 import logging
 from typing import Optional
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -3,6 +3,13 @@ import logging
 from enum import Enum
 from typing import Any
 
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.fal._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -10,13 +17,6 @@ from backend.blocks.fal._auth import (
     FalCredentialsField,
     FalCredentialsInput,
 )
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file
@@ -5,7 +5,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -5,7 +5,7 @@ from typing import Optional
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from urllib.parse import urlparse
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ import re
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ import base64
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -4,7 +4,7 @@ from typing import Any, List, Optional
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -4,7 +4,7 @@ from pathlib import Path
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from gravitas_md2gdocs import to_requests
 
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings
 

@@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel, Field
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,14 +7,14 @@ from enum import Enum
 from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings
 
@@ -3,7 +3,7 @@ from typing import Literal
 import googlemaps
 from pydantic import BaseModel, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -9,9 +9,7 @@ from typing import Any, Optional
 from prisma.enums import ReviewStatus
 from pydantic import BaseModel
 
-from backend.data.execution import ExecutionStatus
 from backend.data.human_review import ReviewResult
-from backend.executor.manager import async_update_node_execution_status
 from backend.util.clients import get_database_manager_async_client
 
 logger = logging.getLogger(__name__)
@@ -43,6 +41,8 @@ class HITLReviewHelper:
    @staticmethod
    async def update_node_execution_status(**kwargs) -> None:
        """Update the execution status of a node."""
+        from backend.executor.manager import async_update_node_execution_status
+
        await async_update_node_execution_status(
            db_client=get_database_manager_async_client(), **kwargs
        )
@@ -88,12 +88,13 @@ class HITLReviewHelper:
        Raises:
            Exception: If review creation or status update fails
        """
+        from backend.data.execution import ExecutionStatus
+
        # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
        # are handled by the caller:
        # - HITL blocks check human_in_the_loop_safe_mode in their run() method
        # - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
        # This function only handles checking for existing approvals.
 
        # Check if this node has already been approved (normal or auto-approval)
        if approval_result := await HITLReviewHelper.check_approval(
            node_exec_id=node_exec_id,
@@ -8,7 +8,7 @@ from typing import Literal
 import aiofiles
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,15 +1,15 @@
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,15 +1,15 @@
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,17 +1,17 @@
 from datetime import datetime, timedelta
 
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 
@@ -3,8 +3,7 @@ from typing import Any
 
 from prisma.enums import ReviewStatus
 
-from backend.blocks.helpers.review import HITLReviewHelper
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -12,6 +11,7 @@ from backend.data.block import (
     BlockSchemaOutput,
     BlockType,
 )
+from backend.blocks.helpers.review import HITLReviewHelper
 from backend.data.execution import ExecutionContext
 from backend.data.human_review import ReviewResult
 from backend.data.model import SchemaField
@@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional
 
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,9 +2,7 @@ import copy
 from datetime import date, time
 from typing import Any, Optional
 
-# Import for Google Drive file input block
-from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -12,6 +10,9 @@ from backend.data.block import (
     BlockSchemaInput,
     BlockType,
 )
+
+# Import for Google Drive file input block
+from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file

@@ -1,6 +1,6 @@
 from typing import Any
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,15 +1,15 @@
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,15 +1,15 @@
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -3,18 +3,18 @@ from urllib.parse import quote
 
 from typing_extensions import TypedDict
 
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,5 +1,12 @@
 from urllib.parse import quote
 
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.jina._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -8,13 +15,6 @@ from backend.blocks.jina._auth import (
     JinaCredentialsInput,
 )
 from backend.blocks.search import GetRequest
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import SchemaField
 from backend.util.exceptions import BlockExecutionError
 
@@ -15,7 +15,7 @@ from anthropic.types import ToolParam
 from groq import AsyncGroq
 from pydantic import BaseModel, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ import operator
 from enum import Enum
 from typing import Any
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import List, Literal
 
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import Any, Literal, Optional, Union
 from mem0 import MemoryClient
 from pydantic import BaseModel, SecretStr
 
-from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
+from backend.blocks._base import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
 from backend.data.model import (
     APIKeyCredentials,
     CredentialsField,

@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional
 
 from pydantic import model_validator
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import Any, Dict, List, Optional
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -4,7 +4,7 @@ from typing import List, Optional
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,15 +1,15 @@
-from backend.blocks.nvidia._auth import (
-    NvidiaCredentials,
-    NvidiaCredentialsField,
-    NvidiaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.nvidia._auth import (
+    NvidiaCredentials,
+    NvidiaCredentialsField,
+    NvidiaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 from backend.util.type import MediaFileType

@@ -6,7 +6,7 @@ from typing import Any, Literal
 import openai
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
Some files were not shown because too many files have changed in this diff.