Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-13 08:14:58 -05:00)

Compare commits: make-old-w ... fix/edge-h (3 commits)

- 42b7b6ee37
- a7f9bf3cb8
- 764070f6a7
(deleted file)

@@ -1,10 +0,0 @@
-{
-  "permissions": {
-    "allowedTools": [
-      "Read", "Grep", "Glob",
-      "Bash(ls:*)", "Bash(cat:*)", "Bash(grep:*)", "Bash(find:*)",
-      "Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)", "Bash(git worktree:*)",
-      "Bash(tmux:*)", "Bash(sleep:*)", "Bash(branchlet:*)"
-    ]
-  }
-}
.github/workflows/classic-autogpt-ci.yml (vendored, 74 changed lines)

@@ -6,15 +6,11 @@ on:
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/forge/**'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/forge/**'
 
 concurrency:
   group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -23,22 +19,47 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
+    working-directory: classic/original_autogpt
 
 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
 
     steps:
-      - name: Start MinIO service
+      # Quite slow on macOS (2~4 minutes to set up Docker)
+      # - name: Set up Docker (macOS)
+      #   if: runner.os == 'macOS'
+      #   uses: crazy-max/ghaction-setup-docker@v3
+
+      - name: Start MinIO service (Linux)
+        if: runner.os == 'Linux'
         working-directory: '.'
         run: |
           docker pull minio/minio:edge-cicd
           docker run -d -p 9000:9000 minio/minio:edge-cicd
+
+      - name: Start MinIO service (macOS)
+        if: runner.os == 'macOS'
+        working-directory: ${{ runner.temp }}
+        run: |
+          brew install minio/stable/minio
+          mkdir data
+          minio server ./data &
+
+      # No MinIO on Windows:
+      #  - Windows doesn't support running Linux Docker containers
+      #  - It doesn't seem possible to start background processes on Windows. They are
+      #    killed after the step returns.
+      #    See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
 
       - name: Checkout repository
         uses: actions/checkout@v4
         with:

@@ -50,23 +71,41 @@ jobs:
           git config --global user.name "Auto-GPT-Bot"
           git config --global user.email "github-bot@agpt.co"
 
-      - name: Set up Python 3.12
+      - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: ${{ matrix.python-version }}
 
       - id: get_date
         name: Get date
         run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}
 
-      - name: Install Poetry
-        run: curl -sSL https://install.python-poetry.org | python3 -
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
 
       - name: Install Python dependencies
         run: poetry install

@@ -77,13 +116,12 @@ jobs:
             --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
             --numprocesses=logical --durations=10 \
             --junitxml=junit.xml -o junit_family=legacy \
-            original_autogpt/tests/unit original_autogpt/tests/integration
+            tests/unit tests/integration
         env:
           CI: true
           PLAIN_OUTPUT: True
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          S3_ENDPOINT_URL: http://127.0.0.1:9000
+          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin
 

@@ -97,11 +135,11 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: autogpt-agent
+          flags: autogpt-agent,${{ runner.os }}
 
       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/logs/
+          path: classic/original_autogpt/logs/
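Note: GitHub Actions expressions have no ternary operator; the `runs-on` line added above relies on `&&`/`||` short-circuiting, which behaves like `cond ? a : b` as long as the middle operand is truthy. A minimal standalone sketch of the idiom (the job name is hypothetical):

    jobs:
      demo:
        strategy:
          matrix:
            platform-os: [ubuntu, macos, macos-arm64, windows]
        # 'macos-arm64' is not a real runner label, so it is mapped to 'macos-14'
        # (an Apple Silicon runner); every other entry becomes '<os>-latest'.
        # Caveat: if the middle operand could be '' or false, '||' would fall
        # through to the right-hand operand even when the condition holds.
        runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
        steps:
          - run: echo "running on ${{ runner.os }}"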
.github/workflows/classic-autogpts-ci.yml (vendored, 36 changed lines)

@@ -11,6 +11,9 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
     branches: [ master, dev, release-* ]

@@ -19,6 +22,9 @@ on:
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
       - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'
 
 defaults:

@@ -29,9 +35,13 @@ defaults:
 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        agent-name: [ original_autogpt ]
+      fail-fast: false
     timeout-minutes: 20
     env:
-      min-python-version: '3.12'
+      min-python-version: '3.10'
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4

@@ -45,22 +55,22 @@ jobs:
           python-version: ${{ env.min-python-version }}
 
       - name: Install Poetry
+        working-directory: ./classic/${{ matrix.agent-name }}/
         run: |
           curl -sSL https://install.python-poetry.org | python -
 
-      - name: Install dependencies
-        run: poetry install
-
-      - name: Run smoke tests with direct-benchmark
+      - name: Run regression tests
         run: |
-          poetry run direct-benchmark run \
-            --strategies one_shot \
-            --models claude \
-            --tests ReadFile,WriteFile \
-            --json
+          ./run agent start ${{ matrix.agent-name }}
+          cd ${{ matrix.agent-name }}
+          poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
+          poetry run agbenchmark --test=WriteFile
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          AGENT_NAME: ${{ matrix.agent-name }}
           REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
-          NONINTERACTIVE_MODE: "true"
-          CI: true
+          HELICONE_CACHE_ENABLED: false
+          HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
+          REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
+          TELEMETRY_ENVIRONMENT: autogpt-ci
+          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
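Note: the job above is parameterized over agent directories through a one-element matrix, so covering another agent later is a one-line change. A minimal sketch of the shape (the second matrix entry is hypothetical):

    jobs:
      serve-agent-protocol:
        runs-on: ubuntu-latest
        strategy:
          fail-fast: false  # let each agent's run finish independently
          matrix:
            agent-name: [ original_autogpt, some_other_agent ]  # second entry is hypothetical
        steps:
          - uses: actions/checkout@v4
          - name: Per-agent step
            working-directory: ./classic/${{ matrix.agent-name }}/
            run: pwd  # every step can interpolate the matrix value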
.github/workflows/classic-benchmark-ci.yml (vendored, 194 changed lines)

@@ -1,21 +1,17 @@
-name: Classic - Direct Benchmark CI
+name: Classic - AGBenchmark CI
 
 on:
   push:
     branches: [ master, dev, ci-test* ]
     paths:
-      - 'classic/direct_benchmark/**'
-      - 'classic/benchmark/agbenchmark/challenges/**'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
       - .github/workflows/classic-benchmark-ci.yml
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
-      - 'classic/direct_benchmark/**'
-      - 'classic/benchmark/agbenchmark/challenges/**'
-      - 'classic/original_autogpt/**'
-      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
       - .github/workflows/classic-benchmark-ci.yml
 
 concurrency:

@@ -27,16 +23,23 @@ defaults:
     shell: bash
 
 env:
-  min-python-version: '3.12'
+  min-python-version: '3.10'
 
 jobs:
-  benchmark-tests:
-    runs-on: ubuntu-latest
+  test:
+    permissions:
+      contents: read
     timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
     defaults:
       run:
         shell: bash
-        working-directory: classic
+        working-directory: classic/benchmark
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4

@@ -44,88 +47,71 @@ jobs:
           fetch-depth: 0
           submodules: true
 
-      - name: Set up Python ${{ env.min-python-version }}
+      - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ env.min-python-version }}
+          python-version: ${{ matrix.python-version }}
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}
 
-      - name: Install Poetry
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
         run: |
           curl -sSL https://install.python-poetry.org | python3 -
 
-      - name: Install dependencies
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
+
+      - name: Install Python dependencies
         run: poetry install
 
-      - name: Run basic benchmark tests
+      - name: Run pytest with coverage
         run: |
-          echo "Testing ReadFile challenge with one_shot strategy..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --tests ReadFile \
-            --json
-
-          echo "Testing WriteFile challenge..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --tests WriteFile \
-            --json
+          poetry run pytest -vv \
+            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
+            --durations=10 \
+            --junitxml=junit.xml -o junit_family=legacy \
+            tests
         env:
           CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
 
-      - name: Test category filtering
-        run: |
-          echo "Testing coding category..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --categories coding \
-            --tests ReadFile,WriteFile \
-            --json
-        env:
-          CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
+      - name: Upload test results to Codecov
+        if: ${{ !cancelled() }} # Run even if tests fail
+        uses: codecov/test-results-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
 
-      - name: Test multiple strategies
-        run: |
-          echo "Testing multiple strategies..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot,plan_execute \
-            --models claude \
-            --tests ReadFile \
-            --parallel 2 \
-            --json
-        env:
-          CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          flags: agbenchmark,${{ runner.os }}
 
-  # Run regression tests on maintain challenges
-  regression-tests:
+  self-test-with-agent:
     runs-on: ubuntu-latest
-    timeout-minutes: 45
-    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev'
-    defaults:
-      run:
-        shell: bash
-        working-directory: classic
+    strategy:
+      matrix:
+        agent-name: [forge]
+      fail-fast: false
+    timeout-minutes: 20
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4

@@ -140,23 +126,51 @@ jobs:
 
       - name: Install Poetry
         run: |
-          curl -sSL https://install.python-poetry.org | python3 -
+          curl -sSL https://install.python-poetry.org | python -
 
-      - name: Install dependencies
-        run: poetry install
-
       - name: Run regression tests
+        working-directory: classic
         run: |
-          echo "Running regression tests (previously beaten challenges)..."
-          poetry run direct-benchmark run \
-            --fresh \
-            --strategies one_shot \
-            --models claude \
-            --maintain \
-            --parallel 4 \
-            --json
+          ./run agent start ${{ matrix.agent-name }}
+          cd ${{ matrix.agent-name }}
+
+          set +e # Ignore non-zero exit codes and continue execution
+          echo "Running the following command: poetry run agbenchmark --maintain --mock"
+          poetry run agbenchmark --maintain --mock
+          EXIT_CODE=$?
+          set -e # Stop ignoring non-zero exit codes
+          # Check if the exit code was 5, and if so, exit with 0 instead
+          if [ $EXIT_CODE -eq 5 ]; then
+            echo "regression_tests.json is empty."
+          fi
+
+          echo "Running the following command: poetry run agbenchmark --mock"
+          poetry run agbenchmark --mock
+
+          echo "Running the following command: poetry run agbenchmark --mock --category=data"
+          poetry run agbenchmark --mock --category=data
+
+          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
+          poetry run agbenchmark --mock --category=coding
+
+          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+          # poetry run agbenchmark --test=WriteFile
+          cd ../benchmark
+          poetry install
+          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
+          export BUILD_SKILL_TREE=true
+
+          # poetry run agbenchmark --mock
+
+          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
+          # if [ ! -z "$CHANGED" ]; then
+          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+          #   echo "$CHANGED"
+          #   exit 1
+          # else
+          #   echo "No unstaged changes."
+          # fi
         env:
-          CI: true
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          NONINTERACTIVE_MODE: "true"
+          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
+          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
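Note: the `set +e` / `set -e` bracket in the regression step exists because agbenchmark is pytest-based, and pytest exits with code 5 when no tests were collected; an empty regression suite should not fail CI. A standalone sketch of the pattern, written as a variant that additionally propagates real failures (the step name is hypothetical):

    steps:
      - name: Tolerate "no tests collected"
        run: |
          set +e    # stop bash from aborting on a non-zero exit
          poetry run agbenchmark --maintain --mock
          EXIT_CODE=$?
          set -e    # restore fail-fast behaviour

          if [ $EXIT_CODE -eq 5 ]; then
            echo "No regression tests collected; treating as success."
          elif [ $EXIT_CODE -ne 0 ]; then
            exit $EXIT_CODE   # propagate genuine failures
          fi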
.github/workflows/classic-forge-ci.yml (vendored, 185 changed lines)

@@ -6,11 +6,13 @@ on:
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
 
 concurrency:
   group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -19,60 +21,131 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
+    working-directory: classic/forge
 
 jobs:
   test:
     permissions:
       contents: read
     timeout-minutes: 30
-    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"]
+        platform-os: [ubuntu, macos, macos-arm64, windows]
+    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
 
     steps:
-      - name: Start MinIO service
+      # Quite slow on macOS (2~4 minutes to set up Docker)
+      # - name: Set up Docker (macOS)
+      #   if: runner.os == 'macOS'
+      #   uses: crazy-max/ghaction-setup-docker@v3
+
+      - name: Start MinIO service (Linux)
+        if: runner.os == 'Linux'
         working-directory: '.'
         run: |
           docker pull minio/minio:edge-cicd
           docker run -d -p 9000:9000 minio/minio:edge-cicd
+
+      - name: Start MinIO service (macOS)
+        if: runner.os == 'macOS'
+        working-directory: ${{ runner.temp }}
+        run: |
+          brew install minio/stable/minio
+          mkdir data
+          minio server ./data &
+
+      # No MinIO on Windows:
+      #  - Windows doesn't support running Linux Docker containers
+      #  - It doesn't seem possible to start background processes on Windows. They are
+      #    killed after the step returns.
+      #    See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
 
       - name: Checkout repository
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          submodules: true
 
-      - name: Set up Python 3.12
+      - name: Checkout cassettes
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        env:
+          PR_BASE: ${{ github.event.pull_request.base.ref }}
+          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+        run: |
+          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+          cassette_base_branch="${PR_BASE}"
+          cd tests/vcr_cassettes
+
+          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
+            cassette_base_branch="master"
+          fi
+
+          if git ls-remote --exit-code --heads origin $cassette_branch ; then
+            git fetch origin $cassette_branch
+            git fetch origin $cassette_base_branch
+
+            git checkout $cassette_branch
+
+            # Pick non-conflicting cassette updates from the base branch
+            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
+            echo "Using cassettes from mirror branch '$cassette_branch'," \
+              "synced to upstream branch '$cassette_base_branch'."
+          else
+            git checkout -b $cassette_branch
+            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
+              "Using cassettes from '$cassette_base_branch'."
+          fi
+
+      - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: ${{ matrix.python-version }}
 
       - name: Set up Python dependency cache
+        # On Windows, unpacking cached dependencies takes longer than just installing them
+        if: runner.os != 'Windows'
         uses: actions/cache@v4
         with:
-          path: ~/.cache/pypoetry
-          key: poetry-${{ runner.os }}-${{ hashFiles('classic/poetry.lock') }}
+          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
+          key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}
 
-      - name: Install Poetry
-        run: curl -sSL https://install.python-poetry.org | python3 -
+      - name: Install Poetry (Unix)
+        if: runner.os != 'Windows'
+        run: |
+          curl -sSL https://install.python-poetry.org | python3 -
+
+          if [ "${{ runner.os }}" = "macOS" ]; then
+            PATH="$HOME/.local/bin:$PATH"
+            echo "$HOME/.local/bin" >> $GITHUB_PATH
+          fi
+
+      - name: Install Poetry (Windows)
+        if: runner.os == 'Windows'
+        shell: pwsh
+        run: |
+          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
+
+          $env:PATH += ";$env:APPDATA\Python\Scripts"
+          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
 
       - name: Install Python dependencies
         run: poetry install
 
-      - name: Install Playwright browsers
-        run: poetry run playwright install chromium
-
       - name: Run pytest with coverage
         run: |
           poetry run pytest -vv \
             --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
             --durations=10 \
             --junitxml=junit.xml -o junit_family=legacy \
-            forge/forge forge/tests
+            forge
         env:
           CI: true
           PLAIN_OUTPUT: True
-          # API keys - tests that need these will skip if not available
-          # Secrets are not available to fork PRs (GitHub security feature)
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          S3_ENDPOINT_URL: http://127.0.0.1:9000
+          S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
           AWS_ACCESS_KEY_ID: minioadmin
           AWS_SECRET_ACCESS_KEY: minioadmin
 

@@ -86,11 +159,85 @@ jobs:
         uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-          flags: forge
+          flags: forge,${{ runner.os }}
 
+      - id: setup_git_auth
+        name: Set up git token authentication
+        # Cassettes may be pushed even when tests fail
+        if: success() || failure()
+        run: |
+          config_key="http.${{ github.server_url }}/.extraheader"
+          if [ "${{ runner.os }}" = 'macOS' ]; then
+            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
+          else
+            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
+          fi
+
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          cd tests/vcr_cassettes
+          git config "$config_key" \
+            "Authorization: Basic $base64_pat"
+
+          echo "config_key=$config_key" >> $GITHUB_OUTPUT
+
+      - id: push_cassettes
+        name: Push updated cassettes
+        # For pull requests, push updated cassettes even when tests fail
+        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
+        env:
+          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+        run: |
+          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
+            is_pull_request=true
+            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+          else
+            cassette_branch="${{ github.ref_name }}"
+          fi
+
+          cd tests/vcr_cassettes
+          # Commit & push changes to cassettes if any
+          if ! git diff --quiet; then
+            git add .
+            git commit -m "Auto-update cassettes"
+            git push origin HEAD:$cassette_branch
+            if [ ! $is_pull_request ]; then
+              cd ../..
+              git add tests/vcr_cassettes
+              git commit -m "Update cassette submodule"
+              git push origin HEAD:$cassette_branch
+            fi
+            echo "updated=true" >> $GITHUB_OUTPUT
+          else
+            echo "updated=false" >> $GITHUB_OUTPUT
+            echo "No cassette changes to commit"
+          fi
+
+      - name: Post Set up git token auth
+        if: steps.setup_git_auth.outcome == 'success'
+        run: |
+          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+
+      - name: Apply "behaviour change" label and comment on PR
+        if: ${{ startsWith(github.event_name, 'pull_request') }}
+        run: |
+          PR_NUMBER="${{ github.event.pull_request.number }}"
+          TOKEN="${{ secrets.PAT_REVIEW }}"
+          REPO="${{ github.repository }}"
+
+          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
+            echo "Adding label and comment..."
+            echo $TOKEN | gh auth login --with-token
+            gh issue edit $PR_NUMBER --add-label "behaviour change"
+            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
+          fi
 
       - name: Upload logs to artifact
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: test-logs
-          path: classic/logs/
+          path: classic/forge/logs/
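Note: the "Checkout cassettes" step gives each PR its own branch (`<author>-<branch>`) in the VCR cassette submodule, and falls back to the PR's base branch and then master when that branch does not exist yet. A condensed sketch of just the branch-resolution logic (the variable values are hypothetical examples):

    steps:
      - name: Resolve cassette branch
        run: |
          cd tests/vcr_cassettes
          candidate="alice-my-feature"   # "<PR author>-<PR branch>"; hypothetical value
          base="dev"                     # PR base branch; hypothetical value

          # Fall back to master if the base branch has no cassette mirror.
          git ls-remote --exit-code --heads origin "$base" || base="master"

          if git ls-remote --exit-code --heads origin "$candidate"; then
            # Reuse this PR's cassette branch, folding in non-conflicting base updates.
            git fetch origin "$candidate" "$base"
            git checkout "$candidate"
            git merge --no-commit --strategy-option=ours "origin/$base"
          else
            # First run for this PR: start a fresh branch from the pinned commit.
            git checkout -b "$candidate"
          fi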
.github/workflows/classic-frontend-ci.yml (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
+name: Classic - Frontend CI/CD
+
+on:
+  push:
+    branches:
+      - master
+      - dev
+      - 'ci-test*' # This will match any branch that starts with "ci-test"
+    paths:
+      - 'classic/frontend/**'
+      - '.github/workflows/classic-frontend-ci.yml'
+  pull_request:
+    paths:
+      - 'classic/frontend/**'
+      - '.github/workflows/classic-frontend-ci.yml'
+
+jobs:
+  build:
+    permissions:
+      contents: write
+      pull-requests: write
+    runs-on: ubuntu-latest
+    env:
+      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
+
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v4
+
+      - name: Setup Flutter
+        uses: subosito/flutter-action@v2
+        with:
+          flutter-version: '3.13.2'
+
+      - name: Build Flutter to Web
+        run: |
+          cd classic/frontend
+          flutter build web --base-href /app/
+
+      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
+      #   if: github.event_name == 'push'
+      #   run: |
+      #     git config --local user.email "action@github.com"
+      #     git config --local user.name "GitHub Action"
+      #     git add classic/frontend/build/web
+      #     git checkout -B ${{ env.BUILD_BRANCH }}
+      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
+      #     git push -f origin ${{ env.BUILD_BRANCH }}
+
+      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
+        if: github.event_name == 'push'
+        uses: peter-evans/create-pull-request@v8
+        with:
+          add-paths: classic/frontend/build/web
+          base: ${{ github.ref_name }}
+          branch: ${{ env.BUILD_BRANCH }}
+          delete-branch: true
+          title: "Update frontend build in `${{ github.ref_name }}`"
+          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
+          commit-message: "Update frontend build based on commit ${{ github.sha }}"
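Note: instead of force-pushing build artifacts directly (the commented-out step), the workflow delegates branch creation, committing, and PR management to peter-evans/create-pull-request, which updates one long-lived PR in place on every push. A minimal sketch of that pattern in isolation (job, build command, branch, and path names are hypothetical; the action pin matches the file above):

    jobs:
      publish-build:
        runs-on: ubuntu-latest
        permissions:
          contents: write        # push the build branch
          pull-requests: write   # open/update the PR
        steps:
          - uses: actions/checkout@v4
          - run: make build      # hypothetical build step producing dist/
          - uses: peter-evans/create-pull-request@v8
            with:
              add-paths: dist/               # only commit the generated files
              branch: auto/build-output      # reused on every run; PR is updated in place
              delete-branch: true            # clean up once the PR is merged
              title: "Update generated build output"
              commit-message: "Update build output"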
.github/workflows/classic-python-checks.yml (vendored, 67 changed lines)

@@ -7,9 +7,7 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/pyproject.toml'
-      - 'classic/poetry.lock'
+      - 'classic/benchmark/**'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
   pull_request:

@@ -18,9 +16,7 @@ on:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
-      - 'classic/direct_benchmark/**'
-      - 'classic/pyproject.toml'
-      - 'classic/poetry.lock'
+      - 'classic/benchmark/**'
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
 

@@ -31,13 +27,44 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: classic
 
 jobs:
+  get-changed-parts:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - id: changes-in
+        name: Determine affected subprojects
+        uses: dorny/paths-filter@v3
+        with:
+          filters: |
+            original_autogpt:
+              - classic/original_autogpt/autogpt/**
+              - classic/original_autogpt/tests/**
+              - classic/original_autogpt/poetry.lock
+            forge:
+              - classic/forge/forge/**
+              - classic/forge/tests/**
+              - classic/forge/poetry.lock
+            benchmark:
+              - classic/benchmark/agbenchmark/**
+              - classic/benchmark/tests/**
+              - classic/benchmark/poetry.lock
+    outputs:
+      changed-parts: ${{ steps.changes-in.outputs.changes }}
+
   lint:
+    needs: get-changed-parts
     runs-on: ubuntu-latest
     env:
-      min-python-version: "3.12"
+      min-python-version: "3.10"
+
+    strategy:
+      matrix:
+        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
+      fail-fast: false
 
     steps:
       - name: Checkout repository

@@ -54,31 +81,42 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
 
       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -
 
+      # Install dependencies
+
       - name: Install Python dependencies
-        run: poetry install
+        run: poetry -C classic/${{ matrix.sub-package }} install
 
       # Lint
 
       - name: Lint (isort)
         run: poetry run isort --check .
+        working-directory: classic/${{ matrix.sub-package }}
 
       - name: Lint (Black)
         if: success() || failure()
         run: poetry run black --check .
+        working-directory: classic/${{ matrix.sub-package }}
 
       - name: Lint (Flake8)
         if: success() || failure()
         run: poetry run flake8 .
+        working-directory: classic/${{ matrix.sub-package }}
 
   types:
+    needs: get-changed-parts
     runs-on: ubuntu-latest
     env:
-      min-python-version: "3.12"
+      min-python-version: "3.10"
+
+    strategy:
+      matrix:
+        sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
+      fail-fast: false
 
     steps:
       - name: Checkout repository

@@ -95,16 +133,19 @@ jobs:
         uses: actions/cache@v4
         with:
           path: ~/.cache/pypoetry
-          key: ${{ runner.os }}-poetry-${{ hashFiles('classic/poetry.lock') }}
+          key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
 
       - name: Install Poetry
         run: curl -sSL https://install.python-poetry.org | python3 -
 
+      # Install dependencies
+
       - name: Install Python dependencies
-        run: poetry install
+        run: poetry -C classic/${{ matrix.sub-package }} install
 
       # Typecheck
 
       - name: Typecheck
         if: success() || failure()
         run: poetry run pyright
+        working-directory: classic/${{ matrix.sub-package }}
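Note: dorny/paths-filter exposes a `changes` output, a JSON array naming every filter that matched, and the lint/types jobs above feed it straight into a matrix via `fromJson` so they only fan out over the subprojects that actually changed. A minimal sketch of the same wiring (filter names 'app' and 'lib' are hypothetical; the empty-array guard is one way to handle runs where nothing matched):

    jobs:
      detect:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v4
          - id: filter
            uses: dorny/paths-filter@v3
            with:
              filters: |
                app:
                  - app/**
                lib:
                  - lib/**
        outputs:
          changed: ${{ steps.filter.outputs.changes }}  # e.g. '["app"]'

      check:
        needs: detect
        if: needs.detect.outputs.changed != '[]'  # skip the fan-out entirely when empty
        runs-on: ubuntu-latest
        strategy:
          fail-fast: false
          matrix:
            part: ${{ fromJson(needs.detect.outputs.changed) }}
        steps:
          - run: echo "Checking ${{ matrix.part }}"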
.gitignore (vendored, 9 changed lines)

@@ -3,7 +3,6 @@
 classic/original_autogpt/keys.py
 classic/original_autogpt/*.json
 auto_gpt_workspace/*
-.autogpt/
 *.mpeg
 .env
 # Root .env files

@@ -160,10 +159,6 @@ CURRENT_BULLETIN.md
 
 # AgBenchmark
 classic/benchmark/agbenchmark/reports/
-classic/reports/
-classic/direct_benchmark/reports/
-classic/.benchmark_workspaces/
-classic/direct_benchmark/.benchmark_workspaces/
 
 # Nodejs
 package-lock.json

@@ -182,11 +177,7 @@ autogpt_platform/backend/settings.py
 
 *.ign.*
 .test-contents
-**/.claude/settings.local.json
 .claude/settings.local.json
 CLAUDE.local.md
 /autogpt_platform/backend/logs
 
-# Test database
-test.db
-
 .next
.gitmodules (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
+[submodule "classic/forge/tests/vcr_cassettes"]
+	path = classic/forge/tests/vcr_cassettes
+	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
@@ -43,10 +43,29 @@ repos:
         pass_filenames: false
 
       - id: poetry-install
-        name: Check & Install dependencies - Classic
-        alias: poetry-install-classic
-        entry: poetry -C classic install
-        files: ^classic/poetry\.lock$
+        name: Check & Install dependencies - Classic - AutoGPT
+        alias: poetry-install-classic-autogpt
+        entry: poetry -C classic/original_autogpt install
+        # include forge source (since it's a path dependency)
+        files: ^classic/(original_autogpt|forge)/poetry\.lock$
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: poetry-install
+        name: Check & Install dependencies - Classic - Forge
+        alias: poetry-install-classic-forge
+        entry: poetry -C classic/forge install
+        files: ^classic/forge/poetry\.lock$
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: poetry-install
+        name: Check & Install dependencies - Classic - Benchmark
+        alias: poetry-install-classic-benchmark
+        entry: poetry -C classic/benchmark install
+        files: ^classic/benchmark/poetry\.lock$
         types: [file]
         language: system
         pass_filenames: false

@@ -97,10 +116,26 @@ repos:
         language: system
 
       - id: isort
-        name: Lint (isort) - Classic
-        alias: isort-classic
-        entry: bash -c 'cd classic && poetry run isort $(echo "$@" | sed "s|classic/||g")' --
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/
+        name: Lint (isort) - Classic - AutoGPT
+        alias: isort-classic-autogpt
+        entry: poetry -P classic/original_autogpt run isort -p autogpt
+        files: ^classic/original_autogpt/
+        types: [file, python]
+        language: system
+
+      - id: isort
+        name: Lint (isort) - Classic - Forge
+        alias: isort-classic-forge
+        entry: poetry -P classic/forge run isort -p forge
+        files: ^classic/forge/
+        types: [file, python]
+        language: system
+
+      - id: isort
+        name: Lint (isort) - Classic - Benchmark
+        alias: isort-classic-benchmark
+        entry: poetry -P classic/benchmark run isort -p agbenchmark
+        files: ^classic/benchmark/
         types: [file, python]
         language: system
 

@@ -114,13 +149,26 @@ repos:
 
   - repo: https://github.com/PyCQA/flake8
     rev: 7.0.0
-    # Use consolidated flake8 config at classic/.flake8
+    # To have flake8 load the config of the individual subprojects, we have to call
+    # them separately.
     hooks:
       - id: flake8
-        name: Lint (Flake8) - Classic
-        alias: flake8-classic
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/
-        args: [--config=classic/.flake8]
+        name: Lint (Flake8) - Classic - AutoGPT
+        alias: flake8-classic-autogpt
+        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
+        args: [--config=classic/original_autogpt/.flake8]
+
+      - id: flake8
+        name: Lint (Flake8) - Classic - Forge
+        alias: flake8-classic-forge
+        files: ^classic/forge/(forge|tests)/
+        args: [--config=classic/forge/.flake8]
+
+      - id: flake8
+        name: Lint (Flake8) - Classic - Benchmark
+        alias: flake8-classic-benchmark
+        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
+        args: [--config=classic/benchmark/.flake8]
 
   - repo: local
     hooks:

@@ -156,10 +204,29 @@ repos:
         pass_filenames: false
 
       - id: pyright
-        name: Typecheck - Classic
-        alias: pyright-classic
-        entry: poetry -C classic run pyright
-        files: ^classic/(original_autogpt|forge|direct_benchmark)/.*\.py$|^classic/poetry\.lock$
+        name: Typecheck - Classic - AutoGPT
+        alias: pyright-classic-autogpt
+        entry: poetry -C classic/original_autogpt run pyright
+        # include forge source (since it's a path dependency) but exclude *_test.py files:
+        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: pyright
+        name: Typecheck - Classic - Forge
+        alias: pyright-classic-forge
+        entry: poetry -C classic/forge run pyright
+        files: ^classic/forge/(forge/|poetry\.lock$)
+        types: [file]
+        language: system
+        pass_filenames: false
+
+      - id: pyright
+        name: Typecheck - Classic - Benchmark
+        alias: pyright-classic-benchmark
+        entry: poetry -C classic/benchmark run pyright
+        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
         types: [file]
         language: system
         pass_filenames: false
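Note: every hook above is a `language: system` hook that shells out to the subproject's own Poetry environment; `poetry -C <dir>` (and `-P` for `run`) targets that directory's pyproject/lockfile, and `pass_filenames: false` keeps pre-commit from appending staged paths to the entry. A minimal standalone config in the same shape (the 'myproject' subdirectory is hypothetical):

    repos:
      - repo: local
        hooks:
          - id: poetry-install
            name: Check & Install dependencies - myproject
            entry: poetry -C myproject install  # run against myproject's pyproject/lock
            files: ^myproject/poetry\.lock$     # only trigger when the lockfile changes
            types: [file]
            language: system                    # use the developer's own toolchain
            pass_filenames: false               # don't append staged paths to the entry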
|||||||
@@ -10,7 +10,7 @@ from typing_extensions import TypedDict
|
|||||||
|
|
||||||
import backend.api.features.store.cache as store_cache
|
import backend.api.features.store.cache as store_cache
|
||||||
import backend.api.features.store.model as store_model
|
import backend.api.features.store.model as store_model
|
||||||
import backend.blocks
|
import backend.data.block
|
||||||
from backend.api.external.middleware import require_permission
|
from backend.api.external.middleware import require_permission
|
||||||
from backend.data import execution as execution_db
|
from backend.data import execution as execution_db
|
||||||
from backend.data import graph as graph_db
|
from backend.data import graph as graph_db
|
||||||
@@ -67,7 +67,7 @@ async def get_user_info(
|
|||||||
dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
|
dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
|
||||||
)
|
)
|
||||||
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
|
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
|
||||||
blocks = [block() for block in backend.blocks.get_blocks().values()]
|
blocks = [block() for block in backend.data.block.get_blocks().values()]
|
||||||
return [b.to_dict() for b in blocks if not b.disabled]
|
return [b.to_dict() for b in blocks if not b.disabled]
|
||||||
|
|
||||||
|
|
||||||
@@ -83,7 +83,7 @@ async def execute_graph_block(
|
|||||||
require_permission(APIKeyPermission.EXECUTE_BLOCK)
|
require_permission(APIKeyPermission.EXECUTE_BLOCK)
|
||||||
),
|
),
|
||||||
) -> CompletedBlockOutput:
|
) -> CompletedBlockOutput:
|
||||||
obj = backend.blocks.get_block(block_id)
|
obj = backend.data.block.get_block(block_id)
|
||||||
if not obj:
|
if not obj:
|
||||||
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
|
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
|
||||||
if obj.disabled:
|
if obj.disabled:
|
||||||
|
|||||||
@@ -10,15 +10,10 @@ import backend.api.features.library.db as library_db
|
|||||||
import backend.api.features.library.model as library_model
|
import backend.api.features.library.model as library_model
|
||||||
import backend.api.features.store.db as store_db
|
import backend.api.features.store.db as store_db
|
||||||
import backend.api.features.store.model as store_model
|
import backend.api.features.store.model as store_model
|
||||||
|
import backend.data.block
|
||||||
from backend.blocks import load_all_blocks
|
from backend.blocks import load_all_blocks
|
||||||
from backend.blocks._base import (
|
|
||||||
AnyBlockSchema,
|
|
||||||
BlockCategory,
|
|
||||||
BlockInfo,
|
|
||||||
BlockSchema,
|
|
||||||
BlockType,
|
|
||||||
)
|
|
||||||
from backend.blocks.llm import LlmModel
|
from backend.blocks.llm import LlmModel
|
||||||
|
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
|
||||||
from backend.data.db import query_raw_with_schema
|
from backend.data.db import query_raw_with_schema
|
||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
from backend.util.cache import cached
|
from backend.util.cache import cached
|
||||||
@@ -27,7 +22,7 @@ from backend.util.models import Pagination
|
|||||||
from .model import (
|
from .model import (
|
||||||
BlockCategoryResponse,
|
BlockCategoryResponse,
|
||||||
BlockResponse,
|
BlockResponse,
|
||||||
BlockTypeFilter,
|
BlockType,
|
||||||
CountResponse,
|
CountResponse,
|
||||||
FilterType,
|
FilterType,
|
||||||
Provider,
|
Provider,
|
||||||
@@ -93,7 +88,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
|
|||||||
def get_blocks(
|
def get_blocks(
|
||||||
*,
|
*,
|
||||||
category: str | None = None,
|
category: str | None = None,
|
||||||
type: BlockTypeFilter | None = None,
|
type: BlockType | None = None,
|
||||||
provider: ProviderName | None = None,
|
provider: ProviderName | None = None,
|
||||||
page: int = 1,
|
page: int = 1,
|
||||||
page_size: int = 50,
|
page_size: int = 50,
|
||||||
@@ -674,9 +669,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
|
|||||||
for block_type in load_all_blocks().values():
|
for block_type in load_all_blocks().values():
|
||||||
block: AnyBlockSchema = block_type()
|
block: AnyBlockSchema = block_type()
|
||||||
if block.disabled or block.block_type in (
|
if block.disabled or block.block_type in (
|
||||||
BlockType.INPUT,
|
backend.data.block.BlockType.INPUT,
|
||||||
BlockType.OUTPUT,
|
backend.data.block.BlockType.OUTPUT,
|
||||||
BlockType.AGENT,
|
backend.data.block.BlockType.AGENT,
|
||||||
):
|
):
|
||||||
continue
|
continue
|
||||||
# Find the execution count for this block
|
# Find the execution count for this block
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from pydantic import BaseModel
|
|||||||
|
|
||||||
import backend.api.features.library.model as library_model
|
import backend.api.features.library.model as library_model
|
||||||
import backend.api.features.store.model as store_model
|
import backend.api.features.store.model as store_model
|
||||||
from backend.blocks._base import BlockInfo
|
from backend.data.block import BlockInfo
|
||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
from backend.util.models import Pagination
|
from backend.util.models import Pagination
|
||||||
|
|
||||||
@@ -15,7 +15,7 @@ FilterType = Literal[
|
|||||||
"my_agents",
|
"my_agents",
|
||||||
]
|
]
|
||||||
|
|
||||||
BlockTypeFilter = Literal["all", "input", "action", "output"]
|
BlockType = Literal["all", "input", "action", "output"]
|
||||||
|
|
||||||
|
|
||||||
class SearchEntry(BaseModel):
|
class SearchEntry(BaseModel):
|
||||||
|
|||||||
@@ -88,7 +88,7 @@ async def get_block_categories(
|
|||||||
)
|
)
|
||||||
async def get_blocks(
|
async def get_blocks(
|
||||||
category: Annotated[str | None, fastapi.Query()] = None,
|
category: Annotated[str | None, fastapi.Query()] = None,
|
||||||
type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None,
|
type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
|
||||||
provider: Annotated[ProviderName | None, fastapi.Query()] = None,
|
provider: Annotated[ProviderName | None, fastapi.Query()] = None,
|
||||||
page: Annotated[int, fastapi.Query()] = 1,
|
page: Annotated[int, fastapi.Query()] = 1,
|
||||||
page_size: Annotated[int, fastapi.Query()] = 50,
|
page_size: Annotated[int, fastapi.Query()] = 50,
|
||||||
|
|||||||
@@ -1,154 +0,0 @@
-"""Dummy Agent Generator for testing.
-
-Returns mock responses matching the format expected from the external service.
-Enable via AGENTGENERATOR_USE_DUMMY=true in settings.
-
-WARNING: This is for testing only. Do not use in production.
-"""
-
-import asyncio
-import logging
-import uuid
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-# Dummy decomposition result (instructions type)
-DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
-    "type": "instructions",
-    "steps": [
-        {
-            "description": "Get input from user",
-            "action": "input",
-            "block_name": "AgentInputBlock",
-        },
-        {
-            "description": "Process the input",
-            "action": "process",
-            "block_name": "TextFormatterBlock",
-        },
-        {
-            "description": "Return output to user",
-            "action": "output",
-            "block_name": "AgentOutputBlock",
-        },
-    ],
-}
-
-# Block IDs from backend/blocks/io.py
-AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
-AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"
-
-
-def _generate_dummy_agent_json() -> dict[str, Any]:
-    """Generate a minimal valid agent JSON for testing."""
-    input_node_id = str(uuid.uuid4())
-    output_node_id = str(uuid.uuid4())
-
-    return {
-        "id": str(uuid.uuid4()),
-        "version": 1,
-        "is_active": True,
-        "name": "Dummy Test Agent",
-        "description": "A dummy agent generated for testing purposes",
-        "nodes": [
-            {
-                "id": input_node_id,
-                "block_id": AGENT_INPUT_BLOCK_ID,
-                "input_default": {
-                    "name": "input",
-                    "title": "Input",
-                    "description": "Enter your input",
-                    "placeholder_values": [],
-                },
-                "metadata": {"position": {"x": 0, "y": 0}},
-            },
-            {
-                "id": output_node_id,
-                "block_id": AGENT_OUTPUT_BLOCK_ID,
-                "input_default": {
-                    "name": "output",
-                    "title": "Output",
-                    "description": "Agent output",
-                    "format": "{output}",
-                },
-                "metadata": {"position": {"x": 400, "y": 0}},
-            },
-        ],
-        "links": [
-            {
-                "id": str(uuid.uuid4()),
-                "source_id": input_node_id,
-                "sink_id": output_node_id,
-                "source_name": "result",
-                "sink_name": "value",
-                "is_static": False,
-            },
-        ],
-    }
-
-
-async def decompose_goal_dummy(
-    description: str,
-    context: str = "",
-    library_agents: list[dict[str, Any]] | None = None,
-) -> dict[str, Any]:
-    """Return dummy decomposition result."""
-    logger.info("Using dummy agent generator for decompose_goal")
-    return DUMMY_DECOMPOSITION_RESULT.copy()
-
-
-async def generate_agent_dummy(
-    instructions: dict[str, Any],
-    library_agents: list[dict[str, Any]] | None = None,
-    operation_id: str | None = None,
-    task_id: str | None = None,
-) -> dict[str, Any]:
-    """Return dummy agent JSON after a simulated delay."""
-    logger.info("Using dummy agent generator for generate_agent (30s delay)")
-    await asyncio.sleep(30)
-    return _generate_dummy_agent_json()
-
-
-async def generate_agent_patch_dummy(
-    update_request: str,
-    current_agent: dict[str, Any],
-    library_agents: list[dict[str, Any]] | None = None,
-    operation_id: str | None = None,
-    task_id: str | None = None,
-) -> dict[str, Any]:
-    """Return dummy patched agent (returns the current agent with updated description)."""
-    logger.info("Using dummy agent generator for generate_agent_patch")
-    patched = current_agent.copy()
-    patched["description"] = (
-        f"{current_agent.get('description', '')} (updated: {update_request})"
-    )
-    return patched
-
-
-async def customize_template_dummy(
-    template_agent: dict[str, Any],
-    modification_request: str,
-    context: str = "",
-) -> dict[str, Any]:
-    """Return dummy customized template (returns template with updated description)."""
-    logger.info("Using dummy agent generator for customize_template")
-    customized = template_agent.copy()
-    customized["description"] = (
-        f"{template_agent.get('description', '')} (customized: {modification_request})"
-    )
-    return customized
-
-
-async def get_blocks_dummy() -> list[dict[str, Any]]:
-    """Return dummy blocks list."""
-    logger.info("Using dummy agent generator for get_blocks")
-    return [
-        {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
-        {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
-    ]
-
-
-async def health_check_dummy() -> bool:
-    """Always returns healthy for dummy service."""
-    return True
@@ -12,19 +12,8 @@ import httpx

 from backend.util.settings import Settings

-from .dummy import (
-    customize_template_dummy,
-    decompose_goal_dummy,
-    generate_agent_dummy,
-    generate_agent_patch_dummy,
-    get_blocks_dummy,
-    health_check_dummy,
-)
-
 logger = logging.getLogger(__name__)

-_dummy_mode_warned = False
-
-
 def _create_error_response(
     error_message: str,

@@ -101,26 +90,10 @@ def _get_settings() -> Settings:
     return _settings


-def _is_dummy_mode() -> bool:
-    """Check if dummy mode is enabled for testing."""
-    global _dummy_mode_warned
-    settings = _get_settings()
-    is_dummy = bool(settings.config.agentgenerator_use_dummy)
-    if is_dummy and not _dummy_mode_warned:
-        logger.warning(
-            "Agent Generator running in DUMMY MODE - returning mock responses. "
-            "Do not use in production!"
-        )
-        _dummy_mode_warned = True
-    return is_dummy
-
-
 def is_external_service_configured() -> bool:
-    """Check if external Agent Generator service is configured (or dummy mode)."""
+    """Check if external Agent Generator service is configured."""
     settings = _get_settings()
-    return bool(settings.config.agentgenerator_host) or bool(
-        settings.config.agentgenerator_use_dummy
-    )
+    return bool(settings.config.agentgenerator_host)


 def _get_base_url() -> str:

@@ -164,9 +137,6 @@ async def decompose_goal_external(
         - {"type": "error", "error": "...", "error_type": "..."} on error
         Or None on unexpected error
     """
-    if _is_dummy_mode():
-        return await decompose_goal_dummy(description, context, library_agents)
-
     client = _get_client()

     if context:

@@ -256,11 +226,6 @@ async def generate_agent_external(
     Returns:
         Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
     """
-    if _is_dummy_mode():
-        return await generate_agent_dummy(
-            instructions, library_agents, operation_id, task_id
-        )
-
     client = _get_client()

     # Build request payload

@@ -332,11 +297,6 @@ async def generate_agent_patch_external(
     Returns:
         Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
     """
-    if _is_dummy_mode():
-        return await generate_agent_patch_dummy(
-            update_request, current_agent, library_agents, operation_id, task_id
-        )
-
     client = _get_client()

     # Build request payload

@@ -423,11 +383,6 @@ async def customize_template_external(
     Returns:
         Customized agent JSON, clarifying questions dict, or error dict on error
     """
-    if _is_dummy_mode():
-        return await customize_template_dummy(
-            template_agent, modification_request, context
-        )
-
     client = _get_client()

     request = modification_request

@@ -490,9 +445,6 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
     Returns:
         List of block info dicts or None on error
     """
-    if _is_dummy_mode():
-        return await get_blocks_dummy()
-
     client = _get_client()

     try:

@@ -526,9 +478,6 @@ async def health_check() -> bool:
     if not is_external_service_configured():
         return False

-    if _is_dummy_mode():
-        return await health_check_dummy()
-
     client = _get_client()

     try:
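All of the hunks above remove the same thing: a dummy-mode branch at the top of each external call. The general shape of the pattern being deleted, routing to an in-process fake when a settings flag is on, is sketched below with illustrative names; it is not this codebase's API:

import os
from typing import Any


async def decompose_goal_fake(description: str) -> dict[str, Any]:
    # In-process stand-in that mimics the external service's response shape.
    return {"type": "instructions", "steps": []}


async def decompose_goal(description: str) -> dict[str, Any]:
    # Flag-gated dispatch: one env var flips every call site to the fake.
    # Convenient locally, but it ships test-only code paths to production,
    # which is presumably why this change deletes the branch outright.
    if os.getenv("AGENTGENERATOR_USE_DUMMY", "").lower() == "true":
        return await decompose_goal_fake(description)
    # ... the real HTTP call to the external service would go here ...
    return {"type": "error", "error": "external service not configured"}


if __name__ == "__main__":
    import asyncio

    os.environ["AGENTGENERATOR_USE_DUMMY"] = "true"
    print(asyncio.run(decompose_goal("summarize my inbox")))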
@@ -13,8 +13,7 @@ from backend.api.features.chat.tools.models import (
     NoResultsResponse,
 )
 from backend.api.features.store.hybrid_search import unified_hybrid_search
-from backend.blocks import get_block
-from backend.blocks._base import BlockType
+from backend.data.block import BlockType, get_block

 logger = logging.getLogger(__name__)


@@ -10,7 +10,7 @@ from backend.api.features.chat.tools.find_block import (
     FindBlockTool,
 )
 from backend.api.features.chat.tools.models import BlockListResponse
-from backend.blocks._base import BlockType
+from backend.data.block import BlockType

 from ._test_data import make_session


@@ -12,8 +12,7 @@ from backend.api.features.chat.tools.find_block import (
     COPILOT_EXCLUDED_BLOCK_IDS,
     COPILOT_EXCLUDED_BLOCK_TYPES,
 )
-from backend.blocks import get_block
-from backend.blocks._base import AnyBlockSchema
+from backend.data.block import AnyBlockSchema, get_block
 from backend.data.execution import ExecutionContext
 from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
 from backend.data.workspace import get_or_create_workspace

@@ -6,7 +6,7 @@ import pytest

 from backend.api.features.chat.tools.models import ErrorResponse
 from backend.api.features.chat.tools.run_block import RunBlockTool
-from backend.blocks._base import BlockType
+from backend.data.block import BlockType

 from ._test_data import make_session
@@ -12,11 +12,12 @@ import backend.api.features.store.image_gen as store_image_gen
 import backend.api.features.store.media as store_media
 import backend.data.graph as graph_db
 import backend.data.integrations as integrations_db
+from backend.data.block import BlockInput
 from backend.data.db import transaction
 from backend.data.execution import get_graph_execution
 from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
-from backend.data.model import CredentialsMetaInput, GraphInput
+from backend.data.model import CredentialsMetaInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.integrations.webhooks.graph_lifecycle_hooks import (
     on_graph_activate,

@@ -1129,7 +1130,7 @@ async def create_preset_from_graph_execution(
 async def update_preset(
     user_id: str,
     preset_id: str,
-    inputs: Optional[GraphInput] = None,
+    inputs: Optional[BlockInput] = None,
     credentials: Optional[dict[str, CredentialsMetaInput]] = None,
     name: Optional[str] = None,
     description: Optional[str] = None,

@@ -6,12 +6,9 @@ import prisma.enums
 import prisma.models
 import pydantic

+from backend.data.block import BlockInput
 from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
-from backend.data.model import (
-    CredentialsMetaInput,
-    GraphInput,
-    is_credentials_field_name,
-)
+from backend.data.model import CredentialsMetaInput, is_credentials_field_name
 from backend.util.json import loads as json_loads
 from backend.util.models import Pagination

@@ -326,7 +323,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
     graph_id: str
     graph_version: int

-    inputs: GraphInput
+    inputs: BlockInput
     credentials: dict[str, CredentialsMetaInput]

     name: str

@@ -355,7 +352,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
     Request model used when updating a preset for a library agent.
     """

-    inputs: Optional[GraphInput] = None
+    inputs: Optional[BlockInput] = None
     credentials: Optional[dict[str, CredentialsMetaInput]] = None

     name: Optional[str] = None

@@ -398,7 +395,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
                 "Webhook must be included in AgentPreset query when webhookId is set"
             )

-        input_data: GraphInput = {}
+        input_data: BlockInput = {}
         input_credentials: dict[str, CredentialsMetaInput] = {}

         for preset_input in preset.InputPresets:
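The preset models above swap `GraphInput` for `BlockInput` without touching any logic, which suggests both are dict-shaped aliases for a node's input payload. A minimal sketch of how such an alias behaves in a pydantic update model; the `dict[str, Any]` shape is an inference from this diff, not a verified definition:

from typing import Any, Optional

import pydantic

# Assumed alias: BlockInput appears to map input pin names to values.
BlockInput = dict[str, Any]


class PresetUpdatable(pydantic.BaseModel):
    # Optional dict field: omitted means "leave the stored inputs unchanged".
    inputs: Optional[BlockInput] = None
    name: Optional[str] = None


patch = PresetUpdatable(inputs={"prompt": "hello"})
print(patch.model_dump(exclude_none=True))  # {'inputs': {'prompt': 'hello'}}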
@@ -5,8 +5,8 @@ from typing import Optional
 import aiohttp
 from fastapi import HTTPException

-from backend.blocks import get_block
 from backend.data import graph as graph_db
+from backend.data.block import get_block
 from backend.util.settings import Settings

 from .models import ApiResponse, ChatRequest, GraphData
@@ -152,7 +152,7 @@ class BlockHandler(ContentHandler):

     async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
         """Fetch blocks without embeddings."""
-        from backend.blocks import get_blocks
+        from backend.data.block import get_blocks

         # Get all available blocks
         all_blocks = get_blocks()

@@ -249,7 +249,7 @@ class BlockHandler(ContentHandler):

     async def get_stats(self) -> dict[str, int]:
         """Get statistics about block embedding coverage."""
-        from backend.blocks import get_blocks
+        from backend.data.block import get_blocks

         all_blocks = get_blocks()

@@ -93,7 +93,7 @@ async def test_block_handler_get_missing_items(mocker):
     mock_existing = []

     with patch(
-        "backend.blocks.get_blocks",
+        "backend.data.block.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(

@@ -135,7 +135,7 @@ async def test_block_handler_get_stats(mocker):
     mock_embedded = [{"count": 2}]

     with patch(
-        "backend.blocks.get_blocks",
+        "backend.data.block.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(

@@ -327,7 +327,7 @@ async def test_block_handler_handles_missing_attributes():
     mock_blocks = {"block-minimal": mock_block_class}

     with patch(
-        "backend.blocks.get_blocks",
+        "backend.data.block.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(

@@ -360,7 +360,7 @@ async def test_block_handler_skips_failed_blocks():
     mock_blocks = {"good-block": good_block, "bad-block": bad_block}

     with patch(
-        "backend.blocks.get_blocks",
+        "backend.data.block.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
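The four test hunks above all retarget the same string, which is the classic `unittest.mock.patch` rule: patch the module attribute that is looked up at call time, not wherever the function was originally written. Because the handlers import `get_blocks` lazily from `backend.data.block`, the patch string has to move with it. A self-contained illustration using a stdlib function:

import os
from unittest.mock import patch


def cwd_length() -> int:
    # Lazy lookup, like the handlers' in-function import above:
    # os.getcwd is resolved on the os module at call time.
    return len(os.getcwd())


# Patching the attribute on its defining module intercepts that lookup.
# Patch a stale location instead and the real function still runs, which
# is exactly why these tests had to update their target strings.
with patch("os.getcwd", return_value="/fake"):
    assert cwd_length() == len("/fake")
print("patched lookup intercepted")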
@@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
         )
         current_ids = {row["id"] for row in valid_agents}
     elif content_type == ContentType.BLOCK:
-        from backend.blocks import get_blocks
+        from backend.data.block import get_blocks

         current_ids = set(get_blocks().keys())
     elif content_type == ContentType.DOCUMENTATION:
@@ -7,6 +7,15 @@ from replicate.client import Client as ReplicateClient
 from replicate.exceptions import ReplicateError
 from replicate.helpers import FileOutput

+from backend.blocks.ideogram import (
+    AspectRatio,
+    ColorPalettePreset,
+    IdeogramModelBlock,
+    IdeogramModelName,
+    MagicPromptOption,
+    StyleType,
+    UpscaleOption,
+)
 from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials

@@ -41,16 +50,6 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
     if not ideogram_credentials.api_key:
         raise ValueError("Missing Ideogram API key")

-    from backend.blocks.ideogram import (
-        AspectRatio,
-        ColorPalettePreset,
-        IdeogramModelBlock,
-        IdeogramModelName,
-        MagicPromptOption,
-        StyleType,
-        UpscaleOption,
-    )
-
     name = graph.name
     description = f"{name} ({graph.description})" if graph.description else name
@@ -40,11 +40,10 @@ from backend.api.model import (
     UpdateTimezoneRequest,
     UploadFileResponse,
 )
-from backend.blocks import get_block, get_blocks
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
 from backend.data.auth import api_key as api_key_db
-from backend.data.block import BlockInput, CompletedBlockOutput
+from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
 from backend.data.credit import (
     AutoTopUpConfig,
     RefundRequest,
@@ -3,19 +3,22 @@ import logging
 import os
 import re
 from pathlib import Path
-from typing import Sequence, Type, TypeVar
+from typing import TYPE_CHECKING, TypeVar

-from backend.blocks._base import AnyBlockSchema, BlockType
 from backend.util.cache import cached

 logger = logging.getLogger(__name__)


+if TYPE_CHECKING:
+    from backend.data.block import Block
+
 T = TypeVar("T")


 @cached(ttl_seconds=3600)
-def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
-    from backend.blocks._base import Block
+def load_all_blocks() -> dict[str, type["Block"]]:
+    from backend.data.block import Block
     from backend.util.settings import Config

     # Check if example blocks should be loaded from settings

@@ -47,8 +50,8 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
     importlib.import_module(f".{module}", package=__name__)

     # Load all Block instances from the available modules
-    available_blocks: dict[str, type["AnyBlockSchema"]] = {}
-    for block_cls in _all_subclasses(Block):
+    available_blocks: dict[str, type["Block"]] = {}
+    for block_cls in all_subclasses(Block):
         class_name = block_cls.__name__

         if class_name.endswith("Base"):

@@ -61,7 +64,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
                 "please name the class with 'Base' at the end"
             )

-        block = block_cls()  # pyright: ignore[reportAbstractUsage]
+        block = block_cls.create()

         if not isinstance(block.id, str) or len(block.id) != 36:
             raise ValueError(

@@ -102,7 +105,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
         available_blocks[block.id] = block_cls

     # Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets
-    from ._utils import is_block_auth_configured
+    from backend.data.block import is_block_auth_configured

     filtered_blocks = {}
     for block_id, block_cls in available_blocks.items():

@@ -112,48 +115,11 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
     return filtered_blocks


-def _all_subclasses(cls: type[T]) -> list[type[T]]:
+__all__ = ["load_all_blocks"]
+
+
+def all_subclasses(cls: type[T]) -> list[type[T]]:
     subclasses = cls.__subclasses__()
     for subclass in subclasses:
-        subclasses += _all_subclasses(subclass)
+        subclasses += all_subclasses(subclass)
     return subclasses
-
-
-# ============== Block access helper functions ============== #
-
-
-def get_blocks() -> dict[str, Type["AnyBlockSchema"]]:
-    return load_all_blocks()
-
-
-# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281
-def get_block(block_id: str) -> "AnyBlockSchema | None":
-    cls = get_blocks().get(block_id)
-    return cls() if cls else None
-
-
-@cached(ttl_seconds=3600)
-def get_webhook_block_ids() -> Sequence[str]:
-    return [
-        id
-        for id, B in get_blocks().items()
-        if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL)
-    ]
-
-
-@cached(ttl_seconds=3600)
-def get_io_block_ids() -> Sequence[str]:
-    return [
-        id
-        for id, B in get_blocks().items()
-        if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
-    ]
-
-
-@cached(ttl_seconds=3600)
-def get_human_in_the_loop_block_ids() -> Sequence[str]:
-    return [
-        id
-        for id, B in get_blocks().items()
-        if B().block_type == BlockType.HUMAN_IN_THE_LOOP
-    ]
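The renamed `all_subclasses` helper above is a small recursive registry walk: it follows `cls.__subclasses__()` transitively so that grandchildren (concrete blocks inheriting from an intermediate base) are discovered too. A self-contained sketch of the same function with illustrative classes, not the repo's real block hierarchy:

from typing import TypeVar

T = TypeVar("T")


def all_subclasses(cls: type[T]) -> list[type[T]]:
    subclasses = cls.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses


class Block: ...
class TextBlockBase(Block): ...
class FormatTextBlock(TextBlockBase): ...
class MathBlock(Block): ...

# FormatTextBlock is found even though it is not a direct subclass of Block.
print([c.__name__ for c in all_subclasses(Block)])
# ['TextBlockBase', 'MathBlock', 'FormatTextBlock']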
@@ -1,739 +0,0 @@
-import inspect
-import logging
-from abc import ABC, abstractmethod
-from enum import Enum
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    ClassVar,
-    Generic,
-    Optional,
-    Type,
-    TypeAlias,
-    TypeVar,
-    cast,
-    get_origin,
-)
-
-import jsonref
-import jsonschema
-from pydantic import BaseModel
-
-from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry
-from backend.data.model import (
-    Credentials,
-    CredentialsFieldInfo,
-    CredentialsMetaInput,
-    SchemaField,
-    is_credentials_field_name,
-)
-from backend.integrations.providers import ProviderName
-from backend.util import json
-from backend.util.exceptions import (
-    BlockError,
-    BlockExecutionError,
-    BlockInputError,
-    BlockOutputError,
-    BlockUnknownError,
-)
-from backend.util.settings import Config
-
-logger = logging.getLogger(__name__)
-
-if TYPE_CHECKING:
-    from backend.data.execution import ExecutionContext
-    from backend.data.model import ContributorDetails, NodeExecutionStats
-
-    from ..data.graph import Link
-
-app_config = Config()
-
-
-BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]]
-
-
-class BlockType(Enum):
-    STANDARD = "Standard"
-    INPUT = "Input"
-    OUTPUT = "Output"
-    NOTE = "Note"
-    WEBHOOK = "Webhook"
-    WEBHOOK_MANUAL = "Webhook (manual)"
-    AGENT = "Agent"
-    AI = "AI"
-    AYRSHARE = "Ayrshare"
-    HUMAN_IN_THE_LOOP = "Human In The Loop"
-
-
-class BlockCategory(Enum):
-    AI = "Block that leverages AI to perform a task."
-    SOCIAL = "Block that interacts with social media platforms."
-    TEXT = "Block that processes text data."
-    SEARCH = "Block that searches or extracts information from the internet."
-    BASIC = "Block that performs basic operations."
-    INPUT = "Block that interacts with input of the graph."
-    OUTPUT = "Block that interacts with output of the graph."
-    LOGIC = "Programming logic to control the flow of your agent"
-    COMMUNICATION = "Block that interacts with communication platforms."
-    DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
-    DATA = "Block that interacts with structured data."
-    HARDWARE = "Block that interacts with hardware."
-    AGENT = "Block that interacts with other agents."
-    CRM = "Block that interacts with CRM services."
-    SAFETY = (
-        "Block that provides AI safety mechanisms such as detecting harmful content"
-    )
-    PRODUCTIVITY = "Block that helps with productivity"
-    ISSUE_TRACKING = "Block that helps with issue tracking"
-    MULTIMEDIA = "Block that interacts with multimedia content"
-    MARKETING = "Block that helps with marketing"
-
-    def dict(self) -> dict[str, str]:
-        return {"category": self.name, "description": self.value}
-
-
-class BlockCostType(str, Enum):
-    RUN = "run"  # cost X credits per run
-    BYTE = "byte"  # cost X credits per byte
-    SECOND = "second"  # cost X credits per second
-
-
-class BlockCost(BaseModel):
-    cost_amount: int
-    cost_filter: BlockInput
-    cost_type: BlockCostType
-
-    def __init__(
-        self,
-        cost_amount: int,
-        cost_type: BlockCostType = BlockCostType.RUN,
-        cost_filter: Optional[BlockInput] = None,
-        **data: Any,
-    ) -> None:
-        super().__init__(
-            cost_amount=cost_amount,
-            cost_filter=cost_filter or {},
-            cost_type=cost_type,
-            **data,
-        )
-
-
-class BlockInfo(BaseModel):
-    id: str
-    name: str
-    inputSchema: dict[str, Any]
-    outputSchema: dict[str, Any]
-    costs: list[BlockCost]
-    description: str
-    categories: list[dict[str, str]]
-    contributors: list[dict[str, Any]]
-    staticOutput: bool
-    uiType: str
-
-
-class BlockSchema(BaseModel):
-    cached_jsonschema: ClassVar[dict[str, Any]]
-
-    @classmethod
-    def jsonschema(cls) -> dict[str, Any]:
-        if cls.cached_jsonschema:
-            return cls.cached_jsonschema
-
-        model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
-
-        def ref_to_dict(obj):
-            if isinstance(obj, dict):
-                # OpenAPI <3.1 does not support sibling fields that has a $ref key
-                # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
-                keys = {"allOf", "anyOf", "oneOf"}
-                one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
-                if one_key:
-                    obj.update(obj[one_key][0])
-
-                return {
-                    key: ref_to_dict(value)
-                    for key, value in obj.items()
-                    if not key.startswith("$") and key != one_key
-                }
-            elif isinstance(obj, list):
-                return [ref_to_dict(item) for item in obj]
-
-            return obj
-
-        cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
-
-        return cls.cached_jsonschema
-
-    @classmethod
-    def validate_data(cls, data: BlockInput) -> str | None:
-        return json.validate_with_jsonschema(
-            schema=cls.jsonschema(),
-            data={k: v for k, v in data.items() if v is not None},
-        )
-
-    @classmethod
-    def get_mismatch_error(cls, data: BlockInput) -> str | None:
-        return cls.validate_data(data)
-
-    @classmethod
-    def get_field_schema(cls, field_name: str) -> dict[str, Any]:
-        model_schema = cls.jsonschema().get("properties", {})
-        if not model_schema:
-            raise ValueError(f"Invalid model schema {cls}")
-
-        property_schema = model_schema.get(field_name)
-        if not property_schema:
-            raise ValueError(f"Invalid property name {field_name}")
-
-        return property_schema
-
-    @classmethod
-    def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
-        """
-        Validate the data against a specific property (one of the input/output name).
-        Returns the validation error message if the data does not match the schema.
-        """
-        try:
-            property_schema = cls.get_field_schema(field_name)
-            jsonschema.validate(json.to_dict(data), property_schema)
-            return None
-        except jsonschema.ValidationError as e:
-            return str(e)
-
-    @classmethod
-    def get_fields(cls) -> set[str]:
-        return set(cls.model_fields.keys())
-
-    @classmethod
-    def get_required_fields(cls) -> set[str]:
-        return {
-            field
-            for field, field_info in cls.model_fields.items()
-            if field_info.is_required()
-        }
-
-    @classmethod
-    def __pydantic_init_subclass__(cls, **kwargs):
-        """Validates the schema definition. Rules:
-        - Fields with annotation `CredentialsMetaInput` MUST be
-          named `credentials` or `*_credentials`
-        - Fields named `credentials` or `*_credentials` MUST be
-          of type `CredentialsMetaInput`
-        """
-        super().__pydantic_init_subclass__(**kwargs)
-
-        # Reset cached JSON schema to prevent inheriting it from parent class
-        cls.cached_jsonschema = {}
-
-        credentials_fields = cls.get_credentials_fields()
-
-        for field_name in cls.get_fields():
-            if is_credentials_field_name(field_name):
-                if field_name not in credentials_fields:
-                    raise TypeError(
-                        f"Credentials field '{field_name}' on {cls.__qualname__} "
-                        f"is not of type {CredentialsMetaInput.__name__}"
-                    )
-
-                CredentialsMetaInput.validate_credentials_field_schema(
-                    cls.get_field_schema(field_name), field_name
-                )
-
-            elif field_name in credentials_fields:
-                raise KeyError(
-                    f"Credentials field '{field_name}' on {cls.__qualname__} "
-                    "has invalid name: must be 'credentials' or *_credentials"
-                )
-
-    @classmethod
-    def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]:
-        return {
-            field_name: info.annotation
-            for field_name, info in cls.model_fields.items()
-            if (
-                inspect.isclass(info.annotation)
-                and issubclass(
-                    get_origin(info.annotation) or info.annotation,
-                    CredentialsMetaInput,
-                )
-            )
-        }
-
-    @classmethod
-    def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
-        """
-        Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
-
-        Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
-
-        Raises:
-            ValueError: If multiple fields have the same kwarg_name, as this would
-                cause silent overwriting and only the last field would be processed.
-        """
-        result: dict[str, dict[str, Any]] = {}
-        schema = cls.jsonschema()
-        properties = schema.get("properties", {})
-
-        for field_name, field_schema in properties.items():
-            auto_creds = field_schema.get("auto_credentials")
-            if auto_creds:
-                kwarg_name = auto_creds.get("kwarg_name", "credentials")
-                if kwarg_name in result:
-                    raise ValueError(
-                        f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
-                        f"in fields '{result[kwarg_name]['field_name']}' and "
-                        f"'{field_name}' on {cls.__qualname__}"
-                    )
-                result[kwarg_name] = {
-                    "field_name": field_name,
-                    "config": auto_creds,
-                }
-        return result
-
-    @classmethod
-    def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
-        result = {}
-
-        # Regular credentials fields
-        for field_name in cls.get_credentials_fields().keys():
-            result[field_name] = CredentialsFieldInfo.model_validate(
-                cls.get_field_schema(field_name), by_alias=True
-            )
-
-        # Auto-generated credentials fields (from GoogleDriveFileInput etc.)
-        for kwarg_name, info in cls.get_auto_credentials_fields().items():
-            config = info["config"]
-            # Build a schema-like dict that CredentialsFieldInfo can parse
-            auto_schema = {
-                "credentials_provider": [config.get("provider", "google")],
-                "credentials_types": [config.get("type", "oauth2")],
-                "credentials_scopes": config.get("scopes"),
-            }
-            result[kwarg_name] = CredentialsFieldInfo.model_validate(
-                auto_schema, by_alias=True
-            )
-
-        return result
-
-    @classmethod
-    def get_input_defaults(cls, data: BlockInput) -> BlockInput:
-        return data  # Return as is, by default.
-
-    @classmethod
-    def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
-        input_fields_from_nodes = {link.sink_name for link in links}
-        return input_fields_from_nodes - set(data)
-
-    @classmethod
-    def get_missing_input(cls, data: BlockInput) -> set[str]:
-        return cls.get_required_fields() - set(data)
-
-
-class BlockSchemaInput(BlockSchema):
-    """
-    Base schema class for block inputs.
-    All block input schemas should extend this class for consistency.
-    """
-
-    pass
-
-
-class BlockSchemaOutput(BlockSchema):
-    """
-    Base schema class for block outputs that includes a standard error field.
-    All block output schemas should extend this class to ensure consistent error handling.
-    """
-
-    error: str = SchemaField(
-        description="Error message if the operation failed", default=""
-    )
-
-
-BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput)
-BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput)
-
-
-class EmptyInputSchema(BlockSchemaInput):
-    pass
-
-
-class EmptyOutputSchema(BlockSchemaOutput):
-    pass
-
-
-# For backward compatibility - will be deprecated
-EmptySchema = EmptyOutputSchema
-
-
-# --8<-- [start:BlockWebhookConfig]
-class BlockManualWebhookConfig(BaseModel):
-    """
-    Configuration model for webhook-triggered blocks on which
-    the user has to manually set up the webhook at the provider.
-    """
-
-    provider: ProviderName
-    """The service provider that the webhook connects to"""
-
-    webhook_type: str
-    """
-    Identifier for the webhook type. E.g. GitHub has repo and organization level hooks.
-
-    Only for use in the corresponding `WebhooksManager`.
-    """
-
-    event_filter_input: str = ""
-    """
-    Name of the block's event filter input.
-    Leave empty if the corresponding webhook doesn't have distinct event/payload types.
-    """
-
-    event_format: str = "{event}"
-    """
-    Template string for the event(s) that a block instance subscribes to.
-    Applied individually to each event selected in the event filter input.
-
-    Example: `"pull_request.{event}"` -> `"pull_request.opened"`
-    """
-
-
-class BlockWebhookConfig(BlockManualWebhookConfig):
-    """
-    Configuration model for webhook-triggered blocks for which
-    the webhook can be automatically set up through the provider's API.
-    """
-
-    resource_format: str
-    """
-    Template string for the resource that a block instance subscribes to.
-    Fields will be filled from the block's inputs (except `payload`).
-
-    Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)
-
-    Only for use in the corresponding `WebhooksManager`.
-    """
-# --8<-- [end:BlockWebhookConfig]
-
-
-class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
-    def __init__(
-        self,
-        id: str = "",
-        description: str = "",
-        contributors: list["ContributorDetails"] = [],
-        categories: set[BlockCategory] | None = None,
-        input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
-        output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
-        test_input: BlockInput | list[BlockInput] | None = None,
-        test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
-        test_mock: dict[str, Any] | None = None,
-        test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
-        disabled: bool = False,
-        static_output: bool = False,
-        block_type: BlockType = BlockType.STANDARD,
-        webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
-        is_sensitive_action: bool = False,
-    ):
-        """
-        Initialize the block with the given schema.
-
-        Args:
-            id: The unique identifier for the block, this value will be persisted in the
-                DB. So it should be a unique and constant across the application run.
-                Use the UUID format for the ID.
-            description: The description of the block, explaining what the block does.
-            contributors: The list of contributors who contributed to the block.
-            input_schema: The schema, defined as a Pydantic model, for the input data.
-            output_schema: The schema, defined as a Pydantic model, for the output data.
-            test_input: The list or single sample input data for the block, for testing.
-            test_output: The list or single expected output if the test_input is run.
-            test_mock: function names on the block implementation to mock on test run.
-            disabled: If the block is disabled, it will not be available for execution.
-            static_output: Whether the output links of the block are static by default.
-        """
-        from backend.data.model import NodeExecutionStats
-
-        self.id = id
-        self.input_schema = input_schema
-        self.output_schema = output_schema
-        self.test_input = test_input
-        self.test_output = test_output
-        self.test_mock = test_mock
-        self.test_credentials = test_credentials
-        self.description = description
-        self.categories = categories or set()
-        self.contributors = contributors or set()
-        self.disabled = disabled
-        self.static_output = static_output
-        self.block_type = block_type
-        self.webhook_config = webhook_config
-        self.is_sensitive_action = is_sensitive_action
-        self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()
-
-        if self.webhook_config:
-            if isinstance(self.webhook_config, BlockWebhookConfig):
-                # Enforce presence of credentials field on auto-setup webhook blocks
-                if not (cred_fields := self.input_schema.get_credentials_fields()):
-                    raise TypeError(
-                        "credentials field is required on auto-setup webhook blocks"
-                    )
-                # Disallow multiple credentials inputs on webhook blocks
-                elif len(cred_fields) > 1:
-                    raise ValueError(
-                        "Multiple credentials inputs not supported on webhook blocks"
-                    )
-
-                self.block_type = BlockType.WEBHOOK
-            else:
-                self.block_type = BlockType.WEBHOOK_MANUAL
-
-            # Enforce shape of webhook event filter, if present
-            if self.webhook_config.event_filter_input:
-                event_filter_field = self.input_schema.model_fields[
-                    self.webhook_config.event_filter_input
-                ]
-                if not (
-                    isinstance(event_filter_field.annotation, type)
-                    and issubclass(event_filter_field.annotation, BaseModel)
-                    and all(
-                        field.annotation is bool
-                        for field in event_filter_field.annotation.model_fields.values()
-                    )
-                ):
-                    raise NotImplementedError(
-                        f"{self.name} has an invalid webhook event selector: "
-                        "field must be a BaseModel and all its fields must be boolean"
-                    )
-
-            # Enforce presence of 'payload' input
-            if "payload" not in self.input_schema.model_fields:
-                raise TypeError(
-                    f"{self.name} is webhook-triggered but has no 'payload' input"
-                )
-
-            # Disable webhook-triggered block if webhook functionality not available
-            if not app_config.platform_base_url:
-                self.disabled = True
-
-    @abstractmethod
-    async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
-        """
-        Run the block with the given input data.
-        Args:
-            input_data: The input data with the structure of input_schema.
-
-        Kwargs: Currently 14/02/2025 these include
-            graph_id: The ID of the graph.
-            node_id: The ID of the node.
-            graph_exec_id: The ID of the graph execution.
-            node_exec_id: The ID of the node execution.
-            user_id: The ID of the user.
-
-        Returns:
-            A Generator that yields (output_name, output_data).
-            output_name: One of the output name defined in Block's output_schema.
-            output_data: The data for the output_name, matching the defined schema.
-        """
-        # --- satisfy the type checker, never executed -------------
-        if False:  # noqa: SIM115
-            yield "name", "value"  # pyright: ignore[reportMissingYield]
-        raise NotImplementedError(f"{self.name} does not implement the run method.")
-
-    async def run_once(
-        self, input_data: BlockSchemaInputType, output: str, **kwargs
-    ) -> Any:
-        async for item in self.run(input_data, **kwargs):
-            name, data = item
-            if name == output:
-                return data
-        raise ValueError(f"{self.name} did not produce any output for {output}")
-
-    def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats":
-        self.execution_stats += stats
-        return self.execution_stats
-
-    @property
-    def name(self):
-        return self.__class__.__name__
-
-    def to_dict(self):
-        return {
-            "id": self.id,
-            "name": self.name,
-            "inputSchema": self.input_schema.jsonschema(),
-            "outputSchema": self.output_schema.jsonschema(),
-            "description": self.description,
-            "categories": [category.dict() for category in self.categories],
-            "contributors": [
-                contributor.model_dump() for contributor in self.contributors
-            ],
-            "staticOutput": self.static_output,
-            "uiType": self.block_type.value,
-        }
-
-    def get_info(self) -> BlockInfo:
-        from backend.data.credit import get_block_cost
-
-        return BlockInfo(
-            id=self.id,
-            name=self.name,
-            inputSchema=self.input_schema.jsonschema(),
-            outputSchema=self.output_schema.jsonschema(),
-            costs=get_block_cost(self),
-            description=self.description,
-            categories=[category.dict() for category in self.categories],
-            contributors=[
-                contributor.model_dump() for contributor in self.contributors
-            ],
-            staticOutput=self.static_output,
-            uiType=self.block_type.value,
-        )
-
-    async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
-        try:
-            async for output_name, output_data in self._execute(input_data, **kwargs):
-                yield output_name, output_data
-        except Exception as ex:
-            if isinstance(ex, BlockError):
-                raise ex
-            else:
-                raise (
-                    BlockExecutionError
-                    if isinstance(ex, ValueError)
-                    else BlockUnknownError
-                )(
-                    message=str(ex),
-                    block_name=self.name,
-                    block_id=self.id,
-                ) from ex
-
-    async def is_block_exec_need_review(
-        self,
-        input_data: BlockInput,
-        *,
-        user_id: str,
-        node_id: str,
-        node_exec_id: str,
-        graph_exec_id: str,
-        graph_id: str,
-        graph_version: int,
-        execution_context: "ExecutionContext",
-        **kwargs,
-    ) -> tuple[bool, BlockInput]:
-        """
-        Check if this block execution needs human review and handle the review process.
-
-        Returns:
-            Tuple of (should_pause, input_data_to_use)
-            - should_pause: True if execution should be paused for review
-            - input_data_to_use: The input data to use (may be modified by reviewer)
-        """
-        if not (
-            self.is_sensitive_action and execution_context.sensitive_action_safe_mode
-        ):
-            return False, input_data
-
-        from backend.blocks.helpers.review import HITLReviewHelper
-
-        # Handle the review request and get decision
-        decision = await HITLReviewHelper.handle_review_decision(
-            input_data=input_data,
-            user_id=user_id,
-            node_id=node_id,
-            node_exec_id=node_exec_id,
-            graph_exec_id=graph_exec_id,
-            graph_id=graph_id,
-            graph_version=graph_version,
-            block_name=self.name,
-            editable=True,
-        )
-
-        if decision is None:
-            # We're awaiting review - pause execution
-            return True, input_data
-
-        if not decision.should_proceed:
-            # Review was rejected, raise an error to stop execution
-            raise BlockExecutionError(
-                message=f"Block execution rejected by reviewer: {decision.message}",
-                block_name=self.name,
-                block_id=self.id,
-            )
-
-        # Review was approved - use the potentially modified data
-        # ReviewResult.data must be a dict for block inputs
-        reviewed_data = decision.review_result.data
-        if not isinstance(reviewed_data, dict):
-            raise BlockExecutionError(
-                message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
-                block_name=self.name,
-                block_id=self.id,
-            )
-        return False, reviewed_data
-
-    async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
-        # Check for review requirement only if running within a graph execution context
-        # Direct block execution (e.g., from chat) skips the review process
-        has_graph_context = all(
-            key in kwargs
-            for key in (
-                "node_exec_id",
-                "graph_exec_id",
-                "graph_id",
-                "execution_context",
-            )
-        )
-        if has_graph_context:
-            should_pause, input_data = await self.is_block_exec_need_review(
-                input_data, **kwargs
-            )
-            if should_pause:
-                return
-
-        # Validate the input data (original or reviewer-modified) once
-        if error := self.input_schema.validate_data(input_data):
-            raise BlockInputError(
-                message=f"Unable to execute block with invalid input data: {error}",
-                block_name=self.name,
-                block_id=self.id,
-            )
-
-        # Use the validated input data
-        async for output_name, output_data in self.run(
-            self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
-            **kwargs,
-        ):
-            if output_name == "error":
-                raise BlockExecutionError(
-                    message=output_data, block_name=self.name, block_id=self.id
-                )
-            if self.block_type == BlockType.STANDARD and (
-                error := self.output_schema.validate_field(output_name, output_data)
-            ):
-                raise BlockOutputError(
-                    message=f"Block produced an invalid output data: {error}",
-                    block_name=self.name,
-                    block_id=self.id,
-                )
-            yield output_name, output_data
-
-    def is_triggered_by_event_type(
-        self, trigger_config: dict[str, Any], event_type: str
-    ) -> bool:
-        if not self.webhook_config:
-            raise TypeError("This method can't be used on non-trigger blocks")
-        if not self.webhook_config.event_filter_input:
-            return True
-        event_filter = trigger_config.get(self.webhook_config.event_filter_input)
-        if not event_filter:
-            raise ValueError("Event filter is not configured on trigger")
-        return event_type in [
-            self.webhook_config.event_format.format(event=k)
-            for k in event_filter
-            if event_filter[k] is True
-        ]
-
-
-# Type alias for any block with standard input/output schemas
-AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]
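The `run`/`execute` contract in the deleted module above treats a block as an async generator of `(output_name, output_data)` tuples, with `execute` wrapping validation and error mapping around the subclass's `run`. A stripped-down, self-contained sketch of that contract, with illustrative names and no pydantic validation:

import asyncio
from abc import ABC, abstractmethod
from typing import Any, AsyncGenerator

BlockOutput = AsyncGenerator[tuple[str, Any], None]


class MiniBlock(ABC):
    @abstractmethod
    def run(self, input_data: dict) -> BlockOutput: ...

    async def execute(self, input_data: dict) -> BlockOutput:
        # Wrapper: re-raise anything the subclass yields on its "error" pin
        # as an exception, mirroring the _execute loop deleted above.
        async for name, data in self.run(input_data):
            if name == "error":
                raise RuntimeError(f"{type(self).__name__}: {data}")
            yield name, data


class EchoBlock(MiniBlock):
    async def run(self, input_data: dict) -> BlockOutput:
        yield "result", input_data.get("text", "")


async def main() -> None:
    async for name, data in EchoBlock().execute({"text": "hi"}):
        print(name, data)  # result hi


asyncio.run(main())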
@@ -1,122 +0,0 @@
-import logging
-import os
-
-from backend.integrations.providers import ProviderName
-
-from ._base import AnyBlockSchema
-
-logger = logging.getLogger(__name__)
-
-
-def is_block_auth_configured(
-    block_cls: type[AnyBlockSchema],
-) -> bool:
-    """
-    Check if a block has a valid authentication method configured at runtime.
-
-    For example, if a block is an OAuth-only block and the env vars are not set,
-    do not show it in the UI.
-
-    """
-    from backend.sdk.registry import AutoRegistry
-
-    # Create an instance to access input_schema
-    try:
-        block = block_cls()
-    except Exception as e:
-        # If we can't create a block instance, assume it's not OAuth-only
-        logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
-        return True
-    logger.debug(
-        f"Checking if block {block_cls.__name__} has a valid provider configured"
-    )
-
-    # Get all credential inputs from input schema
-    credential_inputs = block.input_schema.get_credentials_fields_info()
-    required_inputs = block.input_schema.get_required_fields()
-    if not credential_inputs:
-        logger.debug(
-            f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
-        )
-        return True
-
-    # Check credential inputs
-    if len(required_inputs.intersection(credential_inputs.keys())) == 0:
-        logger.debug(
-            f"Block {block_cls.__name__} has only optional credential inputs"
-            " - will work without credentials configured"
-        )
-
-    # Check if the credential inputs for this block are correctly configured
-    for field_name, field_info in credential_inputs.items():
-        provider_names = field_info.provider
-        if not provider_names:
-            logger.warning(
-                f"Block {block_cls.__name__} "
-                f"has credential input '{field_name}' with no provider options"
-                " - Disabling"
-            )
-            return False
-
-        # If a field has multiple possible providers, each one needs to be usable to
-        # prevent breaking the UX
-        for _provider_name in provider_names:
-            provider_name = _provider_name.value
-            if provider_name in ProviderName.__members__.values():
-                logger.debug(
-                    f"Block {block_cls.__name__} credential input '{field_name}' "
-                    f"provider '{provider_name}' is part of the legacy provider system"
-                    " - Treating as valid"
-                )
-                break
-
-            provider = AutoRegistry.get_provider(provider_name)
-            if not provider:
-                logger.warning(
-                    f"Block {block_cls.__name__} credential input '{field_name}' "
-                    f"refers to unknown provider '{provider_name}' - Disabling"
-                )
-                return False
-
-            # Check the provider's supported auth types
-            if field_info.supported_types != provider.supported_auth_types:
-                logger.warning(
-                    f"Block {block_cls.__name__} credential input '{field_name}' "
-                    f"has mismatched supported auth types (field <> Provider): "
-                    f"{field_info.supported_types} != {provider.supported_auth_types}"
-                )
-
-            if not (supported_auth_types := provider.supported_auth_types):
-                # No auth methods have been configured for this provider
-                logger.warning(
-                    f"Block {block_cls.__name__} credential input '{field_name}' "
-                    f"provider '{provider_name}' "
-                    "has no authentication methods configured - Disabling"
-                )
-                return False
-
-            # Check if provider supports OAuth
-            if "oauth2" in supported_auth_types:
-                # Check if OAuth environment variables are set
-                if (oauth_config := provider.oauth_config) and bool(
-                    os.getenv(oauth_config.client_id_env_var)
-                    and os.getenv(oauth_config.client_secret_env_var)
-                ):
-                    logger.debug(
-                        f"Block {block_cls.__name__} credential input '{field_name}' "
-                        f"provider '{provider_name}' is configured for OAuth"
-                    )
-                else:
-                    logger.error(
-                        f"Block {block_cls.__name__} credential input '{field_name}' "
-                        f"provider '{provider_name}' "
-                        "is missing OAuth client ID or secret - Disabling"
-                    )
-                    return False
-
-        logger.debug(
-            f"Block {block_cls.__name__} credential input '{field_name}' is valid; "
-            f"supported credential types: {', '.join(field_info.supported_types)}"
-        )
-
-    return True
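The OAuth branch of the helper deleted above reduces to a simple environment-variable gate: a provider only counts as OAuth-configured when both client ID and secret are present. A self-contained sketch of that check in isolation (the variable names are made up for illustration):

import os


def oauth_env_configured(client_id_var: str, client_secret_var: str) -> bool:
    # Both variables must be present and non-empty for OAuth to count as configured.
    return bool(os.getenv(client_id_var) and os.getenv(client_secret_var))


os.environ["MYPROVIDER_CLIENT_ID"] = "abc123"
# Secret intentionally left unset, so the provider is not configured:
print(oauth_env_configured("MYPROVIDER_CLIENT_ID", "MYPROVIDER_CLIENT_SECRET"))  # False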
@@ -1,7 +1,7 @@
 import logging
-from typing import TYPE_CHECKING, Any, Optional
+from typing import Any, Optional
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockInput,
@@ -9,15 +9,13 @@ from backend.blocks._base import (
     BlockSchema,
     BlockSchemaInput,
     BlockType,
+    get_block,
 )
 from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
 from backend.data.model import NodeExecutionStats, SchemaField
 from backend.util.json import validate_with_jsonschema
 from backend.util.retry import func_retry
 
-if TYPE_CHECKING:
-    from backend.executor.utils import LogMetadata
-
 _logger = logging.getLogger(__name__)
 
 
@@ -126,10 +124,9 @@ class AgentExecutorBlock(Block):
         graph_version: int,
         graph_exec_id: str,
         user_id: str,
-        logger: "LogMetadata",
+        logger,
     ) -> BlockOutput:
 
-        from backend.blocks import get_block
         from backend.data.execution import ExecutionEventType
         from backend.executor import utils as execution_utils
 
@@ -201,7 +198,7 @@ class AgentExecutorBlock(Block):
         self,
         graph_exec_id: str,
         user_id: str,
-        logger: "LogMetadata",
+        logger,
     ) -> None:
         from backend.executor import utils as execution_utils
 
@@ -1,11 +1,5 @@
 from typing import Any
 
-from backend.blocks._base import (
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.llm import (
     DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
@@ -17,6 +11,12 @@ from backend.blocks.llm import (
     LLMResponse,
     llm_call,
 )
+from backend.data.block import (
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField
 
 
@@ -6,7 +6,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -5,12 +5,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
+from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput
 from backend.data.execution import ExecutionContext
 from backend.data.model import (
     APIKeyCredentials,
@@ -6,7 +6,7 @@ from typing import Literal
 from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -6,7 +6,7 @@ from typing import Literal
 
 from pydantic import SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,10 +1,3 @@
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -17,6 +10,13 @@ from backend.blocks.apollo.models import (
     PrimaryPhone,
     SearchOrganizationsRequest,
 )
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import CredentialsField, SchemaField
 
 
@@ -1,12 +1,5 @@
 import asyncio
 
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -21,6 +14,13 @@ from backend.blocks.apollo.models import (
     SearchPeopleRequest,
     SenorityLevels,
 )
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import CredentialsField, SchemaField
 
 
@@ -1,10 +1,3 @@
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -13,6 +6,13 @@ from backend.blocks.apollo._auth import (
     ApolloCredentialsInput,
 )
 from backend.blocks.apollo.models import Contact, EnrichPersonRequest
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import CredentialsField, SchemaField
 
 
@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel, Field
 
-from backend.blocks._base import BlockSchemaInput
+from backend.data.block import BlockSchemaInput
 from backend.data.model import SchemaField, UserIntegrations
 from backend.integrations.ayrshare import AyrshareClient
 from backend.util.clients import get_database_manager_async_client
@@ -1,7 +1,7 @@
 import enum
 from typing import Any
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -2,7 +2,7 @@ import os
 import re
 from typing import Type
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,7 +1,7 @@
 from enum import Enum
 from typing import Any
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
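Most hunks in this comparison repeat one mechanical change: imports move from backend.blocks._base to backend.data.block, and the import blocks are then re-sorted, which is why the relocated block often lands below the provider _auth imports. A sketch of how such a rewrite could be scripted (illustrative only; the actual change may have been made with different tooling):

import pathlib
import re

# Matches both the single-line and parenthesized forms of the old import.
IMPORT_PATTERN = re.compile(r"from backend\.blocks\._base import")


def migrate_imports(root: str) -> int:
    changed = 0
    for path in pathlib.Path(root).rglob("*.py"):
        text = path.read_text()
        new_text = IMPORT_PATTERN.sub("from backend.data.block import", text)
        if new_text != text:
            path.write_text(new_text)
            changed += 1
    return changed


# Example: migrate_imports("backend/blocks"), followed by an import sorter
# such as isort to restore alphabetical ordering.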
@@ -1,12 +1,12 @@
 import json
 import shlex
 import uuid
-from typing import TYPE_CHECKING, Literal, Optional
+from typing import Literal, Optional
 
 from e2b import AsyncSandbox as BaseAsyncSandbox
-from pydantic import SecretStr
+from pydantic import BaseModel, SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext
 
 
 class ClaudeCodeExecutionError(Exception):
@@ -181,15 +174,22 @@ class ClaudeCodeBlock(Block):
             advanced=True,
         )
 
+    class FileOutput(BaseModel):
+        """A file extracted from the sandbox."""
+
+        path: str
+        relative_path: str  # Path relative to working directory (for GitHub, etc.)
+        name: str
+        content: str
+
     class Output(BlockSchemaOutput):
         response: str = SchemaField(
             description="The output/response from Claude Code execution"
         )
-        files: list[SandboxFileOutput] = SchemaField(
+        files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
            description=(
                 "List of text files created/modified by Claude Code during this execution. "
-                "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
-                "workspace_ref contains a workspace:// URI if the file was stored to workspace."
+                "Each file has 'path', 'relative_path', 'name', and 'content' fields."
            )
        )
        conversation_history: str = SchemaField(
@@ -252,7 +252,6 @@ class ClaudeCodeBlock(Block):
                         "relative_path": "index.html",
                         "name": "index.html",
                         "content": "<html>Hello World</html>",
-                        "workspace_ref": None,
                     }
                 ],
             ),
@@ -268,12 +267,11 @@ class ClaudeCodeBlock(Block):
                 "execute_claude_code": lambda *args, **kwargs: (
                     "Created index.html with hello world content",  # response
                     [
-                        SandboxFileOutput(
+                        ClaudeCodeBlock.FileOutput(
                             path="/home/user/index.html",
                             relative_path="index.html",
                             name="index.html",
                             content="<html>Hello World</html>",
-                            workspace_ref=None,
                         )
                     ],  # files
                     "User: Create a hello world HTML file\n"
@@ -296,8 +294,7 @@ class ClaudeCodeBlock(Block):
         existing_sandbox_id: str,
         conversation_history: str,
         dispose_sandbox: bool,
-        execution_context: "ExecutionContext",
-    ) -> tuple[str, list[SandboxFileOutput], str, str, str]:
+    ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
         """
         Execute Claude Code in an E2B sandbox.
 
@@ -452,18 +449,14 @@ class ClaudeCodeBlock(Block):
         else:
             new_conversation_history = turn_entry
 
-        # Extract files created/modified during this run and store to workspace
-        sandbox_files = await extract_and_store_sandbox_files(
-            sandbox=sandbox,
-            working_directory=working_directory,
-            execution_context=execution_context,
-            since_timestamp=start_timestamp,
-            text_only=True,
+        # Extract files created/modified during this run
+        files = await self._extract_files(
+            sandbox, working_directory, start_timestamp
         )
 
         return (
             response,
-            sandbox_files,  # Already SandboxFileOutput objects
+            files,
             new_conversation_history,
             current_session_id,
             sandbox_id,
@@ -478,6 +471,140 @@ class ClaudeCodeBlock(Block):
         if dispose_sandbox and sandbox:
             await sandbox.kill()
 
+    async def _extract_files(
+        self,
+        sandbox: BaseAsyncSandbox,
+        working_directory: str,
+        since_timestamp: str | None = None,
+    ) -> list["ClaudeCodeBlock.FileOutput"]:
+        """
+        Extract text files created/modified during this Claude Code execution.
+
+        Args:
+            sandbox: The E2B sandbox instance
+            working_directory: Directory to search for files
+            since_timestamp: ISO timestamp - only return files modified after this time
+
+        Returns:
+            List of FileOutput objects with path, relative_path, name, and content
+        """
+        files: list[ClaudeCodeBlock.FileOutput] = []
+
+        # Text file extensions we can safely read as text
+        text_extensions = {
+            ".txt",
+            ".md",
+            ".html",
+            ".htm",
+            ".css",
+            ".js",
+            ".ts",
+            ".jsx",
+            ".tsx",
+            ".json",
+            ".xml",
+            ".yaml",
+            ".yml",
+            ".toml",
+            ".ini",
+            ".cfg",
+            ".conf",
+            ".py",
+            ".rb",
+            ".php",
+            ".java",
+            ".c",
+            ".cpp",
+            ".h",
+            ".hpp",
+            ".cs",
+            ".go",
+            ".rs",
+            ".swift",
+            ".kt",
+            ".scala",
+            ".sh",
+            ".bash",
+            ".zsh",
+            ".sql",
+            ".graphql",
+            ".env",
+            ".gitignore",
+            ".dockerfile",
+            "Dockerfile",
+            ".vue",
+            ".svelte",
+            ".astro",
+            ".mdx",
+            ".rst",
+            ".tex",
+            ".csv",
+            ".log",
+        }
+
+        try:
+            # List files recursively using find command
+            # Exclude node_modules and .git directories, but allow hidden files
+            # like .env and .gitignore (they're filtered by text_extensions later)
+            # Filter by timestamp to only get files created/modified during this run
+            safe_working_dir = shlex.quote(working_directory)
+            timestamp_filter = ""
+            if since_timestamp:
+                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
+            find_result = await sandbox.commands.run(
+                f"find {safe_working_dir} -type f "
+                f"{timestamp_filter}"
+                f"-not -path '*/node_modules/*' "
+                f"-not -path '*/.git/*' "
+                f"2>/dev/null"
+            )
+
+            if find_result.stdout:
+                for file_path in find_result.stdout.strip().split("\n"):
+                    if not file_path:
+                        continue
+
+                    # Check if it's a text file we can read
+                    is_text = any(
+                        file_path.endswith(ext) for ext in text_extensions
+                    ) or file_path.endswith("Dockerfile")
+
+                    if is_text:
+                        try:
+                            content = await sandbox.files.read(file_path)
+                            # Handle bytes or string
+                            if isinstance(content, bytes):
+                                content = content.decode("utf-8", errors="replace")
+
+                            # Extract filename from path
+                            file_name = file_path.split("/")[-1]
+
+                            # Calculate relative path by stripping working directory
+                            relative_path = file_path
+                            if file_path.startswith(working_directory):
+                                relative_path = file_path[len(working_directory) :]
+                            # Remove leading slash if present
+                            if relative_path.startswith("/"):
+                                relative_path = relative_path[1:]
+
+                            files.append(
+                                ClaudeCodeBlock.FileOutput(
+                                    path=file_path,
+                                    relative_path=relative_path,
+                                    name=file_name,
+                                    content=content,
+                                )
+                            )
+                        except Exception:
+                            # Skip files that can't be read
+                            pass
+
+        except Exception:
+            # If file extraction fails, return empty results
+            pass
+
+        return files
+
     def _escape_prompt(self, prompt: str) -> str:
         """Escape the prompt for safe shell execution."""
         # Use single quotes and escape any single quotes in the prompt
@@ -490,7 +617,6 @@ class ClaudeCodeBlock(Block):
         *,
         e2b_credentials: APIKeyCredentials,
         anthropic_credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
         **kwargs,
     ) -> BlockOutput:
         try:
@@ -511,7 +637,6 @@ class ClaudeCodeBlock(Block):
                 existing_sandbox_id=input_data.sandbox_id,
                 conversation_history=input_data.conversation_history,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
             )
 
             yield "response", response
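The _extract_files helper added above scopes its find to files modified after a timestamp captured before the run. The same idea, shown against the local filesystem with subprocess instead of an E2B sandbox (the path and example timestamp are placeholders):

import shlex
import subprocess


def list_new_files(working_dir: str, since_iso: str) -> list[str]:
    # Mirror the block's find flags: only regular files newer than the
    # timestamp, skipping node_modules and .git.
    cmd = (
        f"find {shlex.quote(working_dir)} -type f "
        f"-newermt {shlex.quote(since_iso)} "
        "-not -path '*/node_modules/*' -not -path '*/.git/*'"
    )
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return [line for line in result.stdout.splitlines() if line]


# Example: files under /tmp touched since midnight UTC on some date
# print(list_new_files("/tmp", "2024-01-01T00:00:00"))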
@@ -1,12 +1,12 @@
 from enum import Enum
-from typing import TYPE_CHECKING, Any, Literal, Optional
+from typing import Any, Literal, Optional
 
 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult
 from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
 from pydantic import BaseModel, Field, JsonValue, SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext
 
 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -92,9 +85,6 @@ class CodeExecutionResult(MainCodeExecutionResult):
 class BaseE2BExecutorMixin:
     """Shared implementation methods for E2B executor blocks."""
 
-    # Default working directory in E2B sandboxes
-    WORKING_DIR = "/home/user"
-
     async def execute_code(
         self,
         api_key: str,
@@ -105,21 +95,14 @@ class BaseE2BExecutorMixin:
         timeout: Optional[int] = None,
         sandbox_id: Optional[str] = None,
         dispose_sandbox: bool = False,
-        execution_context: Optional["ExecutionContext"] = None,
-        extract_files: bool = False,
     ):
         """
         Unified code execution method that handles all three use cases:
         1. Create new sandbox and execute (ExecuteCodeBlock)
         2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
         3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
-
-        Args:
-            extract_files: If True and execution_context provided, extract files
-                created/modified during execution and store to workspace.
         """  # noqa
         sandbox = None
-        files: list[SandboxFileOutput] = []
         try:
             if sandbox_id:
                 # Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -135,12 +118,6 @@ class BaseE2BExecutorMixin:
             for cmd in setup_commands:
                 await sandbox.commands.run(cmd)
 
-            # Capture timestamp before execution to scope file extraction
-            start_timestamp = None
-            if extract_files:
-                ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
-                start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
-
             # Execute the code
             execution = await sandbox.run_code(
                 code,
@@ -156,24 +133,7 @@ class BaseE2BExecutorMixin:
             stdout_logs = "".join(execution.logs.stdout)
             stderr_logs = "".join(execution.logs.stderr)
 
-            # Extract files created/modified during this execution
-            if extract_files and execution_context:
-                files = await extract_and_store_sandbox_files(
-                    sandbox=sandbox,
-                    working_directory=self.WORKING_DIR,
-                    execution_context=execution_context,
-                    since_timestamp=start_timestamp,
-                    text_only=False,  # Include binary files too
-                )
-
-            return (
-                results,
-                text_output,
-                stdout_logs,
-                stderr_logs,
-                sandbox.sandbox_id,
-                files,
-            )
+            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
         finally:
             # Dispose of sandbox if requested to reduce usage costs
             if dispose_sandbox and sandbox:
@@ -278,12 +238,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             description="Standard output logs from execution"
         )
         stderr_logs: str = SchemaField(description="Standard error logs from execution")
-        files: list[SandboxFileOutput] = SchemaField(
-            description=(
-                "Files created or modified during execution. "
-                "Each file has path, name, content, and workspace_ref (if stored)."
-            ),
-        )
 
     def __init__(self):
         super().__init__(
@@ -305,30 +259,23 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 ("results", []),
                 ("response", "Hello World"),
                 ("stdout_logs", "Hello World\n"),
-                ("files", []),
             ],
             test_mock={
-                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: (  # noqa
+                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
                    [],  # results
                    "Hello World",  # text_output
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    "sandbox_id",  # sandbox_id
-                    [],  # files
                ),
            },
        )
 
     async def run(
-        self,
-        input_data: Input,
-        *,
-        credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
-        **kwargs,
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, files = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.code,
                 language=input_data.language,
@@ -336,8 +283,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 setup_commands=input_data.setup_commands,
                 timeout=input_data.timeout,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
-                extract_files=True,
             )
 
             # Determine result object shape & filter out empty formats
@@ -351,8 +296,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             yield "stdout_logs", stdout
             if stderr:
                 yield "stderr_logs", stderr
-            # Always yield files (empty list if none)
-            yield "files", [f.model_dump() for f in files]
         except Exception as e:
             yield "error", str(e)
 
@@ -450,7 +393,6 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    "sandbox_id",  # sandbox_id
-                    [],  # files
                ),
            },
        )
@@ -459,7 +401,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
+            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.setup_code,
                 language=input_data.language,
@@ -558,7 +500,6 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
                    "Hello World\n",  # stdout_logs
                    "",  # stderr_logs
                    sandbox_id,  # sandbox_id
-                    [],  # files
                ),
            },
        )
@@ -567,7 +508,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.step_code,
                 language=input_data.language,
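With file extraction removed, execute_code is back to returning a flat 5-tuple, which is why each call site above drops one unpacking target. A toy illustration of the arity change (all values are dummies):

def execute_code_stub():
    # Stands in for (results, text_output, stdout_logs, stderr_logs, sandbox_id)
    return [], "Hello World", "Hello World\n", "", "sandbox_id"


results, text_output, stdout, stderr, sandbox_id = execute_code_stub()
assert sandbox_id == "sandbox_id"

# The removed variant appended a files list as a sixth element, so callers
# previously unpacked:
# results, text_output, stdout, stderr, sandbox_id, files = execute_code_stub()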
@@ -1,6 +1,6 @@
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
|
|||||||
from openai.types.responses import Response as OpenAIResponse
|
from openai.types.responses import Response as OpenAIResponse
|
||||||
from pydantic import SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockManualWebhookConfig,
|
BlockManualWebhookConfig,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from typing import Any, List
|
from typing import Any, List
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import codecs
|
import codecs
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from typing import Any, Literal, cast
|
|||||||
import discord
|
import discord
|
||||||
from pydantic import SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
Discord OAuth-based blocks.
|
Discord OAuth-based blocks.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from typing import Literal
|
|||||||
|
|
||||||
from pydantic import BaseModel, ConfigDict, SecretStr
|
from pydantic import BaseModel, ConfigDict, SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
import codecs
|
import codecs
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information.
|
|||||||
import logging
|
import logging
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -3,13 +3,6 @@ import logging
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from backend.blocks._base import (
|
|
||||||
Block,
|
|
||||||
BlockCategory,
|
|
||||||
BlockOutput,
|
|
||||||
BlockSchemaInput,
|
|
||||||
BlockSchemaOutput,
|
|
||||||
)
|
|
||||||
from backend.blocks.fal._auth import (
|
from backend.blocks.fal._auth import (
|
||||||
TEST_CREDENTIALS,
|
TEST_CREDENTIALS,
|
||||||
TEST_CREDENTIALS_INPUT,
|
TEST_CREDENTIALS_INPUT,
|
||||||
@@ -17,6 +10,13 @@ from backend.blocks.fal._auth import (
|
|||||||
FalCredentialsField,
|
FalCredentialsField,
|
||||||
FalCredentialsInput,
|
FalCredentialsInput,
|
||||||
)
|
)
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.file import store_media_file
|
from backend.util.file import store_media_file
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from pydantic import SecretStr
|
|||||||
from replicate.client import Client as ReplicateClient
|
from replicate.client import Client as ReplicateClient
|
||||||
from replicate.helpers import FileOutput
|
from replicate.helpers import FileOutput
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from typing import Optional
|
|||||||
|
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from typing import Optional
|
|||||||
|
|
||||||
from typing_extensions import TypedDict
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from urllib.parse import urlparse
|
|||||||
|
|
||||||
from typing_extensions import TypedDict
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ import re
|
|||||||
|
|
||||||
from typing_extensions import TypedDict
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ import base64
|
|||||||
|
|
||||||
from typing_extensions import TypedDict
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from typing import Any, List, Optional
|
|||||||
|
|
||||||
from typing_extensions import TypedDict
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from typing import Optional
|
|||||||
|
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from pathlib import Path
|
|||||||
|
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials
|
|||||||
from googleapiclient.discovery import build
|
from googleapiclient.discovery import build
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials
|
|||||||
from googleapiclient.discovery import build
|
from googleapiclient.discovery import build
|
||||||
from gravitas_md2gdocs import to_requests
|
from gravitas_md2gdocs import to_requests
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
)
|
)
|
||||||
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
|
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.settings import Settings
|
from backend.util.settings import Settings
|
||||||
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials
|
|||||||
from googleapiclient.discovery import build
|
from googleapiclient.discovery import build
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -7,14 +7,14 @@ from enum import Enum
|
|||||||
from google.oauth2.credentials import Credentials
|
from google.oauth2.credentials import Credentials
|
||||||
from googleapiclient.discovery import build
|
from googleapiclient.discovery import build
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
)
|
)
|
||||||
from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
|
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.settings import Settings
|
from backend.util.settings import Settings
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from typing import Literal
|
|||||||
import googlemaps
|
import googlemaps
|
||||||
from pydantic import BaseModel, SecretStr
|
from pydantic import BaseModel, SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ from typing import Any, Optional
|
|||||||
from prisma.enums import ReviewStatus
|
from prisma.enums import ReviewStatus
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from backend.data.execution import ExecutionStatus
|
||||||
from backend.data.human_review import ReviewResult
|
from backend.data.human_review import ReviewResult
|
||||||
|
from backend.executor.manager import async_update_node_execution_status
|
||||||
from backend.util.clients import get_database_manager_async_client
|
from backend.util.clients import get_database_manager_async_client
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -41,8 +43,6 @@ class HITLReviewHelper:
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
async def update_node_execution_status(**kwargs) -> None:
|
async def update_node_execution_status(**kwargs) -> None:
|
||||||
"""Update the execution status of a node."""
|
"""Update the execution status of a node."""
|
||||||
from backend.executor.manager import async_update_node_execution_status
|
|
||||||
|
|
||||||
await async_update_node_execution_status(
|
await async_update_node_execution_status(
|
||||||
db_client=get_database_manager_async_client(), **kwargs
|
db_client=get_database_manager_async_client(), **kwargs
|
||||||
)
|
)
|
||||||
@@ -88,13 +88,12 @@ class HITLReviewHelper:
|
|||||||
Raises:
|
Raises:
|
||||||
Exception: If review creation or status update fails
|
Exception: If review creation or status update fails
|
||||||
"""
|
"""
|
||||||
from backend.data.execution import ExecutionStatus
|
|
||||||
|
|
||||||
# Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
|
# Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
|
||||||
# are handled by the caller:
|
# are handled by the caller:
|
||||||
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
|
# - HITL blocks check human_in_the_loop_safe_mode in their run() method
|
||||||
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
|
# - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
|
||||||
# This function only handles checking for existing approvals.
|
# This function only handles checking for existing approvals.
|
||||||
|
|
||||||
# Check if this node has already been approved (normal or auto-approval)
|
# Check if this node has already been approved (normal or auto-approval)
|
||||||
if approval_result := await HITLReviewHelper.check_approval(
|
if approval_result := await HITLReviewHelper.check_approval(
|
||||||
node_exec_id=node_exec_id,
|
node_exec_id=node_exec_id,
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from typing import Literal
|
|||||||
import aiofiles
|
import aiofiles
|
||||||
from pydantic import SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
from backend.blocks._base import (
|
from backend.blocks.hubspot._auth import (
|
||||||
|
HubSpotCredentials,
|
||||||
|
HubSpotCredentialsField,
|
||||||
|
HubSpotCredentialsInput,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
)
|
)
|
||||||
from backend.blocks.hubspot._auth import (
|
|
||||||
HubSpotCredentials,
|
|
||||||
HubSpotCredentialsField,
|
|
||||||
HubSpotCredentialsInput,
|
|
||||||
)
|
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.request import Requests
|
from backend.util.request import Requests
|
||||||
|
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
from backend.blocks._base import (
|
from backend.blocks.hubspot._auth import (
|
||||||
|
HubSpotCredentials,
|
||||||
|
HubSpotCredentialsField,
|
||||||
|
HubSpotCredentialsInput,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
)
|
)
|
||||||
from backend.blocks.hubspot._auth import (
|
|
||||||
HubSpotCredentials,
|
|
||||||
HubSpotCredentialsField,
|
|
||||||
HubSpotCredentialsInput,
|
|
||||||
)
|
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.request import Requests
|
from backend.util.request import Requests
|
||||||
|
|
||||||
|
|||||||
@@ -1,17 +1,17 @@
|
|||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.blocks.hubspot._auth import (
|
||||||
|
HubSpotCredentials,
|
||||||
|
HubSpotCredentialsField,
|
||||||
|
HubSpotCredentialsInput,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
)
|
)
|
||||||
from backend.blocks.hubspot._auth import (
|
|
||||||
HubSpotCredentials,
|
|
||||||
HubSpotCredentialsField,
|
|
||||||
HubSpotCredentialsInput,
|
|
||||||
)
|
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.request import Requests
|
from backend.util.request import Requests
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ from typing import Any
|
|||||||
|
|
||||||
from prisma.enums import ReviewStatus
|
from prisma.enums import ReviewStatus
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.blocks.helpers.review import HITLReviewHelper
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -11,7 +12,6 @@ from backend.blocks._base import (
|
|||||||
BlockSchemaOutput,
|
BlockSchemaOutput,
|
||||||
BlockType,
|
BlockType,
|
||||||
)
|
)
|
||||||
from backend.blocks.helpers.review import HITLReviewHelper
|
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
from backend.data.human_review import ReviewResult
|
from backend.data.human_review import ReviewResult
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional
|
|||||||
|
|
||||||
from pydantic import SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -2,7 +2,9 @@ import copy
|
|||||||
from datetime import date, time
|
from datetime import date, time
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
|
|
||||||
from backend.blocks._base import (
|
# Import for Google Drive file input block
|
||||||
|
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
|
||||||
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -10,9 +12,6 @@ from backend.blocks._base import (
|
|||||||
BlockSchemaInput,
|
BlockSchemaInput,
|
||||||
BlockType,
|
BlockType,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Import for Google Drive file input block
|
|
||||||
from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
|
|
||||||
from backend.data.execution import ExecutionContext
|
from backend.data.execution import ExecutionContext
|
||||||
from backend.data.model import SchemaField
|
from backend.data.model import SchemaField
|
||||||
from backend.util.file import store_media_file
|
from backend.util.file import store_media_file
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from backend.blocks._base import (
|
from backend.data.block import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 
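This hunk is self-contained, so the post-change state can be read off directly. The rename from backend.blocks._base to backend.data.block changes the lexicographic sort position, which is presumably why the jina._auth block moves above it (backend.blocks.* now sorts before backend.data.*, assuming isort-style ordering). The resulting imports:

from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField
from backend.util.request import Requests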
@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 
@@ -3,18 +3,18 @@ from urllib.parse import quote
 
 from typing_extensions import TypedDict
 
-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 
@@ -1,12 +1,5 @@
 from urllib.parse import quote
 
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.jina._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -15,6 +8,13 @@ from backend.blocks.jina._auth import (
     JinaCredentialsInput,
 )
 from backend.blocks.search import GetRequest
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import SchemaField
 from backend.util.exceptions import BlockExecutionError
 
@@ -15,7 +15,7 @@ from anthropic.types import ToolParam
 from groq import AsyncGroq
 from pydantic import BaseModel, SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -2,7 +2,7 @@ import operator
 from enum import Enum
 from typing import Any
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -3,7 +3,7 @@ from typing import List, Literal
 
 from pydantic import SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -3,7 +3,7 @@ from typing import Any, Literal, Optional, Union
 from mem0 import MemoryClient
 from pydantic import BaseModel, SecretStr
 
-from backend.blocks._base import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
+from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
 from backend.data.model import (
     APIKeyCredentials,
     CredentialsField,
@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional
 
 from pydantic import model_validator
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from typing import Any, Dict, List, Optional
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -4,7 +4,7 @@ from typing import List, Optional
 
 from pydantic import BaseModel
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.nvidia._auth import (
+    NvidiaCredentials,
+    NvidiaCredentialsField,
+    NvidiaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.nvidia._auth import (
-    NvidiaCredentials,
-    NvidiaCredentialsField,
-    NvidiaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 from backend.util.type import MediaFileType
@@ -6,7 +6,7 @@ from typing import Any, Literal
 import openai
 from pydantic import SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -1,7 +1,7 @@
 import logging
 from typing import Any, Literal
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -3,7 +3,7 @@ from typing import Any, Literal
 
 from pinecone import Pinecone, ServerlessSpec
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -6,7 +6,7 @@ import praw
 from praw.models import Comment, MoreComments, Submission
 from pydantic import BaseModel, SecretStr
 
-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -4,19 +4,19 @@ from enum import Enum
 from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 
-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.replicate._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     ReplicateCredentialsInput,
 )
 from backend.blocks.replicate._helper import ReplicateOutputs, extract_result
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
 
 
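The replicate hunk follows the same pattern as the jina and nvidia files above: the parenthesized block import is deleted from its old position above the replicate._auth group and re-added below it under its new module path, leaving the imports alphabetically sorted. The resulting import section, read directly from the context and added lines:

from backend.blocks.replicate._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ReplicateCredentialsInput,
)
from backend.blocks.replicate._helper import ReplicateOutputs, extract_result
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField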
Some files were not shown because too many files have changed in this diff.