Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-18 10:41:49 -05:00)

Compare commits: master...kpczerwins (52 commits)
Commits (SHA1):
4caa69dad8, 845609f07d, d629401afd, be0184d14f, e9ba7e51db, d23248f065, 905373a712, ee9d39bc0f,
05aaf7a85e, d63f2ead40, 6471342e55, b6d7e9ad8c, 9d4dcbd9e0, 074be7aea6, 39d28b24fc, bf79a7748a,
649d4ab7f5, 223df9d3da, 187ab04745, e2d3c8a217, 647c8ed8d4, 27d94e395c, b8f5c208d0, ca216dfd7f,
f9f358c526, 52b3aebf71, 965b7d3e04, c2368f15ff, 9ac3f64d56, 5035b69c79, 86af8fc856, dfa517300b,
43b25b5e2f, ab0b537cc7, 9a8c6ad609, e8c50b96d1, 30e854569a, 301d7cbada, d95aef7665, cb166dd6fb,
3d31f62bf1, b8b6c9de23, 4f6055f494, 695a185fa1, 113e87a23c, d09f1532a4, eb285eadd9, a78145505b,
36aeb0b2b3, 2a189c44c4, 508759610f, 808dddfc26
@@ -5,42 +5,13 @@
 !docs/
 
 # Platform - Libs
-!autogpt_platform/autogpt_libs/autogpt_libs/
-!autogpt_platform/autogpt_libs/pyproject.toml
-!autogpt_platform/autogpt_libs/poetry.lock
-!autogpt_platform/autogpt_libs/README.md
+!autogpt_platform/autogpt_libs/
 
 # Platform - Backend
-!autogpt_platform/backend/backend/
-!autogpt_platform/backend/test/e2e_test_data.py
-!autogpt_platform/backend/migrations/
-!autogpt_platform/backend/schema.prisma
-!autogpt_platform/backend/pyproject.toml
-!autogpt_platform/backend/poetry.lock
-!autogpt_platform/backend/README.md
-!autogpt_platform/backend/.env
-!autogpt_platform/backend/gen_prisma_types_stub.py
-
-# Platform - Market
-!autogpt_platform/market/market/
-!autogpt_platform/market/scripts.py
-!autogpt_platform/market/schema.prisma
-!autogpt_platform/market/pyproject.toml
-!autogpt_platform/market/poetry.lock
-!autogpt_platform/market/README.md
+!autogpt_platform/backend/
 
 # Platform - Frontend
-!autogpt_platform/frontend/src/
-!autogpt_platform/frontend/public/
-!autogpt_platform/frontend/scripts/
-!autogpt_platform/frontend/package.json
-!autogpt_platform/frontend/pnpm-lock.yaml
-!autogpt_platform/frontend/tsconfig.json
-!autogpt_platform/frontend/README.md
-## config
-!autogpt_platform/frontend/*.config.*
-!autogpt_platform/frontend/.env.*
-!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/
 
 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
@@ -64,6 +35,38 @@
 # Classic - Frontend
 !classic/frontend/build/web/
 
-# Explicitly re-ignore some folders
-.*
-**/__pycache__
+# Explicitly re-ignore unwanted files from whitelisted directories
+# Note: These patterns MUST come after the whitelist rules to take effect
+
+# Hidden files and directories (but keep frontend .env files needed for build)
+**/.*
+!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/.env.default
+!autogpt_platform/frontend/.env.production
+
+# Python artifacts
+**/__pycache__/
+**/*.pyc
+**/*.pyo
+**/.venv/
+**/.ruff_cache/
+**/.pytest_cache/
+**/.coverage
+**/htmlcov/
+
+# Node artifacts
+**/node_modules/
+**/.next/
+**/storybook-static/
+**/playwright-report/
+**/test-results/
+
+# Build artifacts
+**/dist/
+**/build/
+!autogpt_platform/frontend/src/**/build/
+**/target/
+
+# Logs and temp files
+**/*.log
+**/*.tmp

.github/scripts/detect_overlaps.py (vendored, new file, 1229 lines): file diff suppressed because it is too large.

.github/workflows/claude-ci-failure-auto-fix.yml (vendored, 42 lines changed)
@@ -40,6 +40,48 @@ jobs:
           git checkout -b "$BRANCH_NAME"
           echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
 
+      # Backend Python/Poetry setup (so Claude can run linting/tests)
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Set up Python dependency cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.cache/pypoetry
+          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
+
+      - name: Install Poetry
+        run: |
+          cd autogpt_platform/backend
+          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
+          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
+          echo "$HOME/.local/bin" >> $GITHUB_PATH
+
+      - name: Install Python dependencies
+        working-directory: autogpt_platform/backend
+        run: poetry install
+
+      - name: Generate Prisma Client
+        working-directory: autogpt_platform/backend
+        run: poetry run prisma generate && poetry run gen-prisma-stub
+
+      # Frontend Node.js/pnpm setup (so Claude can run linting/tests)
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v6
+        with:
+          node-version: "22"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Install JavaScript dependencies
+        working-directory: autogpt_platform/frontend
+        run: pnpm install --frozen-lockfile
+
       - name: Get CI failure details
         id: failure_details
         uses: actions/github-script@v8

.github/workflows/claude-dependabot.yml (vendored, 22 lines changed)
@@ -77,27 +77,15 @@ jobs:
         run: poetry run prisma generate && poetry run gen-prisma-stub
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend

.github/workflows/claude.yml (vendored, 22 lines changed)
@@ -93,27 +93,15 @@ jobs:
         run: poetry run prisma generate && poetry run gen-prisma-stub
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend

.github/workflows/codeql.yml (vendored, 4 lines changed)
@@ -62,7 +62,7 @@ jobs:
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
+        uses: github/codeql-action/init@v4
         with:
           languages: ${{ matrix.language }}
           build-mode: ${{ matrix.build-mode }}
@@ -93,6 +93,6 @@ jobs:
           exit 1
 
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3
+        uses: github/codeql-action/analyze@v4
         with:
           category: "/language:${{matrix.language}}"

.github/workflows/docs-claude-review.yml (vendored, 34 lines changed)
@@ -7,6 +7,10 @@ on:
       - "docs/integrations/**"
       - "autogpt_platform/backend/backend/blocks/**"
 
+concurrency:
+  group: claude-docs-review-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
 jobs:
   claude-review:
     # Only run for PRs from members/collaborators
@@ -91,5 +95,35 @@
             3. Read corresponding documentation files to verify accuracy
             4. Provide your feedback as a PR comment
 
+            ## IMPORTANT: Comment Marker
+            Start your PR comment with exactly this HTML comment marker on its own line:
+            <!-- CLAUDE_DOCS_REVIEW -->
+
+            This marker is used to identify and replace your comment on subsequent runs.
+
             Be constructive and specific. If everything looks good, say so!
             If there are issues, explain what's wrong and suggest how to fix it.
+
+      - name: Delete old Claude review comments
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          # Get all comment IDs with our marker, sorted by creation date (oldest first)
+          COMMENT_IDS=$(gh api \
+            repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments \
+            --jq '[.[] | select(.body | contains("<!-- CLAUDE_DOCS_REVIEW -->"))] | sort_by(.created_at) | .[].id')
+
+          # Count comments
+          COMMENT_COUNT=$(echo "$COMMENT_IDS" | grep -c . || true)
+
+          if [ "$COMMENT_COUNT" -gt 1 ]; then
+            # Delete all but the last (newest) comment
+            echo "$COMMENT_IDS" | head -n -1 | while read -r COMMENT_ID; do
+              if [ -n "$COMMENT_ID" ]; then
+                echo "Deleting old review comment: $COMMENT_ID"
+                gh api -X DELETE repos/${{ github.repository }}/issues/comments/$COMMENT_ID
+              fi
+            done
+          else
+            echo "No old review comments to clean up"
+          fi

.github/workflows/platform-backend-ci.yml (vendored, 9 lines changed)
@@ -41,13 +41,18 @@ jobs:
         ports:
           - 6379:6379
       rabbitmq:
-        image: rabbitmq:3.12-management
+        image: rabbitmq:4.1.4
         ports:
           - 5672:5672
-          - 15672:15672
         env:
           RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
           RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
+        options: >-
+          --health-cmd "rabbitmq-diagnostics -q ping"
+          --health-interval 30s
+          --health-timeout 10s
+          --health-retries 5
+          --health-start-period 10s
       clamav:
         image: clamav/clamav-debian:latest
         ports:

.github/workflows/platform-frontend-ci.yml (vendored, 247 lines changed)
@@ -6,10 +6,16 @@ on:
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
+      - "autogpt_platform/backend/Dockerfile"
+      - "autogpt_platform/docker-compose.yml"
+      - "autogpt_platform/docker-compose.platform.yml"
   pull_request:
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
       - "autogpt_platform/frontend/**"
+      - "autogpt_platform/backend/Dockerfile"
+      - "autogpt_platform/docker-compose.yml"
+      - "autogpt_platform/docker-compose.platform.yml"
   merge_group:
   workflow_dispatch:
 
@@ -26,7 +32,6 @@ jobs:
   setup:
     runs-on: ubuntu-latest
     outputs:
-      cache-key: ${{ steps.cache-key.outputs.key }}
       components-changed: ${{ steps.filter.outputs.components }}
 
     steps:
@@ -41,28 +46,17 @@
           components:
             - 'autogpt_platform/frontend/src/components/**'
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Generate cache key
-        id: cache-key
-        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
-
-      - name: Cache dependencies
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ steps.cache-key.outputs.key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
-      - name: Install dependencies
+      - name: Install dependencies to populate cache
         run: pnpm install --frozen-lockfile
 
   lint:
@@ -73,22 +67,15 @@
       - name: Checkout repository
         uses: actions/checkout@v6
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -111,22 +98,15 @@
         with:
           fetch-depth: 0
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -141,10 +121,8 @@
           exitOnceUploaded: true
 
   e2e_test:
+    name: end-to-end tests
     runs-on: big-boi
-    needs: setup
-    strategy:
-      fail-fast: false
 
     steps:
       - name: Checkout repository
@@ -152,19 +130,11 @@
         with:
           submodules: recursive
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Copy default supabase .env
+      - name: Set up Platform - Copy default supabase .env
         run: |
           cp ../.env.default ../.env
 
-      - name: Copy backend .env and set OpenAI API key
+      - name: Set up Platform - Copy backend .env and set OpenAI API key
         run: |
           cp ../backend/.env.default ../backend/.env
           echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -172,77 +142,125 @@
           # Used by E2E test data script to generate embeddings for approved store agents
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
 
-      - name: Set up Docker Buildx
+      - name: Set up Platform - Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          driver: docker-container
+          driver-opts: network=host
 
-      - name: Cache Docker layers
+      - name: Set up Platform - Expose GHA cache to docker buildx CLI
+        uses: crazy-max/ghaction-github-runtime@v3
+
+      - name: Set up Platform - Build Docker images (with cache)
+        working-directory: autogpt_platform
+        run: |
+          pip install pyyaml
+
+          # Resolve extends and generate a flat compose file that bake can understand
+          docker compose -f docker-compose.yml config > docker-compose.resolved.yml
+
+          # Add cache configuration to the resolved compose file
+          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
+            --source docker-compose.resolved.yml \
+            --cache-from "type=gha" \
+            --cache-to "type=gha,mode=max" \
+            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
+            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
+            --git-ref "${{ github.ref }}"
+
+          # Build with bake using the resolved compose file (now includes cache config)
+          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
+        env:
+          NEXT_PUBLIC_PW_TEST: true
+
+      - name: Set up tests - Cache E2E test data
+        id: e2e-data-cache
         uses: actions/cache@v5
         with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-frontend-test-
+          path: /tmp/e2e_test_data.sql
+          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
 
-      - name: Run docker compose
+      - name: Set up Platform - Start Supabase DB + Auth
         run: |
-          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
+          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
+          echo "Waiting for database to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
+          echo "Waiting for auth service to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
+
+      - name: Set up Platform - Run migrations
+        run: |
+          echo "Running migrations..."
+          docker compose -f ../docker-compose.resolved.yml run --rm migrate
+          echo "✅ Migrations completed"
         env:
-          DOCKER_BUILDKIT: 1
-          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
-          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
+          NEXT_PUBLIC_PW_TEST: true
 
-      - name: Move cache
+      - name: Set up tests - Load cached E2E test data
+        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
         run: |
-          rm -rf /tmp/.buildx-cache
-          if [ -d "/tmp/.buildx-cache-new" ]; then
-            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
-          fi
+          echo "✅ Found cached E2E test data, restoring..."
+          {
+            echo "SET session_replication_role = 'replica';"
+            cat /tmp/e2e_test_data.sql
+            echo "SET session_replication_role = 'origin';"
+          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
+          # Refresh materialized views after restore
+          docker compose -f ../docker-compose.resolved.yml exec -T db \
+            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
 
-      - name: Wait for services to be ready
+          echo "✅ E2E test data restored from cache"
+
+      - name: Set up Platform - Start (all other services)
         run: |
+          docker compose -f ../docker-compose.resolved.yml up -d --no-build
           echo "Waiting for rest_server to be ready..."
           timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
-          echo "Waiting for database to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
+        env:
+          NEXT_PUBLIC_PW_TEST: true
 
-      - name: Create E2E test data
+      - name: Set up tests - Create E2E test data
+        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
         run: |
           echo "Creating E2E test data..."
-          # First try to run the script from inside the container
-          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
-            echo "✅ Found e2e_test_data.py in container, running it..."
-            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
-              echo "❌ E2E test data creation failed!"
-              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
-              exit 1
-            }
-          else
-            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
-            # Copy the script into the container and run it
-            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
-              echo "❌ Failed to copy script to container"
-              exit 1
-            }
-            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
-              echo "❌ E2E test data creation failed!"
-              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
-              exit 1
-            }
-          fi
+          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
+          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
+            echo "❌ E2E test data creation failed!"
+            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
+            exit 1
+          }
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+          # Dump auth.users + platform schema for cache (two separate dumps)
+          echo "Dumping database for cache..."
+          {
+            docker compose -f ../docker-compose.resolved.yml exec -T db \
+              pg_dump -U postgres --data-only --column-inserts \
+              --table='auth.users' postgres
+            docker compose -f ../docker-compose.resolved.yml exec -T db \
+              pg_dump -U postgres --data-only --column-inserts \
+              --schema=platform \
+              --exclude-table='platform._prisma_migrations' \
+              --exclude-table='platform.apscheduler_jobs' \
+              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
+              postgres
+          } > /tmp/e2e_test_data.sql
+
+          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
+
+      - name: Set up tests - Enable corepack
+        run: corepack enable
+
+      - name: Set up tests - Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
-      - name: Install dependencies
+      - name: Set up tests - Install dependencies
         run: pnpm install --frozen-lockfile
 
-      - name: Install Browser 'chromium'
+      - name: Set up tests - Install browser 'chromium'
         run: pnpm playwright install --with-deps chromium
 
       - name: Run Playwright tests
@@ -269,7 +287,7 @@
 
       - name: Print Final Docker Compose logs
         if: always()
-        run: docker compose -f ../docker-compose.yml logs
+        run: docker compose -f ../docker-compose.resolved.yml logs
 
   integration_test:
     runs-on: ubuntu-latest
@@ -281,22 +299,15 @@
         with:
           submodules: recursive
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
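A note on the cached E2E data handling in the e2e_test job above: the cache is a data-only pg_dump, so the restore step wraps it in session_replication_role toggles, which keeps triggers and foreign-key checks from firing while rows are replayed out of dependency order. Below is a minimal Python sketch of that wrapping; only the /tmp/e2e_test_data.sql path and the role toggles come from the workflow, while the function name and the fallback sample row are purely illustrative.

```python
from pathlib import Path


def build_restore_sql(dump_path: str = "/tmp/e2e_test_data.sql") -> str:
    """Wrap a data-only pg_dump in session_replication_role toggles, as the CI restore step does."""
    path = Path(dump_path)
    # Fall back to a tiny sample statement so the sketch runs even without the real dump file.
    dump = path.read_text() if path.exists() else "INSERT INTO platform.example VALUES (1);"
    return (
        "SET session_replication_role = 'replica';\n"    # triggers and FK enforcement are skipped
        + dump
        + "\nSET session_replication_role = 'origin';\n"  # back to normal enforcement
    )


if __name__ == "__main__":
    # The workflow pipes the equivalent of this string into: psql -U postgres -d postgres -b
    print(build_restore_sql())
```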
.github/workflows/pr-overlap-check.yml (vendored, new file, 39 lines)
name: PR Overlap Detection

on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches:
      - dev
      - master

permissions:
  contents: read
  pull-requests: write

jobs:
  check-overlaps:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Need full history for merge testing

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Configure git
        run: |
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"

      - name: Run overlap detection
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        # Always succeed - this check informs contributors, it shouldn't block merging
        continue-on-error: true
        run: |
          python .github/scripts/detect_overlaps.py ${{ github.event.pull_request.number }}

.github/workflows/scripts/docker-ci-fix-compose-build-cache.py (vendored, new file, 195 lines)
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""

import argparse

import yaml


DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []

    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)

        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)

        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name

        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []

        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue

        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123

        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")

            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")

            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")

        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")

            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)

        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f"  - {svc}")
        print(f"    image: {svc_config.get('image', 'N/A')}")
        print(f"    cache_from: {cache_from_list}")
        print(f"    cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()
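As a usage note for the script above, here is a small self-contained sketch of the cache-scope names it generates. The scope format (platform-{component}-{target}-{hash|ref}) and the fallback order come from the script; the concrete component, target, hash, and ref values in the example are made up.

```python
DEFAULT_BRANCH = "dev"  # same default branch constant the script uses


def cache_scopes(component: str, target: str, component_hash: str, git_ref: str):
    """Return (cache_from, cache_to) lists in the same priority order as the script above."""
    ref = git_ref.replace("refs/heads/", "").replace("/", "-")
    base_from, base_to = "type=gha", "type=gha,mode=max"
    cache_from, cache_to = [], []
    if component_hash:  # 1. primary: exact content-hash match
        cache_from.append(f"{base_from},scope=platform-{component}-{target}-{component_hash}")
        cache_to.append(f"{base_to},scope=platform-{component}-{target}-{component_hash}")
    if ref:  # 2. fallback: branch-based cache (also written to)
        cache_from.append(f"{base_from},scope=platform-{component}-{target}-{ref}")
        cache_to.append(f"{base_to},scope=platform-{component}-{target}-{ref}")
    if ref and ref != DEFAULT_BRANCH:  # 3. fallback: dev-branch cache for feature branches/PRs
        cache_from.append(f"{base_from},scope=platform-{component}-{target}-{DEFAULT_BRANCH}")
    return cache_from, cache_to


if __name__ == "__main__":
    # Hypothetical values: a backend image built with target "server" on a feature branch.
    frm, to = cache_scopes("backend", "server", "abc123", "refs/heads/my-feature")
    print("cache_from:", *frm, sep="\n  ")
    print("cache_to:", *to, sep="\n  ")
```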
@@ -45,6 +45,11 @@ AutoGPT Platform is a monorepo containing:
 - Backend/Frontend services use YAML anchors for consistent configuration
 - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
 
+### Branching Strategy
+
+- **`dev`** is the main development branch. All PRs should target `dev`.
+- **`master`** is the production branch. Only used for production releases.
+
 ### Creating Pull Requests
 
 - Create the PR against the `dev` branch of the repository.

autogpt_platform/autogpt_libs/poetry.lock (generated, 169 lines changed)
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cryptography"
|
name = "cryptography"
|
||||||
version = "46.0.4"
|
version = "46.0.5"
|
||||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
|
{file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
|
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
|
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
|
{file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
|
{file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
|
{file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
|
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
|
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
|
{file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
|
{file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
|
{file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
|
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
|
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
|
{file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
|
{file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
|
||||||
{file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
|
{file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
 pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
 sdist = ["build (>=1.0.0)"]
 ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
 test-randomorder = ["pytest-randomly"]

 [[package]]
@@ -570,24 +570,25 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]

 [[package]]
 name = "fastapi"
-version = "0.128.0"
+version = "0.128.7"
 description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-{file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
-{file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
+{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
+{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
 ]

 [package.dependencies]
 annotated-doc = ">=0.0.2"
 pydantic = ">=2.7.0"
-starlette = ">=0.40.0,<0.51.0"
+starlette = ">=0.40.0,<1.0.0"
 typing-extensions = ">=4.8.0"
+typing-inspection = ">=0.4.2"

 [package.extras]
-all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
 standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
 standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]

@@ -1062,14 +1063,14 @@ urllib3 = ">=1.26.0,<3"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "launchdarkly-server-sdk"
|
name = "launchdarkly-server-sdk"
|
||||||
version = "9.14.1"
|
version = "9.15.0"
|
||||||
description = "LaunchDarkly SDK for Python"
|
description = "LaunchDarkly SDK for Python"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.10"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
|
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
|
||||||
{file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
|
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -1478,14 +1479,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "postgrest"
|
name = "postgrest"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
|
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
|
||||||
{file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
|
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2248,14 +2249,14 @@ cli = ["click (>=5.0)"]
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "realtime"
|
name = "realtime"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = ""
|
description = ""
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
|
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
|
||||||
{file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
|
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2436,14 +2437,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "storage3"
|
name = "storage3"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = "Supabase Storage client for Python."
|
description = "Supabase Storage client for Python."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
|
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
|
||||||
{file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
|
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2487,35 +2488,35 @@ python-dateutil = ">=2.6.0"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "supabase"
|
name = "supabase"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = "Supabase client for Python."
|
description = "Supabase client for Python."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
|
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
|
||||||
{file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
|
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
httpx = ">=0.26,<0.29"
|
httpx = ">=0.26,<0.29"
|
||||||
postgrest = "2.27.2"
|
postgrest = "2.28.0"
|
||||||
realtime = "2.27.2"
|
realtime = "2.28.0"
|
||||||
storage3 = "2.27.2"
|
storage3 = "2.28.0"
|
||||||
supabase-auth = "2.27.2"
|
supabase-auth = "2.28.0"
|
||||||
supabase-functions = "2.27.2"
|
supabase-functions = "2.28.0"
|
||||||
yarl = ">=1.22.0"
|
yarl = ">=1.22.0"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "supabase-auth"
|
name = "supabase-auth"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = "Python Client Library for Supabase Auth"
|
description = "Python Client Library for Supabase Auth"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
|
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
|
||||||
{file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
|
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2525,14 +2526,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "supabase-functions"
|
name = "supabase-functions"
|
||||||
version = "2.27.2"
|
version = "2.28.0"
|
||||||
description = "Library for Supabase Functions"
|
description = "Library for Supabase Functions"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
|
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
||||||
{file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
|
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2911,4 +2912,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<4.0"
-content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
+content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"
@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
 colorama = "^0.4.6"
 cryptography = "^46.0"
 expiringdict = "^1.2.2"
-fastapi = "^0.128.0"
+fastapi = "^0.128.7"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.14.1"
+launchdarkly-server-sdk = "^9.15.0"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = { version = "^2.11.0", extras = ["crypto"] }
 redis = "^6.2.0"
-supabase = "^2.27.2"
+supabase = "^2.28.0"
 uvicorn = "^0.40.0"

 [tool.poetry.group.dev.dependencies]
@@ -104,6 +104,12 @@ TWITTER_CLIENT_SECRET=
 # Make a new workspace for your OAuth APP -- trust me
 # https://linear.app/settings/api/applications/new
 # Callback URL: http://localhost:3000/auth/integrations/oauth_callback
+LINEAR_API_KEY=
+# Linear project and team IDs for the feature request tracker.
+# Find these in your Linear workspace URL: linear.app/<workspace>/project/<project-id>
+# and in team settings. Used by the chat copilot to file and search feature requests.
+LINEAR_FEATURE_REQUEST_PROJECT_ID=
+LINEAR_FEATURE_REQUEST_TEAM_ID=
 LINEAR_CLIENT_ID=
 LINEAR_CLIENT_SECRET=

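The new LINEAR_* keys above are plain environment variables. Since pydantic-settings is already a backend dependency (see the pyproject.toml hunk above), they could be read with a settings class along the following lines. This is an illustrative sketch only — the class name and defaults are assumptions, not the backend's actual settings code.

```python
from pydantic_settings import BaseSettings


class LinearSettings(BaseSettings):
    """Hypothetical settings class; field names mirror the LINEAR_* keys above."""

    linear_api_key: str = ""
    linear_feature_request_project_id: str = ""
    linear_feature_request_team_id: str = ""
    linear_client_id: str = ""
    linear_client_secret: str = ""


# pydantic-settings matches environment variables to field names case-insensitively,
# so LINEAR_FEATURE_REQUEST_PROJECT_ID fills linear_feature_request_project_id.
settings = LinearSettings()
```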
@@ -1,3 +1,5 @@
+# ============================ DEPENDENCY BUILDER ============================ #
+
 FROM debian:13-slim AS builder

 # Set environment variables
@@ -51,27 +53,62 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub

-FROM debian:13-slim AS server_dependencies
+# =============================== DB MIGRATOR =============================== #

+# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
+FROM debian:13-slim AS migrate
+
+WORKDIR /app/autogpt_platform/backend
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3.13 \
+    python3-pip \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy Node.js from builder (needed for Prisma CLI)
+COPY --from=builder /usr/bin/node /usr/bin/node
+COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
+COPY --from=builder /usr/bin/npm /usr/bin/npm
+
+# Copy Prisma binaries
+COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
+
+# Install prisma-client-py directly (much smaller than copying full venv)
+RUN pip3 install prisma>=0.15.0 --break-system-packages
+
+COPY autogpt_platform/backend/schema.prisma ./
+COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
+COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
+COPY autogpt_platform/backend/migrations ./migrations
+
+# ============================== BACKEND SERVER ============================== #
+
+FROM debian:13-slim AS server
+
 WORKDIR /app

-ENV POETRY_HOME=/opt/poetry \
-    POETRY_NO_INTERACTION=1 \
-    POETRY_VIRTUALENVS_CREATE=true \
-    POETRY_VIRTUALENVS_IN_PROJECT=true \
-    DEBIAN_FRONTEND=noninteractive
-ENV PATH=/opt/poetry/bin:$PATH
+ENV DEBIAN_FRONTEND=noninteractive

-# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
-RUN apt-get update && apt-get install -y \
+# Install Python, FFmpeg, ImageMagick, and CLI tools for agent use.
+# bubblewrap provides OS-level sandbox (whitelist-only FS + no network)
+# for the bash_exec MCP tool.
+# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
+RUN apt-get update && apt-get install -y --no-install-recommends \
     python3.13 \
     python3-pip \
     ffmpeg \
     imagemagick \
+    jq \
+    ripgrep \
+    tree \
+    bubblewrap \
     && rm -rf /var/lib/apt/lists/*

-# Copy only necessary files from builder
-COPY --from=builder /app /app
+# Copy poetry (build-time only, for `poetry install --only-root` to create entry points)
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
 # Copy Node.js installation for Prisma
@@ -81,30 +118,25 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
 COPY --from=builder /usr/bin/npx /usr/bin/npx
 COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

-ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
-
-RUN mkdir -p /app/autogpt_platform/autogpt_libs
-RUN mkdir -p /app/autogpt_platform/backend
-
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
-
 WORKDIR /app/autogpt_platform/backend

-FROM server_dependencies AS migrate
-
-# Migration stage only needs schema and migrations - much lighter than full backend
-COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
-COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
-COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
+# Copy only the .venv from builder (not the entire /app directory)
+# The .venv includes the generated Prisma client
+COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
+ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

-FROM server_dependencies AS server
+# Copy dependency files + autogpt_libs (path dependency)
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./

-COPY autogpt_platform/backend /app/autogpt_platform/backend
+# Copy backend code + docs (for Copilot docs search)
+COPY autogpt_platform/backend ./
 COPY docs /app/docs
-RUN poetry install --no-ansi --only-root
+# Install the project package to create entry point scripts in .venv/bin/
+# (e.g., rest, executor, ws, db, scheduler, notification - see [tool.poetry.scripts])
+RUN POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true \
+    poetry install --no-ansi --only-root

 ENV PORT=8000

-CMD ["poetry", "run", "rest"]
+CMD ["rest"]
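The server stage now installs bubblewrap for the bash_exec tool's OS-level sandbox (whitelist-only filesystem, no network), per the comment in the hunk above. A minimal sketch of how such a sandbox can be invoked from Python is shown below; the function name, mounts, and flags are illustrative assumptions, not the repository's actual implementation.

```python
import subprocess


def run_sandboxed_bash(command: str, workdir: str, timeout: int = 60) -> subprocess.CompletedProcess:
    """Illustrative bubblewrap invocation: read-only system dirs, one writable dir, no network."""
    bwrap_cmd = [
        "bwrap",
        "--ro-bind", "/usr", "/usr",      # whitelist system files read-only
        "--symlink", "usr/bin", "/bin",   # Debian merged-/usr layout
        "--symlink", "usr/lib", "/lib",
        "--proc", "/proc",
        "--dev", "/dev",
        "--tmpfs", "/tmp",                # scratch space, discarded afterwards
        "--bind", workdir, workdir,       # the only persistent writable path
        "--chdir", workdir,
        "--unshare-net",                  # no network access inside the sandbox
        "--unshare-pid",
        "--die-with-parent",
        "/bin/bash", "-c", command,
    ]
    return subprocess.run(bwrap_cmd, capture_output=True, text=True, timeout=timeout)
```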
@@ -1,4 +1,9 @@
-"""Common test fixtures for server tests."""
+"""Common test fixtures for server tests.
+
+Note: Common fixtures like test_user_id, admin_user_id, target_user_id,
+setup_test_user, and setup_admin_user are defined in the parent conftest.py
+(backend/conftest.py) and are available here automatically.
+"""

 import pytest
 from pytest_snapshot.plugin import Snapshot
@@ -11,54 +16,6 @@ def configured_snapshot(snapshot: Snapshot) -> Snapshot:
     return snapshot


-@pytest.fixture
-def test_user_id() -> str:
-    """Test user ID fixture."""
-    return "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
-
-
-@pytest.fixture
-def admin_user_id() -> str:
-    """Admin user ID fixture."""
-    return "4e53486c-cf57-477e-ba2a-cb02dc828e1b"
-
-
-@pytest.fixture
-def target_user_id() -> str:
-    """Target user ID fixture."""
-    return "5e53486c-cf57-477e-ba2a-cb02dc828e1c"
-
-
-@pytest.fixture
-async def setup_test_user(test_user_id):
-    """Create test user in database before tests."""
-    from backend.data.user import get_or_create_user
-
-    # Create the test user in the database using JWT token format
-    user_data = {
-        "sub": test_user_id,
-        "email": "test@example.com",
-        "user_metadata": {"name": "Test User"},
-    }
-    await get_or_create_user(user_data)
-    return test_user_id
-
-
-@pytest.fixture
-async def setup_admin_user(admin_user_id):
-    """Create admin user in database before tests."""
-    from backend.data.user import get_or_create_user
-
-    # Create the admin user in the database using JWT token format
-    user_data = {
-        "sub": admin_user_id,
-        "email": "test-admin@example.com",
-        "user_metadata": {"name": "Test Admin"},
-    }
-    await get_or_create_user(user_data)
-    return admin_user_id
-
-
 @pytest.fixture
 def mock_jwt_user(test_user_id):
     """Provide mock JWT payload for regular user testing."""
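With the user fixtures moved up to backend/conftest.py, tests in this package keep requesting them by name — pytest resolves fixtures from parent conftest modules automatically. A hedged usage sketch (the test itself is hypothetical):

```python
import pytest


@pytest.mark.asyncio(loop_scope="session")
async def test_uses_shared_fixtures(setup_test_user, test_user_id):
    # Both fixtures now come from backend/conftest.py; setup_test_user
    # creates the user row and returns the same ID as test_user_id.
    assert setup_test_user == test_user_id
```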
@@ -10,7 +10,7 @@ from typing_extensions import TypedDict

 import backend.api.features.store.cache as store_cache
 import backend.api.features.store.model as store_model
-import backend.data.block
+import backend.blocks
 from backend.api.external.middleware import require_permission
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
@@ -67,7 +67,7 @@ async def get_user_info(
     dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
 )
 async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
-    blocks = [block() for block in backend.data.block.get_blocks().values()]
+    blocks = [block() for block in backend.blocks.get_blocks().values()]
     return [b.to_dict() for b in blocks if not b.disabled]


@@ -83,7 +83,7 @@ async def execute_graph_block(
         require_permission(APIKeyPermission.EXECUTE_BLOCK)
     ),
 ) -> CompletedBlockOutput:
-    obj = backend.data.block.get_block(block_id)
+    obj = backend.blocks.get_block(block_id)
     if not obj:
         raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
     if obj.disabled:
@@ -15,9 +15,9 @@ from prisma.enums import APIKeyPermission
 from pydantic import BaseModel, Field

 from backend.api.external.middleware import require_permission
-from backend.api.features.chat.model import ChatSession
-from backend.api.features.chat.tools import find_agent_tool, run_agent_tool
-from backend.api.features.chat.tools.models import ToolResponseBase
+from backend.copilot.model import ChatSession
+from backend.copilot.tools import find_agent_tool, run_agent_tool
+from backend.copilot.tools.models import ToolResponseBase
 from backend.data.auth.base import APIAuthorizationInfo

 logger = logging.getLogger(__name__)
@@ -10,10 +10,15 @@ import backend.api.features.library.db as library_db
 import backend.api.features.library.model as library_model
 import backend.api.features.store.db as store_db
 import backend.api.features.store.model as store_model
-import backend.data.block
 from backend.blocks import load_all_blocks
+from backend.blocks._base import (
+    AnyBlockSchema,
+    BlockCategory,
+    BlockInfo,
+    BlockSchema,
+    BlockType,
+)
 from backend.blocks.llm import LlmModel
-from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
 from backend.data.db import query_raw_with_schema
 from backend.integrations.providers import ProviderName
 from backend.util.cache import cached
@@ -22,7 +27,7 @@ from backend.util.models import Pagination
 from .model import (
     BlockCategoryResponse,
     BlockResponse,
-    BlockType,
+    BlockTypeFilter,
     CountResponse,
     FilterType,
     Provider,
@@ -88,7 +93,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
 def get_blocks(
     *,
     category: str | None = None,
-    type: BlockType | None = None,
+    type: BlockTypeFilter | None = None,
     provider: ProviderName | None = None,
     page: int = 1,
     page_size: int = 50,
@@ -669,9 +674,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
     for block_type in load_all_blocks().values():
         block: AnyBlockSchema = block_type()
         if block.disabled or block.block_type in (
-            backend.data.block.BlockType.INPUT,
-            backend.data.block.BlockType.OUTPUT,
-            backend.data.block.BlockType.AGENT,
+            BlockType.INPUT,
+            BlockType.OUTPUT,
+            BlockType.AGENT,
         ):
             continue
         # Find the execution count for this block
@@ -4,7 +4,7 @@ from pydantic import BaseModel

 import backend.api.features.library.model as library_model
 import backend.api.features.store.model as store_model
-from backend.data.block import BlockInfo
+from backend.blocks._base import BlockInfo
 from backend.integrations.providers import ProviderName
 from backend.util.models import Pagination

@@ -15,7 +15,7 @@ FilterType = Literal[
     "my_agents",
 ]

-BlockType = Literal["all", "input", "action", "output"]
+BlockTypeFilter = Literal["all", "input", "action", "output"]


 class SearchEntry(BaseModel):
@@ -88,7 +88,7 @@ async def get_block_categories(
 )
 async def get_blocks(
     category: Annotated[str | None, fastapi.Query()] = None,
-    type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
+    type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None,
     provider: Annotated[ProviderName | None, fastapi.Query()] = None,
     page: Annotated[int, fastapi.Query()] = 1,
     page_size: Annotated[int, fastapi.Query()] = 50,
@@ -1,119 +0,0 @@
|
|||||||
import pytest
|
|
||||||
|
|
||||||
from .model import (
|
|
||||||
ChatMessage,
|
|
||||||
ChatSession,
|
|
||||||
Usage,
|
|
||||||
get_chat_session,
|
|
||||||
upsert_chat_session,
|
|
||||||
)
|
|
||||||
|
|
||||||
messages = [
|
|
||||||
ChatMessage(content="Hello, how are you?", role="user"),
|
|
||||||
ChatMessage(
|
|
||||||
content="I'm fine, thank you!",
|
|
||||||
role="assistant",
|
|
||||||
tool_calls=[
|
|
||||||
{
|
|
||||||
"id": "t123",
|
|
||||||
"type": "function",
|
|
||||||
"function": {
|
|
||||||
"name": "get_weather",
|
|
||||||
"arguments": '{"city": "New York"}',
|
|
||||||
},
|
|
||||||
}
|
|
||||||
],
|
|
||||||
),
|
|
||||||
ChatMessage(
|
|
||||||
content="I'm using the tool to get the weather",
|
|
||||||
role="tool",
|
|
||||||
tool_call_id="t123",
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_chatsession_serialization_deserialization():
|
|
||||||
s = ChatSession.new(user_id="abc123")
|
|
||||||
s.messages = messages
|
|
||||||
s.usage = [Usage(prompt_tokens=100, completion_tokens=200, total_tokens=300)]
|
|
||||||
serialized = s.model_dump_json()
|
|
||||||
s2 = ChatSession.model_validate_json(serialized)
|
|
||||||
assert s2.model_dump() == s.model_dump()
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_chatsession_redis_storage(setup_test_user, test_user_id):
|
|
||||||
|
|
||||||
s = ChatSession.new(user_id=test_user_id)
|
|
||||||
s.messages = messages
|
|
||||||
|
|
||||||
s = await upsert_chat_session(s)
|
|
||||||
|
|
||||||
s2 = await get_chat_session(
|
|
||||||
session_id=s.session_id,
|
|
||||||
user_id=s.user_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
assert s2 == s
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_chatsession_redis_storage_user_id_mismatch(
|
|
||||||
setup_test_user, test_user_id
|
|
||||||
):
|
|
||||||
|
|
||||||
s = ChatSession.new(user_id=test_user_id)
|
|
||||||
s.messages = messages
|
|
||||||
s = await upsert_chat_session(s)
|
|
||||||
|
|
||||||
s2 = await get_chat_session(s.session_id, "different_user_id")
|
|
||||||
|
|
||||||
assert s2 is None
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_chatsession_db_storage(setup_test_user, test_user_id):
|
|
||||||
"""Test that messages are correctly saved to and loaded from DB (not cache)."""
|
|
||||||
from backend.data.redis_client import get_redis_async
|
|
||||||
|
|
||||||
# Create session with messages including assistant message
|
|
||||||
s = ChatSession.new(user_id=test_user_id)
|
|
||||||
s.messages = messages # Contains user, assistant, and tool messages
|
|
||||||
assert s.session_id is not None, "Session id is not set"
|
|
||||||
# Upsert to save to both cache and DB
|
|
||||||
s = await upsert_chat_session(s)
|
|
||||||
|
|
||||||
# Clear the Redis cache to force DB load
|
|
||||||
redis_key = f"chat:session:{s.session_id}"
|
|
||||||
async_redis = await get_redis_async()
|
|
||||||
await async_redis.delete(redis_key)
|
|
||||||
|
|
||||||
# Load from DB (cache was cleared)
|
|
||||||
s2 = await get_chat_session(
|
|
||||||
session_id=s.session_id,
|
|
||||||
user_id=s.user_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
assert s2 is not None, "Session not found after loading from DB"
|
|
||||||
assert len(s2.messages) == len(
|
|
||||||
s.messages
|
|
||||||
), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
|
|
||||||
|
|
||||||
# Verify all roles are present
|
|
||||||
roles = [m.role for m in s2.messages]
|
|
||||||
assert "user" in roles, f"User message missing. Roles found: {roles}"
|
|
||||||
assert "assistant" in roles, f"Assistant message missing. Roles found: {roles}"
|
|
||||||
assert "tool" in roles, f"Tool message missing. Roles found: {roles}"
|
|
||||||
|
|
||||||
# Verify message content
|
|
||||||
for orig, loaded in zip(s.messages, s2.messages):
|
|
||||||
assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}"
|
|
||||||
assert (
|
|
||||||
orig.content == loaded.content
|
|
||||||
), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
|
|
||||||
if orig.tool_calls:
|
|
||||||
assert (
|
|
||||||
loaded.tool_calls is not None
|
|
||||||
), f"Tool calls missing for {orig.role} message"
|
|
||||||
assert len(orig.tool_calls) == len(loaded.tool_calls)
|
|
||||||
@@ -1,5 +1,6 @@
 """Chat API routes for chat session management and streaming via SSE."""

+import asyncio
 import logging
 import uuid as uuid_module
 from collections.abc import AsyncGenerator
@@ -10,20 +11,31 @@ from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response,
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel

-from backend.util.exceptions import NotFoundError
-from . import service as chat_service
-from . import stream_registry
-from .completion_handler import process_operation_failure, process_operation_success
-from .config import ChatConfig
-from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
-from .response_model import StreamFinish, StreamHeartbeat
-from .tools.models import (
+from backend.copilot import service as chat_service
+from backend.copilot import stream_registry
+from backend.copilot.completion_handler import (
+    process_operation_failure,
+    process_operation_success,
+)
+from backend.copilot.config import ChatConfig
+from backend.copilot.executor.utils import enqueue_copilot_task
+from backend.copilot.model import (
+    ChatMessage,
+    ChatSession,
+    append_and_save_message,
+    create_chat_session,
+    delete_chat_session,
+    get_chat_session,
+    get_user_sessions,
+)
+from backend.copilot.response_model import StreamError, StreamFinish, StreamHeartbeat
+from backend.copilot.tools.models import (
     AgentDetailsResponse,
     AgentOutputResponse,
     AgentPreviewResponse,
     AgentSavedResponse,
     AgentsFoundResponse,
+    BlockDetailsResponse,
     BlockListResponse,
     BlockOutputResponse,
     ClarificationNeededResponse,
@@ -40,6 +52,8 @@ from .tools.models import (
     SetupRequirementsResponse,
     UnderstandingUpdatedResponse,
 )
+from backend.copilot.tracking import track_user_message
+from backend.util.exceptions import NotFoundError

 config = ChatConfig()

@@ -199,6 +213,43 @@ async def create_session(
     )


+@router.delete(
+    "/sessions/{session_id}",
+    dependencies=[Security(auth.requires_user)],
+    status_code=204,
+    responses={404: {"description": "Session not found or access denied"}},
+)
+async def delete_session(
+    session_id: str,
+    user_id: Annotated[str, Security(auth.get_user_id)],
+) -> Response:
+    """
+    Delete a chat session.
+
+    Permanently removes a chat session and all its messages.
+    Only the owner can delete their sessions.
+
+    Args:
+        session_id: The session ID to delete.
+        user_id: The authenticated user's ID.
+
+    Returns:
+        204 No Content on success.
+
+    Raises:
+        HTTPException: 404 if session not found or not owned by user.
+    """
+    deleted = await delete_chat_session(session_id, user_id)
+
+    if not deleted:
+        raise HTTPException(
+            status_code=404,
+            detail=f"Session {session_id} not found or access denied",
+        )
+
+    return Response(status_code=204)
+
+
 @router.get(
     "/sessions/{session_id}",
 )
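For reference, the new endpoint can be exercised with a plain HTTP client; the base URL, path prefix, and auth header below are placeholders and depend on the deployment.

```python
import asyncio

import httpx


async def delete_chat_session_example(session_id: str, token: str) -> None:
    # Placeholder base URL / prefix; adjust to the actual API mount point.
    async with httpx.AsyncClient(base_url="http://localhost:8006/api") as client:
        resp = await client.delete(
            f"/chat/sessions/{session_id}",
            headers={"Authorization": f"Bearer {token}"},
        )
        if resp.status_code == 404:
            print("Session not found or not owned by this user")
        else:
            resp.raise_for_status()  # expect 204 No Content


# asyncio.run(delete_chat_session_example("some-session-id", "jwt-token"))
```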
@@ -231,6 +282,10 @@ async def get_session(
     active_task, last_message_id = await stream_registry.get_active_task_for_session(
         session_id, user_id
     )
+    logger.info(
+        f"[GET_SESSION] session={session_id}, active_task={active_task is not None}, "
+        f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
+    )
     if active_task:
         # Filter out the in-progress assistant message from the session response.
         # The client will receive the complete assistant response through the SSE
@@ -300,10 +355,9 @@ async def stream_chat_post(
         f"user={user_id}, message_len={len(request.message)}",
         extra={"json_fields": log_meta},
     )
-    session = await _validate_and_get_session(session_id, user_id)
+    await _validate_and_get_session(session_id, user_id)
     logger.info(
-        f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms",
+        f"[TIMING] session validated in {(time.perf_counter() - stream_start_time) * 1000:.1f}ms",
         extra={
             "json_fields": {
                 **log_meta,
@@ -312,6 +366,25 @@
         },
     )

+    # Atomically append user message to session BEFORE creating task to avoid
+    # race condition where GET_SESSION sees task as "running" but message isn't
+    # saved yet. append_and_save_message re-fetches inside a lock to prevent
+    # message loss from concurrent requests.
+    if request.message:
+        message = ChatMessage(
+            role="user" if request.is_user_message else "assistant",
+            content=request.message,
+        )
+        if request.is_user_message:
+            track_user_message(
+                user_id=user_id,
+                session_id=session_id,
+                message_length=len(request.message),
+            )
+        logger.info(f"[STREAM] Saving user message to session {session_id}")
+        await append_and_save_message(session_id, message)
+        logger.info(f"[STREAM] User message saved for session {session_id}")
+
     # Create a task in the stream registry for reconnection support
     task_id = str(uuid_module.uuid4())
     operation_id = str(uuid_module.uuid4())
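The comment in this hunk describes the locking behaviour of append_and_save_message: re-fetch the session inside a per-session lock so concurrent requests cannot drop each other's messages. A minimal sketch of that pattern, assuming a Redis-backed session store (the key names and helper below are illustrative, not the actual backend.copilot.model implementation):

```python
import json

from redis.asyncio import Redis


async def append_and_save_message_sketch(redis: Redis, session_id: str, message: dict) -> None:
    # One lock per session serialises concurrent appends.
    async with redis.lock(f"chat:session:{session_id}:lock", timeout=10):
        key = f"chat:session:{session_id}"
        raw = await redis.get(key)
        # Re-fetch *inside* the lock so we append to the latest message list.
        session = json.loads(raw) if raw else {"session_id": session_id, "messages": []}
        session["messages"].append(message)
        await redis.set(key, json.dumps(session))
```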
@@ -327,7 +400,7 @@
         operation_id=operation_id,
     )
     logger.info(
-        f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms",
+        f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start) * 1000:.1f}ms",
         extra={
             "json_fields": {
                 **log_meta,
@@ -336,82 +409,19 @@ async def stream_chat_post(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
# Background task that runs the AI generation independently of SSE connection
|
await enqueue_copilot_task(
|
||||||
async def run_ai_generation():
|
task_id=task_id,
|
||||||
import time as time_module
|
session_id=session_id,
|
||||||
|
user_id=user_id,
|
||||||
|
operation_id=operation_id,
|
||||||
|
message=request.message,
|
||||||
|
is_user_message=request.is_user_message,
|
||||||
|
context=request.context,
|
||||||
|
)
|
||||||
|
|
||||||
gen_start_time = time_module.perf_counter()
|
|
||||||
logger.info(
|
|
||||||
f"[TIMING] run_ai_generation STARTED, task={task_id}, session={session_id}, user={user_id}",
|
|
||||||
extra={"json_fields": log_meta},
|
|
||||||
)
|
|
||||||
first_chunk_time, ttfc = None, None
|
|
||||||
chunk_count = 0
|
|
||||||
try:
|
|
||||||
async for chunk in chat_service.stream_chat_completion(
|
|
||||||
session_id,
|
|
||||||
request.message,
|
|
||||||
is_user_message=request.is_user_message,
|
|
||||||
user_id=user_id,
|
|
||||||
session=session, # Pass pre-fetched session to avoid double-fetch
|
|
||||||
context=request.context,
|
|
||||||
_task_id=task_id, # Pass task_id so service emits start with taskId for reconnection
|
|
||||||
):
|
|
||||||
chunk_count += 1
|
|
||||||
if first_chunk_time is None:
|
|
||||||
first_chunk_time = time_module.perf_counter()
|
|
||||||
ttfc = first_chunk_time - gen_start_time
|
|
||||||
logger.info(
|
|
||||||
f"[TIMING] FIRST AI CHUNK at {ttfc:.2f}s, type={type(chunk).__name__}",
|
|
||||||
extra={
|
|
||||||
"json_fields": {
|
|
||||||
**log_meta,
|
|
||||||
"chunk_type": type(chunk).__name__,
|
|
||||||
"time_to_first_chunk_ms": ttfc * 1000,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
# Write to Redis (subscribers will receive via XREAD)
|
|
||||||
await stream_registry.publish_chunk(task_id, chunk)
|
|
||||||
|
|
||||||
gen_end_time = time_module.perf_counter()
|
|
||||||
total_time = (gen_end_time - gen_start_time) * 1000
|
|
||||||
logger.info(
|
|
||||||
f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; "
|
|
||||||
f"task={task_id}, session={session_id}, "
|
|
||||||
f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
|
|
||||||
extra={
|
|
||||||
"json_fields": {
|
|
||||||
**log_meta,
|
|
||||||
"total_time_ms": total_time,
|
|
||||||
"time_to_first_chunk_ms": (
|
|
||||||
ttfc * 1000 if ttfc is not None else None
|
|
||||||
),
|
|
||||||
"n_chunks": chunk_count,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
await stream_registry.mark_task_completed(task_id, "completed")
|
|
||||||
except Exception as e:
|
|
||||||
elapsed = time_module.perf_counter() - gen_start_time
|
|
||||||
logger.error(
|
|
||||||
f"[TIMING] run_ai_generation ERROR after {elapsed:.2f}s: {e}",
|
|
||||||
extra={
|
|
||||||
"json_fields": {
|
|
||||||
**log_meta,
|
|
||||||
"elapsed_ms": elapsed * 1000,
|
|
||||||
"error": str(e),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
await stream_registry.mark_task_completed(task_id, "failed")
|
|
||||||
|
|
||||||
# Start the AI generation in a background task
|
|
||||||
bg_task = asyncio.create_task(run_ai_generation())
|
|
||||||
await stream_registry.set_task_asyncio_task(task_id, bg_task)
|
|
||||||
setup_time = (time.perf_counter() - stream_start_time) * 1000
|
setup_time = (time.perf_counter() - stream_start_time) * 1000
|
||||||
logger.info(
|
logger.info(
|
||||||
f"[TIMING] Background task started, setup={setup_time:.1f}ms",
|
f"[TIMING] Task enqueued to RabbitMQ, setup={setup_time:.1f}ms",
|
||||||
extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
|
extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -506,8 +516,14 @@
                     "json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
                 },
             )
+            # Surface error to frontend so it doesn't appear stuck
+            yield StreamError(
+                errorText="An error occurred. Please try again.",
+                code="stream_error",
+            ).to_sse()
+            yield StreamFinish().to_sse()
         finally:
-            # Unsubscribe when client disconnects or stream ends to prevent resource leak
+            # Unsubscribe when client disconnects or stream ends
            if subscriber_queue is not None:
                try:
                    await stream_registry.unsubscribe_from_task(
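StreamError and StreamFinish are serialised to SSE frames via .to_sse(); the actual models live in backend.copilot.response_model, but the wire format of such a frame is roughly as follows. Illustrative sketch only — the helper name is an assumption; the field names are taken from the hunk above.

```python
import json


def to_sse_frame(event_type: str, payload: dict) -> str:
    # One server-sent event: an event name, a JSON data line, and a blank-line terminator.
    return f"event: {event_type}\ndata: {json.dumps(payload)}\n\n"


frame = to_sse_frame(
    "error",
    {"errorText": "An error occurred. Please try again.", "code": "stream_error"},
)
```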
@@ -751,8 +767,6 @@
     )

     async def event_generator() -> AsyncGenerator[str, None]:
-        import asyncio
-
         heartbeat_interval = 15.0  # Send heartbeat every 15 seconds
         try:
             while True:
@@ -971,6 +985,7 @@ ToolResponseUnion = (
     | AgentSavedResponse
     | ClarificationNeededResponse
     | BlockListResponse
+    | BlockDetailsResponse
     | BlockOutputResponse
     | DocSearchResultsResponse
     | DocPageResponse
|
|||||||
@@ -1,82 +0,0 @@
|
|||||||
import logging
|
|
||||||
from os import getenv
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
from . import service as chat_service
|
|
||||||
from .model import create_chat_session, get_chat_session, upsert_chat_session
|
|
||||||
from .response_model import (
|
|
||||||
StreamError,
|
|
||||||
StreamFinish,
|
|
||||||
StreamTextDelta,
|
|
||||||
StreamToolOutputAvailable,
|
|
||||||
)
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_stream_chat_completion(setup_test_user, test_user_id):
|
|
||||||
"""
|
|
||||||
Test the stream_chat_completion function.
|
|
||||||
"""
|
|
||||||
api_key: str | None = getenv("OPEN_ROUTER_API_KEY")
|
|
||||||
if not api_key:
|
|
||||||
return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")
|
|
||||||
|
|
||||||
session = await create_chat_session(test_user_id)
|
|
||||||
|
|
||||||
has_errors = False
|
|
||||||
has_ended = False
|
|
||||||
assistant_message = ""
|
|
||||||
async for chunk in chat_service.stream_chat_completion(
|
|
||||||
session.session_id, "Hello, how are you?", user_id=session.user_id
|
|
||||||
):
|
|
||||||
logger.info(chunk)
|
|
||||||
if isinstance(chunk, StreamError):
|
|
||||||
has_errors = True
|
|
||||||
if isinstance(chunk, StreamTextDelta):
|
|
||||||
assistant_message += chunk.delta
|
|
||||||
if isinstance(chunk, StreamFinish):
|
|
||||||
has_ended = True
|
|
||||||
|
|
||||||
assert has_ended, "Chat completion did not end"
|
|
||||||
assert not has_errors, "Error occurred while streaming chat completion"
|
|
||||||
assert assistant_message, "Assistant message is empty"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio(loop_scope="session")
|
|
||||||
async def test_stream_chat_completion_with_tool_calls(setup_test_user, test_user_id):
|
|
||||||
"""
|
|
||||||
Test the stream_chat_completion function.
|
|
||||||
"""
|
|
||||||
api_key: str | None = getenv("OPEN_ROUTER_API_KEY")
|
|
||||||
if not api_key:
|
|
||||||
return pytest.skip("OPEN_ROUTER_API_KEY is not set, skipping test")
|
|
||||||
|
|
||||||
session = await create_chat_session(test_user_id)
|
|
||||||
session = await upsert_chat_session(session)
|
|
||||||
|
|
||||||
has_errors = False
|
|
||||||
has_ended = False
|
|
||||||
had_tool_calls = False
|
|
||||||
async for chunk in chat_service.stream_chat_completion(
|
|
||||||
session.session_id,
|
|
||||||
"Please find me an agent that can help me with my business. Use the query 'moneny printing agent'",
|
|
||||||
user_id=session.user_id,
|
|
||||||
):
|
|
||||||
logger.info(chunk)
|
|
||||||
if isinstance(chunk, StreamError):
|
|
||||||
has_errors = True
|
|
||||||
|
|
||||||
if isinstance(chunk, StreamFinish):
|
|
||||||
has_ended = True
|
|
||||||
if isinstance(chunk, StreamToolOutputAvailable):
|
|
||||||
had_tool_calls = True
|
|
||||||
|
|
||||||
assert has_ended, "Chat completion did not end"
|
|
||||||
assert not has_errors, "Error occurred while streaming chat completion"
|
|
||||||
assert had_tool_calls, "Tool calls did not occur"
|
|
||||||
session = await get_chat_session(session.session_id)
|
|
||||||
assert session, "Session not found"
|
|
||||||
assert session.usage, "Usage is empty"
|
|
||||||
@@ -1,139 +0,0 @@
"""Tests for block filtering in FindBlockTool."""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.api.features.chat.tools.find_block import (
    COPILOT_EXCLUDED_BLOCK_IDS,
    COPILOT_EXCLUDED_BLOCK_TYPES,
    FindBlockTool,
)
from backend.api.features.chat.tools.models import BlockListResponse
from backend.data.block import BlockType

from ._test_data import make_session

_TEST_USER_ID = "test-user-find-block"


def make_mock_block(
    block_id: str, name: str, block_type: BlockType, disabled: bool = False
):
    """Create a mock block for testing."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.description = f"{name} description"
    mock.block_type = block_type
    mock.disabled = disabled
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
    mock.input_schema.get_credentials_fields.return_value = {}
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {}
    mock.categories = []
    return mock


class TestFindBlockFiltering:
    """Tests for block filtering in FindBlockTool."""

    def test_excluded_block_types_contains_expected_types(self):
        """Verify COPILOT_EXCLUDED_BLOCK_TYPES contains all graph-only types."""
        assert BlockType.INPUT in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.OUTPUT in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.WEBHOOK in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.WEBHOOK_MANUAL in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.NOTE in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.HUMAN_IN_THE_LOOP in COPILOT_EXCLUDED_BLOCK_TYPES
        assert BlockType.AGENT in COPILOT_EXCLUDED_BLOCK_TYPES

    def test_excluded_block_ids_contains_smart_decision_maker(self):
        """Verify SmartDecisionMakerBlock is in COPILOT_EXCLUDED_BLOCK_IDS."""
        assert "3b191d9f-356f-482d-8238-ba04b6d18381" in COPILOT_EXCLUDED_BLOCK_IDS

    @pytest.mark.asyncio(loop_scope="session")
    async def test_excluded_block_type_filtered_from_results(self):
        """Verify blocks with excluded BlockTypes are filtered from search results."""
        session = make_session(user_id=_TEST_USER_ID)

        # Mock search returns an INPUT block (excluded) and a STANDARD block (included)
        search_results = [
            {"content_id": "input-block-id", "score": 0.9},
            {"content_id": "standard-block-id", "score": 0.8},
        ]

        input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)
        standard_block = make_mock_block(
            "standard-block-id", "HTTP Request", BlockType.STANDARD
        )

        def mock_get_block(block_id):
            return {
                "input-block-id": input_block,
                "standard-block-id": standard_block,
            }.get(block_id)

        with patch(
            "backend.api.features.chat.tools.find_block.unified_hybrid_search",
            new_callable=AsyncMock,
            return_value=(search_results, 2),
        ):
            with patch(
                "backend.api.features.chat.tools.find_block.get_block",
                side_effect=mock_get_block,
            ):
                tool = FindBlockTool()
                response = await tool._execute(
                    user_id=_TEST_USER_ID, session=session, query="test"
                )

        # Should only return the standard block, not the INPUT block
        assert isinstance(response, BlockListResponse)
        assert len(response.blocks) == 1
        assert response.blocks[0].id == "standard-block-id"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_excluded_block_id_filtered_from_results(self):
        """Verify SmartDecisionMakerBlock is filtered from search results."""
        session = make_session(user_id=_TEST_USER_ID)

        smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
        search_results = [
            {"content_id": smart_decision_id, "score": 0.9},
            {"content_id": "normal-block-id", "score": 0.8},
        ]

        # SmartDecisionMakerBlock has STANDARD type but is excluded by ID
        smart_block = make_mock_block(
            smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
        )
        normal_block = make_mock_block(
            "normal-block-id", "Normal Block", BlockType.STANDARD
        )

        def mock_get_block(block_id):
            return {
                smart_decision_id: smart_block,
                "normal-block-id": normal_block,
            }.get(block_id)

        with patch(
            "backend.api.features.chat.tools.find_block.unified_hybrid_search",
            new_callable=AsyncMock,
            return_value=(search_results, 2),
        ):
            with patch(
                "backend.api.features.chat.tools.find_block.get_block",
                side_effect=mock_get_block,
            ):
                tool = FindBlockTool()
                response = await tool._execute(
                    user_id=_TEST_USER_ID, session=session, query="decision"
                )

        # Should only return normal block, not SmartDecisionMakerBlock
        assert isinstance(response, BlockListResponse)
        assert len(response.blocks) == 1
        assert response.blocks[0].id == "normal-block-id"
@@ -1,106 +0,0 @@
"""Tests for block execution guards in RunBlockTool."""

from unittest.mock import MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.data.block import BlockType

from ._test_data import make_session

_TEST_USER_ID = "test-user-run-block"


def make_mock_block(
    block_id: str, name: str, block_type: BlockType, disabled: bool = False
):
    """Create a mock block for testing."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.block_type = block_type
    mock.disabled = disabled
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
    mock.input_schema.get_credentials_fields_info.return_value = []
    return mock


class TestRunBlockFiltering:
    """Tests for block execution guards in RunBlockTool."""

    @pytest.mark.asyncio(loop_scope="session")
    async def test_excluded_block_type_returns_error(self):
        """Attempting to execute a block with excluded BlockType returns error."""
        session = make_session(user_id=_TEST_USER_ID)

        input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=input_block,
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="input-block-id",
                input_data={},
            )

        assert isinstance(response, ErrorResponse)
        assert "cannot be run directly in CoPilot" in response.message
        assert "designed for use within graphs only" in response.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_excluded_block_id_returns_error(self):
        """Attempting to execute SmartDecisionMakerBlock returns error."""
        session = make_session(user_id=_TEST_USER_ID)

        smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
        smart_block = make_mock_block(
            smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=smart_block,
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id=smart_decision_id,
                input_data={},
            )

        assert isinstance(response, ErrorResponse)
        assert "cannot be run directly in CoPilot" in response.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_non_excluded_block_passes_guard(self):
        """Non-excluded blocks pass the filtering guard (may fail later for other reasons)."""
        session = make_session(user_id=_TEST_USER_ID)

        standard_block = make_mock_block(
            "standard-id", "HTTP Request", BlockType.STANDARD
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=standard_block,
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="standard-id",
                input_data={},
            )

        # Should NOT be an ErrorResponse about CoPilot exclusion
        # (may be other errors like missing credentials, but not the exclusion guard)
        if isinstance(response, ErrorResponse):
            assert "cannot be run directly in CoPilot" not in response.message
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from datetime import datetime, timedelta, timezone
-from typing import TYPE_CHECKING, Annotated, List, Literal
+from typing import TYPE_CHECKING, Annotated, Any, List, Literal
 
 from autogpt_libs.auth import get_user_id
 from fastapi import (
@@ -14,7 +14,7 @@ from fastapi import (
     Security,
     status,
 )
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, Field, SecretStr, model_validator
 from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_502_BAD_GATEWAY
 
 from backend.api.features.library.db import set_preset_webhook, update_preset
@@ -39,7 +39,11 @@ from backend.data.onboarding import OnboardingStep, complete_onboarding_step
 from backend.data.user import get_user_integrations
 from backend.executor.utils import add_graph_execution
 from backend.integrations.ayrshare import AyrshareClient, SocialPlatform
-from backend.integrations.creds_manager import IntegrationCredentialsManager
+from backend.integrations.credentials_store import provider_matches
+from backend.integrations.creds_manager import (
+    IntegrationCredentialsManager,
+    create_mcp_oauth_handler,
+)
 from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
 from backend.integrations.providers import ProviderName
 from backend.integrations.webhooks import get_webhook_manager
@@ -102,9 +106,37 @@ class CredentialsMetaResponse(BaseModel):
     scopes: list[str] | None
     username: str | None
     host: str | None = Field(
-        default=None, description="Host pattern for host-scoped credentials"
+        default=None,
+        description="Host pattern for host-scoped or MCP server URL for MCP credentials",
     )
 
+    @model_validator(mode="before")
+    @classmethod
+    def _normalize_provider(cls, data: Any) -> Any:
+        """Fix ``ProviderName.X`` format from Python 3.13 ``str(Enum)`` bug."""
+        if isinstance(data, dict):
+            prov = data.get("provider", "")
+            if isinstance(prov, str) and prov.startswith("ProviderName."):
+                member = prov.removeprefix("ProviderName.")
+                try:
+                    data = {**data, "provider": ProviderName[member].value}
+                except KeyError:
+                    pass
+        return data
+
+    @staticmethod
+    def get_host(cred: Credentials) -> str | None:
+        """Extract host from credential: HostScoped host or MCP server URL."""
+        if isinstance(cred, HostScopedCredentials):
+            return cred.host
+        if isinstance(cred, OAuth2Credentials) and cred.provider in (
+            ProviderName.MCP,
+            ProviderName.MCP.value,
+            "ProviderName.MCP",
+        ):
+            return (cred.metadata or {}).get("mcp_server_url")
+        return None
 
 @router.post("/{provider}/callback", summary="Exchange OAuth code for tokens")
 async def callback(
@@ -179,9 +211,7 @@ async def callback(
         title=credentials.title,
         scopes=credentials.scopes,
         username=credentials.username,
-        host=(
-            credentials.host if isinstance(credentials, HostScopedCredentials) else None
-        ),
+        host=(CredentialsMetaResponse.get_host(credentials)),
     )
 
 
@@ -199,7 +229,7 @@ async def list_credentials(
             title=cred.title,
             scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
             username=cred.username if isinstance(cred, OAuth2Credentials) else None,
-            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
+            host=CredentialsMetaResponse.get_host(cred),
         )
         for cred in credentials
     ]
@@ -222,7 +252,7 @@ async def list_credentials_by_provider(
             title=cred.title,
             scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None,
            username=cred.username if isinstance(cred, OAuth2Credentials) else None,
-            host=cred.host if isinstance(cred, HostScopedCredentials) else None,
+            host=CredentialsMetaResponse.get_host(cred),
         )
         for cred in credentials
     ]
@@ -322,7 +352,7 @@ async def delete_credentials(
 
     tokens_revoked = None
     if isinstance(creds, OAuth2Credentials):
-        handler = _get_provider_oauth_handler(request, provider)
+        if provider_matches(provider.value, ProviderName.MCP.value):
+            # MCP uses dynamic per-server OAuth — create handler from metadata
+            handler = create_mcp_oauth_handler(creds)
+        else:
+            handler = _get_provider_oauth_handler(request, provider)
         tokens_revoked = await handler.revoke_tokens(creds)
 
     return CredentialsDeletionResponse(revoked=tokens_revoked)
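Note: the `_normalize_provider` validator added above works around Python 3.13 stringifying a `str`-based Enum member as `ProviderName.X` rather than its value. A minimal standalone sketch of that normalization; the `ProviderName` class below is a stand-in for illustration, not the platform's own class:

    from enum import Enum

    class ProviderName(str, Enum):  # stand-in for backend.integrations.providers.ProviderName
        MCP = "mcp"

    prov = "ProviderName.MCP"  # what a stringified enum member can look like on Python 3.13
    if prov.startswith("ProviderName."):
        # Rewrite the member name back to the enum's value, as the validator does
        prov = ProviderName[prov.removeprefix("ProviderName.")].value
    assert prov == "mcp"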
@@ -12,12 +12,11 @@ import backend.api.features.store.image_gen as store_image_gen
 import backend.api.features.store.media as store_media
 import backend.data.graph as graph_db
 import backend.data.integrations as integrations_db
-from backend.data.block import BlockInput
 from backend.data.db import transaction
 from backend.data.execution import get_graph_execution
 from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
-from backend.data.model import CredentialsMetaInput
+from backend.data.model import CredentialsMetaInput, GraphInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.integrations.webhooks.graph_lifecycle_hooks import (
     on_graph_activate,
@@ -1130,7 +1129,7 @@ async def create_preset_from_graph_execution(
 async def update_preset(
     user_id: str,
     preset_id: str,
-    inputs: Optional[BlockInput] = None,
+    inputs: Optional[GraphInput] = None,
     credentials: Optional[dict[str, CredentialsMetaInput]] = None,
     name: Optional[str] = None,
     description: Optional[str] = None,

@@ -6,9 +6,12 @@ import prisma.enums
 import prisma.models
 import pydantic
 
-from backend.data.block import BlockInput
 from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
-from backend.data.model import CredentialsMetaInput, is_credentials_field_name
+from backend.data.model import (
+    CredentialsMetaInput,
+    GraphInput,
+    is_credentials_field_name,
+)
 from backend.util.json import loads as json_loads
 from backend.util.models import Pagination
@@ -323,7 +326,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
     graph_id: str
     graph_version: int
 
-    inputs: BlockInput
+    inputs: GraphInput
     credentials: dict[str, CredentialsMetaInput]
 
     name: str
@@ -352,7 +355,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
     Request model used when updating a preset for a library agent.
     """
 
-    inputs: Optional[BlockInput] = None
+    inputs: Optional[GraphInput] = None
     credentials: Optional[dict[str, CredentialsMetaInput]] = None
 
     name: Optional[str] = None
@@ -395,7 +398,7 @@ class LibraryAgentPreset(LibraryAgentPresetCreatable):
                 "Webhook must be included in AgentPreset query when webhookId is set"
             )
 
-        input_data: BlockInput = {}
+        input_data: GraphInput = {}
         input_credentials: dict[str, CredentialsMetaInput] = {}
 
         for preset_input in preset.InputPresets:
autogpt_platform/backend/backend/api/features/mcp/routes.py (new file, 404 lines)
@@ -0,0 +1,404 @@
"""
MCP (Model Context Protocol) API routes.

Provides endpoints for MCP tool discovery and OAuth authentication so the
frontend can list available tools on an MCP server before placing a block.
"""

import logging
from typing import Annotated, Any
from urllib.parse import urlparse

import fastapi
from autogpt_libs.auth import get_user_id
from fastapi import Security
from pydantic import BaseModel, Field

from backend.api.features.integrations.router import CredentialsMetaResponse
from backend.blocks.mcp.client import MCPClient, MCPClientError
from backend.blocks.mcp.oauth import MCPOAuthHandler
from backend.data.model import OAuth2Credentials
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.providers import ProviderName
from backend.util.request import HTTPClientError, Requests
from backend.util.settings import Settings

logger = logging.getLogger(__name__)

settings = Settings()
router = fastapi.APIRouter(tags=["mcp"])
creds_manager = IntegrationCredentialsManager()


# ====================== Tool Discovery ====================== #


class DiscoverToolsRequest(BaseModel):
    """Request to discover tools on an MCP server."""

    server_url: str = Field(description="URL of the MCP server")
    auth_token: str | None = Field(
        default=None,
        description="Optional Bearer token for authenticated MCP servers",
    )


class MCPToolResponse(BaseModel):
    """A single MCP tool returned by discovery."""

    name: str
    description: str
    input_schema: dict[str, Any]


class DiscoverToolsResponse(BaseModel):
    """Response containing the list of tools available on an MCP server."""

    tools: list[MCPToolResponse]
    server_name: str | None = None
    protocol_version: str | None = None


@router.post(
    "/discover-tools",
    summary="Discover available tools on an MCP server",
    response_model=DiscoverToolsResponse,
)
async def discover_tools(
    request: DiscoverToolsRequest,
    user_id: Annotated[str, Security(get_user_id)],
) -> DiscoverToolsResponse:
    """
    Connect to an MCP server and return its available tools.

    If the user has a stored MCP credential for this server URL, it will be
    used automatically — no need to pass an explicit auth token.
    """
    auth_token = request.auth_token

    # Auto-use stored MCP credential when no explicit token is provided.
    if not auth_token:
        mcp_creds = await creds_manager.store.get_creds_by_provider(
            user_id, ProviderName.MCP.value
        )
        # Find the freshest credential for this server URL
        best_cred: OAuth2Credentials | None = None
        for cred in mcp_creds:
            if (
                isinstance(cred, OAuth2Credentials)
                and (cred.metadata or {}).get("mcp_server_url") == request.server_url
            ):
                if best_cred is None or (
                    (cred.access_token_expires_at or 0)
                    > (best_cred.access_token_expires_at or 0)
                ):
                    best_cred = cred
        if best_cred:
            # Refresh the token if expired before using it
            best_cred = await creds_manager.refresh_if_needed(user_id, best_cred)
            logger.info(
                f"Using MCP credential {best_cred.id} for {request.server_url}, "
                f"expires_at={best_cred.access_token_expires_at}"
            )
            auth_token = best_cred.access_token.get_secret_value()

    client = MCPClient(request.server_url, auth_token=auth_token)

    try:
        init_result = await client.initialize()
        tools = await client.list_tools()
    except HTTPClientError as e:
        if e.status_code in (401, 403):
            raise fastapi.HTTPException(
                status_code=401,
                detail="This MCP server requires authentication. "
                "Please provide a valid auth token.",
            )
        raise fastapi.HTTPException(status_code=502, detail=str(e))
    except MCPClientError as e:
        raise fastapi.HTTPException(status_code=502, detail=str(e))
    except Exception as e:
        raise fastapi.HTTPException(
            status_code=502,
            detail=f"Failed to connect to MCP server: {e}",
        )

    return DiscoverToolsResponse(
        tools=[
            MCPToolResponse(
                name=t.name,
                description=t.description,
                input_schema=t.input_schema,
            )
            for t in tools
        ],
        server_name=(
            init_result.get("serverInfo", {}).get("name")
            or urlparse(request.server_url).hostname
            or "MCP"
        ),
        protocol_version=init_result.get("protocolVersion"),
    )


# ======================== OAuth Flow ======================== #


class MCPOAuthLoginRequest(BaseModel):
    """Request to start an OAuth flow for an MCP server."""

    server_url: str = Field(description="URL of the MCP server that requires OAuth")


class MCPOAuthLoginResponse(BaseModel):
    """Response with the OAuth login URL for the user to authenticate."""

    login_url: str
    state_token: str


@router.post(
    "/oauth/login",
    summary="Initiate OAuth login for an MCP server",
)
async def mcp_oauth_login(
    request: MCPOAuthLoginRequest,
    user_id: Annotated[str, Security(get_user_id)],
) -> MCPOAuthLoginResponse:
    """
    Discover OAuth metadata from the MCP server and return a login URL.

    1. Discovers the protected-resource metadata (RFC 9728)
    2. Fetches the authorization server metadata (RFC 8414)
    3. Performs Dynamic Client Registration (RFC 7591) if available
    4. Returns the authorization URL for the frontend to open in a popup
    """
    client = MCPClient(request.server_url)

    # Step 1: Discover protected-resource metadata (RFC 9728)
    protected_resource = await client.discover_auth()

    metadata: dict[str, Any] | None = None

    if protected_resource and protected_resource.get("authorization_servers"):
        auth_server_url = protected_resource["authorization_servers"][0]
        resource_url = protected_resource.get("resource", request.server_url)

        # Step 2a: Discover auth-server metadata (RFC 8414)
        metadata = await client.discover_auth_server_metadata(auth_server_url)
    else:
        # Fallback: Some MCP servers (e.g. Linear) are their own auth server
        # and serve OAuth metadata directly without protected-resource metadata.
        # Don't assume a resource_url — omitting it lets the auth server choose
        # the correct audience for the token (RFC 8707 resource is optional).
        resource_url = None
        metadata = await client.discover_auth_server_metadata(request.server_url)

    if (
        not metadata
        or "authorization_endpoint" not in metadata
        or "token_endpoint" not in metadata
    ):
        raise fastapi.HTTPException(
            status_code=400,
            detail="This MCP server does not advertise OAuth support. "
            "You may need to provide an auth token manually.",
        )

    authorize_url = metadata["authorization_endpoint"]
    token_url = metadata["token_endpoint"]
    registration_endpoint = metadata.get("registration_endpoint")
    revoke_url = metadata.get("revocation_endpoint")

    # Step 3: Dynamic Client Registration (RFC 7591) if available
    frontend_base_url = settings.config.frontend_base_url
    if not frontend_base_url:
        raise fastapi.HTTPException(
            status_code=500,
            detail="Frontend base URL is not configured.",
        )
    redirect_uri = f"{frontend_base_url}/auth/integrations/mcp_callback"

    client_id = ""
    client_secret = ""
    if registration_endpoint:
        reg_result = await _register_mcp_client(
            registration_endpoint, redirect_uri, request.server_url
        )
        if reg_result:
            client_id = reg_result.get("client_id", "")
            client_secret = reg_result.get("client_secret", "")

    if not client_id:
        client_id = "autogpt-platform"

    # Step 4: Store state token with OAuth metadata for the callback
    scopes = (protected_resource or {}).get("scopes_supported") or metadata.get(
        "scopes_supported", []
    )
    state_token, code_challenge = await creds_manager.store.store_state_token(
        user_id,
        ProviderName.MCP.value,
        scopes,
        state_metadata={
            "authorize_url": authorize_url,
            "token_url": token_url,
            "revoke_url": revoke_url,
            "resource_url": resource_url,
            "server_url": request.server_url,
            "client_id": client_id,
            "client_secret": client_secret,
        },
    )

    # Step 5: Build and return the login URL
    handler = MCPOAuthHandler(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=redirect_uri,
        authorize_url=authorize_url,
        token_url=token_url,
        resource_url=resource_url,
    )
    login_url = handler.get_login_url(
        scopes, state_token, code_challenge=code_challenge
    )

    return MCPOAuthLoginResponse(login_url=login_url, state_token=state_token)


class MCPOAuthCallbackRequest(BaseModel):
    """Request to exchange an OAuth code for tokens."""

    code: str = Field(description="Authorization code from OAuth callback")
    state_token: str = Field(description="State token for CSRF verification")


class MCPOAuthCallbackResponse(BaseModel):
    """Response after successfully storing OAuth credentials."""

    credential_id: str


@router.post(
    "/oauth/callback",
    summary="Exchange OAuth code for MCP tokens",
)
async def mcp_oauth_callback(
    request: MCPOAuthCallbackRequest,
    user_id: Annotated[str, Security(get_user_id)],
) -> CredentialsMetaResponse:
    """
    Exchange the authorization code for tokens and store the credential.

    The frontend calls this after receiving the OAuth code from the popup.
    On success, subsequent ``/discover-tools`` calls for the same server URL
    will automatically use the stored credential.
    """
    valid_state = await creds_manager.store.verify_state_token(
        user_id, request.state_token, ProviderName.MCP.value
    )
    if not valid_state:
        raise fastapi.HTTPException(
            status_code=400,
            detail="Invalid or expired state token.",
        )

    meta = valid_state.state_metadata
    frontend_base_url = settings.config.frontend_base_url
    if not frontend_base_url:
        raise fastapi.HTTPException(
            status_code=500,
            detail="Frontend base URL is not configured.",
        )
    redirect_uri = f"{frontend_base_url}/auth/integrations/mcp_callback"

    handler = MCPOAuthHandler(
        client_id=meta["client_id"],
        client_secret=meta.get("client_secret", ""),
        redirect_uri=redirect_uri,
        authorize_url=meta["authorize_url"],
        token_url=meta["token_url"],
        revoke_url=meta.get("revoke_url"),
        resource_url=meta.get("resource_url"),
    )

    try:
        credentials = await handler.exchange_code_for_tokens(
            request.code, valid_state.scopes, valid_state.code_verifier
        )
    except Exception as e:
        raise fastapi.HTTPException(
            status_code=400,
            detail=f"OAuth token exchange failed: {e}",
        )

    # Enrich credential metadata for future lookup and token refresh
    if credentials.metadata is None:
        credentials.metadata = {}
    credentials.metadata["mcp_server_url"] = meta["server_url"]
    credentials.metadata["mcp_client_id"] = meta["client_id"]
    credentials.metadata["mcp_client_secret"] = meta.get("client_secret", "")
    credentials.metadata["mcp_token_url"] = meta["token_url"]
    credentials.metadata["mcp_resource_url"] = meta.get("resource_url", "")

    hostname = urlparse(meta["server_url"]).hostname or meta["server_url"]
    credentials.title = f"MCP: {hostname}"

    # Remove old MCP credentials for the same server to prevent stale token buildup.
    try:
        old_creds = await creds_manager.store.get_creds_by_provider(
            user_id, ProviderName.MCP.value
        )
        for old in old_creds:
            if (
                isinstance(old, OAuth2Credentials)
                and (old.metadata or {}).get("mcp_server_url") == meta["server_url"]
            ):
                await creds_manager.store.delete_creds_by_id(user_id, old.id)
                logger.info(
                    f"Removed old MCP credential {old.id} for {meta['server_url']}"
                )
    except Exception:
        logger.debug("Could not clean up old MCP credentials", exc_info=True)

    await creds_manager.create(user_id, credentials)

    return CredentialsMetaResponse(
        id=credentials.id,
        provider=credentials.provider,
        type=credentials.type,
        title=credentials.title,
        scopes=credentials.scopes,
        username=credentials.username,
        host=credentials.metadata.get("mcp_server_url"),
    )


# ======================== Helpers ======================== #


async def _register_mcp_client(
    registration_endpoint: str,
    redirect_uri: str,
    server_url: str,
) -> dict[str, Any] | None:
    """Attempt Dynamic Client Registration (RFC 7591) with an MCP auth server."""
    try:
        response = await Requests(raise_for_status=True).post(
            registration_endpoint,
            json={
                "client_name": "AutoGPT Platform",
                "redirect_uris": [redirect_uri],
                "grant_types": ["authorization_code"],
                "response_types": ["code"],
                "token_endpoint_auth_method": "client_secret_post",
            },
        )
        data = response.json()
        if isinstance(data, dict) and "client_id" in data:
            return data
        return None
    except Exception as e:
        logger.warning(f"Dynamic client registration failed for {server_url}: {e}")
        return None
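Note: a minimal sketch of how a client might exercise the new discovery endpoint once this router is mounted; the mount path, port, and bearer token below are assumptions for illustration, not part of this diff.

    import asyncio

    import httpx

    async def list_mcp_tools(base_url: str, token: str, server_url: str) -> list[dict]:
        # POST /discover-tools returns the tools the MCP server advertises.
        async with httpx.AsyncClient(base_url=base_url) as client:
            resp = await client.post(
                "/discover-tools",
                json={"server_url": server_url},
                headers={"Authorization": f"Bearer {token}"},
            )
            resp.raise_for_status()
            return resp.json()["tools"]

    # Example call (assumed mount point and credentials):
    # asyncio.run(list_mcp_tools("http://localhost:8006/api/mcp", "<jwt>", "https://mcp.example.com/mcp"))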
autogpt_platform/backend/backend/api/features/mcp/test_routes.py (new file, 436 lines)
@@ -0,0 +1,436 @@
"""Tests for MCP API routes.

Uses httpx.AsyncClient with ASGITransport instead of fastapi.testclient.TestClient
to avoid creating blocking portals that can corrupt pytest-asyncio's session event loop.
"""

from unittest.mock import AsyncMock, patch

import fastapi
import httpx
import pytest
import pytest_asyncio
from autogpt_libs.auth import get_user_id

from backend.api.features.mcp.routes import router
from backend.blocks.mcp.client import MCPClientError, MCPTool
from backend.util.request import HTTPClientError

app = fastapi.FastAPI()
app.include_router(router)
app.dependency_overrides[get_user_id] = lambda: "test-user-id"


@pytest_asyncio.fixture(scope="module")
async def client():
    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as c:
        yield c


class TestDiscoverTools:
    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_success(self, client):
        mock_tools = [
            MCPTool(
                name="get_weather",
                description="Get weather for a city",
                input_schema={
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            ),
            MCPTool(
                name="add_numbers",
                description="Add two numbers",
                input_schema={
                    "type": "object",
                    "properties": {
                        "a": {"type": "number"},
                        "b": {"type": "number"},
                    },
                },
            ),
        ]

        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                return_value={
                    "protocolVersion": "2025-03-26",
                    "serverInfo": {"name": "test-server"},
                }
            )
            instance.list_tools = AsyncMock(return_value=mock_tools)

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://mcp.example.com/mcp"},
            )

        assert response.status_code == 200
        data = response.json()
        assert len(data["tools"]) == 2
        assert data["tools"][0]["name"] == "get_weather"
        assert data["tools"][1]["name"] == "add_numbers"
        assert data["server_name"] == "test-server"
        assert data["protocol_version"] == "2025-03-26"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_with_auth_token(self, client):
        with patch("backend.api.features.mcp.routes.MCPClient") as MockClient:
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                return_value={"serverInfo": {}, "protocolVersion": "2025-03-26"}
            )
            instance.list_tools = AsyncMock(return_value=[])

            response = await client.post(
                "/discover-tools",
                json={
                    "server_url": "https://mcp.example.com/mcp",
                    "auth_token": "my-secret-token",
                },
            )

        assert response.status_code == 200
        MockClient.assert_called_once_with(
            "https://mcp.example.com/mcp",
            auth_token="my-secret-token",
        )

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_auto_uses_stored_credential(self, client):
        """When no explicit token is given, stored MCP credentials are used."""
        from pydantic import SecretStr

        from backend.data.model import OAuth2Credentials

        stored_cred = OAuth2Credentials(
            provider="mcp",
            title="MCP: example.com",
            access_token=SecretStr("stored-token-123"),
            refresh_token=None,
            access_token_expires_at=None,
            refresh_token_expires_at=None,
            scopes=[],
            metadata={"mcp_server_url": "https://mcp.example.com/mcp"},
        )

        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[stored_cred])
            mock_cm.refresh_if_needed = AsyncMock(return_value=stored_cred)
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                return_value={"serverInfo": {}, "protocolVersion": "2025-03-26"}
            )
            instance.list_tools = AsyncMock(return_value=[])

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://mcp.example.com/mcp"},
            )

        assert response.status_code == 200
        MockClient.assert_called_once_with(
            "https://mcp.example.com/mcp",
            auth_token="stored-token-123",
        )

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_mcp_error(self, client):
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                side_effect=MCPClientError("Connection refused")
            )

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://bad-server.example.com/mcp"},
            )

        assert response.status_code == 502
        assert "Connection refused" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_generic_error(self, client):
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])
            instance = MockClient.return_value
            instance.initialize = AsyncMock(side_effect=Exception("Network timeout"))

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://timeout.example.com/mcp"},
            )

        assert response.status_code == 502
        assert "Failed to connect" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_auth_required(self, client):
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                side_effect=HTTPClientError("HTTP 401 Error: Unauthorized", 401)
            )

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://auth-server.example.com/mcp"},
            )

        assert response.status_code == 401
        assert "requires authentication" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_forbidden(self, client):
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
        ):
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])
            instance = MockClient.return_value
            instance.initialize = AsyncMock(
                side_effect=HTTPClientError("HTTP 403 Error: Forbidden", 403)
            )

            response = await client.post(
                "/discover-tools",
                json={"server_url": "https://auth-server.example.com/mcp"},
            )

        assert response.status_code == 401
        assert "requires authentication" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_discover_tools_missing_url(self, client):
        response = await client.post("/discover-tools", json={})
        assert response.status_code == 422


class TestOAuthLogin:
    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_login_success(self, client):
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
            patch("backend.api.features.mcp.routes.settings") as mock_settings,
            patch(
                "backend.api.features.mcp.routes._register_mcp_client"
            ) as mock_register,
        ):
            instance = MockClient.return_value
            instance.discover_auth = AsyncMock(
                return_value={
                    "authorization_servers": ["https://auth.sentry.io"],
                    "resource": "https://mcp.sentry.dev/mcp",
                    "scopes_supported": ["openid"],
                }
            )
            instance.discover_auth_server_metadata = AsyncMock(
                return_value={
                    "authorization_endpoint": "https://auth.sentry.io/authorize",
                    "token_endpoint": "https://auth.sentry.io/token",
                    "registration_endpoint": "https://auth.sentry.io/register",
                }
            )
            mock_register.return_value = {
                "client_id": "registered-client-id",
                "client_secret": "registered-secret",
            }
            mock_cm.store.store_state_token = AsyncMock(
                return_value=("state-token-123", "code-challenge-abc")
            )
            mock_settings.config.frontend_base_url = "http://localhost:3000"

            response = await client.post(
                "/oauth/login",
                json={"server_url": "https://mcp.sentry.dev/mcp"},
            )

        assert response.status_code == 200
        data = response.json()
        assert "login_url" in data
        assert data["state_token"] == "state-token-123"
        assert "auth.sentry.io/authorize" in data["login_url"]
        assert "registered-client-id" in data["login_url"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_login_no_oauth_support(self, client):
        with patch("backend.api.features.mcp.routes.MCPClient") as MockClient:
            instance = MockClient.return_value
            instance.discover_auth = AsyncMock(return_value=None)
            instance.discover_auth_server_metadata = AsyncMock(return_value=None)

            response = await client.post(
                "/oauth/login",
                json={"server_url": "https://simple-server.example.com/mcp"},
            )

        assert response.status_code == 400
        assert "does not advertise OAuth" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_login_fallback_to_public_client(self, client):
        """When DCR is unavailable, falls back to default public client ID."""
        with (
            patch("backend.api.features.mcp.routes.MCPClient") as MockClient,
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
            patch("backend.api.features.mcp.routes.settings") as mock_settings,
        ):
            instance = MockClient.return_value
            instance.discover_auth = AsyncMock(
                return_value={
                    "authorization_servers": ["https://auth.example.com"],
                    "resource": "https://mcp.example.com/mcp",
                }
            )
            instance.discover_auth_server_metadata = AsyncMock(
                return_value={
                    "authorization_endpoint": "https://auth.example.com/authorize",
                    "token_endpoint": "https://auth.example.com/token",
                    # No registration_endpoint
                }
            )
            mock_cm.store.store_state_token = AsyncMock(
                return_value=("state-abc", "challenge-xyz")
            )
            mock_settings.config.frontend_base_url = "http://localhost:3000"

            response = await client.post(
                "/oauth/login",
                json={"server_url": "https://mcp.example.com/mcp"},
            )

        assert response.status_code == 200
        data = response.json()
        assert "autogpt-platform" in data["login_url"]


class TestOAuthCallback:
    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_callback_success(self, client):
        from pydantic import SecretStr

        from backend.data.model import OAuth2Credentials

        mock_creds = OAuth2Credentials(
            provider="mcp",
            title=None,
            access_token=SecretStr("access-token-xyz"),
            refresh_token=None,
            access_token_expires_at=None,
            refresh_token_expires_at=None,
            scopes=[],
            metadata={
                "mcp_token_url": "https://auth.sentry.io/token",
                "mcp_resource_url": "https://mcp.sentry.dev/mcp",
            },
        )

        with (
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
            patch("backend.api.features.mcp.routes.settings") as mock_settings,
            patch("backend.api.features.mcp.routes.MCPOAuthHandler") as MockHandler,
        ):
            mock_settings.config.frontend_base_url = "http://localhost:3000"

            # Mock state verification
            mock_state = AsyncMock()
            mock_state.state_metadata = {
                "authorize_url": "https://auth.sentry.io/authorize",
                "token_url": "https://auth.sentry.io/token",
                "client_id": "test-client-id",
                "client_secret": "test-secret",
                "server_url": "https://mcp.sentry.dev/mcp",
            }
            mock_state.scopes = ["openid"]
            mock_state.code_verifier = "verifier-123"
            mock_cm.store.verify_state_token = AsyncMock(return_value=mock_state)
            mock_cm.create = AsyncMock()

            handler_instance = MockHandler.return_value
            handler_instance.exchange_code_for_tokens = AsyncMock(
                return_value=mock_creds
            )

            # Mock old credential cleanup
            mock_cm.store.get_creds_by_provider = AsyncMock(return_value=[])

            response = await client.post(
                "/oauth/callback",
                json={"code": "auth-code-abc", "state_token": "state-token-123"},
            )

        assert response.status_code == 200
        data = response.json()
        assert "id" in data
        assert data["provider"] == "mcp"
        assert data["type"] == "oauth2"
        mock_cm.create.assert_called_once()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_callback_invalid_state(self, client):
        with patch("backend.api.features.mcp.routes.creds_manager") as mock_cm:
            mock_cm.store.verify_state_token = AsyncMock(return_value=None)

            response = await client.post(
                "/oauth/callback",
                json={"code": "auth-code", "state_token": "bad-state"},
            )

        assert response.status_code == 400
        assert "Invalid or expired" in response.json()["detail"]

    @pytest.mark.asyncio(loop_scope="session")
    async def test_oauth_callback_token_exchange_fails(self, client):
        with (
            patch("backend.api.features.mcp.routes.creds_manager") as mock_cm,
            patch("backend.api.features.mcp.routes.settings") as mock_settings,
            patch("backend.api.features.mcp.routes.MCPOAuthHandler") as MockHandler,
        ):
            mock_settings.config.frontend_base_url = "http://localhost:3000"
            mock_state = AsyncMock()
            mock_state.state_metadata = {
                "authorize_url": "https://auth.example.com/authorize",
                "token_url": "https://auth.example.com/token",
                "client_id": "cid",
                "server_url": "https://mcp.example.com/mcp",
            }
            mock_state.scopes = []
            mock_state.code_verifier = "v"
            mock_cm.store.verify_state_token = AsyncMock(return_value=mock_state)

            handler_instance = MockHandler.return_value
            handler_instance.exchange_code_for_tokens = AsyncMock(
                side_effect=RuntimeError("Token exchange failed")
            )

            response = await client.post(
                "/oauth/callback",
                json={"code": "bad-code", "state_token": "state"},
            )

        assert response.status_code == 400
        assert "token exchange failed" in response.json()["detail"].lower()
@@ -5,8 +5,8 @@ from typing import Optional
 import aiohttp
 from fastapi import HTTPException
 
+from backend.blocks import get_block
 from backend.data import graph as graph_db
-from backend.data.block import get_block
 from backend.util.settings import Settings
 
 from .models import ApiResponse, ChatRequest, GraphData
@@ -152,7 +152,7 @@ class BlockHandler(ContentHandler):
 
     async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
         """Fetch blocks without embeddings."""
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks
 
         # Get all available blocks
         all_blocks = get_blocks()
@@ -249,7 +249,7 @@ class BlockHandler(ContentHandler):
 
     async def get_stats(self) -> dict[str, int]:
         """Get statistics about block embedding coverage."""
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks
 
         all_blocks = get_blocks()
 
@@ -93,7 +93,7 @@ async def test_block_handler_get_missing_items(mocker):
     mock_existing = []
 
     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -135,7 +135,7 @@ async def test_block_handler_get_stats(mocker):
     mock_embedded = [{"count": 2}]
 
     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -327,7 +327,7 @@ async def test_block_handler_handles_missing_attributes():
     mock_blocks = {"block-minimal": mock_block_class}
 
     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -360,7 +360,7 @@ async def test_block_handler_skips_failed_blocks():
     mock_blocks = {"good-block": good_block, "bad-block": bad_block}
 
     with patch(
-        "backend.data.block.get_blocks",
+        "backend.blocks.get_blocks",
         return_value=mock_blocks,
     ):
         with patch(
@@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
         )
         current_ids = {row["id"] for row in valid_agents}
     elif content_type == ContentType.BLOCK:
-        from backend.data.block import get_blocks
+        from backend.blocks import get_blocks
 
         current_ids = set(get_blocks().keys())
     elif content_type == ContentType.DOCUMENTATION:
|||||||
@@ -7,15 +7,6 @@ from replicate.client import Client as ReplicateClient
 from replicate.exceptions import ReplicateError
 from replicate.helpers import FileOutput
 
-from backend.blocks.ideogram import (
-    AspectRatio,
-    ColorPalettePreset,
-    IdeogramModelBlock,
-    IdeogramModelName,
-    MagicPromptOption,
-    StyleType,
-    UpscaleOption,
-)
 from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials
@@ -50,6 +41,16 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
     if not ideogram_credentials.api_key:
         raise ValueError("Missing Ideogram API key")
 
+    from backend.blocks.ideogram import (
+        AspectRatio,
+        ColorPalettePreset,
+        IdeogramModelBlock,
+        IdeogramModelName,
+        MagicPromptOption,
+        StyleType,
+        UpscaleOption,
+    )
+
     name = graph.name
     description = f"{name} ({graph.description})" if graph.description else name
 
|||||||
@@ -40,10 +40,11 @@ from backend.api.model import (
     UpdateTimezoneRequest,
     UploadFileResponse,
 )
+from backend.blocks import get_block, get_blocks
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
 from backend.data.auth import api_key as api_key_db
-from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
+from backend.data.block import BlockInput, CompletedBlockOutput
 from backend.data.credit import (
     AutoTopUpConfig,
     RefundRequest,
|||||||
@@ -11,7 +11,7 @@ import fastapi
 from autogpt_libs.auth.dependencies import get_user_id, requires_user
 from fastapi.responses import Response
 
-from backend.data.workspace import get_workspace, get_workspace_file
+from backend.data.workspace import WorkspaceFile, get_workspace, get_workspace_file
 from backend.util.workspace_storage import get_workspace_storage
 
 
||||||
@@ -44,11 +44,11 @@ router = fastapi.APIRouter(
 )
 
 
-def _create_streaming_response(content: bytes, file) -> Response:
+def _create_streaming_response(content: bytes, file: WorkspaceFile) -> Response:
     """Create a streaming response for file content."""
     return Response(
         content=content,
-        media_type=file.mimeType,
+        media_type=file.mime_type,
         headers={
             "Content-Disposition": _sanitize_filename_for_header(file.name),
             "Content-Length": str(len(content)),
||||||
@@ -56,7 +56,7 @@ def _create_streaming_response(content: bytes, file) -> Response:
     )
 
 
-async def _create_file_download_response(file) -> Response:
+async def _create_file_download_response(file: WorkspaceFile) -> Response:
     """
     Create a download response for a workspace file.
 
||||||
@@ -66,33 +66,33 @@ async def _create_file_download_response(file) -> Response:
     storage = await get_workspace_storage()
 
     # For local storage, stream the file directly
-    if file.storagePath.startswith("local://"):
-        content = await storage.retrieve(file.storagePath)
+    if file.storage_path.startswith("local://"):
+        content = await storage.retrieve(file.storage_path)
         return _create_streaming_response(content, file)
 
     # For GCS, try to redirect to signed URL, fall back to streaming
     try:
-        url = await storage.get_download_url(file.storagePath, expires_in=300)
+        url = await storage.get_download_url(file.storage_path, expires_in=300)
         # If we got back an API path (fallback), stream directly instead
         if url.startswith("/api/"):
-            content = await storage.retrieve(file.storagePath)
+            content = await storage.retrieve(file.storage_path)
             return _create_streaming_response(content, file)
         return fastapi.responses.RedirectResponse(url=url, status_code=302)
     except Exception as e:
         # Log the signed URL failure with context
         logger.error(
             f"Failed to get signed URL for file {file.id} "
-            f"(storagePath={file.storagePath}): {e}",
+            f"(storagePath={file.storage_path}): {e}",
             exc_info=True,
         )
         # Fall back to streaming directly from GCS
         try:
-            content = await storage.retrieve(file.storagePath)
+            content = await storage.retrieve(file.storage_path)
             return _create_streaming_response(content, file)
         except Exception as fallback_error:
             logger.error(
                 f"Fallback streaming also failed for file {file.id} "
-                f"(storagePath={file.storagePath}): {fallback_error}",
+                f"(storagePath={file.storage_path}): {fallback_error}",
                 exc_info=True,
             )
             raise
|||||||
@@ -26,6 +26,7 @@ import backend.api.features.executions.review.routes
 import backend.api.features.library.db
 import backend.api.features.library.model
 import backend.api.features.library.routes
+import backend.api.features.mcp.routes as mcp_routes
 import backend.api.features.oauth
 import backend.api.features.otto.routes
 import backend.api.features.postmark.postmark
||||||
@@ -40,11 +41,11 @@ import backend.data.user
 import backend.integrations.webhooks.utils
 import backend.util.service
 import backend.util.settings
-from backend.api.features.chat.completion_consumer import (
+from backend.blocks.llm import DEFAULT_LLM_MODEL
+from backend.copilot.completion_consumer import (
     start_completion_consumer,
     stop_completion_consumer,
 )
-from backend.blocks.llm import DEFAULT_LLM_MODEL
 from backend.data.model import Credentials
 from backend.integrations.providers import ProviderName
 from backend.monitoring.instrumentation import instrument_fastapi
||||||
@@ -343,6 +344,11 @@ app.include_router(
     tags=["workspace"],
     prefix="/api/workspace",
 )
+app.include_router(
+    mcp_routes.router,
+    tags=["v2", "mcp"],
+    prefix="/api/mcp",
+)
 app.include_router(
     backend.api.features.oauth.router,
     tags=["oauth"],
|||||||
@@ -38,7 +38,9 @@ def main(**kwargs):
 
     from backend.api.rest_api import AgentServer
     from backend.api.ws_api import WebsocketServer
-    from backend.executor import DatabaseManager, ExecutionManager, Scheduler
+    from backend.copilot.executor.manager import CoPilotExecutor
+    from backend.data.db_manager import DatabaseManager
+    from backend.executor import ExecutionManager, Scheduler
     from backend.notifications import NotificationManager
 
     run_processes(
@@ -48,6 +50,7 @@ def main(**kwargs):
         WebsocketServer(),
         AgentServer(),
         ExecutionManager(),
+        CoPilotExecutor(),
         **kwargs,
     )
 
|||||||
@@ -3,22 +3,19 @@ import logging
 import os
 import re
 from pathlib import Path
-from typing import TYPE_CHECKING, TypeVar
+from typing import Sequence, Type, TypeVar
 
+from backend.blocks._base import AnyBlockSchema, BlockType
 from backend.util.cache import cached
 
 logger = logging.getLogger(__name__)
 
 
-if TYPE_CHECKING:
-    from backend.data.block import Block
-
 T = TypeVar("T")
 
 
 @cached(ttl_seconds=3600)
-def load_all_blocks() -> dict[str, type["Block"]]:
-    from backend.data.block import Block
+def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
+    from backend.blocks._base import Block
     from backend.util.settings import Config
 
     # Check if example blocks should be loaded from settings
||||||
@@ -50,8 +47,8 @@ def load_all_blocks() -> dict[str, type["Block"]]:
         importlib.import_module(f".{module}", package=__name__)
 
     # Load all Block instances from the available modules
-    available_blocks: dict[str, type["Block"]] = {}
-    for block_cls in all_subclasses(Block):
+    available_blocks: dict[str, type["AnyBlockSchema"]] = {}
+    for block_cls in _all_subclasses(Block):
         class_name = block_cls.__name__
 
         if class_name.endswith("Base"):
||||||
@@ -64,7 +61,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
                 "please name the class with 'Base' at the end"
             )
 
-        block = block_cls.create()
+        block = block_cls()  # pyright: ignore[reportAbstractUsage]
 
         if not isinstance(block.id, str) or len(block.id) != 36:
             raise ValueError(
||||||
@@ -105,7 +102,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
         available_blocks[block.id] = block_cls
 
     # Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets
-    from backend.data.block import is_block_auth_configured
+    from ._utils import is_block_auth_configured
 
     filtered_blocks = {}
     for block_id, block_cls in available_blocks.items():
||||||
@@ -115,11 +112,48 @@ def load_all_blocks() -> dict[str, type["Block"]]:
     return filtered_blocks
 
 
-__all__ = ["load_all_blocks"]
-
-
-def all_subclasses(cls: type[T]) -> list[type[T]]:
+def _all_subclasses(cls: type[T]) -> list[type[T]]:
     subclasses = cls.__subclasses__()
     for subclass in subclasses:
-        subclasses += all_subclasses(subclass)
+        subclasses += _all_subclasses(subclass)
     return subclasses
 
 
+# ============== Block access helper functions ============== #
+
+
+def get_blocks() -> dict[str, Type["AnyBlockSchema"]]:
+    return load_all_blocks()
+
+
+# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281
+def get_block(block_id: str) -> "AnyBlockSchema | None":
+    cls = get_blocks().get(block_id)
+    return cls() if cls else None
+
+
+@cached(ttl_seconds=3600)
+def get_webhook_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL)
+    ]
+
+
+@cached(ttl_seconds=3600)
+def get_io_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
+    ]
+
+
+@cached(ttl_seconds=3600)
+def get_human_in_the_loop_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type == BlockType.HUMAN_IN_THE_LOOP
+    ]
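For orientation (not part of the diff): a minimal sketch of how callers are expected to use the relocated accessors above; the block id below is a placeholder, not a real one.

from backend.blocks import get_block, get_blocks

# Look up a single block instance by id; returns None for unknown ids.
block = get_block("00000000-0000-0000-0000-000000000000")  # placeholder id

# Or enumerate every loaded block class and instantiate on demand.
for block_id, block_cls in get_blocks().items():
    print(block_id, block_cls.__name__)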
740 autogpt_platform/backend/backend/blocks/_base.py (new file)
@@ -0,0 +1,740 @@
|
|||||||
|
import inspect
|
||||||
|
import logging
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from enum import Enum
|
||||||
|
from typing import (
|
||||||
|
TYPE_CHECKING,
|
||||||
|
Any,
|
||||||
|
Callable,
|
||||||
|
ClassVar,
|
||||||
|
Generic,
|
||||||
|
Optional,
|
||||||
|
Type,
|
||||||
|
TypeAlias,
|
||||||
|
TypeVar,
|
||||||
|
cast,
|
||||||
|
get_origin,
|
||||||
|
)
|
||||||
|
|
||||||
|
import jsonref
|
||||||
|
import jsonschema
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry
|
||||||
|
from backend.data.model import (
|
||||||
|
Credentials,
|
||||||
|
CredentialsFieldInfo,
|
||||||
|
CredentialsMetaInput,
|
||||||
|
SchemaField,
|
||||||
|
is_credentials_field_name,
|
||||||
|
)
|
||||||
|
from backend.integrations.providers import ProviderName
|
||||||
|
from backend.util import json
|
||||||
|
from backend.util.exceptions import (
|
||||||
|
BlockError,
|
||||||
|
BlockExecutionError,
|
||||||
|
BlockInputError,
|
||||||
|
BlockOutputError,
|
||||||
|
BlockUnknownError,
|
||||||
|
)
|
||||||
|
from backend.util.settings import Config
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import ContributorDetails, NodeExecutionStats
|
||||||
|
|
||||||
|
from ..data.graph import Link
|
||||||
|
|
||||||
|
app_config = Config()
|
||||||
|
|
||||||
|
|
||||||
|
BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]]
|
||||||
|
|
||||||
|
|
||||||
|
class BlockType(Enum):
|
||||||
|
STANDARD = "Standard"
|
||||||
|
INPUT = "Input"
|
||||||
|
OUTPUT = "Output"
|
||||||
|
NOTE = "Note"
|
||||||
|
WEBHOOK = "Webhook"
|
||||||
|
WEBHOOK_MANUAL = "Webhook (manual)"
|
||||||
|
AGENT = "Agent"
|
||||||
|
AI = "AI"
|
||||||
|
AYRSHARE = "Ayrshare"
|
||||||
|
HUMAN_IN_THE_LOOP = "Human In The Loop"
|
||||||
|
MCP_TOOL = "MCP Tool"
|
||||||
|
|
||||||
|
|
||||||
|
class BlockCategory(Enum):
|
||||||
|
AI = "Block that leverages AI to perform a task."
|
||||||
|
SOCIAL = "Block that interacts with social media platforms."
|
||||||
|
TEXT = "Block that processes text data."
|
||||||
|
SEARCH = "Block that searches or extracts information from the internet."
|
||||||
|
BASIC = "Block that performs basic operations."
|
||||||
|
INPUT = "Block that interacts with input of the graph."
|
||||||
|
OUTPUT = "Block that interacts with output of the graph."
|
||||||
|
LOGIC = "Programming logic to control the flow of your agent"
|
||||||
|
COMMUNICATION = "Block that interacts with communication platforms."
|
||||||
|
DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
|
||||||
|
DATA = "Block that interacts with structured data."
|
||||||
|
HARDWARE = "Block that interacts with hardware."
|
||||||
|
AGENT = "Block that interacts with other agents."
|
||||||
|
CRM = "Block that interacts with CRM services."
|
||||||
|
SAFETY = (
|
||||||
|
"Block that provides AI safety mechanisms such as detecting harmful content"
|
||||||
|
)
|
||||||
|
PRODUCTIVITY = "Block that helps with productivity"
|
||||||
|
ISSUE_TRACKING = "Block that helps with issue tracking"
|
||||||
|
MULTIMEDIA = "Block that interacts with multimedia content"
|
||||||
|
MARKETING = "Block that helps with marketing"
|
||||||
|
|
||||||
|
def dict(self) -> dict[str, str]:
|
||||||
|
return {"category": self.name, "description": self.value}
|
||||||
|
|
||||||
|
|
||||||
|
class BlockCostType(str, Enum):
|
||||||
|
RUN = "run" # cost X credits per run
|
||||||
|
BYTE = "byte" # cost X credits per byte
|
||||||
|
SECOND = "second" # cost X credits per second
|
||||||
|
|
||||||
|
|
||||||
|
class BlockCost(BaseModel):
|
||||||
|
cost_amount: int
|
||||||
|
cost_filter: BlockInput
|
||||||
|
cost_type: BlockCostType
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
cost_amount: int,
|
||||||
|
cost_type: BlockCostType = BlockCostType.RUN,
|
||||||
|
cost_filter: Optional[BlockInput] = None,
|
||||||
|
**data: Any,
|
||||||
|
) -> None:
|
||||||
|
super().__init__(
|
||||||
|
cost_amount=cost_amount,
|
||||||
|
cost_filter=cost_filter or {},
|
||||||
|
cost_type=cost_type,
|
||||||
|
**data,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class BlockInfo(BaseModel):
|
||||||
|
id: str
|
||||||
|
name: str
|
||||||
|
inputSchema: dict[str, Any]
|
||||||
|
outputSchema: dict[str, Any]
|
||||||
|
costs: list[BlockCost]
|
||||||
|
description: str
|
||||||
|
categories: list[dict[str, str]]
|
||||||
|
contributors: list[dict[str, Any]]
|
||||||
|
staticOutput: bool
|
||||||
|
uiType: str
|
||||||
|
|
||||||
|
|
||||||
|
class BlockSchema(BaseModel):
|
||||||
|
cached_jsonschema: ClassVar[dict[str, Any]]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def jsonschema(cls) -> dict[str, Any]:
|
||||||
|
if cls.cached_jsonschema:
|
||||||
|
return cls.cached_jsonschema
|
||||||
|
|
||||||
|
model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
|
||||||
|
|
||||||
|
def ref_to_dict(obj):
|
||||||
|
if isinstance(obj, dict):
|
||||||
|
# OpenAPI <3.1 does not support sibling fields that has a $ref key
|
||||||
|
# So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
|
||||||
|
keys = {"allOf", "anyOf", "oneOf"}
|
||||||
|
one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
|
||||||
|
if one_key:
|
||||||
|
obj.update(obj[one_key][0])
|
||||||
|
|
||||||
|
return {
|
||||||
|
key: ref_to_dict(value)
|
||||||
|
for key, value in obj.items()
|
||||||
|
if not key.startswith("$") and key != one_key
|
||||||
|
}
|
||||||
|
elif isinstance(obj, list):
|
||||||
|
return [ref_to_dict(item) for item in obj]
|
||||||
|
|
||||||
|
return obj
|
||||||
|
|
||||||
|
cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
|
||||||
|
|
||||||
|
return cls.cached_jsonschema
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def validate_data(cls, data: BlockInput) -> str | None:
|
||||||
|
return json.validate_with_jsonschema(
|
||||||
|
schema=cls.jsonschema(),
|
||||||
|
data={k: v for k, v in data.items() if v is not None},
|
||||||
|
)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_mismatch_error(cls, data: BlockInput) -> str | None:
|
||||||
|
return cls.validate_data(data)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_field_schema(cls, field_name: str) -> dict[str, Any]:
|
||||||
|
model_schema = cls.jsonschema().get("properties", {})
|
||||||
|
if not model_schema:
|
||||||
|
raise ValueError(f"Invalid model schema {cls}")
|
||||||
|
|
||||||
|
property_schema = model_schema.get(field_name)
|
||||||
|
if not property_schema:
|
||||||
|
raise ValueError(f"Invalid property name {field_name}")
|
||||||
|
|
||||||
|
return property_schema
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
|
||||||
|
"""
|
||||||
|
Validate the data against a specific property (one of the input/output name).
|
||||||
|
Returns the validation error message if the data does not match the schema.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
property_schema = cls.get_field_schema(field_name)
|
||||||
|
jsonschema.validate(json.to_dict(data), property_schema)
|
||||||
|
return None
|
||||||
|
except jsonschema.ValidationError as e:
|
||||||
|
return str(e)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_fields(cls) -> set[str]:
|
||||||
|
return set(cls.model_fields.keys())
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_required_fields(cls) -> set[str]:
|
||||||
|
return {
|
||||||
|
field
|
||||||
|
for field, field_info in cls.model_fields.items()
|
||||||
|
if field_info.is_required()
|
||||||
|
}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def __pydantic_init_subclass__(cls, **kwargs):
|
||||||
|
"""Validates the schema definition. Rules:
|
||||||
|
- Fields with annotation `CredentialsMetaInput` MUST be
|
||||||
|
named `credentials` or `*_credentials`
|
||||||
|
- Fields named `credentials` or `*_credentials` MUST be
|
||||||
|
of type `CredentialsMetaInput`
|
||||||
|
"""
|
||||||
|
super().__pydantic_init_subclass__(**kwargs)
|
||||||
|
|
||||||
|
# Reset cached JSON schema to prevent inheriting it from parent class
|
||||||
|
cls.cached_jsonschema = {}
|
||||||
|
|
||||||
|
credentials_fields = cls.get_credentials_fields()
|
||||||
|
|
||||||
|
for field_name in cls.get_fields():
|
||||||
|
if is_credentials_field_name(field_name):
|
||||||
|
if field_name not in credentials_fields:
|
||||||
|
raise TypeError(
|
||||||
|
f"Credentials field '{field_name}' on {cls.__qualname__} "
|
||||||
|
f"is not of type {CredentialsMetaInput.__name__}"
|
||||||
|
)
|
||||||
|
|
||||||
|
CredentialsMetaInput.validate_credentials_field_schema(
|
||||||
|
cls.get_field_schema(field_name), field_name
|
||||||
|
)
|
||||||
|
|
||||||
|
elif field_name in credentials_fields:
|
||||||
|
raise KeyError(
|
||||||
|
f"Credentials field '{field_name}' on {cls.__qualname__} "
|
||||||
|
"has invalid name: must be 'credentials' or *_credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]:
|
||||||
|
return {
|
||||||
|
field_name: info.annotation
|
||||||
|
for field_name, info in cls.model_fields.items()
|
||||||
|
if (
|
||||||
|
inspect.isclass(info.annotation)
|
||||||
|
and issubclass(
|
||||||
|
get_origin(info.annotation) or info.annotation,
|
||||||
|
CredentialsMetaInput,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
|
||||||
|
|
||||||
|
Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If multiple fields have the same kwarg_name, as this would
|
||||||
|
cause silent overwriting and only the last field would be processed.
|
||||||
|
"""
|
||||||
|
result: dict[str, dict[str, Any]] = {}
|
||||||
|
schema = cls.jsonschema()
|
||||||
|
properties = schema.get("properties", {})
|
||||||
|
|
||||||
|
for field_name, field_schema in properties.items():
|
||||||
|
auto_creds = field_schema.get("auto_credentials")
|
||||||
|
if auto_creds:
|
||||||
|
kwarg_name = auto_creds.get("kwarg_name", "credentials")
|
||||||
|
if kwarg_name in result:
|
||||||
|
raise ValueError(
|
||||||
|
f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
|
||||||
|
f"in fields '{result[kwarg_name]['field_name']}' and "
|
||||||
|
f"'{field_name}' on {cls.__qualname__}"
|
||||||
|
)
|
||||||
|
result[kwarg_name] = {
|
||||||
|
"field_name": field_name,
|
||||||
|
"config": auto_creds,
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
|
||||||
|
result = {}
|
||||||
|
|
||||||
|
# Regular credentials fields
|
||||||
|
for field_name in cls.get_credentials_fields().keys():
|
||||||
|
result[field_name] = CredentialsFieldInfo.model_validate(
|
||||||
|
cls.get_field_schema(field_name), by_alias=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
|
||||||
|
for kwarg_name, info in cls.get_auto_credentials_fields().items():
|
||||||
|
config = info["config"]
|
||||||
|
# Build a schema-like dict that CredentialsFieldInfo can parse
|
||||||
|
auto_schema = {
|
||||||
|
"credentials_provider": [config.get("provider", "google")],
|
||||||
|
"credentials_types": [config.get("type", "oauth2")],
|
||||||
|
"credentials_scopes": config.get("scopes"),
|
||||||
|
}
|
||||||
|
result[kwarg_name] = CredentialsFieldInfo.model_validate(
|
||||||
|
auto_schema, by_alias=True
|
||||||
|
)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
|
||||||
|
return data # Return as is, by default.
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
|
||||||
|
input_fields_from_nodes = {link.sink_name for link in links}
|
||||||
|
return input_fields_from_nodes - set(data)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_missing_input(cls, data: BlockInput) -> set[str]:
|
||||||
|
return cls.get_required_fields() - set(data)
|
||||||
|
|
||||||
|
|
||||||
|
class BlockSchemaInput(BlockSchema):
|
||||||
|
"""
|
||||||
|
Base schema class for block inputs.
|
||||||
|
All block input schemas should extend this class for consistency.
|
||||||
|
"""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class BlockSchemaOutput(BlockSchema):
|
||||||
|
"""
|
||||||
|
Base schema class for block outputs that includes a standard error field.
|
||||||
|
All block output schemas should extend this class to ensure consistent error handling.
|
||||||
|
"""
|
||||||
|
|
||||||
|
error: str = SchemaField(
|
||||||
|
description="Error message if the operation failed", default=""
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput)
|
||||||
|
BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput)
|
||||||
|
|
||||||
|
|
||||||
|
class EmptyInputSchema(BlockSchemaInput):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class EmptyOutputSchema(BlockSchemaOutput):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# For backward compatibility - will be deprecated
|
||||||
|
EmptySchema = EmptyOutputSchema
|
||||||
|
|
||||||
|
|
||||||
|
# --8<-- [start:BlockWebhookConfig]
|
||||||
|
class BlockManualWebhookConfig(BaseModel):
|
||||||
|
"""
|
||||||
|
Configuration model for webhook-triggered blocks on which
|
||||||
|
the user has to manually set up the webhook at the provider.
|
||||||
|
"""
|
||||||
|
|
||||||
|
provider: ProviderName
|
||||||
|
"""The service provider that the webhook connects to"""
|
||||||
|
|
||||||
|
webhook_type: str
|
||||||
|
"""
|
||||||
|
Identifier for the webhook type. E.g. GitHub has repo and organization level hooks.
|
||||||
|
|
||||||
|
Only for use in the corresponding `WebhooksManager`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
event_filter_input: str = ""
|
||||||
|
"""
|
||||||
|
Name of the block's event filter input.
|
||||||
|
Leave empty if the corresponding webhook doesn't have distinct event/payload types.
|
||||||
|
"""
|
||||||
|
|
||||||
|
event_format: str = "{event}"
|
||||||
|
"""
|
||||||
|
Template string for the event(s) that a block instance subscribes to.
|
||||||
|
Applied individually to each event selected in the event filter input.
|
||||||
|
|
||||||
|
Example: `"pull_request.{event}"` -> `"pull_request.opened"`
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class BlockWebhookConfig(BlockManualWebhookConfig):
|
||||||
|
"""
|
||||||
|
Configuration model for webhook-triggered blocks for which
|
||||||
|
the webhook can be automatically set up through the provider's API.
|
||||||
|
"""
|
||||||
|
|
||||||
|
resource_format: str
|
||||||
|
"""
|
||||||
|
Template string for the resource that a block instance subscribes to.
|
||||||
|
Fields will be filled from the block's inputs (except `payload`).
|
||||||
|
|
||||||
|
Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)
|
||||||
|
|
||||||
|
Only for use in the corresponding `WebhooksManager`.
|
||||||
|
"""
|
||||||
|
# --8<-- [end:BlockWebhookConfig]
|
||||||
|
|
||||||
|
|
||||||
|
class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
id: str = "",
|
||||||
|
description: str = "",
|
||||||
|
contributors: list["ContributorDetails"] = [],
|
||||||
|
categories: set[BlockCategory] | None = None,
|
||||||
|
input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
|
||||||
|
output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
|
||||||
|
test_input: BlockInput | list[BlockInput] | None = None,
|
||||||
|
test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
|
||||||
|
test_mock: dict[str, Any] | None = None,
|
||||||
|
test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
|
||||||
|
disabled: bool = False,
|
||||||
|
static_output: bool = False,
|
||||||
|
block_type: BlockType = BlockType.STANDARD,
|
||||||
|
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
|
||||||
|
is_sensitive_action: bool = False,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize the block with the given schema.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
id: The unique identifier for the block, this value will be persisted in the
|
||||||
|
DB. So it should be a unique and constant across the application run.
|
||||||
|
Use the UUID format for the ID.
|
||||||
|
description: The description of the block, explaining what the block does.
|
||||||
|
contributors: The list of contributors who contributed to the block.
|
||||||
|
input_schema: The schema, defined as a Pydantic model, for the input data.
|
||||||
|
output_schema: The schema, defined as a Pydantic model, for the output data.
|
||||||
|
test_input: The list or single sample input data for the block, for testing.
|
||||||
|
test_output: The list or single expected output if the test_input is run.
|
||||||
|
test_mock: function names on the block implementation to mock on test run.
|
||||||
|
disabled: If the block is disabled, it will not be available for execution.
|
||||||
|
static_output: Whether the output links of the block are static by default.
|
||||||
|
"""
|
||||||
|
from backend.data.model import NodeExecutionStats
|
||||||
|
|
||||||
|
self.id = id
|
||||||
|
self.input_schema = input_schema
|
||||||
|
self.output_schema = output_schema
|
||||||
|
self.test_input = test_input
|
||||||
|
self.test_output = test_output
|
||||||
|
self.test_mock = test_mock
|
||||||
|
self.test_credentials = test_credentials
|
||||||
|
self.description = description
|
||||||
|
self.categories = categories or set()
|
||||||
|
self.contributors = contributors or set()
|
||||||
|
self.disabled = disabled
|
||||||
|
self.static_output = static_output
|
||||||
|
self.block_type = block_type
|
||||||
|
self.webhook_config = webhook_config
|
||||||
|
self.is_sensitive_action = is_sensitive_action
|
||||||
|
self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()
|
||||||
|
|
||||||
|
if self.webhook_config:
|
||||||
|
if isinstance(self.webhook_config, BlockWebhookConfig):
|
||||||
|
# Enforce presence of credentials field on auto-setup webhook blocks
|
||||||
|
if not (cred_fields := self.input_schema.get_credentials_fields()):
|
||||||
|
raise TypeError(
|
||||||
|
"credentials field is required on auto-setup webhook blocks"
|
||||||
|
)
|
||||||
|
# Disallow multiple credentials inputs on webhook blocks
|
||||||
|
elif len(cred_fields) > 1:
|
||||||
|
raise ValueError(
|
||||||
|
"Multiple credentials inputs not supported on webhook blocks"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.block_type = BlockType.WEBHOOK
|
||||||
|
else:
|
||||||
|
self.block_type = BlockType.WEBHOOK_MANUAL
|
||||||
|
|
||||||
|
# Enforce shape of webhook event filter, if present
|
||||||
|
if self.webhook_config.event_filter_input:
|
||||||
|
event_filter_field = self.input_schema.model_fields[
|
||||||
|
self.webhook_config.event_filter_input
|
||||||
|
]
|
||||||
|
if not (
|
||||||
|
isinstance(event_filter_field.annotation, type)
|
||||||
|
and issubclass(event_filter_field.annotation, BaseModel)
|
||||||
|
and all(
|
||||||
|
field.annotation is bool
|
||||||
|
for field in event_filter_field.annotation.model_fields.values()
|
||||||
|
)
|
||||||
|
):
|
||||||
|
raise NotImplementedError(
|
||||||
|
f"{self.name} has an invalid webhook event selector: "
|
||||||
|
"field must be a BaseModel and all its fields must be boolean"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Enforce presence of 'payload' input
|
||||||
|
if "payload" not in self.input_schema.model_fields:
|
||||||
|
raise TypeError(
|
||||||
|
f"{self.name} is webhook-triggered but has no 'payload' input"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Disable webhook-triggered block if webhook functionality not available
|
||||||
|
if not app_config.platform_base_url:
|
||||||
|
self.disabled = True
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
|
||||||
|
"""
|
||||||
|
Run the block with the given input data.
|
||||||
|
Args:
|
||||||
|
input_data: The input data with the structure of input_schema.
|
||||||
|
|
||||||
|
Kwargs: Currently 14/02/2025 these include
|
||||||
|
graph_id: The ID of the graph.
|
||||||
|
node_id: The ID of the node.
|
||||||
|
graph_exec_id: The ID of the graph execution.
|
||||||
|
node_exec_id: The ID of the node execution.
|
||||||
|
user_id: The ID of the user.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A Generator that yields (output_name, output_data).
|
||||||
|
output_name: One of the output name defined in Block's output_schema.
|
||||||
|
output_data: The data for the output_name, matching the defined schema.
|
||||||
|
"""
|
||||||
|
# --- satisfy the type checker, never executed -------------
|
||||||
|
if False: # noqa: SIM115
|
||||||
|
yield "name", "value" # pyright: ignore[reportMissingYield]
|
||||||
|
raise NotImplementedError(f"{self.name} does not implement the run method.")
|
||||||
|
|
||||||
|
async def run_once(
|
||||||
|
self, input_data: BlockSchemaInputType, output: str, **kwargs
|
||||||
|
) -> Any:
|
||||||
|
async for item in self.run(input_data, **kwargs):
|
||||||
|
name, data = item
|
||||||
|
if name == output:
|
||||||
|
return data
|
||||||
|
raise ValueError(f"{self.name} did not produce any output for {output}")
|
||||||
|
|
||||||
|
def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats":
|
||||||
|
self.execution_stats += stats
|
||||||
|
return self.execution_stats
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self):
|
||||||
|
return self.__class__.__name__
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
"id": self.id,
|
||||||
|
"name": self.name,
|
||||||
|
"inputSchema": self.input_schema.jsonschema(),
|
||||||
|
"outputSchema": self.output_schema.jsonschema(),
|
||||||
|
"description": self.description,
|
||||||
|
"categories": [category.dict() for category in self.categories],
|
||||||
|
"contributors": [
|
||||||
|
contributor.model_dump() for contributor in self.contributors
|
||||||
|
],
|
||||||
|
"staticOutput": self.static_output,
|
||||||
|
"uiType": self.block_type.value,
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_info(self) -> BlockInfo:
|
||||||
|
from backend.data.credit import get_block_cost
|
||||||
|
|
||||||
|
return BlockInfo(
|
||||||
|
id=self.id,
|
||||||
|
name=self.name,
|
||||||
|
inputSchema=self.input_schema.jsonschema(),
|
||||||
|
outputSchema=self.output_schema.jsonschema(),
|
||||||
|
costs=get_block_cost(self),
|
||||||
|
description=self.description,
|
||||||
|
categories=[category.dict() for category in self.categories],
|
||||||
|
contributors=[
|
||||||
|
contributor.model_dump() for contributor in self.contributors
|
||||||
|
],
|
||||||
|
staticOutput=self.static_output,
|
||||||
|
uiType=self.block_type.value,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
|
||||||
|
try:
|
||||||
|
async for output_name, output_data in self._execute(input_data, **kwargs):
|
||||||
|
yield output_name, output_data
|
||||||
|
except Exception as ex:
|
||||||
|
if isinstance(ex, BlockError):
|
||||||
|
raise ex
|
||||||
|
else:
|
||||||
|
raise (
|
||||||
|
BlockExecutionError
|
||||||
|
if isinstance(ex, ValueError)
|
||||||
|
else BlockUnknownError
|
||||||
|
)(
|
||||||
|
message=str(ex),
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=self.id,
|
||||||
|
) from ex
|
||||||
|
|
||||||
|
async def is_block_exec_need_review(
|
||||||
|
self,
|
||||||
|
input_data: BlockInput,
|
||||||
|
*,
|
||||||
|
user_id: str,
|
||||||
|
node_id: str,
|
||||||
|
node_exec_id: str,
|
||||||
|
graph_exec_id: str,
|
||||||
|
graph_id: str,
|
||||||
|
graph_version: int,
|
||||||
|
execution_context: "ExecutionContext",
|
||||||
|
**kwargs,
|
||||||
|
) -> tuple[bool, BlockInput]:
|
||||||
|
"""
|
||||||
|
Check if this block execution needs human review and handle the review process.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (should_pause, input_data_to_use)
|
||||||
|
- should_pause: True if execution should be paused for review
|
||||||
|
- input_data_to_use: The input data to use (may be modified by reviewer)
|
||||||
|
"""
|
||||||
|
if not (
|
||||||
|
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
|
||||||
|
):
|
||||||
|
return False, input_data
|
||||||
|
|
||||||
|
from backend.blocks.helpers.review import HITLReviewHelper
|
||||||
|
|
||||||
|
# Handle the review request and get decision
|
||||||
|
decision = await HITLReviewHelper.handle_review_decision(
|
||||||
|
input_data=input_data,
|
||||||
|
user_id=user_id,
|
||||||
|
node_id=node_id,
|
||||||
|
node_exec_id=node_exec_id,
|
||||||
|
graph_exec_id=graph_exec_id,
|
||||||
|
graph_id=graph_id,
|
||||||
|
graph_version=graph_version,
|
||||||
|
block_name=self.name,
|
||||||
|
editable=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
if decision is None:
|
||||||
|
# We're awaiting review - pause execution
|
||||||
|
return True, input_data
|
||||||
|
|
||||||
|
if not decision.should_proceed:
|
||||||
|
# Review was rejected, raise an error to stop execution
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Block execution rejected by reviewer: {decision.message}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=self.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Review was approved - use the potentially modified data
|
||||||
|
# ReviewResult.data must be a dict for block inputs
|
||||||
|
reviewed_data = decision.review_result.data
|
||||||
|
if not isinstance(reviewed_data, dict):
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=self.id,
|
||||||
|
)
|
||||||
|
return False, reviewed_data
|
||||||
|
|
||||||
|
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
|
||||||
|
# Check for review requirement only if running within a graph execution context
|
||||||
|
# Direct block execution (e.g., from chat) skips the review process
|
||||||
|
has_graph_context = all(
|
||||||
|
key in kwargs
|
||||||
|
for key in (
|
||||||
|
"node_exec_id",
|
||||||
|
"graph_exec_id",
|
||||||
|
"graph_id",
|
||||||
|
"execution_context",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if has_graph_context:
|
||||||
|
should_pause, input_data = await self.is_block_exec_need_review(
|
||||||
|
input_data, **kwargs
|
||||||
|
)
|
||||||
|
if should_pause:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Validate the input data (original or reviewer-modified) once
|
||||||
|
if error := self.input_schema.validate_data(input_data):
|
||||||
|
raise BlockInputError(
|
||||||
|
message=f"Unable to execute block with invalid input data: {error}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=self.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Use the validated input data
|
||||||
|
async for output_name, output_data in self.run(
|
||||||
|
self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
|
if output_name == "error":
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=output_data, block_name=self.name, block_id=self.id
|
||||||
|
)
|
||||||
|
if self.block_type == BlockType.STANDARD and (
|
||||||
|
error := self.output_schema.validate_field(output_name, output_data)
|
||||||
|
):
|
||||||
|
raise BlockOutputError(
|
||||||
|
message=f"Block produced an invalid output data: {error}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=self.id,
|
||||||
|
)
|
||||||
|
yield output_name, output_data
|
||||||
|
|
||||||
|
def is_triggered_by_event_type(
|
||||||
|
self, trigger_config: dict[str, Any], event_type: str
|
||||||
|
) -> bool:
|
||||||
|
if not self.webhook_config:
|
||||||
|
raise TypeError("This method can't be used on non-trigger blocks")
|
||||||
|
if not self.webhook_config.event_filter_input:
|
||||||
|
return True
|
||||||
|
event_filter = trigger_config.get(self.webhook_config.event_filter_input)
|
||||||
|
if not event_filter:
|
||||||
|
raise ValueError("Event filter is not configured on trigger")
|
||||||
|
return event_type in [
|
||||||
|
self.webhook_config.event_format.format(event=k)
|
||||||
|
for k in event_filter
|
||||||
|
if event_filter[k] is True
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# Type alias for any block with standard input/output schemas
|
||||||
|
AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]
|
||||||
122 autogpt_platform/backend/backend/blocks/_utils.py (new file)
@@ -0,0 +1,122 @@
import logging
import os

from backend.integrations.providers import ProviderName

from ._base import AnyBlockSchema

logger = logging.getLogger(__name__)


def is_block_auth_configured(
    block_cls: type[AnyBlockSchema],
) -> bool:
    """
    Check if a block has a valid authentication method configured at runtime.

    For example if a block is an OAuth-only block and there env vars are not set,
    do not show it in the UI.

    """
    from backend.sdk.registry import AutoRegistry

    # Create an instance to access input_schema
    try:
        block = block_cls()
    except Exception as e:
        # If we can't create a block instance, assume it's not OAuth-only
        logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
        return True
    logger.debug(
        f"Checking if block {block_cls.__name__} has a valid provider configured"
    )

    # Get all credential inputs from input schema
    credential_inputs = block.input_schema.get_credentials_fields_info()
    required_inputs = block.input_schema.get_required_fields()
    if not credential_inputs:
        logger.debug(
            f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
        )
        return True

    # Check credential inputs
    if len(required_inputs.intersection(credential_inputs.keys())) == 0:
        logger.debug(
            f"Block {block_cls.__name__} has only optional credential inputs"
            " - will work without credentials configured"
        )

    # Check if the credential inputs for this block are correctly configured
    for field_name, field_info in credential_inputs.items():
        provider_names = field_info.provider
        if not provider_names:
            logger.warning(
                f"Block {block_cls.__name__} "
                f"has credential input '{field_name}' with no provider options"
                " - Disabling"
            )
            return False

        # If a field has multiple possible providers, each one needs to be usable to
        # prevent breaking the UX
        for _provider_name in provider_names:
            provider_name = _provider_name.value
            if provider_name in ProviderName.__members__.values():
                logger.debug(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"provider '{provider_name}' is part of the legacy provider system"
                    " - Treating as valid"
                )
                break

            provider = AutoRegistry.get_provider(provider_name)
            if not provider:
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"refers to unknown provider '{provider_name}' - Disabling"
                )
                return False

            # Check the provider's supported auth types
            if field_info.supported_types != provider.supported_auth_types:
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"has mismatched supported auth types (field <> Provider): "
                    f"{field_info.supported_types} != {provider.supported_auth_types}"
                )

            if not (supported_auth_types := provider.supported_auth_types):
                # No auth methods are been configured for this provider
                logger.warning(
                    f"Block {block_cls.__name__} credential input '{field_name}' "
                    f"provider '{provider_name}' "
                    "has no authentication methods configured - Disabling"
                )
                return False

            # Check if provider supports OAuth
            if "oauth2" in supported_auth_types:
                # Check if OAuth environment variables are set
                if (oauth_config := provider.oauth_config) and bool(
                    os.getenv(oauth_config.client_id_env_var)
                    and os.getenv(oauth_config.client_secret_env_var)
                ):
                    logger.debug(
                        f"Block {block_cls.__name__} credential input '{field_name}' "
                        f"provider '{provider_name}' is configured for OAuth"
                    )
                else:
                    logger.error(
                        f"Block {block_cls.__name__} credential input '{field_name}' "
                        f"provider '{provider_name}' "
                        "is missing OAuth client ID or secret - Disabling"
                    )
                    return False

        logger.debug(
            f"Block {block_cls.__name__} credential input '{field_name}' is valid; "
            f"supported credential types: {', '.join(field_info.supported_types)}"
        )

    return True
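A minimal sketch (not in the diff) of how the loader is expected to consume this check, mirroring the load_all_blocks change shown earlier; available_blocks stands in for the dict the loader builds.

from backend.blocks._utils import is_block_auth_configured

filtered_blocks = {}
for block_id, block_cls in available_blocks.items():
    # Keep only blocks whose credential providers are fully configured.
    if is_block_auth_configured(block_cls):
        filtered_blocks[block_id] = block_cls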
|
||||||
@@ -1,7 +1,7 @@
 import logging
-from typing import Any, Optional
+from typing import TYPE_CHECKING, Any, Optional
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockInput,
@@ -9,13 +9,15 @@ from backend.data.block import (
     BlockSchema,
     BlockSchemaInput,
     BlockType,
-    get_block,
 )
 from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
 from backend.data.model import NodeExecutionStats, SchemaField
 from backend.util.json import validate_with_jsonschema
 from backend.util.retry import func_retry
 
+if TYPE_CHECKING:
+    from backend.executor.utils import LogMetadata
+
 _logger = logging.getLogger(__name__)
 
 
||||||
@@ -124,9 +126,10 @@ class AgentExecutorBlock(Block):
         graph_version: int,
         graph_exec_id: str,
         user_id: str,
-        logger,
+        logger: "LogMetadata",
     ) -> BlockOutput:
 
+        from backend.blocks import get_block
         from backend.data.execution import ExecutionEventType
         from backend.executor import utils as execution_utils
 
@@ -198,7 +201,7 @@ class AgentExecutorBlock(Block):
         self,
         graph_exec_id: str,
         user_id: str,
-        logger,
+        logger: "LogMetadata",
     ) -> None:
         from backend.executor import utils as execution_utils
 
|
|||||||
@@ -1,5 +1,11 @@
 from typing import Any
 
+from backend.blocks._base import (
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.llm import (
     DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
@@ -11,12 +17,6 @@ from backend.blocks.llm import (
     LLMResponse,
     llm_call,
 )
-from backend.data.block import (
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField
 
 
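For illustration only (not part of the diff): what a simple block written against the relocated backend.blocks._base classes might look like after this refactor. The block name, id, and fields below are hypothetical.

from backend.blocks._base import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField


class EchoBlock(Block):
    class Input(BlockSchemaInput):
        text: str = SchemaField(description="Text to echo back")

    class Output(BlockSchemaOutput):
        text: str = SchemaField(description="The echoed text", default="")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Echoes its input back as output",
            categories={BlockCategory.BASIC},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks yield (output_name, output_data) pairs.
        yield "text", input_data.text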
|
|||||||
@@ -6,7 +6,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
|
|||||||
@@ -5,7 +5,12 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.execution import ExecutionContext
 from backend.data.model import (
     APIKeyCredentials,
|
|||||||
@@ -6,7 +6,7 @@ from typing import Literal
 from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
|
|||||||
@@ -6,7 +6,7 @@ from typing import Literal
 
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
|
|||||||
@@ -1,3 +1,10 @@
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.apollo._api import ApolloClient
 from backend.blocks.apollo._auth import (
     TEST_CREDENTIALS,
@@ -10,13 +17,6 @@ from backend.blocks.apollo.models import (
     PrimaryPhone,
     SearchOrganizationsRequest,
 )
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import CredentialsField, SchemaField
 
 
|
|||||||
@@ -1,5 +1,12 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
|
from backend.blocks._base import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
from backend.blocks.apollo._api import ApolloClient
|
from backend.blocks.apollo._api import ApolloClient
|
||||||
from backend.blocks.apollo._auth import (
|
from backend.blocks.apollo._auth import (
|
||||||
TEST_CREDENTIALS,
|
TEST_CREDENTIALS,
|
||||||
@@ -14,13 +21,6 @@ from backend.blocks.apollo.models import (
|
|||||||
SearchPeopleRequest,
|
SearchPeopleRequest,
|
||||||
SenorityLevels,
|
SenorityLevels,
|
||||||
)
|
)
|
||||||
from backend.data.block import (
|
|
||||||
Block,
|
|
||||||
BlockCategory,
|
|
||||||
BlockOutput,
|
|
||||||
BlockSchemaInput,
|
|
||||||
BlockSchemaOutput,
|
|
||||||
)
|
|
||||||
from backend.data.model import CredentialsField, SchemaField
|
from backend.data.model import CredentialsField, SchemaField
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,10 @@
|
|||||||
|
from backend.blocks._base import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
from backend.blocks.apollo._api import ApolloClient
|
from backend.blocks.apollo._api import ApolloClient
|
||||||
from backend.blocks.apollo._auth import (
|
from backend.blocks.apollo._auth import (
|
||||||
TEST_CREDENTIALS,
|
TEST_CREDENTIALS,
|
||||||
@@ -6,13 +13,6 @@ from backend.blocks.apollo._auth import (
|
|||||||
ApolloCredentialsInput,
|
ApolloCredentialsInput,
|
||||||
)
|
)
|
||||||
from backend.blocks.apollo.models import Contact, EnrichPersonRequest
|
from backend.blocks.apollo.models import Contact, EnrichPersonRequest
|
||||||
from backend.data.block import (
|
|
||||||
Block,
|
|
||||||
BlockCategory,
|
|
||||||
BlockOutput,
|
|
||||||
BlockSchemaInput,
|
|
||||||
BlockSchemaOutput,
|
|
||||||
)
|
|
||||||
from backend.data.model import CredentialsField, SchemaField
|
from backend.data.model import CredentialsField, SchemaField
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from typing import Optional
|
|||||||
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
from backend.data.block import BlockSchemaInput
|
from backend.blocks._base import BlockSchemaInput
|
||||||
from backend.data.model import SchemaField, UserIntegrations
|
from backend.data.model import SchemaField, UserIntegrations
|
||||||
from backend.integrations.ayrshare import AyrshareClient
|
from backend.integrations.ayrshare import AyrshareClient
|
||||||
from backend.util.clients import get_database_manager_async_client
|
from backend.util.clients import get_database_manager_async_client
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import enum
|
import enum
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -126,6 +126,7 @@ class PrintToConsoleBlock(Block):
|
|||||||
output_schema=PrintToConsoleBlock.Output,
|
output_schema=PrintToConsoleBlock.Output,
|
||||||
test_input={"text": "Hello, World!"},
|
test_input={"text": "Hello, World!"},
|
||||||
is_sensitive_action=True,
|
is_sensitive_action=True,
|
||||||
|
disabled=True, # Disabled per Nick Tindle's request (OPEN-3000)
|
||||||
test_output=[
|
test_output=[
|
||||||
("output", "Hello, World!"),
|
("output", "Hello, World!"),
|
||||||
("status", "printed"),
|
("status", "printed"),
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ import os
|
|||||||
import re
|
import re
|
||||||
from typing import Type
|
from typing import Type
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
import json
|
import json
|
||||||
import shlex
|
import shlex
|
||||||
import uuid
|
import uuid
|
||||||
from typing import Literal, Optional
|
from typing import TYPE_CHECKING, Literal, Optional
|
||||||
|
|
||||||
from e2b import AsyncSandbox as BaseAsyncSandbox
|
from e2b import AsyncSandbox as BaseAsyncSandbox
|
||||||
from pydantic import BaseModel, SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -20,6 +20,13 @@ from backend.data.model import (
|
|||||||
SchemaField,
|
SchemaField,
|
||||||
)
|
)
|
||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
|
from backend.util.sandbox_files import (
|
||||||
|
SandboxFileOutput,
|
||||||
|
extract_and_store_sandbox_files,
|
||||||
|
)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from backend.executor.utils import ExecutionContext
|
||||||
|
|
||||||
|
|
||||||
class ClaudeCodeExecutionError(Exception):
|
class ClaudeCodeExecutionError(Exception):
|
||||||
@@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block):
|
|||||||
advanced=True,
|
advanced=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
class FileOutput(BaseModel):
|
|
||||||
"""A file extracted from the sandbox."""
|
|
||||||
|
|
||||||
path: str
|
|
||||||
relative_path: str # Path relative to working directory (for GitHub, etc.)
|
|
||||||
name: str
|
|
||||||
content: str
|
|
||||||
|
|
||||||
class Output(BlockSchemaOutput):
|
class Output(BlockSchemaOutput):
|
||||||
response: str = SchemaField(
|
response: str = SchemaField(
|
||||||
description="The output/response from Claude Code execution"
|
description="The output/response from Claude Code execution"
|
||||||
)
|
)
|
||||||
files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
|
files: list[SandboxFileOutput] = SchemaField(
|
||||||
description=(
|
description=(
|
||||||
"List of text files created/modified by Claude Code during this execution. "
|
"List of text files created/modified by Claude Code during this execution. "
|
||||||
"Each file has 'path', 'relative_path', 'name', and 'content' fields."
|
"Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
|
||||||
|
"workspace_ref contains a workspace:// URI if the file was stored to workspace."
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
conversation_history: str = SchemaField(
|
conversation_history: str = SchemaField(
|
||||||
@@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block):
|
|||||||
"relative_path": "index.html",
|
"relative_path": "index.html",
|
||||||
"name": "index.html",
|
"name": "index.html",
|
||||||
"content": "<html>Hello World</html>",
|
"content": "<html>Hello World</html>",
|
||||||
|
"workspace_ref": None,
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
),
|
),
|
||||||
@@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block):
|
|||||||
"execute_claude_code": lambda *args, **kwargs: (
|
"execute_claude_code": lambda *args, **kwargs: (
|
||||||
"Created index.html with hello world content", # response
|
"Created index.html with hello world content", # response
|
||||||
[
|
[
|
||||||
ClaudeCodeBlock.FileOutput(
|
SandboxFileOutput(
|
||||||
path="/home/user/index.html",
|
path="/home/user/index.html",
|
||||||
relative_path="index.html",
|
relative_path="index.html",
|
||||||
name="index.html",
|
name="index.html",
|
||||||
content="<html>Hello World</html>",
|
content="<html>Hello World</html>",
|
||||||
|
workspace_ref=None,
|
||||||
)
|
)
|
||||||
], # files
|
], # files
|
||||||
"User: Create a hello world HTML file\n"
|
"User: Create a hello world HTML file\n"
|
||||||
@@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block):
|
|||||||
existing_sandbox_id: str,
|
existing_sandbox_id: str,
|
||||||
conversation_history: str,
|
conversation_history: str,
|
||||||
dispose_sandbox: bool,
|
dispose_sandbox: bool,
|
||||||
) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
|
execution_context: "ExecutionContext",
|
||||||
|
) -> tuple[str, list[SandboxFileOutput], str, str, str]:
|
||||||
"""
|
"""
|
||||||
Execute Claude Code in an E2B sandbox.
|
Execute Claude Code in an E2B sandbox.
|
||||||
|
|
||||||
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block):
|
|||||||
else:
|
else:
|
||||||
new_conversation_history = turn_entry
|
new_conversation_history = turn_entry
|
||||||
|
|
||||||
# Extract files created/modified during this run
|
# Extract files created/modified during this run and store to workspace
|
||||||
files = await self._extract_files(
|
sandbox_files = await extract_and_store_sandbox_files(
|
||||||
sandbox, working_directory, start_timestamp
|
sandbox=sandbox,
|
||||||
|
working_directory=working_directory,
|
||||||
|
execution_context=execution_context,
|
||||||
|
since_timestamp=start_timestamp,
|
||||||
|
text_only=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
return (
|
return (
|
||||||
response,
|
response,
|
||||||
files,
|
sandbox_files, # Already SandboxFileOutput objects
|
||||||
new_conversation_history,
|
new_conversation_history,
|
||||||
current_session_id,
|
current_session_id,
|
||||||
sandbox_id,
|
sandbox_id,
|
||||||
@@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block):
|
|||||||
if dispose_sandbox and sandbox:
|
if dispose_sandbox and sandbox:
|
||||||
await sandbox.kill()
|
await sandbox.kill()
|
||||||
|
|
||||||
async def _extract_files(
|
|
||||||
self,
|
|
||||||
sandbox: BaseAsyncSandbox,
|
|
||||||
working_directory: str,
|
|
||||||
since_timestamp: str | None = None,
|
|
||||||
) -> list["ClaudeCodeBlock.FileOutput"]:
|
|
||||||
"""
|
|
||||||
Extract text files created/modified during this Claude Code execution.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
sandbox: The E2B sandbox instance
|
|
||||||
working_directory: Directory to search for files
|
|
||||||
since_timestamp: ISO timestamp - only return files modified after this time
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of FileOutput objects with path, relative_path, name, and content
|
|
||||||
"""
|
|
||||||
files: list[ClaudeCodeBlock.FileOutput] = []
|
|
||||||
|
|
||||||
# Text file extensions we can safely read as text
|
|
||||||
text_extensions = {
|
|
||||||
".txt",
|
|
||||||
".md",
|
|
||||||
".html",
|
|
||||||
".htm",
|
|
||||||
".css",
|
|
||||||
".js",
|
|
||||||
".ts",
|
|
||||||
".jsx",
|
|
||||||
".tsx",
|
|
||||||
".json",
|
|
||||||
".xml",
|
|
||||||
".yaml",
|
|
||||||
".yml",
|
|
||||||
".toml",
|
|
||||||
".ini",
|
|
||||||
".cfg",
|
|
||||||
".conf",
|
|
||||||
".py",
|
|
||||||
".rb",
|
|
||||||
".php",
|
|
||||||
".java",
|
|
||||||
".c",
|
|
||||||
".cpp",
|
|
||||||
".h",
|
|
||||||
".hpp",
|
|
||||||
".cs",
|
|
||||||
".go",
|
|
||||||
".rs",
|
|
||||||
".swift",
|
|
||||||
".kt",
|
|
||||||
".scala",
|
|
||||||
".sh",
|
|
||||||
".bash",
|
|
||||||
".zsh",
|
|
||||||
".sql",
|
|
||||||
".graphql",
|
|
||||||
".env",
|
|
||||||
".gitignore",
|
|
||||||
".dockerfile",
|
|
||||||
"Dockerfile",
|
|
||||||
".vue",
|
|
||||||
".svelte",
|
|
||||||
".astro",
|
|
||||||
".mdx",
|
|
||||||
".rst",
|
|
||||||
".tex",
|
|
||||||
".csv",
|
|
||||||
".log",
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
# List files recursively using find command
|
|
||||||
# Exclude node_modules and .git directories, but allow hidden files
|
|
||||||
# like .env and .gitignore (they're filtered by text_extensions later)
|
|
||||||
# Filter by timestamp to only get files created/modified during this run
|
|
||||||
safe_working_dir = shlex.quote(working_directory)
|
|
||||||
timestamp_filter = ""
|
|
||||||
if since_timestamp:
|
|
||||||
timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
|
|
||||||
find_result = await sandbox.commands.run(
|
|
||||||
f"find {safe_working_dir} -type f "
|
|
||||||
f"{timestamp_filter}"
|
|
||||||
f"-not -path '*/node_modules/*' "
|
|
||||||
f"-not -path '*/.git/*' "
|
|
||||||
f"2>/dev/null"
|
|
||||||
)
|
|
||||||
|
|
||||||
if find_result.stdout:
|
|
||||||
for file_path in find_result.stdout.strip().split("\n"):
|
|
||||||
if not file_path:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Check if it's a text file we can read
|
|
||||||
is_text = any(
|
|
||||||
file_path.endswith(ext) for ext in text_extensions
|
|
||||||
) or file_path.endswith("Dockerfile")
|
|
||||||
|
|
||||||
if is_text:
|
|
||||||
try:
|
|
||||||
content = await sandbox.files.read(file_path)
|
|
||||||
# Handle bytes or string
|
|
||||||
if isinstance(content, bytes):
|
|
||||||
content = content.decode("utf-8", errors="replace")
|
|
||||||
|
|
||||||
# Extract filename from path
|
|
||||||
file_name = file_path.split("/")[-1]
|
|
||||||
|
|
||||||
# Calculate relative path by stripping working directory
|
|
||||||
relative_path = file_path
|
|
||||||
if file_path.startswith(working_directory):
|
|
||||||
relative_path = file_path[len(working_directory) :]
|
|
||||||
# Remove leading slash if present
|
|
||||||
if relative_path.startswith("/"):
|
|
||||||
relative_path = relative_path[1:]
|
|
||||||
|
|
||||||
files.append(
|
|
||||||
ClaudeCodeBlock.FileOutput(
|
|
||||||
path=file_path,
|
|
||||||
relative_path=relative_path,
|
|
||||||
name=file_name,
|
|
||||||
content=content,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except Exception:
|
|
||||||
# Skip files that can't be read
|
|
||||||
pass
|
|
||||||
|
|
||||||
except Exception:
|
|
||||||
# If file extraction fails, return empty results
|
|
||||||
pass
|
|
||||||
|
|
||||||
return files
|
|
||||||
|
|
||||||
def _escape_prompt(self, prompt: str) -> str:
|
def _escape_prompt(self, prompt: str) -> str:
|
||||||
"""Escape the prompt for safe shell execution."""
|
"""Escape the prompt for safe shell execution."""
|
||||||
# Use single quotes and escape any single quotes in the prompt
|
# Use single quotes and escape any single quotes in the prompt
|
||||||
@@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block):
|
|||||||
*,
|
*,
|
||||||
e2b_credentials: APIKeyCredentials,
|
e2b_credentials: APIKeyCredentials,
|
||||||
anthropic_credentials: APIKeyCredentials,
|
anthropic_credentials: APIKeyCredentials,
|
||||||
|
execution_context: "ExecutionContext",
|
||||||
**kwargs,
|
**kwargs,
|
||||||
) -> BlockOutput:
|
) -> BlockOutput:
|
||||||
try:
|
try:
|
||||||
@@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block):
|
|||||||
existing_sandbox_id=input_data.sandbox_id,
|
existing_sandbox_id=input_data.sandbox_id,
|
||||||
conversation_history=input_data.conversation_history,
|
conversation_history=input_data.conversation_history,
|
||||||
dispose_sandbox=input_data.dispose_sandbox,
|
dispose_sandbox=input_data.dispose_sandbox,
|
||||||
|
execution_context=execution_context,
|
||||||
)
|
)
|
||||||
|
|
||||||
yield "response", response
|
yield "response", response
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Any, Literal, Optional
|
from typing import TYPE_CHECKING, Any, Literal, Optional
|
||||||
|
|
||||||
from e2b_code_interpreter import AsyncSandbox
|
from e2b_code_interpreter import AsyncSandbox
|
||||||
from e2b_code_interpreter import Result as E2BExecutionResult
|
from e2b_code_interpreter import Result as E2BExecutionResult
|
||||||
from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
|
from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
|
||||||
from pydantic import BaseModel, Field, JsonValue, SecretStr
|
from pydantic import BaseModel, Field, JsonValue, SecretStr
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -20,6 +20,13 @@ from backend.data.model import (
|
|||||||
SchemaField,
|
SchemaField,
|
||||||
)
|
)
|
||||||
from backend.integrations.providers import ProviderName
|
from backend.integrations.providers import ProviderName
|
||||||
|
from backend.util.sandbox_files import (
|
||||||
|
SandboxFileOutput,
|
||||||
|
extract_and_store_sandbox_files,
|
||||||
|
)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from backend.executor.utils import ExecutionContext
|
||||||
|
|
||||||
TEST_CREDENTIALS = APIKeyCredentials(
|
TEST_CREDENTIALS = APIKeyCredentials(
|
||||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||||
@@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult):
|
|||||||
class BaseE2BExecutorMixin:
|
class BaseE2BExecutorMixin:
|
||||||
"""Shared implementation methods for E2B executor blocks."""
|
"""Shared implementation methods for E2B executor blocks."""
|
||||||
|
|
||||||
|
# Default working directory in E2B sandboxes
|
||||||
|
WORKING_DIR = "/home/user"
|
||||||
|
|
||||||
async def execute_code(
|
async def execute_code(
|
||||||
self,
|
self,
|
||||||
api_key: str,
|
api_key: str,
|
||||||
@@ -95,14 +105,21 @@ class BaseE2BExecutorMixin:
|
|||||||
timeout: Optional[int] = None,
|
timeout: Optional[int] = None,
|
||||||
sandbox_id: Optional[str] = None,
|
sandbox_id: Optional[str] = None,
|
||||||
dispose_sandbox: bool = False,
|
dispose_sandbox: bool = False,
|
||||||
|
execution_context: Optional["ExecutionContext"] = None,
|
||||||
|
extract_files: bool = False,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Unified code execution method that handles all three use cases:
|
Unified code execution method that handles all three use cases:
|
||||||
1. Create new sandbox and execute (ExecuteCodeBlock)
|
1. Create new sandbox and execute (ExecuteCodeBlock)
|
||||||
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
|
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
|
||||||
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
|
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
extract_files: If True and execution_context provided, extract files
|
||||||
|
created/modified during execution and store to workspace.
|
||||||
""" # noqa
|
""" # noqa
|
||||||
sandbox = None
|
sandbox = None
|
||||||
|
files: list[SandboxFileOutput] = []
|
||||||
try:
|
try:
|
||||||
if sandbox_id:
|
if sandbox_id:
|
||||||
# Connect to existing sandbox (ExecuteCodeStepBlock case)
|
# Connect to existing sandbox (ExecuteCodeStepBlock case)
|
||||||
@@ -118,6 +135,12 @@ class BaseE2BExecutorMixin:
|
|||||||
for cmd in setup_commands:
|
for cmd in setup_commands:
|
||||||
await sandbox.commands.run(cmd)
|
await sandbox.commands.run(cmd)
|
||||||
|
|
||||||
|
# Capture timestamp before execution to scope file extraction
|
||||||
|
start_timestamp = None
|
||||||
|
if extract_files:
|
||||||
|
ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
|
||||||
|
start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
|
||||||
|
|
||||||
# Execute the code
|
# Execute the code
|
||||||
execution = await sandbox.run_code(
|
execution = await sandbox.run_code(
|
||||||
code,
|
code,
|
||||||
@@ -133,7 +156,24 @@ class BaseE2BExecutorMixin:
|
|||||||
stdout_logs = "".join(execution.logs.stdout)
|
stdout_logs = "".join(execution.logs.stdout)
|
||||||
stderr_logs = "".join(execution.logs.stderr)
|
stderr_logs = "".join(execution.logs.stderr)
|
||||||
|
|
||||||
return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
|
# Extract files created/modified during this execution
|
||||||
|
if extract_files and execution_context:
|
||||||
|
files = await extract_and_store_sandbox_files(
|
||||||
|
sandbox=sandbox,
|
||||||
|
working_directory=self.WORKING_DIR,
|
||||||
|
execution_context=execution_context,
|
||||||
|
since_timestamp=start_timestamp,
|
||||||
|
text_only=False, # Include binary files too
|
||||||
|
)
|
||||||
|
|
||||||
|
return (
|
||||||
|
results,
|
||||||
|
text_output,
|
||||||
|
stdout_logs,
|
||||||
|
stderr_logs,
|
||||||
|
sandbox.sandbox_id,
|
||||||
|
files,
|
||||||
|
)
|
||||||
finally:
|
finally:
|
||||||
# Dispose of sandbox if requested to reduce usage costs
|
# Dispose of sandbox if requested to reduce usage costs
|
||||||
if dispose_sandbox and sandbox:
|
if dispose_sandbox and sandbox:
|
||||||
@@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
|||||||
description="Standard output logs from execution"
|
description="Standard output logs from execution"
|
||||||
)
|
)
|
||||||
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
||||||
|
files: list[SandboxFileOutput] = SchemaField(
|
||||||
|
description=(
|
||||||
|
"Files created or modified during execution. "
|
||||||
|
"Each file has path, name, content, and workspace_ref (if stored)."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
@@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
|||||||
("results", []),
|
("results", []),
|
||||||
("response", "Hello World"),
|
("response", "Hello World"),
|
||||||
("stdout_logs", "Hello World\n"),
|
("stdout_logs", "Hello World\n"),
|
||||||
|
("files", []),
|
||||||
],
|
],
|
||||||
test_mock={
|
test_mock={
|
||||||
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa
|
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa
|
||||||
[], # results
|
[], # results
|
||||||
"Hello World", # text_output
|
"Hello World", # text_output
|
||||||
"Hello World\n", # stdout_logs
|
"Hello World\n", # stdout_logs
|
||||||
"", # stderr_logs
|
"", # stderr_logs
|
||||||
"sandbox_id", # sandbox_id
|
"sandbox_id", # sandbox_id
|
||||||
|
[], # files
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
async def run(
|
async def run(
|
||||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
credentials: APIKeyCredentials,
|
||||||
|
execution_context: "ExecutionContext",
|
||||||
|
**kwargs,
|
||||||
) -> BlockOutput:
|
) -> BlockOutput:
|
||||||
try:
|
try:
|
||||||
results, text_output, stdout, stderr, _ = await self.execute_code(
|
results, text_output, stdout, stderr, _, files = await self.execute_code(
|
||||||
api_key=credentials.api_key.get_secret_value(),
|
api_key=credentials.api_key.get_secret_value(),
|
||||||
code=input_data.code,
|
code=input_data.code,
|
||||||
language=input_data.language,
|
language=input_data.language,
|
||||||
@@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
|||||||
setup_commands=input_data.setup_commands,
|
setup_commands=input_data.setup_commands,
|
||||||
timeout=input_data.timeout,
|
timeout=input_data.timeout,
|
||||||
dispose_sandbox=input_data.dispose_sandbox,
|
dispose_sandbox=input_data.dispose_sandbox,
|
||||||
|
execution_context=execution_context,
|
||||||
|
extract_files=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Determine result object shape & filter out empty formats
|
# Determine result object shape & filter out empty formats
|
||||||
@@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
|
|||||||
yield "stdout_logs", stdout
|
yield "stdout_logs", stdout
|
||||||
if stderr:
|
if stderr:
|
||||||
yield "stderr_logs", stderr
|
yield "stderr_logs", stderr
|
||||||
|
# Always yield files (empty list if none)
|
||||||
|
yield "files", [f.model_dump() for f in files]
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
yield "error", str(e)
|
yield "error", str(e)
|
||||||
|
|
||||||
@@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
|
|||||||
"Hello World\n", # stdout_logs
|
"Hello World\n", # stdout_logs
|
||||||
"", # stderr_logs
|
"", # stderr_logs
|
||||||
"sandbox_id", # sandbox_id
|
"sandbox_id", # sandbox_id
|
||||||
|
[], # files
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
|
|||||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||||
) -> BlockOutput:
|
) -> BlockOutput:
|
||||||
try:
|
try:
|
||||||
_, text_output, stdout, stderr, sandbox_id = await self.execute_code(
|
_, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
|
||||||
api_key=credentials.api_key.get_secret_value(),
|
api_key=credentials.api_key.get_secret_value(),
|
||||||
code=input_data.setup_code,
|
code=input_data.setup_code,
|
||||||
language=input_data.language,
|
language=input_data.language,
|
||||||
@@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
|
|||||||
"Hello World\n", # stdout_logs
|
"Hello World\n", # stdout_logs
|
||||||
"", # stderr_logs
|
"", # stderr_logs
|
||||||
sandbox_id, # sandbox_id
|
sandbox_id, # sandbox_id
|
||||||
|
[], # files
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
|
|||||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||||
) -> BlockOutput:
|
) -> BlockOutput:
|
||||||
try:
|
try:
|
||||||
results, text_output, stdout, stderr, _ = await self.execute_code(
|
results, text_output, stdout, stderr, _, _ = await self.execute_code(
|
||||||
api_key=credentials.api_key.get_secret_value(),
|
api_key=credentials.api_key.get_secret_value(),
|
||||||
code=input_data.step_code,
|
code=input_data.step_code,
|
||||||
language=input_data.language,
|
language=input_data.language,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
|
|||||||
from openai.types.responses import Response as OpenAIResponse
|
from openai.types.responses import Response as OpenAIResponse
|
||||||
from pydantic import SecretStr
|
from pydantic import SecretStr
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockManualWebhookConfig,
|
BlockManualWebhookConfig,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from typing import Any, List
|
from typing import Any, List
|
||||||
|
|
||||||
from backend.data.block import (
|
from backend.blocks._base import (
|
||||||
Block,
|
Block,
|
||||||
BlockCategory,
|
BlockCategory,
|
||||||
BlockOutput,
|
BlockOutput,
|
||||||
@@ -682,17 +682,219 @@ class ListIsEmptyBlock(Block):
|
|||||||
yield "is_empty", len(input_data.list) == 0
|
yield "is_empty", len(input_data.list) == 0
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# List Concatenation Helpers
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_list_input(item: Any, index: int) -> str | None:
|
||||||
|
"""Validate that an item is a list. Returns error message or None."""
|
||||||
|
if item is None:
|
||||||
|
return None # None is acceptable, will be skipped
|
||||||
|
if not isinstance(item, list):
|
||||||
|
return (
|
||||||
|
f"Invalid input at index {index}: expected a list, "
|
||||||
|
f"got {type(item).__name__}. "
|
||||||
|
f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_all_lists(lists: List[Any]) -> str | None:
|
||||||
|
"""Validate that all items in a sequence are lists. Returns first error or None."""
|
||||||
|
for idx, item in enumerate(lists):
|
||||||
|
error = _validate_list_input(item, idx)
|
||||||
|
if error is not None and item is not None:
|
||||||
|
return error
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _concatenate_lists_simple(lists: List[List[Any]]) -> List[Any]:
|
||||||
|
"""Concatenate a sequence of lists into a single list, skipping None values."""
|
||||||
|
result: List[Any] = []
|
||||||
|
for lst in lists:
|
||||||
|
if lst is None:
|
||||||
|
continue
|
||||||
|
result.extend(lst)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _flatten_nested_list(nested: List[Any], max_depth: int = -1) -> List[Any]:
|
||||||
|
"""
|
||||||
|
Recursively flatten a nested list structure.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
nested: The list to flatten.
|
||||||
|
max_depth: Maximum recursion depth. -1 means unlimited.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A flat list with all nested elements extracted.
|
||||||
|
"""
|
||||||
|
result: List[Any] = []
|
||||||
|
_flatten_recursive(nested, result, current_depth=0, max_depth=max_depth)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
_MAX_FLATTEN_DEPTH = 1000
|
||||||
|
|
||||||
|
|
||||||
|
def _flatten_recursive(
|
||||||
|
items: List[Any],
|
||||||
|
result: List[Any],
|
||||||
|
current_depth: int,
|
||||||
|
max_depth: int,
|
||||||
|
) -> None:
|
||||||
|
"""Internal recursive helper for flattening nested lists."""
|
||||||
|
if current_depth > _MAX_FLATTEN_DEPTH:
|
||||||
|
raise RecursionError(
|
||||||
|
f"Flattening exceeded maximum depth of {_MAX_FLATTEN_DEPTH} levels. "
|
||||||
|
"Input may be too deeply nested."
|
||||||
|
)
|
||||||
|
for item in items:
|
||||||
|
if isinstance(item, list) and (max_depth == -1 or current_depth < max_depth):
|
||||||
|
_flatten_recursive(item, result, current_depth + 1, max_depth)
|
||||||
|
else:
|
||||||
|
result.append(item)
|
||||||
|
|
||||||
|
|
||||||
|
def _deduplicate_list(items: List[Any]) -> List[Any]:
|
||||||
|
"""
|
||||||
|
Remove duplicate elements from a list, preserving order of first occurrences.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
items: The list to deduplicate.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A list with duplicates removed, maintaining original order.
|
||||||
|
"""
|
||||||
|
seen: set = set()
|
||||||
|
result: List[Any] = []
|
||||||
|
for item in items:
|
||||||
|
item_id = _make_hashable(item)
|
||||||
|
if item_id not in seen:
|
||||||
|
seen.add(item_id)
|
||||||
|
result.append(item)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _make_hashable(item: Any):
|
||||||
|
"""
|
||||||
|
Create a hashable representation of any item for deduplication.
|
||||||
|
Converts unhashable types (dicts, lists) into deterministic tuple structures.
|
||||||
|
"""
|
||||||
|
if isinstance(item, dict):
|
||||||
|
return tuple(
|
||||||
|
sorted(
|
||||||
|
((_make_hashable(k), _make_hashable(v)) for k, v in item.items()),
|
||||||
|
key=lambda x: (str(type(x[0])), str(x[0])),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if isinstance(item, (list, tuple)):
|
||||||
|
return tuple(_make_hashable(i) for i in item)
|
||||||
|
if isinstance(item, set):
|
||||||
|
return frozenset(_make_hashable(i) for i in item)
|
||||||
|
return item
|
||||||
|
|
||||||
|
|
||||||
|
def _filter_none_values(items: List[Any]) -> List[Any]:
|
||||||
|
"""Remove None values from a list."""
|
||||||
|
return [item for item in items if item is not None]
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_nesting_depth(
|
||||||
|
items: Any, current: int = 0, max_depth: int = _MAX_FLATTEN_DEPTH
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Compute the maximum nesting depth of a list structure using iteration to avoid RecursionError.
|
||||||
|
|
||||||
|
Uses a stack-based approach to handle deeply nested structures without hitting Python's
|
||||||
|
recursion limit (~1000 levels).
|
||||||
|
"""
|
||||||
|
if not isinstance(items, list):
|
||||||
|
return current
|
||||||
|
|
||||||
|
# Stack contains tuples of (item, depth)
|
||||||
|
stack = [(items, current)]
|
||||||
|
max_observed_depth = current
|
||||||
|
|
||||||
|
while stack:
|
||||||
|
item, depth = stack.pop()
|
||||||
|
|
||||||
|
if depth > max_depth:
|
||||||
|
return depth
|
||||||
|
|
||||||
|
if not isinstance(item, list):
|
||||||
|
max_observed_depth = max(max_observed_depth, depth)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if len(item) == 0:
|
||||||
|
max_observed_depth = max(max_observed_depth, depth + 1)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add all children to stack with incremented depth
|
||||||
|
for child in item:
|
||||||
|
stack.append((child, depth + 1))
|
||||||
|
|
||||||
|
return max_observed_depth
|
||||||
|
|
||||||
|
|
||||||
|
def _interleave_lists(lists: List[List[Any]]) -> List[Any]:
|
||||||
|
"""
|
||||||
|
Interleave elements from multiple lists in round-robin fashion.
|
||||||
|
Example: [[1,2,3], [a,b], [x,y,z]] -> [1, a, x, 2, b, y, 3, z]
|
||||||
|
"""
|
||||||
|
if not lists:
|
||||||
|
return []
|
||||||
|
filtered = [lst for lst in lists if lst is not None]
|
||||||
|
if not filtered:
|
||||||
|
return []
|
||||||
|
result: List[Any] = []
|
||||||
|
max_len = max(len(lst) for lst in filtered)
|
||||||
|
for i in range(max_len):
|
||||||
|
for lst in filtered:
|
||||||
|
if i < len(lst):
|
||||||
|
result.append(lst[i])
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# List Concatenation Blocks
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
|
||||||
class ConcatenateListsBlock(Block):
|
class ConcatenateListsBlock(Block):
|
||||||
|
"""
|
||||||
|
Concatenates two or more lists into a single list.
|
||||||
|
|
||||||
|
This block accepts a list of lists and combines all their elements
|
||||||
|
in order into one flat output list. It supports options for
|
||||||
|
deduplication and None-filtering to provide flexible list merging
|
||||||
|
capabilities for workflow pipelines.
|
||||||
|
"""
|
||||||
|
|
||||||
class Input(BlockSchemaInput):
|
class Input(BlockSchemaInput):
|
||||||
lists: List[List[Any]] = SchemaField(
|
lists: List[List[Any]] = SchemaField(
|
||||||
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
|
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
|
||||||
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
|
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
|
||||||
)
|
)
|
||||||
|
deduplicate: bool = SchemaField(
|
||||||
|
description="If True, remove duplicate elements from the concatenated result while preserving order.",
|
||||||
|
default=False,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
remove_none: bool = SchemaField(
|
||||||
|
description="If True, remove None values from the concatenated result.",
|
||||||
|
default=False,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
|
||||||
class Output(BlockSchemaOutput):
|
class Output(BlockSchemaOutput):
|
||||||
concatenated_list: List[Any] = SchemaField(
|
concatenated_list: List[Any] = SchemaField(
|
||||||
description="The concatenated list containing all elements from all input lists in order."
|
description="The concatenated list containing all elements from all input lists in order."
|
||||||
)
|
)
|
||||||
|
length: int = SchemaField(
|
||||||
|
description="The total number of elements in the concatenated list."
|
||||||
|
)
|
||||||
error: str = SchemaField(
|
error: str = SchemaField(
|
||||||
description="Error message if concatenation failed due to invalid input types."
|
description="Error message if concatenation failed due to invalid input types."
|
||||||
)
|
)
|
||||||
@@ -700,7 +902,7 @@ class ConcatenateListsBlock(Block):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(
|
super().__init__(
|
||||||
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
|
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
|
||||||
description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.",
|
description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order. Supports optional deduplication and None removal.",
|
||||||
categories={BlockCategory.BASIC},
|
categories={BlockCategory.BASIC},
|
||||||
input_schema=ConcatenateListsBlock.Input,
|
input_schema=ConcatenateListsBlock.Input,
|
||||||
output_schema=ConcatenateListsBlock.Output,
|
output_schema=ConcatenateListsBlock.Output,
|
||||||
@@ -709,29 +911,497 @@ class ConcatenateListsBlock(Block):
|
|||||||
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
|
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
|
||||||
{"lists": [[1, 2], []]},
|
{"lists": [[1, 2], []]},
|
||||||
{"lists": []},
|
{"lists": []},
|
||||||
|
{"lists": [[1, 2, 2, 3], [3, 4]], "deduplicate": True},
|
||||||
|
{"lists": [[1, None, 2], [None, 3]], "remove_none": True},
|
||||||
],
|
],
|
||||||
test_output=[
|
test_output=[
|
||||||
("concatenated_list", [1, 2, 3, 4, 5, 6]),
|
("concatenated_list", [1, 2, 3, 4, 5, 6]),
|
||||||
|
("length", 6),
|
||||||
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
|
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
|
||||||
|
("length", 6),
|
||||||
("concatenated_list", [1, 2]),
|
("concatenated_list", [1, 2]),
|
||||||
|
("length", 2),
|
||||||
("concatenated_list", []),
|
("concatenated_list", []),
|
||||||
|
("length", 0),
|
||||||
|
("concatenated_list", [1, 2, 3, 4]),
|
||||||
|
("length", 4),
|
||||||
|
("concatenated_list", [1, 2, 3]),
|
||||||
|
("length", 3),
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def _validate_inputs(self, lists: List[Any]) -> str | None:
|
||||||
|
return _validate_all_lists(lists)
|
||||||
|
|
||||||
|
def _perform_concatenation(self, lists: List[List[Any]]) -> List[Any]:
|
||||||
|
return _concatenate_lists_simple(lists)
|
||||||
|
|
||||||
|
def _apply_deduplication(self, items: List[Any]) -> List[Any]:
|
||||||
|
return _deduplicate_list(items)
|
||||||
|
|
||||||
|
def _apply_none_removal(self, items: List[Any]) -> List[Any]:
|
||||||
|
return _filter_none_values(items)
|
||||||
|
|
||||||
|
def _post_process(
|
||||||
|
self, items: List[Any], deduplicate: bool, remove_none: bool
|
||||||
|
) -> List[Any]:
|
||||||
|
"""Apply all post-processing steps to the concatenated result."""
|
||||||
|
result = items
|
||||||
|
if remove_none:
|
||||||
|
result = self._apply_none_removal(result)
|
||||||
|
if deduplicate:
|
||||||
|
result = self._apply_deduplication(result)
|
||||||
|
return result
|
||||||
|
|
||||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
concatenated = []
|
# Validate all inputs are lists
|
||||||
for idx, lst in enumerate(input_data.lists):
|
validation_error = self._validate_inputs(input_data.lists)
|
||||||
if lst is None:
|
if validation_error is not None:
|
||||||
# Skip None values to avoid errors
|
yield "error", validation_error
|
||||||
continue
|
return
|
||||||
if not isinstance(lst, list):
|
|
||||||
# Type validation: each item must be a list
|
# Perform concatenation
|
||||||
# Strings are iterable and would cause extend() to iterate character-by-character
|
concatenated = self._perform_concatenation(input_data.lists)
|
||||||
# Non-iterable types would raise TypeError
|
|
||||||
yield "error", (
|
# Apply post-processing
|
||||||
f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
|
result = self._post_process(
|
||||||
f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
|
concatenated, input_data.deduplicate, input_data.remove_none
|
||||||
)
|
)
|
||||||
return
|
|
||||||
concatenated.extend(lst)
|
yield "concatenated_list", result
|
||||||
yield "concatenated_list", concatenated
|
yield "length", len(result)
|
||||||
|
|
||||||
|
|
||||||
|
class FlattenListBlock(Block):
|
||||||
|
"""
|
||||||
|
Flattens a nested list structure into a single flat list.
|
||||||
|
|
||||||
|
This block takes a list that may contain nested lists at any depth
|
||||||
|
and produces a single-level list with all leaf elements. Useful
|
||||||
|
for normalizing data structures from multiple sources that may
|
||||||
|
have varying levels of nesting.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
nested_list: List[Any] = SchemaField(
|
||||||
|
description="A potentially nested list to flatten into a single-level list.",
|
||||||
|
placeholder="e.g., [[1, [2, 3]], [4, [5, [6]]]]",
|
||||||
|
)
|
||||||
|
max_depth: int = SchemaField(
|
||||||
|
description="Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level.",
|
||||||
|
default=-1,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
flattened_list: List[Any] = SchemaField(
|
||||||
|
description="The flattened list with all nested elements extracted."
|
||||||
|
)
|
||||||
|
length: int = SchemaField(
|
||||||
|
description="The number of elements in the flattened list."
|
||||||
|
)
|
||||||
|
original_depth: int = SchemaField(
|
||||||
|
description="The maximum nesting depth of the original input list."
|
||||||
|
)
|
||||||
|
error: str = SchemaField(description="Error message if flattening failed.")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="cc45bb0f-d035-4756-96a7-fe3e36254b4d",
|
||||||
|
description="Flattens a nested list structure into a single flat list. Supports configurable maximum flattening depth.",
|
||||||
|
categories={BlockCategory.BASIC},
|
||||||
|
input_schema=FlattenListBlock.Input,
|
||||||
|
output_schema=FlattenListBlock.Output,
|
||||||
|
test_input=[
|
||||||
|
{"nested_list": [[1, 2], [3, [4, 5]]]},
|
||||||
|
{"nested_list": [1, [2, [3, [4]]]]},
|
||||||
|
{"nested_list": [1, [2, [3, [4]]], 5], "max_depth": 1},
|
||||||
|
{"nested_list": []},
|
||||||
|
{"nested_list": [1, 2, 3]},
|
||||||
|
],
|
||||||
|
test_output=[
|
||||||
|
("flattened_list", [1, 2, 3, 4, 5]),
|
||||||
|
("length", 5),
|
||||||
|
("original_depth", 3),
|
||||||
|
("flattened_list", [1, 2, 3, 4]),
|
||||||
|
("length", 4),
|
||||||
|
("original_depth", 4),
|
||||||
|
("flattened_list", [1, 2, [3, [4]], 5]),
|
||||||
|
("length", 4),
|
||||||
|
("original_depth", 4),
|
||||||
|
("flattened_list", []),
|
||||||
|
("length", 0),
|
||||||
|
("original_depth", 1),
|
||||||
|
("flattened_list", [1, 2, 3]),
|
||||||
|
("length", 3),
|
||||||
|
("original_depth", 1),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def _compute_depth(self, items: List[Any]) -> int:
|
||||||
|
"""Compute the nesting depth of the input list."""
|
||||||
|
return _compute_nesting_depth(items)
|
||||||
|
|
||||||
|
def _flatten(self, items: List[Any], max_depth: int) -> List[Any]:
|
||||||
|
"""Flatten the list to the specified depth."""
|
||||||
|
return _flatten_nested_list(items, max_depth=max_depth)
|
||||||
|
|
||||||
|
def _validate_max_depth(self, max_depth: int) -> str | None:
|
||||||
|
"""Validate the max_depth parameter."""
|
||||||
|
if max_depth < -1:
|
||||||
|
return f"max_depth must be -1 (unlimited) or a non-negative integer, got {max_depth}"
|
||||||
|
return None
|
||||||
|
|
||||||
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
|
# Validate max_depth
|
||||||
|
depth_error = self._validate_max_depth(input_data.max_depth)
|
||||||
|
if depth_error is not None:
|
||||||
|
yield "error", depth_error
|
||||||
|
return
|
||||||
|
|
||||||
|
original_depth = self._compute_depth(input_data.nested_list)
|
||||||
|
flattened = self._flatten(input_data.nested_list, input_data.max_depth)
|
||||||
|
|
||||||
|
yield "flattened_list", flattened
|
||||||
|
yield "length", len(flattened)
|
||||||
|
yield "original_depth", original_depth
|
||||||
|
|
||||||
|
|
||||||
|
class InterleaveListsBlock(Block):
|
||||||
|
"""
|
||||||
|
Interleaves elements from multiple lists in round-robin fashion.
|
||||||
|
|
||||||
|
Given multiple input lists, this block takes one element from each
|
||||||
|
list in turn, producing an output where elements alternate between
|
||||||
|
sources. Lists of different lengths are handled gracefully - shorter
|
||||||
|
lists simply stop contributing once exhausted.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
lists: List[List[Any]] = SchemaField(
|
||||||
|
description="A list of lists to interleave. Elements will be taken in round-robin order.",
|
||||||
|
placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]",
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
interleaved_list: List[Any] = SchemaField(
|
||||||
|
description="The interleaved list with elements alternating from each input list."
|
||||||
|
)
|
||||||
|
length: int = SchemaField(
|
||||||
|
description="The total number of elements in the interleaved list."
|
||||||
|
)
|
||||||
|
error: str = SchemaField(description="Error message if interleaving failed.")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="9f616084-1d9f-4f8e-bc00-5b9d2a75cd75",
|
||||||
|
description="Interleaves elements from multiple lists in round-robin fashion, alternating between sources.",
|
||||||
|
categories={BlockCategory.BASIC},
|
||||||
|
input_schema=InterleaveListsBlock.Input,
|
||||||
|
output_schema=InterleaveListsBlock.Output,
|
||||||
|
test_input=[
|
||||||
|
{"lists": [[1, 2, 3], ["a", "b", "c"]]},
|
||||||
|
{"lists": [[1, 2, 3], ["a", "b"], ["x", "y", "z"]]},
|
||||||
|
{"lists": [[1], [2], [3]]},
|
||||||
|
{"lists": []},
|
||||||
|
],
|
||||||
|
test_output=[
|
||||||
|
("interleaved_list", [1, "a", 2, "b", 3, "c"]),
|
||||||
|
("length", 6),
|
||||||
|
("interleaved_list", [1, "a", "x", 2, "b", "y", 3, "z"]),
|
||||||
|
("length", 8),
|
||||||
|
("interleaved_list", [1, 2, 3]),
|
||||||
|
("length", 3),
|
||||||
|
("interleaved_list", []),
|
||||||
|
("length", 0),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def _validate_inputs(self, lists: List[Any]) -> str | None:
|
||||||
|
return _validate_all_lists(lists)
|
||||||
|
|
||||||
|
def _interleave(self, lists: List[List[Any]]) -> List[Any]:
|
||||||
|
return _interleave_lists(lists)
|
||||||
|
|
||||||
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
|
validation_error = self._validate_inputs(input_data.lists)
|
||||||
|
if validation_error is not None:
|
||||||
|
yield "error", validation_error
|
||||||
|
return
|
||||||
|
|
||||||
|
result = self._interleave(input_data.lists)
|
||||||
|
yield "interleaved_list", result
|
||||||
|
yield "length", len(result)
|
||||||
|
|
||||||
|
|
||||||
|
class ZipListsBlock(Block):
|
||||||
|
"""
|
||||||
|
Zips multiple lists together into a list of grouped tuples/lists.
|
||||||
|
|
||||||
|
Takes two or more input lists and combines corresponding elements
|
||||||
|
into sub-lists. For example, zipping [1,2,3] and ['a','b','c']
|
||||||
|
produces [[1,'a'], [2,'b'], [3,'c']]. Supports both truncating
|
||||||
|
to shortest list and padding to longest list with a fill value.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
lists: List[List[Any]] = SchemaField(
|
||||||
|
description="A list of lists to zip together. Corresponding elements will be grouped.",
|
||||||
|
placeholder="e.g., [[1, 2, 3], ['a', 'b', 'c']]",
|
||||||
|
)
|
||||||
|
pad_to_longest: bool = SchemaField(
|
||||||
|
description="If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest.",
|
||||||
|
default=False,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
fill_value: Any = SchemaField(
|
||||||
|
description="Value to use for padding when pad_to_longest is True.",
|
||||||
|
default=None,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
zipped_list: List[List[Any]] = SchemaField(
|
||||||
|
description="The zipped list of grouped elements."
|
||||||
|
)
|
||||||
|
length: int = SchemaField(
|
||||||
|
description="The number of groups in the zipped result."
|
||||||
|
)
|
||||||
|
error: str = SchemaField(description="Error message if zipping failed.")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="0d0e684f-5cb9-4c4b-b8d1-47a0860e0c07",
|
||||||
|
description="Zips multiple lists together into a list of grouped elements. Supports padding to longest or truncating to shortest.",
|
||||||
|
categories={BlockCategory.BASIC},
|
||||||
|
input_schema=ZipListsBlock.Input,
|
||||||
|
output_schema=ZipListsBlock.Output,
|
||||||
|
test_input=[
|
||||||
|
{"lists": [[1, 2, 3], ["a", "b", "c"]]},
|
||||||
|
{"lists": [[1, 2, 3], ["a", "b"]]},
|
||||||
|
{
|
||||||
|
"lists": [[1, 2], ["a", "b", "c"]],
|
||||||
|
"pad_to_longest": True,
|
||||||
|
"fill_value": 0,
|
||||||
|
},
|
||||||
|
{"lists": []},
|
||||||
|
],
|
||||||
|
test_output=[
|
||||||
|
("zipped_list", [[1, "a"], [2, "b"], [3, "c"]]),
|
||||||
|
("length", 3),
|
||||||
|
("zipped_list", [[1, "a"], [2, "b"]]),
|
||||||
|
("length", 2),
|
||||||
|
("zipped_list", [[1, "a"], [2, "b"], [0, "c"]]),
|
||||||
|
("length", 3),
|
||||||
|
("zipped_list", []),
|
||||||
|
("length", 0),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def _validate_inputs(self, lists: List[Any]) -> str | None:
|
||||||
|
return _validate_all_lists(lists)
|
||||||
|
|
||||||
|
def _zip_truncate(self, lists: List[List[Any]]) -> List[List[Any]]:
|
||||||
|
"""Zip lists, truncating to shortest."""
|
||||||
|
filtered = [lst for lst in lists if lst is not None]
|
||||||
|
if not filtered:
|
||||||
|
return []
|
||||||
|
return [list(group) for group in zip(*filtered)]
|
||||||
|
|
||||||
|
def _zip_pad(self, lists: List[List[Any]], fill_value: Any) -> List[List[Any]]:
|
||||||
|
"""Zip lists, padding shorter ones with fill_value."""
|
||||||
|
if not lists:
|
||||||
|
return []
|
||||||
|
lists = [lst for lst in lists if lst is not None]
|
||||||
|
if not lists:
|
||||||
|
return []
|
||||||
|
max_len = max(len(lst) for lst in lists)
|
||||||
|
result: List[List[Any]] = []
|
||||||
|
for i in range(max_len):
|
||||||
|
group: List[Any] = []
|
||||||
|
for lst in lists:
|
||||||
|
if i < len(lst):
|
||||||
|
group.append(lst[i])
|
||||||
|
else:
|
||||||
|
group.append(fill_value)
|
||||||
|
result.append(group)
|
||||||
|
return result
|
||||||
|
|
||||||
|
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||||
|
validation_error = self._validate_inputs(input_data.lists)
|
||||||
|
if validation_error is not None:
|
||||||
|
yield "error", validation_error
|
||||||
|
return
|
||||||
|
|
||||||
|
if not input_data.lists:
|
||||||
|
yield "zipped_list", []
|
||||||
|
yield "length", 0
|
||||||
|
return
|
||||||
|
|
||||||
|
if input_data.pad_to_longest:
|
||||||
|
result = self._zip_pad(input_data.lists, input_data.fill_value)
|
||||||
|
else:
|
||||||
|
result = self._zip_truncate(input_data.lists)
|
||||||
|
|
||||||
|
yield "zipped_list", result
|
||||||
|
yield "length", len(result)
|
||||||
|
|
||||||
|
|
||||||
|
class ListDifferenceBlock(Block):
|
||||||
|
"""
|
||||||
|
Computes the difference between two lists (elements in the first
|
||||||
|
list that are not in the second list).
|
||||||
|
|
||||||
|
This is useful for finding items that exist in one dataset but
|
||||||
|
not in another, such as finding new items, missing items, or
|
    items that need to be processed.
    """

    class Input(BlockSchemaInput):
        list_a: List[Any] = SchemaField(
            description="The primary list to check elements from.",
            placeholder="e.g., [1, 2, 3, 4, 5]",
        )
        list_b: List[Any] = SchemaField(
            description="The list to subtract. Elements found here will be removed from list_a.",
            placeholder="e.g., [3, 4, 5, 6]",
        )
        symmetric: bool = SchemaField(
            description="If True, compute symmetric difference (elements in either list but not both).",
            default=False,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        difference: List[Any] = SchemaField(
            description="Elements from list_a not found in list_b (or symmetric difference if enabled)."
        )
        length: int = SchemaField(
            description="The number of elements in the difference result."
        )
        error: str = SchemaField(description="Error message if the operation failed.")

    def __init__(self):
        super().__init__(
            id="05309873-9d61-447e-96b5-b804e2511829",
            description="Computes the difference between two lists. Returns elements in the first list not found in the second, or symmetric difference.",
            categories={BlockCategory.BASIC},
            input_schema=ListDifferenceBlock.Input,
            output_schema=ListDifferenceBlock.Output,
            test_input=[
                {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]},
                {
                    "list_a": [1, 2, 3, 4, 5],
                    "list_b": [3, 4, 5, 6, 7],
                    "symmetric": True,
                },
                {"list_a": ["a", "b", "c"], "list_b": ["b"]},
                {"list_a": [], "list_b": [1, 2, 3]},
            ],
            test_output=[
                ("difference", [1, 2]),
                ("length", 2),
                ("difference", [1, 2, 6, 7]),
                ("length", 4),
                ("difference", ["a", "c"]),
                ("length", 2),
                ("difference", []),
                ("length", 0),
            ],
        )

    def _compute_difference(self, list_a: List[Any], list_b: List[Any]) -> List[Any]:
        """Compute elements in list_a not in list_b."""
        b_hashes = {_make_hashable(item) for item in list_b}
        return [item for item in list_a if _make_hashable(item) not in b_hashes]

    def _compute_symmetric_difference(
        self, list_a: List[Any], list_b: List[Any]
    ) -> List[Any]:
        """Compute elements in either list but not both."""
        a_hashes = {_make_hashable(item) for item in list_a}
        b_hashes = {_make_hashable(item) for item in list_b}
        only_in_a = [item for item in list_a if _make_hashable(item) not in b_hashes]
        only_in_b = [item for item in list_b if _make_hashable(item) not in a_hashes]
        return only_in_a + only_in_b

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        if input_data.symmetric:
            result = self._compute_symmetric_difference(
                input_data.list_a, input_data.list_b
            )
        else:
            result = self._compute_difference(input_data.list_a, input_data.list_b)

        yield "difference", result
        yield "length", len(result)


class ListIntersectionBlock(Block):
    """
    Computes the intersection of two lists (elements present in both lists).

    This is useful for finding common items between two datasets,
    such as shared tags, mutual connections, or overlapping categories.
    """

    class Input(BlockSchemaInput):
        list_a: List[Any] = SchemaField(
            description="The first list to intersect.",
            placeholder="e.g., [1, 2, 3, 4, 5]",
        )
        list_b: List[Any] = SchemaField(
            description="The second list to intersect.",
            placeholder="e.g., [3, 4, 5, 6, 7]",
        )

    class Output(BlockSchemaOutput):
        intersection: List[Any] = SchemaField(
            description="Elements present in both list_a and list_b."
        )
        length: int = SchemaField(
            description="The number of elements in the intersection."
        )
        error: str = SchemaField(description="Error message if the operation failed.")

    def __init__(self):
        super().__init__(
            id="b6eb08b6-dbe3-411b-b9b4-2508cb311a1f",
            description="Computes the intersection of two lists, returning only elements present in both.",
            categories={BlockCategory.BASIC},
            input_schema=ListIntersectionBlock.Input,
            output_schema=ListIntersectionBlock.Output,
            test_input=[
                {"list_a": [1, 2, 3, 4, 5], "list_b": [3, 4, 5, 6, 7]},
                {"list_a": ["a", "b", "c"], "list_b": ["c", "d", "e"]},
                {"list_a": [1, 2], "list_b": [3, 4]},
                {"list_a": [], "list_b": [1, 2, 3]},
            ],
            test_output=[
                ("intersection", [3, 4, 5]),
                ("length", 3),
                ("intersection", ["c"]),
                ("length", 1),
                ("intersection", []),
                ("length", 0),
                ("intersection", []),
                ("length", 0),
            ],
        )

    def _compute_intersection(self, list_a: List[Any], list_b: List[Any]) -> List[Any]:
        """Compute elements present in both lists, preserving order from list_a."""
        b_hashes = {_make_hashable(item) for item in list_b}
        seen: set = set()
        result: List[Any] = []
        for item in list_a:
            h = _make_hashable(item)
            if h in b_hashes and h not in seen:
                result.append(item)
                seen.add(h)
        return result

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        result = self._compute_intersection(input_data.list_a, input_data.list_b)
        yield "intersection", result
        yield "length", len(result)
@@ -1,6 +1,6 @@
 import codecs
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -8,7 +8,7 @@ from typing import Any, Literal, cast
 import discord
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@
 Discord OAuth-based blocks.
 """
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,7 +7,7 @@ from typing import Literal
 
 from pydantic import BaseModel, ConfigDict, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@
 
 import codecs
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information.
 import logging
 from typing import Optional
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,6 +3,13 @@ import logging
 from enum import Enum
 from typing import Any
 
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.fal._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,

@@ -10,13 +17,6 @@ from backend.blocks.fal._auth import (
     FalCredentialsField,
     FalCredentialsInput,
 )
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file

@@ -5,7 +5,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -5,7 +5,7 @@ from typing import Optional
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from urllib.parse import urlparse
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ import re
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,7 +2,7 @@ import base64
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -4,7 +4,7 @@ from typing import Any, List, Optional
 
 from typing_extensions import TypedDict
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -4,7 +4,7 @@ from pathlib import Path
 
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from gravitas_md2gdocs import to_requests
 
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings
 

@@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel, Field
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -7,14 +7,14 @@ from enum import Enum
 from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings
 

@@ -3,7 +3,7 @@ from typing import Literal
 import googlemaps
 from pydantic import BaseModel, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -9,9 +9,7 @@ from typing import Any, Optional
 from prisma.enums import ReviewStatus
 from pydantic import BaseModel
 
-from backend.data.execution import ExecutionStatus
 from backend.data.human_review import ReviewResult
-from backend.executor.manager import async_update_node_execution_status
 from backend.util.clients import get_database_manager_async_client
 
 logger = logging.getLogger(__name__)

@@ -43,6 +41,8 @@ class HITLReviewHelper:
     @staticmethod
     async def update_node_execution_status(**kwargs) -> None:
         """Update the execution status of a node."""
+        from backend.executor.manager import async_update_node_execution_status
+
         await async_update_node_execution_status(
             db_client=get_database_manager_async_client(), **kwargs
         )

@@ -88,12 +88,13 @@ class HITLReviewHelper:
         Raises:
             Exception: If review creation or status update fails
         """
+        from backend.data.execution import ExecutionStatus
 
         # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
         # are handled by the caller:
         # - HITL blocks check human_in_the_loop_safe_mode in their run() method
         # - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
         # This function only handles checking for existing approvals.
 
         # Check if this node has already been approved (normal or auto-approval)
         if approval_result := await HITLReviewHelper.check_approval(
             node_exec_id=node_exec_id,

@@ -8,7 +8,7 @@ from typing import Literal
 import aiofiles
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,15 +1,15 @@
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,15 +1,15 @@
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,17 +1,17 @@
 from datetime import datetime, timedelta
 
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -3,8 +3,7 @@ from typing import Any
 
 from prisma.enums import ReviewStatus
 
-from backend.blocks.helpers.review import HITLReviewHelper
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -12,6 +11,7 @@ from backend.data.block import (
     BlockSchemaOutput,
     BlockType,
 )
+from backend.blocks.helpers.review import HITLReviewHelper
 from backend.data.execution import ExecutionContext
 from backend.data.human_review import ReviewResult
 from backend.data.model import SchemaField
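The hunks above all apply the same mechanical migration: block-framework symbols move from backend.data.block to backend.blocks._base, and the remaining imports (credential helpers, Google Drive helpers, HITL review helpers) are re-sorted after the new base import. For a typical block module on this branch, the post-migration import header looks roughly like the sketch below; the names shown are the ones that actually appear in the hunks, and any given module imports only the subset it needs:

from backend.blocks._base import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField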
@@ -21,43 +21,71 @@ logger = logging.getLogger(__name__)
 
 class HumanInTheLoopBlock(Block):
     """
-    This block pauses execution and waits for human approval or modification of the data.
+    Pauses execution and waits for human approval or rejection of the data.
 
-    When executed, it creates a pending review entry and sets the node execution status
-    to REVIEW. The execution will remain paused until a human user either:
-    - Approves the data (with or without modifications)
-    - Rejects the data
-
-    This is useful for workflows that require human validation or intervention before
-    proceeding to the next steps.
+    When executed, this block creates a pending review entry and sets the node execution
+    status to REVIEW. The execution remains paused until a human user either approves
+    or rejects the data.
+
+    **How it works:**
+    - The input data is presented to a human reviewer
+    - The reviewer can approve or reject (and optionally modify the data if editable)
+    - On approval: the data flows out through the `approved_data` output pin
+    - On rejection: the data flows out through the `rejected_data` output pin
+
+    **Important:** The output pins yield the actual data itself, NOT status strings.
+    The approval/rejection decision determines WHICH output pin fires, not the value.
+    You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
+    downstream blocks to the appropriate output pin for each case.
+
+    **Example usage:**
+    - Connect `approved_data` → next step in your workflow (data was approved)
+    - Connect `rejected_data` → error handling or notification (data was rejected)
     """
 
     class Input(BlockSchemaInput):
-        data: Any = SchemaField(description="The data to be reviewed by a human user")
+        data: Any = SchemaField(
+            description="The data to be reviewed by a human user. "
+            "This exact data will be passed through to either approved_data or "
+            "rejected_data output based on the reviewer's decision."
+        )
         name: str = SchemaField(
-            description="A descriptive name for what this data represents",
+            description="A descriptive name for what this data represents. "
+            "This helps the reviewer understand what they are reviewing.",
         )
         editable: bool = SchemaField(
-            description="Whether the human reviewer can edit the data",
+            description="Whether the human reviewer can edit the data before "
+            "approving or rejecting it",
             default=True,
             advanced=True,
         )
 
     class Output(BlockSchemaOutput):
         approved_data: Any = SchemaField(
-            description="The data when approved (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer APPROVES it. "
+            "The value is the actual data itself (not a status string like 'APPROVED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'approved' workflow path."
         )
         rejected_data: Any = SchemaField(
-            description="The data when rejected (may be modified by reviewer)"
+            description="Outputs the input data when the reviewer REJECTS it. "
+            "The value is the actual data itself (not a status string like 'REJECTED'). "
+            "If the reviewer edited the data, this contains the modified version. "
+            "Connect downstream blocks here for the 'rejected' workflow path."
         )
         review_message: str = SchemaField(
-            description="Any message provided by the reviewer", default=""
+            description="Optional message provided by the reviewer explaining their "
+            "decision. Only outputs when the reviewer provides a message; "
+            "this pin does not fire if no message was given.",
+            default="",
         )
 
     def __init__(self):
         super().__init__(
             id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
-            description="Pause execution and wait for human approval or modification of data",
+            description="Pause execution for human review. Data flows through "
+            "approved_data or rejected_data output based on the reviewer's decision. "
+            "Outputs contain the actual data, not status strings.",
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
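To make the documented contract concrete: the block's outputs carry the reviewed data itself, and the approve/reject decision only selects which pin fires. The following is a standalone sketch of that behavior, not the block's actual run() implementation (which is outside this hunk); the function name here is illustrative:

from typing import Any, AsyncIterator, Tuple


async def emit_review_outputs(
    data: Any, approved: bool, message: str = ""
) -> AsyncIterator[Tuple[str, Any]]:
    """Illustrative sketch of the HITL output contract described above."""
    if approved:
        # The value is the data itself, never the string "APPROVED".
        yield "approved_data", data
    else:
        # Likewise the data itself, never the string "REJECTED".
        yield "rejected_data", data
    if message:
        # review_message only fires when the reviewer left a note.
        yield "review_message", message

Downstream blocks therefore branch by wiring to approved_data or rejected_data; they never need to compare the emitted value against a status string.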
@@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional
 
 from pydantic import SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -2,9 +2,7 @@ import copy
 from datetime import date, time
 from typing import Any, Optional
 
-# Import for Google Drive file input block
-from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -12,6 +10,9 @@ from backend.data.block import (
     BlockSchemaInput,
     BlockType,
 )
+
+# Import for Google Drive file input block
+from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file

@@ -1,6 +1,6 @@
 from typing import Any
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,

@@ -1,15 +1,15 @@
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,15 +1,15 @@
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -3,18 +3,18 @@ from urllib.parse import quote
 
 from typing_extensions import TypedDict
 
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 

@@ -1,5 +1,12 @@
 from urllib.parse import quote
 
+from backend.blocks._base import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.blocks.jina._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,

@@ -8,15 +15,9 @@ from backend.blocks.jina._auth import (
     JinaCredentialsInput,
 )
 from backend.blocks.search import GetRequest
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.data.model import SchemaField
 from backend.util.exceptions import BlockExecutionError
+from backend.util.request import HTTPClientError, HTTPServerError, validate_url
 
 
 class SearchTheWebBlock(Block, GetRequest):
@@ -110,7 +111,12 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
         self, input_data: Input, *, credentials: JinaCredentials, **kwargs
     ) -> BlockOutput:
         if input_data.raw_content:
-            url = input_data.url
+            try:
+                parsed_url, _, _ = await validate_url(input_data.url, [])
+                url = parsed_url.geturl()
+            except ValueError as e:
+                yield "error", f"Invalid URL: {e}"
+                return
             headers = {}
         else:
             url = f"https://r.jina.ai/{input_data.url}"

@@ -119,5 +125,20 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
                 "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
             }
 
-        content = await self.get_request(url, json=False, headers=headers)
+        try:
+            content = await self.get_request(url, json=False, headers=headers)
+        except HTTPClientError as e:
+            yield "error", f"Client error ({e.status_code}) fetching {input_data.url}: {e}"
+            return
+        except HTTPServerError as e:
+            yield "error", f"Server error ({e.status_code}) fetching {input_data.url}: {e}"
+            return
+        except Exception as e:
+            yield "error", f"Failed to fetch {input_data.url}: {e}"
+            return
+
+        if not content:
+            yield "error", f"No content returned for {input_data.url}"
+            return
+
         yield "content", content
@@ -15,7 +15,7 @@ from anthropic.types import ToolParam
 from groq import AsyncGroq
 from pydantic import BaseModel, SecretStr
 
-from backend.data.block import (
+from backend.blocks._base import (
     Block,
     BlockCategory,
     BlockOutput,
Some files were not shown because too many files have changed in this diff.