Compare commits

10 Commits

Author SHA1 Message Date
Bentlybro
0b2fb655bc style: black formatting 2026-02-12 12:46:20 +00:00
Bentlybro
99f8bf5f0c fix: skip binary file if stat fails to prevent OOM
If the stat command fails (file deleted, permissions issue, etc.),
we now skip the file instead of reading it with an unknown size.
This prevents potential OOM crashes from large files whose size
could not be verified.
2026-02-12 12:32:13 +00:00
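The failure handling above amounts to: if the size probe fails, do not read at all. A minimal sketch of such a guard, assuming an E2B-style sandbox whose commands.run() returns an object with stdout; the helper name and the exact stat invocation are illustrative, not the actual block code:

import logging

logger = logging.getLogger(__name__)

def file_size_or_none(sandbox, path: str) -> int | None:
    """Return the file's size in bytes, or None if `stat` fails."""
    try:
        result = sandbox.commands.run(f"stat -c %s '{path}'")
        return int(result.stdout.strip())
    except Exception as exc:
        # File deleted, permission denied, etc. -- skip the file rather
        # than reading something of unknown (possibly huge) size.
        logger.warning("Skipping %s: stat failed (%s)", path, exc)
        return None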
Bentlybro
3f76f1318b docs: Fix llm.md to match exact schema description 2026-02-12 12:25:29 +00:00
Bentlybro
b011289dd2 fix: Address code review feedback
- Add 50MB size guard for binary files to prevent OOM
- Extract helper function for path resolution (DRY)
- Add logging for file extraction errors
- Remove dead 'Dockerfile' entry from text_extensions
2026-02-12 12:02:45 +00:00
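For context, the 50MB guard and the extracted path-resolution helper from this review round could look roughly like this; the constant and function names are assumptions, not the real identifiers:

MAX_BINARY_FILE_SIZE = 50 * 1024 * 1024  # 50 MB cap against OOM

def resolve_sandbox_path(workspace_dir: str, raw_path: str) -> str:
    """Single place to resolve output paths (the DRY extraction above)."""
    return raw_path if raw_path.startswith("/") else f"{workspace_dir}/{raw_path}"

# At extraction time (sketch):
# if size > MAX_BINARY_FILE_SIZE:
#     logger.warning("Skipping %s: %d bytes exceeds the 50MB limit", path, size)
#     continue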
Bentlybro
49c2f578b4 docs: Update llm.md for binary file support in Claude Code block 2026-02-12 11:58:35 +00:00
Bentlybro
7150b7768d fix: Make Dockerfile check case-insensitive 2026-02-12 11:53:57 +00:00
Bentlybro
8c95b03636 fix: Update tests and address code review feedback
- Update test fixtures with is_binary and content_base64 fields
- Move .svg to text_extensions (it's XML-based)
- Make extension matching case-insensitive for both text and binary
2026-02-12 11:45:52 +00:00
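The matching change boils down to lowercasing the file suffix before the set-membership test. A hedged sketch, where the extension sets are abbreviated stand-ins for the real ones:

from pathlib import Path

TEXT_EXTENSIONS = {".txt", ".md", ".json", ".svg"}    # .svg is XML-based text
BINARY_EXTENSIONS = {".pdf", ".png", ".jpg", ".zip"}

def classify_file(path: str) -> str | None:
    """Return 'text', 'binary', or None, matching extensions case-insensitively."""
    ext = Path(path).suffix.lower()  # ".PNG" and ".png" behave identically
    if ext in BINARY_EXTENSIONS:
        return "binary"
    if ext in TEXT_EXTENSIONS:
        return "text"
    return None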
Bentlybro
4a8368887f fix: Use format='bytes' for reading binary files from E2B sandbox
Fixes the critical bug where reading binary files would fail because
files.read() defaults to text mode (UTF-8 decoding). Now explicitly
uses format='bytes', which returns a bytearray.
2026-02-12 11:29:43 +00:00
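The fix itself is essentially a one-liner: files.read() decodes the payload as UTF-8 unless told otherwise, so binary content has to be requested explicitly. A sketch, given an open E2B sandbox handle and an illustrative path:

# Before: text mode UTF-8-decodes the payload, corrupting or failing
# on binary data.
# text = sandbox.files.read("/home/user/output/report.pdf")

# After: request raw bytes explicitly; per the commit message this
# returns a bytearray, safe for arbitrary binary data.
data = sandbox.files.read("/home/user/output/report.pdf", format="bytes")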
Bentlybro
d46e5e6b6a docs: Update claude_code.md for binary file support 2026-02-12 11:26:58 +00:00
Bentlybro
4e632bbd60 fix(backend): Extract binary files from ClaudeCodeBlock sandbox
Add support for extracting binary files (PDFs, images, etc.) from the E2B
sandbox in ClaudeCodeBlock.

Changes:
- Add binary_extensions set for common binary file types (.pdf, .png, .jpg, etc.)
- Update FileOutput schema with is_binary and content_base64 fields
- Binary files are read as bytes and base64-encoded before returning
- Text files continue to work as before with is_binary=False

Closes SECRT-1897
2026-02-12 11:23:05 +00:00
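Taken together, the binary extraction path plausibly looks like the sketch below. The is_binary and content_base64 field names come from the commit message; the rest of the FileOutput shape is assumed:

import base64
from dataclasses import dataclass

@dataclass
class FileOutput:
    path: str
    content: str = ""            # text content when is_binary is False
    is_binary: bool = False
    content_base64: str = ""     # base64 payload when is_binary is True

def binary_file_output(path: str, raw: bytearray) -> FileOutput:
    """Encode sandbox bytes into a JSON-serializable output record."""
    return FileOutput(
        path=path,
        is_binary=True,
        content_base64=base64.b64encode(bytes(raw)).decode("ascii"),
    )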
244 changed files with 4692 additions and 4711 deletions

View File

@@ -5,13 +5,42 @@
!docs/ !docs/
# Platform - Libs # Platform - Libs
!autogpt_platform/autogpt_libs/ !autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md
# Platform - Backend # Platform - Backend
!autogpt_platform/backend/ !autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
!autogpt_platform/backend/gen_prisma_types_stub.py
# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md
# Platform - Frontend # Platform - Frontend
!autogpt_platform/frontend/ !autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env
# Classic - AutoGPT # Classic - AutoGPT
!classic/original_autogpt/autogpt/ !classic/original_autogpt/autogpt/
@@ -35,38 +64,6 @@
# Classic - Frontend # Classic - Frontend
!classic/frontend/build/web/ !classic/frontend/build/web/
# Explicitly re-ignore unwanted files from whitelisted directories # Explicitly re-ignore some folders
# Note: These patterns MUST come after the whitelist rules to take effect .*
**/__pycache__
# Hidden files and directories (but keep frontend .env files needed for build)
**/.*
!autogpt_platform/frontend/.env
!autogpt_platform/frontend/.env.default
!autogpt_platform/frontend/.env.production
# Python artifacts
**/__pycache__/
**/*.pyc
**/*.pyo
**/.venv/
**/.ruff_cache/
**/.pytest_cache/
**/.coverage
**/htmlcov/
# Node artifacts
**/node_modules/
**/.next/
**/storybook-static/
**/playwright-report/
**/test-results/
# Build artifacts
**/dist/
**/build/
!autogpt_platform/frontend/src/**/build/
**/target/
# Logs and temp files
**/*.log
**/*.tmp

View File

@@ -26,6 +26,7 @@ jobs:
setup: setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
components-changed: ${{ steps.filter.outputs.components }} components-changed: ${{ steps.filter.outputs.components }}
steps: steps:
@@ -40,17 +41,28 @@ jobs:
components: components:
- 'autogpt_platform/frontend/src/components/**' - 'autogpt_platform/frontend/src/components/**'
- name: Enable corepack - name: Set up Node.js
run: corepack enable
- name: Set up Node
uses: actions/setup-node@v6 uses: actions/setup-node@v6
with: with:
node-version: "22.18.0" node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
- name: Install dependencies to populate cache - name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile
lint: lint:
@@ -61,15 +73,22 @@ jobs:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v6
- name: Enable corepack - name: Set up Node.js
run: corepack enable
- name: Set up Node
uses: actions/setup-node@v6 uses: actions/setup-node@v6
with: with:
node-version: "22.18.0" node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies - name: Install dependencies
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile
@@ -92,15 +111,22 @@ jobs:
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Enable corepack - name: Set up Node.js
run: corepack enable
- name: Set up Node
uses: actions/setup-node@v6 uses: actions/setup-node@v6
with: with:
node-version: "22.18.0" node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies - name: Install dependencies
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile
@@ -115,8 +141,10 @@ jobs:
exitOnceUploaded: true exitOnceUploaded: true
e2e_test: e2e_test:
name: end-to-end tests
runs-on: big-boi runs-on: big-boi
needs: setup
strategy:
fail-fast: false
steps: steps:
- name: Checkout repository - name: Checkout repository
@@ -124,11 +152,19 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Set up Platform - Copy default supabase .env - name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Copy default supabase .env
run: | run: |
cp ../.env.default ../.env cp ../.env.default ../.env
- name: Set up Platform - Copy backend .env and set OpenAI API key - name: Copy backend .env and set OpenAI API key
run: | run: |
cp ../backend/.env.default ../backend/.env cp ../backend/.env.default ../backend/.env
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -136,125 +172,77 @@ jobs:
# Used by E2E test data script to generate embeddings for approved store agents # Used by E2E test data script to generate embeddings for approved store agents
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Set up Platform - Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
with:
driver: docker-container
driver-opts: network=host
- name: Set up Platform - Expose GHA cache to docker buildx CLI - name: Cache Docker layers
uses: crazy-max/ghaction-github-runtime@v3
- name: Set up Platform - Build Docker images (with cache)
working-directory: autogpt_platform
run: |
pip install pyyaml
# Resolve extends and generate a flat compose file that bake can understand
docker compose -f docker-compose.yml config > docker-compose.resolved.yml
# Add cache configuration to the resolved compose file
python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
--source docker-compose.resolved.yml \
--cache-from "type=gha" \
--cache-to "type=gha,mode=max" \
--backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
--frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
--git-ref "${{ github.ref }}"
# Build with bake using the resolved compose file (now includes cache config)
docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
env:
NEXT_PUBLIC_PW_TEST: true
- name: Set up tests - Cache E2E test data
id: e2e-data-cache
uses: actions/cache@v5 uses: actions/cache@v5
with: with:
path: /tmp/e2e_test_data.sql path: /tmp/.buildx-cache
key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }} key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
restore-keys: |
${{ runner.os }}-buildx-frontend-test-
- name: Set up Platform - Start Supabase DB + Auth - name: Run docker compose
run: | run: |
docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
echo "Waiting for auth service to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
- name: Set up Platform - Run migrations
run: |
echo "Running migrations..."
docker compose -f ../docker-compose.resolved.yml run --rm migrate
echo "✅ Migrations completed"
env: env:
NEXT_PUBLIC_PW_TEST: true DOCKER_BUILDKIT: 1
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
- name: Set up tests - Load cached E2E test data - name: Move cache
if: steps.e2e-data-cache.outputs.cache-hit == 'true'
run: | run: |
echo "✅ Found cached E2E test data, restoring..." rm -rf /tmp/.buildx-cache
{ if [ -d "/tmp/.buildx-cache-new" ]; then
echo "SET session_replication_role = 'replica';" mv /tmp/.buildx-cache-new /tmp/.buildx-cache
cat /tmp/e2e_test_data.sql fi
echo "SET session_replication_role = 'origin';"
} | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
# Refresh materialized views after restore
docker compose -f ../docker-compose.resolved.yml exec -T db \
psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
echo "✅ E2E test data restored from cache" - name: Wait for services to be ready
- name: Set up Platform - Start (all other services)
run: | run: |
docker compose -f ../docker-compose.resolved.yml up -d --no-build
echo "Waiting for rest_server to be ready..." echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..." timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
env: echo "Waiting for database to be ready..."
NEXT_PUBLIC_PW_TEST: true timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
- name: Set up tests - Create E2E test data - name: Create E2E test data
if: steps.e2e-data-cache.outputs.cache-hit != 'true'
run: | run: |
echo "Creating E2E test data..." echo "Creating E2E test data..."
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py # First try to run the script from inside the container
docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || { if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
echo "✅ Found e2e_test_data.py in container, running it..."
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!" echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1 exit 1
} }
else
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
# Copy the script into the container and run it
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
echo "❌ Failed to copy script to container"
exit 1
}
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
fi
# Dump auth.users + platform schema for cache (two separate dumps) - name: Restore dependencies cache
echo "Dumping database for cache..." uses: actions/cache@v5
{
docker compose -f ../docker-compose.resolved.yml exec -T db \
pg_dump -U postgres --data-only --column-inserts \
--table='auth.users' postgres
docker compose -f ../docker-compose.resolved.yml exec -T db \
pg_dump -U postgres --data-only --column-inserts \
--schema=platform \
--exclude-table='platform._prisma_migrations' \
--exclude-table='platform.apscheduler_jobs' \
--exclude-table='platform.apscheduler_jobs_batched_notifications' \
postgres
} > /tmp/e2e_test_data.sql
echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
- name: Set up tests - Enable corepack
run: corepack enable
- name: Set up tests - Set up Node
uses: actions/setup-node@v6
with: with:
node-version: "22.18.0" path: ~/.pnpm-store
cache: "pnpm" key: ${{ needs.setup.outputs.cache-key }}
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Set up tests - Install dependencies - name: Install dependencies
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile
- name: Set up tests - Install browser 'chromium' - name: Install Browser 'chromium'
run: pnpm playwright install --with-deps chromium run: pnpm playwright install --with-deps chromium
- name: Run Playwright tests - name: Run Playwright tests
@@ -281,7 +269,7 @@ jobs:
- name: Print Final Docker Compose logs - name: Print Final Docker Compose logs
if: always() if: always()
run: docker compose -f ../docker-compose.resolved.yml logs run: docker compose -f ../docker-compose.yml logs
integration_test: integration_test:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -293,15 +281,22 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Enable corepack - name: Set up Node.js
run: corepack enable
- name: Set up Node
uses: actions/setup-node@v6 uses: actions/setup-node@v6
with: with:
node-version: "22.18.0" node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies - name: Install dependencies
run: pnpm install --frozen-lockfile run: pnpm install --frozen-lockfile

View File

@@ -1,195 +0,0 @@
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""
import argparse

import yaml

DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue
        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)
        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue
        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)
        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name
        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []
        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue
        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123
        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")
            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")
            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")
        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)
        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f"  - {svc}")
        print(f"    image: {svc_config.get('image', 'N/A')}")
        print(f"    cache_from: {cache_from_list}")
        print(f"    cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()

View File

@@ -45,11 +45,6 @@ AutoGPT Platform is a monorepo containing:
- Backend/Frontend services use YAML anchors for consistent configuration - Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
### Branching Strategy
- **`dev`** is the main development branch. All PRs should target `dev`.
- **`master`** is the production branch. Only used for production releases.
### Creating Pull Requests ### Creating Pull Requests
- Create the PR against the `dev` branch of the repository. - Create the PR against the `dev` branch of the repository.

View File

@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
[[package]] [[package]]
name = "cryptography" name = "cryptography"
version = "46.0.5" version = "46.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.8" python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"}, {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"}, {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"}, {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"}, {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
{file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"}, {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
{file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"}, {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
{file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"}, {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"}, {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"}, {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"}, {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
{file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"}, {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
{file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"}, {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
{file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"}, {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"}, {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"}, {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"}, {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
{file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"}, {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
{file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"}, {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
{file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"}, {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
{file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"}, {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
] ]
[package.dependencies] [package.dependencies]
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"] sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"] ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"] test-randomorder = ["pytest-randomly"]
[[package]] [[package]]
@@ -570,25 +570,24 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]
[[package]] [[package]]
name = "fastapi" name = "fastapi"
version = "0.128.7" version = "0.128.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"}, {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"}, {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
] ]
[package.dependencies] [package.dependencies]
annotated-doc = ">=0.0.2" annotated-doc = ">=0.0.2"
pydantic = ">=2.7.0" pydantic = ">=2.7.0"
starlette = ">=0.40.0,<1.0.0" starlette = ">=0.40.0,<0.51.0"
typing-extensions = ">=4.8.0" typing-extensions = ">=4.8.0"
typing-inspection = ">=0.4.2"
[package.extras] [package.extras]
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"] all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
@@ -1063,14 +1062,14 @@ urllib3 = ">=1.26.0,<3"
[[package]] [[package]]
name = "launchdarkly-server-sdk" name = "launchdarkly-server-sdk"
version = "9.15.0" version = "9.14.1"
description = "LaunchDarkly SDK for Python" description = "LaunchDarkly SDK for Python"
optional = false optional = false
python-versions = ">=3.10" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"}, {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"}, {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
] ]
[package.dependencies] [package.dependencies]
@@ -1479,14 +1478,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
[[package]] [[package]]
name = "postgrest" name = "postgrest"
version = "2.28.0" version = "2.27.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"}, {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"}, {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
] ]
[package.dependencies] [package.dependencies]
@@ -2249,14 +2248,14 @@ cli = ["click (>=5.0)"]
[[package]] [[package]]
name = "realtime" name = "realtime"
version = "2.28.0" version = "2.27.2"
description = "" description = ""
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"}, {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"}, {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
] ]
[package.dependencies] [package.dependencies]
@@ -2437,14 +2436,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
[[package]] [[package]]
name = "storage3" name = "storage3"
version = "2.28.0" version = "2.27.2"
description = "Supabase Storage client for Python." description = "Supabase Storage client for Python."
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"}, {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"}, {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
] ]
[package.dependencies] [package.dependencies]
@@ -2488,35 +2487,35 @@ python-dateutil = ">=2.6.0"
[[package]] [[package]]
name = "supabase" name = "supabase"
version = "2.28.0" version = "2.27.2"
description = "Supabase client for Python." description = "Supabase client for Python."
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"}, {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"}, {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
] ]
[package.dependencies] [package.dependencies]
httpx = ">=0.26,<0.29" httpx = ">=0.26,<0.29"
postgrest = "2.28.0" postgrest = "2.27.2"
realtime = "2.28.0" realtime = "2.27.2"
storage3 = "2.28.0" storage3 = "2.27.2"
supabase-auth = "2.28.0" supabase-auth = "2.27.2"
supabase-functions = "2.28.0" supabase-functions = "2.27.2"
yarl = ">=1.22.0" yarl = ">=1.22.0"
[[package]] [[package]]
name = "supabase-auth" name = "supabase-auth"
version = "2.28.0" version = "2.27.2"
description = "Python Client Library for Supabase Auth" description = "Python Client Library for Supabase Auth"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"}, {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"}, {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
] ]
[package.dependencies] [package.dependencies]
@@ -2526,14 +2525,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
[[package]] [[package]]
name = "supabase-functions" name = "supabase-functions"
version = "2.28.0" version = "2.27.2"
description = "Library for Supabase Functions" description = "Library for Supabase Functions"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"}, {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"}, {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
] ]
[package.dependencies] [package.dependencies]
@@ -2912,4 +2911,4 @@ type = ["pytest-mypy"]
[metadata] [metadata]
lock-version = "2.1" lock-version = "2.1"
python-versions = ">=3.10,<4.0" python-versions = ">=3.10,<4.0"
content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb" content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"

View File

@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
colorama = "^0.4.6" colorama = "^0.4.6"
cryptography = "^46.0" cryptography = "^46.0"
expiringdict = "^1.2.2" expiringdict = "^1.2.2"
fastapi = "^0.128.7" fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0" google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.15.0" launchdarkly-server-sdk = "^9.14.1"
pydantic = "^2.12.5" pydantic = "^2.12.5"
pydantic-settings = "^2.12.0" pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] } pyjwt = { version = "^2.11.0", extras = ["crypto"] }
redis = "^6.2.0" redis = "^6.2.0"
supabase = "^2.28.0" supabase = "^2.27.2"
uvicorn = "^0.40.0" uvicorn = "^0.40.0"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]

View File

@@ -1,5 +1,3 @@
# ============================ DEPENDENCY BUILDER ============================ #
FROM debian:13-slim AS builder FROM debian:13-slim AS builder
# Set environment variables # Set environment variables
@@ -53,9 +51,7 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./ COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
RUN poetry run prisma generate && poetry run gen-prisma-stub RUN poetry run prisma generate && poetry run gen-prisma-stub
# ============================== BACKEND SERVER ============================== # FROM debian:13-slim AS server_dependencies
FROM debian:13-slim AS server
WORKDIR /app WORKDIR /app
@@ -67,14 +63,15 @@ ENV POETRY_HOME=/opt/poetry \
ENV PATH=/opt/poetry/bin:$PATH ENV PATH=/opt/poetry/bin:$PATH
# Install Python, FFmpeg, and ImageMagick (required for video processing blocks) # Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc. RUN apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y --no-install-recommends \
python3.13 \ python3.13 \
python3-pip \ python3-pip \
ffmpeg \ ffmpeg \
imagemagick \ imagemagick \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3* COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
# Copy Node.js installation for Prisma # Copy Node.js installation for Prisma
@@ -84,54 +81,30 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
COPY --from=builder /usr/bin/npx /usr/bin/npx COPY --from=builder /usr/bin/npx /usr/bin/npx
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
WORKDIR /app/autogpt_platform/backend
# Copy only the .venv from builder (not the entire /app directory)
# The .venv includes the generated Prisma client
COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH" ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
# Copy dependency files + autogpt_libs (path dependency) RUN mkdir -p /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs RUN mkdir -p /app/autogpt_platform/backend
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
# Copy backend code + docs (for Copilot docs search) COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend ./
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
WORKDIR /app/autogpt_platform/backend
FROM server_dependencies AS migrate
# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
FROM server_dependencies AS server
COPY autogpt_platform/backend /app/autogpt_platform/backend
COPY docs /app/docs COPY docs /app/docs
RUN poetry install --no-ansi --only-root RUN poetry install --no-ansi --only-root
ENV PORT=8000 ENV PORT=8000
CMD ["poetry", "run", "rest"] CMD ["poetry", "run", "rest"]
# =============================== DB MIGRATOR =============================== #
# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
FROM debian:13-slim AS migrate
WORKDIR /app/autogpt_platform/backend
ENV DEBIAN_FRONTEND=noninteractive
# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
RUN apt-get update && apt-get install -y --no-install-recommends \
python3.13 \
python3-pip \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Copy Node.js from builder (needed for Prisma CLI)
COPY --from=builder /usr/bin/node /usr/bin/node
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
COPY --from=builder /usr/bin/npm /usr/bin/npm
# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
# Install prisma-client-py directly (much smaller than copying full venv)
# Quote the version specifier so the shell doesn't treat '>' as output redirection
RUN pip3 install 'prisma>=0.15.0' --break-system-packages
COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
COPY autogpt_platform/backend/migrations ./migrations


@@ -10,7 +10,7 @@ from typing_extensions import TypedDict
import backend.api.features.store.cache as store_cache
import backend.api.features.store.model as store_model
import backend.blocks
import backend.data.block
from backend.api.external.middleware import require_permission
from backend.data import execution as execution_db
from backend.data import graph as graph_db
@@ -67,7 +67,7 @@ async def get_user_info(
dependencies=[Security(require_permission(APIKeyPermission.READ_BLOCK))],
)
async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.blocks.get_blocks().values()]
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@@ -83,7 +83,7 @@ async def execute_graph_block(
require_permission(APIKeyPermission.EXECUTE_BLOCK)
),
) -> CompletedBlockOutput:
obj = backend.blocks.get_block(block_id)
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
if obj.disabled:


@@ -10,15 +10,10 @@ import backend.api.features.library.db as library_db
import backend.api.features.library.model as library_model
import backend.api.features.store.db as store_db
import backend.api.features.store.model as store_model
import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks._base import (
AnyBlockSchema,
BlockCategory,
BlockInfo,
BlockSchema,
BlockType,
)
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
@@ -27,7 +22,7 @@ from backend.util.models import Pagination
from .model import (
BlockCategoryResponse,
BlockResponse,
BlockTypeFilter,
BlockType,
CountResponse,
FilterType,
Provider,
@@ -93,7 +88,7 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
def get_blocks(
*,
category: str | None = None,
type: BlockTypeFilter | None = None,
type: BlockType | None = None,
provider: ProviderName | None = None,
page: int = 1,
page_size: int = 50,
@@ -674,9 +669,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
for block_type in load_all_blocks().values():
block: AnyBlockSchema = block_type()
if block.disabled or block.block_type in (
BlockType.INPUT,
BlockType.OUTPUT,
BlockType.AGENT,
backend.data.block.BlockType.INPUT,
backend.data.block.BlockType.OUTPUT,
backend.data.block.BlockType.AGENT,
):
continue
# Find the execution count for this block


@@ -4,7 +4,7 @@ from pydantic import BaseModel
import backend.api.features.library.model as library_model
import backend.api.features.store.model as store_model
from backend.blocks._base import BlockInfo
from backend.data.block import BlockInfo
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination
@@ -15,7 +15,7 @@ FilterType = Literal[
"my_agents",
]
BlockTypeFilter = Literal["all", "input", "action", "output"]
BlockType = Literal["all", "input", "action", "output"]
class SearchEntry(BaseModel):


@@ -88,7 +88,7 @@ async def get_block_categories(
)
async def get_blocks(
category: Annotated[str | None, fastapi.Query()] = None,
type: Annotated[builder_model.BlockTypeFilter | None, fastapi.Query()] = None,
type: Annotated[builder_model.BlockType | None, fastapi.Query()] = None,
provider: Annotated[ProviderName | None, fastapi.Query()] = None,
page: Annotated[int, fastapi.Query()] = 1,
page_size: Annotated[int, fastapi.Query()] = 50,


@@ -24,7 +24,6 @@ from .tools.models import (
AgentPreviewResponse,
AgentSavedResponse,
AgentsFoundResponse,
BlockDetailsResponse,
BlockListResponse,
BlockOutputResponse,
ClarificationNeededResponse,
@@ -972,7 +971,6 @@ ToolResponseUnion = (
| AgentSavedResponse
| ClarificationNeededResponse
| BlockListResponse
| BlockDetailsResponse
| BlockOutputResponse
| DocSearchResultsResponse
| DocPageResponse


@@ -1,154 +0,0 @@
"""Dummy Agent Generator for testing.
Returns mock responses matching the format expected from the external service.
Enable via AGENTGENERATOR_USE_DUMMY=true in settings.
WARNING: This is for testing only. Do not use in production.
"""
import asyncio
import logging
import uuid
from typing import Any
logger = logging.getLogger(__name__)
# Dummy decomposition result (instructions type)
DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
"type": "instructions",
"steps": [
{
"description": "Get input from user",
"action": "input",
"block_name": "AgentInputBlock",
},
{
"description": "Process the input",
"action": "process",
"block_name": "TextFormatterBlock",
},
{
"description": "Return output to user",
"action": "output",
"block_name": "AgentOutputBlock",
},
],
}
# Block IDs from backend/blocks/io.py
AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"
def _generate_dummy_agent_json() -> dict[str, Any]:
"""Generate a minimal valid agent JSON for testing."""
input_node_id = str(uuid.uuid4())
output_node_id = str(uuid.uuid4())
return {
"id": str(uuid.uuid4()),
"version": 1,
"is_active": True,
"name": "Dummy Test Agent",
"description": "A dummy agent generated for testing purposes",
"nodes": [
{
"id": input_node_id,
"block_id": AGENT_INPUT_BLOCK_ID,
"input_default": {
"name": "input",
"title": "Input",
"description": "Enter your input",
"placeholder_values": [],
},
"metadata": {"position": {"x": 0, "y": 0}},
},
{
"id": output_node_id,
"block_id": AGENT_OUTPUT_BLOCK_ID,
"input_default": {
"name": "output",
"title": "Output",
"description": "Agent output",
"format": "{output}",
},
"metadata": {"position": {"x": 400, "y": 0}},
},
],
"links": [
{
"id": str(uuid.uuid4()),
"source_id": input_node_id,
"sink_id": output_node_id,
"source_name": "result",
"sink_name": "value",
"is_static": False,
},
],
}
async def decompose_goal_dummy(
description: str,
context: str = "",
library_agents: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
"""Return dummy decomposition result."""
logger.info("Using dummy agent generator for decompose_goal")
return DUMMY_DECOMPOSITION_RESULT.copy()
async def generate_agent_dummy(
instructions: dict[str, Any],
library_agents: list[dict[str, Any]] | None = None,
operation_id: str | None = None,
task_id: str | None = None,
) -> dict[str, Any]:
"""Return dummy agent JSON after a simulated delay."""
logger.info("Using dummy agent generator for generate_agent (30s delay)")
await asyncio.sleep(30)
return _generate_dummy_agent_json()
async def generate_agent_patch_dummy(
update_request: str,
current_agent: dict[str, Any],
library_agents: list[dict[str, Any]] | None = None,
operation_id: str | None = None,
task_id: str | None = None,
) -> dict[str, Any]:
"""Return dummy patched agent (returns the current agent with updated description)."""
logger.info("Using dummy agent generator for generate_agent_patch")
patched = current_agent.copy()
patched["description"] = (
f"{current_agent.get('description', '')} (updated: {update_request})"
)
return patched
async def customize_template_dummy(
template_agent: dict[str, Any],
modification_request: str,
context: str = "",
) -> dict[str, Any]:
"""Return dummy customized template (returns template with updated description)."""
logger.info("Using dummy agent generator for customize_template")
customized = template_agent.copy()
customized["description"] = (
f"{template_agent.get('description', '')} (customized: {modification_request})"
)
return customized
async def get_blocks_dummy() -> list[dict[str, Any]]:
"""Return dummy blocks list."""
logger.info("Using dummy agent generator for get_blocks")
return [
{"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
{"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
]
async def health_check_dummy() -> bool:
"""Always returns healthy for dummy service."""
return True
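For orientation, a minimal usage sketch of the dummy generator above. The import path is hypothetical (the module is only ever referenced as ".dummy" in this diff); the asserted structure follows directly from DUMMY_DECOMPOSITION_RESULT, which decompose_goal_dummy returns regardless of its arguments.

import asyncio

# Hypothetical import path - the dummy module's package is not named in this hunk.
from backend.api.features.chat.agent_generator.dummy import decompose_goal_dummy

# decompose_goal_dummy ignores its inputs and returns a copy of
# DUMMY_DECOMPOSITION_RESULT, so the shape below is deterministic.
result = asyncio.run(decompose_goal_dummy("Summarize my inbox"))
assert result["type"] == "instructions"
assert [step["block_name"] for step in result["steps"]] == [
    "AgentInputBlock",
    "TextFormatterBlock",
    "AgentOutputBlock",
]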


@@ -12,19 +12,8 @@ import httpx
from backend.util.settings import Settings
from .dummy import (
customize_template_dummy,
decompose_goal_dummy,
generate_agent_dummy,
generate_agent_patch_dummy,
get_blocks_dummy,
health_check_dummy,
)
logger = logging.getLogger(__name__)
_dummy_mode_warned = False
def _create_error_response(
error_message: str,
@@ -101,26 +90,10 @@ def _get_settings() -> Settings:
return _settings
def _is_dummy_mode() -> bool:
"""Check if dummy mode is enabled for testing."""
global _dummy_mode_warned
settings = _get_settings()
is_dummy = bool(settings.config.agentgenerator_use_dummy)
if is_dummy and not _dummy_mode_warned:
logger.warning(
"Agent Generator running in DUMMY MODE - returning mock responses. "
"Do not use in production!"
)
_dummy_mode_warned = True
return is_dummy
def is_external_service_configured() -> bool:
"""Check if external Agent Generator service is configured (or dummy mode)."""
"""Check if external Agent Generator service is configured."""
settings = _get_settings()
return bool(settings.config.agentgenerator_host) or bool(
settings.config.agentgenerator_use_dummy
)
return bool(settings.config.agentgenerator_host)
def _get_base_url() -> str:
@@ -164,9 +137,6 @@ async def decompose_goal_external(
- {"type": "error", "error": "...", "error_type": "..."} on error
Or None on unexpected error
"""
if _is_dummy_mode():
return await decompose_goal_dummy(description, context, library_agents)
client = _get_client()
if context:
@@ -256,11 +226,6 @@ async def generate_agent_external(
Returns:
Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
"""
if _is_dummy_mode():
return await generate_agent_dummy(
instructions, library_agents, operation_id, task_id
)
client = _get_client()
# Build request payload
@@ -332,11 +297,6 @@ async def generate_agent_patch_external(
Returns:
Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
"""
if _is_dummy_mode():
return await generate_agent_patch_dummy(
update_request, current_agent, library_agents, operation_id, task_id
)
client = _get_client()
# Build request payload
@@ -423,11 +383,6 @@ async def customize_template_external(
Returns:
Customized agent JSON, clarifying questions dict, or error dict on error
"""
if _is_dummy_mode():
return await customize_template_dummy(
template_agent, modification_request, context
)
client = _get_client()
request = modification_request
@@ -490,9 +445,6 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
Returns:
List of block info dicts or None on error
"""
if _is_dummy_mode():
return await get_blocks_dummy()
client = _get_client()
try:
@@ -526,9 +478,6 @@ async def health_check() -> bool:
if not is_external_service_configured():
return False
if _is_dummy_mode():
return await health_check_dummy()
client = _get_client()
try:


@@ -7,13 +7,13 @@ from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
from backend.api.features.chat.tools.models import (
BlockInfoSummary,
BlockInputFieldInfo,
BlockListResponse,
ErrorResponse,
NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
from backend.blocks import get_block
from backend.blocks._base import BlockType
from backend.data.block import BlockType, get_block
logger = logging.getLogger(__name__)
@@ -54,8 +54,7 @@
"Blocks are reusable components that perform specific tasks like "
"sending emails, making API calls, processing text, etc. "
"IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
"The response includes each block's id, name, and description. "
"Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
"The response includes each block's id, required_inputs, and input_schema."
)
@property
@@ -124,7 +123,7 @@ class FindBlockTool(BaseTool):
session_id=session_id,
)
# Enrich results with block information
# Enrich results with full block information
blocks: list[BlockInfoSummary] = []
for result in results:
block_id = result["content_id"]
@@ -141,11 +140,65 @@ class FindBlockTool(BaseTool):
):
continue
# Get input/output schemas
input_schema = {}
output_schema = {}
try:
input_schema = block.input_schema.jsonschema()
except Exception as e:
logger.debug(
"Failed to generate input schema for block %s: %s",
block_id,
e,
)
try:
output_schema = block.output_schema.jsonschema()
except Exception as e:
logger.debug(
"Failed to generate output schema for block %s: %s",
block_id,
e,
)
# Get categories from block instance
categories = []
if hasattr(block, "categories") and block.categories:
categories = [cat.value for cat in block.categories]
# Extract required inputs for easier use
required_inputs: list[BlockInputFieldInfo] = []
if input_schema:
properties = input_schema.get("properties", {})
required_fields = set(input_schema.get("required", []))
# Get credential field names to exclude from required inputs
credentials_fields = set(
block.input_schema.get_credentials_fields().keys()
)
for field_name, field_schema in properties.items():
# Skip credential fields - they're handled separately
if field_name in credentials_fields:
continue
required_inputs.append(
BlockInputFieldInfo(
name=field_name,
type=field_schema.get("type", "string"),
description=field_schema.get("description", ""),
required=field_name in required_fields,
default=field_schema.get("default"),
)
)
blocks.append(
BlockInfoSummary(
id=block_id,
name=block.name,
description=block.description or "",
categories=categories,
input_schema=input_schema,
output_schema=output_schema,
required_inputs=required_inputs,
)
)
@@ -174,7 +227,8 @@ class FindBlockTool(BaseTool):
return BlockListResponse(
message=(
f"Found {len(blocks)} block(s) matching '{query}'. "
"To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
"To execute a block, use run_block with the block's 'id' field "
"and provide 'input_data' matching the block's input_schema."
),
blocks=blocks,
count=len(blocks),
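Taken together, the two sides of this hunk encode different tool contracts: one returns only id/name/description and expects a no-input run_block call to discover the schema, while the enriched side ships required_inputs and input_schema inline so run_block can be called once. A rough sketch of the one-shot flow under the schema-inline contract (the shapes follow BlockInfoSummary as defined in this diff; the concrete ids and values are hypothetical):

# Step 1: find_block returns candidates enriched with schema information.
found_blocks = [
    {
        "id": "http-block-id",  # hypothetical id - real ids come from find_block
        "name": "Send Web Request",
        "required_inputs": [
            {"name": "url", "type": "string", "required": True, "default": None},
            {"name": "method", "type": "string", "required": True, "default": None},
        ],
    }
]

# Step 2: run_block is called once with the discovered id and input_data
# covering every required non-credential field - no schema round-trip needed.
run_block_call = {
    "block_id": found_blocks[0]["id"],
    "input_data": {"url": "https://example.com", "method": "GET"},
}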


@@ -10,7 +10,7 @@ from backend.api.features.chat.tools.find_block import (
FindBlockTool,
)
from backend.api.features.chat.tools.models import BlockListResponse
from backend.blocks._base import BlockType
from backend.data.block import BlockType
from ._test_data import make_session
@@ -18,13 +18,7 @@ _TEST_USER_ID = "test-user-find-block"
def make_mock_block(
block_id: str,
name: str,
block_type: BlockType,
disabled: bool = False,
input_schema: dict | None = None,
output_schema: dict | None = None,
credentials_fields: dict | None = None,
block_id: str, name: str, block_type: BlockType, disabled: bool = False
):
"""Create a mock block for testing."""
mock = MagicMock()
@@ -34,13 +28,10 @@ def make_mock_block(
mock.block_type = block_type
mock.disabled = disabled
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = input_schema or {
"properties": {},
"required": [],
}
mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
mock.input_schema.get_credentials_fields.return_value = {}
mock.output_schema = MagicMock()
mock.output_schema.jsonschema.return_value = output_schema or {}
mock.output_schema.jsonschema.return_value = {}
mock.categories = []
return mock
@@ -146,241 +137,3 @@ class TestFindBlockFiltering:
assert isinstance(response, BlockListResponse)
assert len(response.blocks) == 1
assert response.blocks[0].id == "normal-block-id"
@pytest.mark.asyncio(loop_scope="session")
async def test_response_size_average_chars_per_block(self):
"""Measure average chars per block in the serialized response."""
session = make_session(user_id=_TEST_USER_ID)
# Realistic block definitions modeled after real blocks
block_defs = [
{
"id": "http-block-id",
"name": "Send Web Request",
"input_schema": {
"properties": {
"url": {
"type": "string",
"description": "The URL to send the request to",
},
"method": {
"type": "string",
"description": "The HTTP method to use",
},
"headers": {
"type": "object",
"description": "Headers to include in the request",
},
"json_format": {
"type": "boolean",
"description": "If true, send the body as JSON",
},
"body": {
"type": "object",
"description": "Form/JSON body payload",
},
"credentials": {
"type": "object",
"description": "HTTP credentials",
},
},
"required": ["url", "method"],
},
"output_schema": {
"properties": {
"response": {
"type": "object",
"description": "The response from the server",
},
"client_error": {
"type": "object",
"description": "Errors on 4xx status codes",
},
"server_error": {
"type": "object",
"description": "Errors on 5xx status codes",
},
"error": {
"type": "string",
"description": "Errors for all other exceptions",
},
},
},
"credentials_fields": {"credentials": True},
},
{
"id": "email-block-id",
"name": "Send Email",
"input_schema": {
"properties": {
"to_email": {
"type": "string",
"description": "Recipient email address",
},
"subject": {
"type": "string",
"description": "Subject of the email",
},
"body": {
"type": "string",
"description": "Body of the email",
},
"config": {
"type": "object",
"description": "SMTP Config",
},
"credentials": {
"type": "object",
"description": "SMTP credentials",
},
},
"required": ["to_email", "subject", "body", "credentials"],
},
"output_schema": {
"properties": {
"status": {
"type": "string",
"description": "Status of the email sending operation",
},
"error": {
"type": "string",
"description": "Error message if sending failed",
},
},
},
"credentials_fields": {"credentials": True},
},
{
"id": "claude-code-block-id",
"name": "Claude Code",
"input_schema": {
"properties": {
"e2b_credentials": {
"type": "object",
"description": "API key for E2B platform",
},
"anthropic_credentials": {
"type": "object",
"description": "API key for Anthropic",
},
"prompt": {
"type": "string",
"description": "Task or instruction for Claude Code",
},
"timeout": {
"type": "integer",
"description": "Sandbox timeout in seconds",
},
"setup_commands": {
"type": "array",
"description": "Shell commands to run before execution",
},
"working_directory": {
"type": "string",
"description": "Working directory for Claude Code",
},
"session_id": {
"type": "string",
"description": "Session ID to resume a conversation",
},
"sandbox_id": {
"type": "string",
"description": "Sandbox ID to reconnect to",
},
"conversation_history": {
"type": "string",
"description": "Previous conversation history",
},
"dispose_sandbox": {
"type": "boolean",
"description": "Whether to dispose sandbox after execution",
},
},
"required": [
"e2b_credentials",
"anthropic_credentials",
"prompt",
],
},
"output_schema": {
"properties": {
"response": {
"type": "string",
"description": "Output from Claude Code execution",
},
"files": {
"type": "array",
"description": "Files created/modified by Claude Code",
},
"conversation_history": {
"type": "string",
"description": "Full conversation history",
},
"session_id": {
"type": "string",
"description": "Session ID for this conversation",
},
"sandbox_id": {
"type": "string",
"description": "ID of the sandbox instance",
},
"error": {
"type": "string",
"description": "Error message if execution failed",
},
},
},
"credentials_fields": {
"e2b_credentials": True,
"anthropic_credentials": True,
},
},
]
search_results = [
{"content_id": d["id"], "score": 0.9 - i * 0.1}
for i, d in enumerate(block_defs)
]
mock_blocks = {
d["id"]: make_mock_block(
block_id=d["id"],
name=d["name"],
block_type=BlockType.STANDARD,
input_schema=d["input_schema"],
output_schema=d["output_schema"],
credentials_fields=d["credentials_fields"],
)
for d in block_defs
}
with patch(
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
new_callable=AsyncMock,
return_value=(search_results, len(search_results)),
), patch(
"backend.api.features.chat.tools.find_block.get_block",
side_effect=lambda bid: mock_blocks.get(bid),
):
tool = FindBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID, session=session, query="test"
)
assert isinstance(response, BlockListResponse)
assert response.count == len(block_defs)
total_chars = len(response.model_dump_json())
avg_chars = total_chars // response.count
# Print for visibility in test output
print(f"\nTotal response size: {total_chars} chars")
print(f"Number of blocks: {response.count}")
print(f"Average chars per block: {avg_chars}")
# The old response was ~90K for 10 blocks (~9K per block).
# Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
# Now with only id/name/description, we expect ~300 chars per block.
assert avg_chars < 500, (
f"Average chars per block ({avg_chars}) exceeds 500. "
f"Total response: {total_chars} chars for {response.count} blocks."
)


@@ -25,7 +25,6 @@ class ResponseType(str, Enum):
AGENT_SAVED = "agent_saved"
CLARIFICATION_NEEDED = "clarification_needed"
BLOCK_LIST = "block_list"
BLOCK_DETAILS = "block_details"
BLOCK_OUTPUT = "block_output"
DOC_SEARCH_RESULTS = "doc_search_results"
DOC_PAGE = "doc_page"
@@ -335,6 +334,13 @@ class BlockInfoSummary(BaseModel):
id: str
name: str
description: str
categories: list[str]
input_schema: dict[str, Any]
output_schema: dict[str, Any]
required_inputs: list[BlockInputFieldInfo] = Field(
default_factory=list,
description="List of required input fields for this block",
)
class BlockListResponse(ToolResponseBase):
@@ -344,25 +350,10 @@ class BlockListResponse(ToolResponseBase):
blocks: list[BlockInfoSummary]
count: int
query: str
usage_hint: str = Field(
default="To execute a block, call run_block with block_id set to the block's "
"'id' field and input_data containing the required fields from input_schema."
)
class BlockDetails(BaseModel):
"""Detailed block information."""
id: str
name: str
description: str
inputs: dict[str, Any] = {}
outputs: dict[str, Any] = {}
credentials: list[CredentialsMetaInput] = []
class BlockDetailsResponse(ToolResponseBase):
"""Response for block details (first run_block attempt)."""
type: ResponseType = ResponseType.BLOCK_DETAILS
block: BlockDetails
user_authenticated: bool = False
class BlockOutputResponse(ToolResponseBase):
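For concreteness, a hedged sketch of constructing the enriched summary model defined above. The field names come from this diff's BlockInfoSummary and BlockInputFieldInfo definitions; every value is invented for illustration, and the sketch assumes the side of the compare where the enriched fields exist.

from backend.api.features.chat.tools.models import (
    BlockInfoSummary,
    BlockInputFieldInfo,
)

summary = BlockInfoSummary(
    id="http-block-id",  # hypothetical block id
    name="Send Web Request",
    description="Send an HTTP request to a URL",
    categories=["NETWORK"],  # hypothetical category value
    input_schema={"properties": {"url": {"type": "string"}}, "required": ["url"]},
    output_schema={"properties": {"response": {"type": "object"}}},
    required_inputs=[
        BlockInputFieldInfo(
            name="url",
            type="string",
            description="The URL to send the request to",
            required=True,
            default=None,
        )
    ],
)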


@@ -12,8 +12,7 @@ from backend.api.features.chat.tools.find_block import (
COPILOT_EXCLUDED_BLOCK_IDS,
COPILOT_EXCLUDED_BLOCK_TYPES,
)
from backend.blocks import get_block
from backend.blocks._base import AnyBlockSchema
from backend.data.block import AnyBlockSchema, get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.data.workspace import get_or_create_workspace
@@ -23,11 +22,8 @@ from backend.util.exceptions import BlockError
from .base import BaseTool
from .helpers import get_inputs_from_schema
from .models import (
BlockDetails,
BlockDetailsResponse,
BlockOutputResponse,
ErrorResponse,
InputValidationErrorResponse,
SetupInfo,
SetupRequirementsResponse,
ToolResponseBase,
@@ -54,8 +50,8 @@
"Execute a specific block with the provided input data. "
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
"do NOT guess or make up block IDs. "
"On first attempt (without input_data), returns detailed schema showing "
"required inputs and outputs. Then call again with proper input_data to execute."
"Use the 'id' from find_block results and provide input_data "
"matching the block's required_inputs."
)
@property
@@ -70,19 +66,11 @@
"NEVER guess this - always get it from find_block first."
),
},
"block_name": {
"type": "string",
"description": (
"The block's human-readable name from find_block results. "
"Used for display purposes in the UI."
),
},
"input_data": {
"type": "object",
"description": (
"Input values for the block. "
"First call with empty {} to see the block's schema, "
"then call again with proper values to execute."
"Input values for the block. Use the 'required_inputs' field "
"from find_block to see what fields are needed."
),
},
},
@@ -167,34 +155,6 @@ class RunBlockTool(BaseTool):
await self._resolve_block_credentials(user_id, block, input_data)
)
# Get block schemas for details/validation
try:
input_schema: dict[str, Any] = block.input_schema.jsonschema()
except Exception as e:
logger.warning(
"Failed to generate input schema for block %s: %s",
block_id,
e,
)
return ErrorResponse(
message=f"Block '{block.name}' has an invalid input schema",
error=str(e),
session_id=session_id,
)
try:
output_schema: dict[str, Any] = block.output_schema.jsonschema()
except Exception as e:
logger.warning(
"Failed to generate output schema for block %s: %s",
block_id,
e,
)
return ErrorResponse(
message=f"Block '{block.name}' has an invalid output schema",
error=str(e),
session_id=session_id,
)
if missing_credentials:
# Return setup requirements response with missing credentials
credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -227,53 +187,6 @@ class RunBlockTool(BaseTool):
graph_version=None,
)
# Check if this is a first attempt (required inputs missing)
# Return block details so user can see what inputs are needed
credentials_fields = set(block.input_schema.get_credentials_fields().keys())
required_keys = set(input_schema.get("required", []))
required_non_credential_keys = required_keys - credentials_fields
provided_input_keys = set(input_data.keys()) - credentials_fields
# Check for unknown input fields
valid_fields = (
set(input_schema.get("properties", {}).keys()) - credentials_fields
)
unrecognized_fields = provided_input_keys - valid_fields
if unrecognized_fields:
return InputValidationErrorResponse(
message=(
f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
f"Block was not executed. Please use the correct field names from the schema."
),
session_id=session_id,
unrecognized_fields=sorted(unrecognized_fields),
inputs=input_schema,
)
# Show details when not all required non-credential inputs are provided
if not (required_non_credential_keys <= provided_input_keys):
# Get credentials info for the response
credentials_meta = []
for field_name, cred_meta in matched_credentials.items():
credentials_meta.append(cred_meta)
return BlockDetailsResponse(
message=(
f"Block '{block.name}' details. "
"Provide input_data matching the inputs schema to execute the block."
),
session_id=session_id,
block=BlockDetails(
id=block_id,
name=block.name,
description=block.description or "",
inputs=input_schema,
outputs=output_schema,
credentials=credentials_meta,
),
user_authenticated=True,
)
try:
# Get or create user's workspace for CoPilot file operations
workspace = await get_or_create_workspace(user_id)
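The unknown-field check that appears on one side of this hunk boils down to set arithmetic over the block's JSON schema: provided keys that are neither schema properties nor credential fields are rejected before execution rather than silently ignored. A standalone sketch of that check (the function name and sample schema are illustrative, not from the codebase):

def find_unrecognized_fields(
    input_schema: dict,
    input_data: dict,
    credentials_fields: set[str],
) -> set[str]:
    # Valid inputs are the schema's properties minus credential fields,
    # which are matched and injected separately.
    valid_fields = set(input_schema.get("properties", {}).keys()) - credentials_fields
    provided_keys = set(input_data.keys()) - credentials_fields
    return provided_keys - valid_fields

schema = {"properties": {"prompt": {}, "model": {}}, "required": ["prompt"]}
# 'LLM_Model' is not a schema property, so it is flagged instead of ignored.
assert find_unrecognized_fields(
    schema, {"prompt": "hi", "LLM_Model": "x"}, set()
) == {"LLM_Model"}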


@@ -1,17 +1,12 @@
"""Tests for block execution guards and input validation in RunBlockTool."""
"""Tests for block execution guards in RunBlockTool."""
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import MagicMock, patch
import pytest
from backend.api.features.chat.tools.models import (
BlockDetailsResponse,
BlockOutputResponse,
ErrorResponse,
InputValidationErrorResponse,
)
from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType
from backend.data.block import BlockType
from ._test_data import make_session
@@ -33,39 +28,6 @@ def make_mock_block(
return mock
def make_mock_block_with_schema(
block_id: str,
name: str,
input_properties: dict,
required_fields: list[str],
output_properties: dict | None = None,
):
"""Create a mock block with a defined input/output schema for validation tests."""
mock = MagicMock()
mock.id = block_id
mock.name = name
mock.block_type = BlockType.STANDARD
mock.disabled = False
mock.description = f"Test block: {name}"
input_schema = {
"properties": input_properties,
"required": required_fields,
}
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = input_schema
mock.input_schema.get_credentials_fields_info.return_value = {}
mock.input_schema.get_credentials_fields.return_value = {}
output_schema = {
"properties": output_properties or {"result": {"type": "string"}},
}
mock.output_schema = MagicMock()
mock.output_schema.jsonschema.return_value = output_schema
return mock
class TestRunBlockFiltering:
"""Tests for block execution guards in RunBlockTool."""
@@ -142,221 +104,3 @@ class TestRunBlockFiltering:
# (may be other errors like missing credentials, but not the exclusion guard)
if isinstance(response, ErrorResponse):
assert "cannot be run directly in CoPilot" not in response.message
class TestRunBlockInputValidation:
"""Tests for input field validation in RunBlockTool.
run_block rejects unknown input field names with InputValidationErrorResponse,
preventing silent failures where incorrect keys would be ignored and the block
would execute with default values instead of the caller's intended values.
"""
@pytest.mark.asyncio(loop_scope="session")
async def test_unknown_input_fields_are_rejected(self):
"""run_block rejects unknown input fields instead of silently ignoring them.
Scenario: The AI Text Generator block has a field called 'model' (for LLM model
selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
instead. The block should reject the request and return the valid schema.
"""
session = make_session(user_id=_TEST_USER_ID)
mock_block = make_mock_block_with_schema(
block_id="ai-text-gen-id",
name="AI Text Generator",
input_properties={
"prompt": {"type": "string", "description": "The prompt to send"},
"model": {
"type": "string",
"description": "The LLM model to use",
"default": "gpt-4o-mini",
},
"sys_prompt": {
"type": "string",
"description": "System prompt",
"default": "",
},
},
required_fields=["prompt"],
output_properties={"response": {"type": "string"}},
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock_block,
):
tool = RunBlockTool()
# Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="ai-text-gen-id",
input_data={
"prompt": "Write a haiku about coding",
"LLM_Model": "claude-opus-4-6", # WRONG KEY - should be 'model'
},
)
assert isinstance(response, InputValidationErrorResponse)
assert "LLM_Model" in response.unrecognized_fields
assert "Block was not executed" in response.message
assert "inputs" in response.model_dump() # valid schema included
@pytest.mark.asyncio(loop_scope="session")
async def test_multiple_wrong_keys_are_all_reported(self):
"""All unrecognized field names are reported in a single error response."""
session = make_session(user_id=_TEST_USER_ID)
mock_block = make_mock_block_with_schema(
block_id="ai-text-gen-id",
name="AI Text Generator",
input_properties={
"prompt": {"type": "string"},
"model": {"type": "string", "default": "gpt-4o-mini"},
"sys_prompt": {"type": "string", "default": ""},
"retry": {"type": "integer", "default": 3},
},
required_fields=["prompt"],
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock_block,
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="ai-text-gen-id",
input_data={
"prompt": "Hello", # correct
"llm_model": "claude-opus-4-6", # WRONG - should be 'model'
"system_prompt": "Be helpful", # WRONG - should be 'sys_prompt'
"retries": 5, # WRONG - should be 'retry'
},
)
assert isinstance(response, InputValidationErrorResponse)
assert set(response.unrecognized_fields) == {
"llm_model",
"system_prompt",
"retries",
}
assert "Block was not executed" in response.message
@pytest.mark.asyncio(loop_scope="session")
async def test_unknown_fields_rejected_even_with_missing_required(self):
"""Unknown fields are caught before the missing-required-fields check."""
session = make_session(user_id=_TEST_USER_ID)
mock_block = make_mock_block_with_schema(
block_id="ai-text-gen-id",
name="AI Text Generator",
input_properties={
"prompt": {"type": "string"},
"model": {"type": "string", "default": "gpt-4o-mini"},
},
required_fields=["prompt"],
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock_block,
):
tool = RunBlockTool()
# 'prompt' is missing AND 'LLM_Model' is an unknown field
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="ai-text-gen-id",
input_data={
"LLM_Model": "claude-opus-4-6", # wrong key, and 'prompt' is missing
},
)
# Unknown fields are caught first
assert isinstance(response, InputValidationErrorResponse)
assert "LLM_Model" in response.unrecognized_fields
@pytest.mark.asyncio(loop_scope="session")
async def test_correct_inputs_still_execute(self):
"""Correct input field names pass validation and the block executes."""
session = make_session(user_id=_TEST_USER_ID)
mock_block = make_mock_block_with_schema(
block_id="ai-text-gen-id",
name="AI Text Generator",
input_properties={
"prompt": {"type": "string"},
"model": {"type": "string", "default": "gpt-4o-mini"},
},
required_fields=["prompt"],
)
async def mock_execute(input_data, **kwargs):
yield "response", "Generated text"
mock_block.execute = mock_execute
with (
patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock_block,
),
patch(
"backend.api.features.chat.tools.run_block.get_or_create_workspace",
new_callable=AsyncMock,
return_value=MagicMock(id="test-workspace-id"),
),
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="ai-text-gen-id",
input_data={
"prompt": "Write a haiku",
"model": "gpt-4o-mini", # correct field name
},
)
assert isinstance(response, BlockOutputResponse)
assert response.success is True
@pytest.mark.asyncio(loop_scope="session")
async def test_missing_required_fields_returns_details(self):
"""Missing required fields returns BlockDetailsResponse with schema."""
session = make_session(user_id=_TEST_USER_ID)
mock_block = make_mock_block_with_schema(
block_id="ai-text-gen-id",
name="AI Text Generator",
input_properties={
"prompt": {"type": "string"},
"model": {"type": "string", "default": "gpt-4o-mini"},
},
required_fields=["prompt"],
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock_block,
):
tool = RunBlockTool()
# Only provide valid optional field, missing required 'prompt'
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="ai-text-gen-id",
input_data={
"model": "gpt-4o-mini", # valid but optional
},
)
assert isinstance(response, BlockDetailsResponse)


@@ -1,153 +0,0 @@
"""Tests for BlockDetailsResponse in RunBlockTool."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.api.features.chat.tools.models import BlockDetailsResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType
from backend.data.model import CredentialsMetaInput
from backend.integrations.providers import ProviderName
from ._test_data import make_session
_TEST_USER_ID = "test-user-run-block-details"
def make_mock_block_with_inputs(
block_id: str, name: str, description: str = "Test description"
):
"""Create a mock block with input/output schemas for testing."""
mock = MagicMock()
mock.id = block_id
mock.name = name
mock.description = description
mock.block_type = BlockType.STANDARD
mock.disabled = False
# Input schema with non-credential fields
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = {
"properties": {
"url": {"type": "string", "description": "URL to fetch"},
"method": {"type": "string", "description": "HTTP method"},
},
"required": ["url"],
}
mock.input_schema.get_credentials_fields.return_value = {}
mock.input_schema.get_credentials_fields_info.return_value = {}
# Output schema
mock.output_schema = MagicMock()
mock.output_schema.jsonschema.return_value = {
"properties": {
"response": {"type": "object", "description": "HTTP response"},
"error": {"type": "string", "description": "Error message"},
}
}
return mock
@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_no_input_provided():
"""When run_block is called without input_data, it should return BlockDetailsResponse."""
session = make_session(user_id=_TEST_USER_ID)
# Create a block with inputs
http_block = make_mock_block_with_inputs(
"http-block-id", "HTTP Request", "Send HTTP requests"
)
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=http_block,
):
# Mock credentials check to return no missing credentials
with patch.object(
RunBlockTool,
"_resolve_block_credentials",
new_callable=AsyncMock,
return_value=({}, []), # (matched_credentials, missing_credentials)
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="http-block-id",
input_data={}, # Empty input data
)
# Should return BlockDetailsResponse showing the schema
assert isinstance(response, BlockDetailsResponse)
assert response.block.id == "http-block-id"
assert response.block.name == "HTTP Request"
assert response.block.description == "Send HTTP requests"
assert "url" in response.block.inputs["properties"]
assert "method" in response.block.inputs["properties"]
assert "response" in response.block.outputs["properties"]
assert response.user_authenticated is True
@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_only_credentials_provided():
"""When only credentials are provided (no actual input), should return details."""
session = make_session(user_id=_TEST_USER_ID)
# Create a block with both credential and non-credential inputs
mock = MagicMock()
mock.id = "api-block-id"
mock.name = "API Call"
mock.description = "Make API calls"
mock.block_type = BlockType.STANDARD
mock.disabled = False
mock.input_schema = MagicMock()
mock.input_schema.jsonschema.return_value = {
"properties": {
"credentials": {"type": "object", "description": "API credentials"},
"endpoint": {"type": "string", "description": "API endpoint"},
},
"required": ["credentials", "endpoint"],
}
mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
mock.input_schema.get_credentials_fields_info.return_value = {}
mock.output_schema = MagicMock()
mock.output_schema.jsonschema.return_value = {
"properties": {"result": {"type": "object"}}
}
with patch(
"backend.api.features.chat.tools.run_block.get_block",
return_value=mock,
):
with patch.object(
RunBlockTool,
"_resolve_block_credentials",
new_callable=AsyncMock,
return_value=(
{
"credentials": CredentialsMetaInput(
id="cred-id",
provider=ProviderName("test_provider"),
type="api_key",
title="Test Credential",
)
},
[],
),
):
tool = RunBlockTool()
response = await tool._execute(
user_id=_TEST_USER_ID,
session=session,
block_id="api-block-id",
input_data={"credentials": {"some": "cred"}}, # Only credential
)
# Should return details because no non-credential inputs provided
assert isinstance(response, BlockDetailsResponse)
assert response.block.id == "api-block-id"
assert response.block.name == "API Call"


@@ -12,11 +12,12 @@ import backend.api.features.store.image_gen as store_image_gen
import backend.api.features.store.media as store_media
import backend.data.graph as graph_db
import backend.data.integrations as integrations_db
from backend.data.block import BlockInput
from backend.data.db import transaction
from backend.data.execution import get_graph_execution
from backend.data.graph import GraphSettings
from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
from backend.data.model import CredentialsMetaInput, GraphInput
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks.graph_lifecycle_hooks import (
on_graph_activate,
@@ -1129,7 +1130,7 @@ async def create_preset_from_graph_execution(
async def update_preset(
user_id: str,
preset_id: str,
inputs: Optional[GraphInput] = None,
inputs: Optional[BlockInput] = None,
credentials: Optional[dict[str, CredentialsMetaInput]] = None,
name: Optional[str] = None,
description: Optional[str] = None,


@@ -6,12 +6,9 @@ import prisma.enums
import prisma.models
import pydantic
from backend.data.block import BlockInput
from backend.data.graph import GraphModel, GraphSettings, GraphTriggerInfo
from backend.data.model import (
CredentialsMetaInput,
GraphInput,
is_credentials_field_name,
)
from backend.data.model import CredentialsMetaInput, is_credentials_field_name
from backend.util.json import loads as json_loads
from backend.util.models import Pagination
@@ -326,7 +323,7 @@ class LibraryAgentPresetCreatable(pydantic.BaseModel):
graph_id: str
graph_version: int
inputs: GraphInput
inputs: BlockInput
credentials: dict[str, CredentialsMetaInput]
name: str
@@ -355,7 +352,7 @@ class LibraryAgentPresetUpdatable(pydantic.BaseModel):
Request model used when updating a preset for a library agent.
"""
inputs: Optional[GraphInput] = None
inputs: Optional[BlockInput] = None
credentials: Optional[dict[str, CredentialsMetaInput]] = None
name: Optional[str] = None
@@ -398,7 +395,7 @@
"Webhook must be included in AgentPreset query when webhookId is set"
)
input_data: GraphInput = {}
input_data: BlockInput = {}
input_credentials: dict[str, CredentialsMetaInput] = {}
for preset_input in preset.InputPresets:


@@ -5,8 +5,8 @@ from typing import Optional
import aiohttp
from fastapi import HTTPException
from backend.blocks import get_block
from backend.data import graph as graph_db
from backend.data.block import get_block
from backend.util.settings import Settings
from .models import ApiResponse, ChatRequest, GraphData


@@ -152,7 +152,7 @@ class BlockHandler(ContentHandler):
async def get_missing_items(self, batch_size: int) -> list[ContentItem]:
"""Fetch blocks without embeddings."""
from backend.blocks import get_blocks
from backend.data.block import get_blocks
# Get all available blocks
all_blocks = get_blocks()
@@ -249,7 +249,7 @@ class BlockHandler(ContentHandler):
async def get_stats(self) -> dict[str, int]:
"""Get statistics about block embedding coverage."""
from backend.blocks import get_blocks
from backend.data.block import get_blocks
all_blocks = get_blocks()


@@ -93,7 +93,7 @@ async def test_block_handler_get_missing_items(mocker):
mock_existing = []
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -135,7 +135,7 @@ async def test_block_handler_get_stats(mocker):
mock_embedded = [{"count": 2}]
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -327,7 +327,7 @@ async def test_block_handler_handles_missing_attributes():
mock_blocks = {"block-minimal": mock_block_class}
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(
@@ -360,7 +360,7 @@ async def test_block_handler_skips_failed_blocks():
mock_blocks = {"good-block": good_block, "bad-block": bad_block}
with patch(
"backend.blocks.get_blocks",
"backend.data.block.get_blocks",
return_value=mock_blocks,
):
with patch(


@@ -662,7 +662,7 @@ async def cleanup_orphaned_embeddings() -> dict[str, Any]:
)
current_ids = {row["id"] for row in valid_agents}
elif content_type == ContentType.BLOCK:
from backend.blocks import get_blocks
from backend.data.block import get_blocks
current_ids = set(get_blocks().keys())
elif content_type == ContentType.DOCUMENTATION:


@@ -7,6 +7,15 @@ from replicate.client import Client as ReplicateClient
from replicate.exceptions import ReplicateError
from replicate.helpers import FileOutput
from backend.blocks.ideogram import (
AspectRatio,
ColorPalettePreset,
IdeogramModelBlock,
IdeogramModelName,
MagicPromptOption,
StyleType,
UpscaleOption,
)
from backend.data.graph import GraphBaseMeta
from backend.data.model import CredentialsMetaInput, ProviderName
from backend.integrations.credentials_store import ideogram_credentials
@@ -41,16 +50,6 @@ async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.Bytes
if not ideogram_credentials.api_key:
raise ValueError("Missing Ideogram API key")
from backend.blocks.ideogram import (
AspectRatio,
ColorPalettePreset,
IdeogramModelBlock,
IdeogramModelName,
MagicPromptOption,
StyleType,
UpscaleOption,
)
name = graph.name
description = f"{name} ({graph.description})" if graph.description else name


@@ -40,11 +40,10 @@ from backend.api.model import (
UpdateTimezoneRequest,
UploadFileResponse,
)
from backend.blocks import get_block, get_blocks
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.auth import api_key as api_key_db
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
from backend.data.credit import (
AutoTopUpConfig,
RefundRequest,


@@ -3,19 +3,22 @@ import logging
import os
import re
from pathlib import Path
from typing import Sequence, Type, TypeVar
from typing import TYPE_CHECKING, TypeVar
from backend.blocks._base import AnyBlockSchema, BlockType
from backend.util.cache import cached
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@cached(ttl_seconds=3600)
def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.blocks._base import Block
from backend.data.block import Block
from backend.util.settings import Config
# Check if example blocks should be loaded from settings
@@ -47,8 +50,8 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["AnyBlockSchema"]] = {}
available_blocks: dict[str, type["Block"]] = {}
for block_cls in _all_subclasses(Block):
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
@@ -61,7 +64,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
"please name the class with 'Base' at the end" "please name the class with 'Base' at the end"
) )
block = block_cls() # pyright: ignore[reportAbstractUsage] block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36: if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError( raise ValueError(
@@ -102,7 +105,7 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
available_blocks[block.id] = block_cls available_blocks[block.id] = block_cls
# Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets # Filter out blocks with incomplete auth configs, e.g. missing OAuth server secrets
from ._utils import is_block_auth_configured from backend.data.block import is_block_auth_configured
filtered_blocks = {} filtered_blocks = {}
for block_id, block_cls in available_blocks.items(): for block_id, block_cls in available_blocks.items():
@@ -112,48 +115,11 @@ def load_all_blocks() -> dict[str, type["AnyBlockSchema"]]:
return filtered_blocks return filtered_blocks
def _all_subclasses(cls: type[T]) -> list[type[T]]: __all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__() subclasses = cls.__subclasses__()
for subclass in subclasses: for subclass in subclasses:
subclasses += _all_subclasses(subclass) subclasses += all_subclasses(subclass)
return subclasses return subclasses
# ============== Block access helper functions ============== #
def get_blocks() -> dict[str, Type["AnyBlockSchema"]]:
return load_all_blocks()
# Note on the return type annotation: https://github.com/microsoft/pyright/issues/10281
def get_block(block_id: str) -> "AnyBlockSchema | None":
cls = get_blocks().get(block_id)
return cls() if cls else None
@cached(ttl_seconds=3600)
def get_webhook_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL)
]
@cached(ttl_seconds=3600)
def get_io_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
]
@cached(ttl_seconds=3600)
def get_human_in_the_loop_block_ids() -> Sequence[str]:
return [
id
for id, B in get_blocks().items()
if B().block_type == BlockType.HUMAN_IN_THE_LOOP
]
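
For context, `__subclasses__()` only returns direct children, which is why the `all_subclasses` helper above recurses; a self-contained sketch with illustrative class names (Base/Child/GrandChild are not from the codebase):

```python
from typing import TypeVar

T = TypeVar("T")

def all_subclasses(cls: type[T]) -> list[type[T]]:
    # __subclasses__() only yields direct children, so recurse to also
    # pick up grandchildren and deeper descendants (mirrors the helper above).
    subclasses = cls.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses

class Base: ...
class Child(Base): ...
class GrandChild(Child): ...

assert GrandChild in all_subclasses(Base)  # direct __subclasses__() would miss this
```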

View File

@@ -1,739 +0,0 @@
import inspect
import logging
from abc import ABC, abstractmethod
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Generic,
Optional,
Type,
TypeAlias,
TypeVar,
cast,
get_origin,
)
import jsonref
import jsonschema
from pydantic import BaseModel
from backend.data.block import BlockInput, BlockOutput, BlockOutputEntry
from backend.data.model import (
Credentials,
CredentialsFieldInfo,
CredentialsMetaInput,
SchemaField,
is_credentials_field_name,
)
from backend.integrations.providers import ProviderName
from backend.util import json
from backend.util.exceptions import (
BlockError,
BlockExecutionError,
BlockInputError,
BlockOutputError,
BlockUnknownError,
)
from backend.util.settings import Config
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from backend.data.execution import ExecutionContext
from backend.data.model import ContributorDetails, NodeExecutionStats
from ..data.graph import Link
app_config = Config()
BlockTestOutput = BlockOutputEntry | tuple[str, Callable[[Any], bool]]
class BlockType(Enum):
STANDARD = "Standard"
INPUT = "Input"
OUTPUT = "Output"
NOTE = "Note"
WEBHOOK = "Webhook"
WEBHOOK_MANUAL = "Webhook (manual)"
AGENT = "Agent"
AI = "AI"
AYRSHARE = "Ayrshare"
HUMAN_IN_THE_LOOP = "Human In The Loop"
class BlockCategory(Enum):
AI = "Block that leverages AI to perform a task."
SOCIAL = "Block that interacts with social media platforms."
TEXT = "Block that processes text data."
SEARCH = "Block that searches or extracts information from the internet."
BASIC = "Block that performs basic operations."
INPUT = "Block that interacts with input of the graph."
OUTPUT = "Block that interacts with output of the graph."
LOGIC = "Programming logic to control the flow of your agent"
COMMUNICATION = "Block that interacts with communication platforms."
DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
DATA = "Block that interacts with structured data."
HARDWARE = "Block that interacts with hardware."
AGENT = "Block that interacts with other agents."
CRM = "Block that interacts with CRM services."
SAFETY = (
"Block that provides AI safety mechanisms such as detecting harmful content"
)
PRODUCTIVITY = "Block that helps with productivity"
ISSUE_TRACKING = "Block that helps with issue tracking"
MULTIMEDIA = "Block that interacts with multimedia content"
MARKETING = "Block that helps with marketing"
def dict(self) -> dict[str, str]:
return {"category": self.name, "description": self.value}
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
class BlockInfo(BaseModel):
id: str
name: str
inputSchema: dict[str, Any]
outputSchema: dict[str, Any]
costs: list[BlockCost]
description: str
categories: list[dict[str, str]]
contributors: list[dict[str, Any]]
staticOutput: bool
uiType: str
class BlockSchema(BaseModel):
cached_jsonschema: ClassVar[dict[str, Any]]
@classmethod
def jsonschema(cls) -> dict[str, Any]:
if cls.cached_jsonschema:
return cls.cached_jsonschema
model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True)
def ref_to_dict(obj):
if isinstance(obj, dict):
# OpenAPI <3.1 does not support sibling fields that have a $ref key
# So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item.
keys = {"allOf", "anyOf", "oneOf"}
one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
if one_key:
obj.update(obj[one_key][0])
return {
key: ref_to_dict(value)
for key, value in obj.items()
if not key.startswith("$") and key != one_key
}
elif isinstance(obj, list):
return [ref_to_dict(item) for item in obj]
return obj
cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model))
return cls.cached_jsonschema
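
A minimal standalone sketch of the single-item `allOf`/`anyOf`/`oneOf` collapsing performed by `ref_to_dict` above, applied to a hand-written toy schema (not a real block schema):

```python
schema = {
    "properties": {
        "model": {
            "allOf": [{"type": "string", "enum": ["V_2", "V_2_TURBO"]}],
            "description": "Model version",
        }
    }
}

def collapse(obj):
    # Same logic as ref_to_dict above: merge a lone allOf/anyOf/oneOf entry
    # into its parent, and drop "$"-prefixed keys left over from jsonref.
    if isinstance(obj, dict):
        keys = {"allOf", "anyOf", "oneOf"}
        one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None)
        if one_key:
            obj.update(obj[one_key][0])
        return {k: collapse(v) for k, v in obj.items()
                if not k.startswith("$") and k != one_key}
    if isinstance(obj, list):
        return [collapse(i) for i in obj]
    return obj

print(collapse(schema))  # the single-item allOf is merged into the property itself
```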
@classmethod
def validate_data(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(
schema=cls.jsonschema(),
data={k: v for k, v in data.items() if v is not None},
)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return cls.validate_data(data)
@classmethod
def get_field_schema(cls, field_name: str) -> dict[str, Any]:
model_schema = cls.jsonschema().get("properties", {})
if not model_schema:
raise ValueError(f"Invalid model schema {cls}")
property_schema = model_schema.get(field_name)
if not property_schema:
raise ValueError(f"Invalid property name {field_name}")
return property_schema
@classmethod
def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
"""
Validate the data against a specific property (one of the input/output names).
Returns the validation error message if the data does not match the schema.
"""
try:
property_schema = cls.get_field_schema(field_name)
jsonschema.validate(json.to_dict(data), property_schema)
return None
except jsonschema.ValidationError as e:
return str(e)
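
A minimal standalone sketch of this per-field validation using a hand-written property schema (the schema and error text are illustrative):

```python
import jsonschema

# Toy version of validate_field above: check one value against one property
# schema and return the error message, or None when it validates.
property_schema = {"type": "string", "minLength": 1}

def validate_field(value) -> str | None:
    try:
        jsonschema.validate(value, property_schema)
        return None
    except jsonschema.ValidationError as e:
        return str(e)

print(validate_field("ok"))  # None
print(validate_field(""))    # error text mentioning the value is too short
```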
@classmethod
def get_fields(cls) -> set[str]:
return set(cls.model_fields.keys())
@classmethod
def get_required_fields(cls) -> set[str]:
return {
field
for field, field_info in cls.model_fields.items()
if field_info.is_required()
}
@classmethod
def __pydantic_init_subclass__(cls, **kwargs):
"""Validates the schema definition. Rules:
- Fields with annotation `CredentialsMetaInput` MUST be
named `credentials` or `*_credentials`
- Fields named `credentials` or `*_credentials` MUST be
of type `CredentialsMetaInput`
"""
super().__pydantic_init_subclass__(**kwargs)
# Reset cached JSON schema to prevent inheriting it from parent class
cls.cached_jsonschema = {}
credentials_fields = cls.get_credentials_fields()
for field_name in cls.get_fields():
if is_credentials_field_name(field_name):
if field_name not in credentials_fields:
raise TypeError(
f"Credentials field '{field_name}' on {cls.__qualname__} "
f"is not of type {CredentialsMetaInput.__name__}"
)
CredentialsMetaInput.validate_credentials_field_schema(
cls.get_field_schema(field_name), field_name
)
elif field_name in credentials_fields:
raise KeyError(
f"Credentials field '{field_name}' on {cls.__qualname__} "
"has invalid name: must be 'credentials' or *_credentials"
)
@classmethod
def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]:
return {
field_name: info.annotation
for field_name, info in cls.model_fields.items()
if (
inspect.isclass(info.annotation)
and issubclass(
get_origin(info.annotation) or info.annotation,
CredentialsMetaInput,
)
)
}
@classmethod
def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
"""
Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
Raises:
ValueError: If multiple fields have the same kwarg_name, as this would
cause silent overwriting and only the last field would be processed.
"""
result: dict[str, dict[str, Any]] = {}
schema = cls.jsonschema()
properties = schema.get("properties", {})
for field_name, field_schema in properties.items():
auto_creds = field_schema.get("auto_credentials")
if auto_creds:
kwarg_name = auto_creds.get("kwarg_name", "credentials")
if kwarg_name in result:
raise ValueError(
f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
f"in fields '{result[kwarg_name]['field_name']}' and "
f"'{field_name}' on {cls.__qualname__}"
)
result[kwarg_name] = {
"field_name": field_name,
"config": auto_creds,
}
return result
@classmethod
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
result = {}
# Regular credentials fields
for field_name in cls.get_credentials_fields().keys():
result[field_name] = CredentialsFieldInfo.model_validate(
cls.get_field_schema(field_name), by_alias=True
)
# Auto-generated credentials fields (from GoogleDriveFileInput etc.)
for kwarg_name, info in cls.get_auto_credentials_fields().items():
config = info["config"]
# Build a schema-like dict that CredentialsFieldInfo can parse
auto_schema = {
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True
)
return result
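
For illustration, the schema-like dict this builds for a hypothetical GoogleDriveFileInput-style field; the config values below are assumptions, but the dict shape mirrors the construction above:

```python
# Hypothetical auto_credentials config on a file-picker field.
config = {
    "provider": "google",
    "type": "oauth2",
    "scopes": ["https://www.googleapis.com/auth/drive.readonly"],
}
auto_schema = {
    "credentials_provider": [config.get("provider", "google")],
    "credentials_types": [config.get("type", "oauth2")],
    "credentials_scopes": config.get("scopes"),
}
print(auto_schema)
# {'credentials_provider': ['google'], 'credentials_types': ['oauth2'],
#  'credentials_scopes': ['https://www.googleapis.com/auth/drive.readonly']}
```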
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data # Return as is, by default.
@classmethod
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
input_fields_from_nodes = {link.sink_name for link in links}
return input_fields_from_nodes - set(data)
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
return cls.get_required_fields() - set(data)
class BlockSchemaInput(BlockSchema):
"""
Base schema class for block inputs.
All block input schemas should extend this class for consistency.
"""
pass
class BlockSchemaOutput(BlockSchema):
"""
Base schema class for block outputs that includes a standard error field.
All block output schemas should extend this class to ensure consistent error handling.
"""
error: str = SchemaField(
description="Error message if the operation failed", default=""
)
BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchemaInput)
BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchemaOutput)
class EmptyInputSchema(BlockSchemaInput):
pass
class EmptyOutputSchema(BlockSchemaOutput):
pass
# For backward compatibility - will be deprecated
EmptySchema = EmptyOutputSchema
# --8<-- [start:BlockWebhookConfig]
class BlockManualWebhookConfig(BaseModel):
"""
Configuration model for webhook-triggered blocks for which
the user has to manually set up the webhook at the provider.
"""
provider: ProviderName
"""The service provider that the webhook connects to"""
webhook_type: str
"""
Identifier for the webhook type. E.g. GitHub has repo and organization level hooks.
Only for use in the corresponding `WebhooksManager`.
"""
event_filter_input: str = ""
"""
Name of the block's event filter input.
Leave empty if the corresponding webhook doesn't have distinct event/payload types.
"""
event_format: str = "{event}"
"""
Template string for the event(s) that a block instance subscribes to.
Applied individually to each event selected in the event filter input.
Example: `"pull_request.{event}"` -> `"pull_request.opened"`
"""
class BlockWebhookConfig(BlockManualWebhookConfig):
"""
Configuration model for webhook-triggered blocks for which
the webhook can be automatically set up through the provider's API.
"""
resource_format: str
"""
Template string for the resource that a block instance subscribes to.
Fields will be filled from the block's inputs (except `payload`).
Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented)
Only for use in the corresponding `WebhooksManager`.
"""
# --8<-- [end:BlockWebhookConfig]
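
A hedged sketch of how `event_format` is applied per selected event, following the `"pull_request.{event}"` example in the docstring above (the event names are illustrative):

```python
event_format = "pull_request.{event}"
selected_events = {"opened": True, "closed": False, "reopened": True}

# Only events toggled on in the filter are formatted into subscriptions,
# matching the comprehension in is_triggered_by_event_type further below.
subscribed = [
    event_format.format(event=name)
    for name, enabled in selected_events.items()
    if enabled
]
print(subscribed)  # ['pull_request.opened', 'pull_request.reopened']
```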
class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
def __init__(
self,
id: str = "",
description: str = "",
contributors: list["ContributorDetails"] = [],
categories: set[BlockCategory] | None = None,
input_schema: Type[BlockSchemaInputType] = EmptyInputSchema,
output_schema: Type[BlockSchemaOutputType] = EmptyOutputSchema,
test_input: BlockInput | list[BlockInput] | None = None,
test_output: BlockTestOutput | list[BlockTestOutput] | None = None,
test_mock: dict[str, Any] | None = None,
test_credentials: Optional[Credentials | dict[str, Credentials]] = None,
disabled: bool = False,
static_output: bool = False,
block_type: BlockType = BlockType.STANDARD,
webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None,
is_sensitive_action: bool = False,
):
"""
Initialize the block with the given schema.
Args:
id: The unique identifier for the block; this value will be persisted in the
DB, so it should be unique and constant across application runs.
Use the UUID format for the ID.
description: The description of the block, explaining what the block does.
contributors: The list of contributors who contributed to the block.
input_schema: The schema, defined as a Pydantic model, for the input data.
output_schema: The schema, defined as a Pydantic model, for the output data.
test_input: The list or single sample input data for the block, for testing.
test_output: The list or single expected output if the test_input is run.
test_mock: function names on the block implementation to mock on test run.
disabled: If the block is disabled, it will not be available for execution.
static_output: Whether the output links of the block are static by default.
"""
from backend.data.model import NodeExecutionStats
self.id = id
self.input_schema = input_schema
self.output_schema = output_schema
self.test_input = test_input
self.test_output = test_output
self.test_mock = test_mock
self.test_credentials = test_credentials
self.description = description
self.categories = categories or set()
self.contributors = contributors or set()
self.disabled = disabled
self.static_output = static_output
self.block_type = block_type
self.webhook_config = webhook_config
self.is_sensitive_action = is_sensitive_action
self.execution_stats: "NodeExecutionStats" = NodeExecutionStats()
if self.webhook_config:
if isinstance(self.webhook_config, BlockWebhookConfig):
# Enforce presence of credentials field on auto-setup webhook blocks
if not (cred_fields := self.input_schema.get_credentials_fields()):
raise TypeError(
"credentials field is required on auto-setup webhook blocks"
)
# Disallow multiple credentials inputs on webhook blocks
elif len(cred_fields) > 1:
raise ValueError(
"Multiple credentials inputs not supported on webhook blocks"
)
self.block_type = BlockType.WEBHOOK
else:
self.block_type = BlockType.WEBHOOK_MANUAL
# Enforce shape of webhook event filter, if present
if self.webhook_config.event_filter_input:
event_filter_field = self.input_schema.model_fields[
self.webhook_config.event_filter_input
]
if not (
isinstance(event_filter_field.annotation, type)
and issubclass(event_filter_field.annotation, BaseModel)
and all(
field.annotation is bool
for field in event_filter_field.annotation.model_fields.values()
)
):
raise NotImplementedError(
f"{self.name} has an invalid webhook event selector: "
"field must be a BaseModel and all its fields must be boolean"
)
# Enforce presence of 'payload' input
if "payload" not in self.input_schema.model_fields:
raise TypeError(
f"{self.name} is webhook-triggered but has no 'payload' input"
)
# Disable webhook-triggered block if webhook functionality not available
if not app_config.platform_base_url:
self.disabled = True
@abstractmethod
async def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
"""
Run the block with the given input data.
Args:
input_data: The input data with the structure of input_schema.
Kwargs: As of 14/02/2025 these include:
graph_id: The ID of the graph.
node_id: The ID of the node.
graph_exec_id: The ID of the graph execution.
node_exec_id: The ID of the node execution.
user_id: The ID of the user.
Returns:
A Generator that yields (output_name, output_data).
output_name: One of the output names defined in the Block's output_schema.
output_data: The data for the output_name, matching the defined schema.
"""
# --- satisfy the type checker, never executed -------------
if False: # noqa: SIM115
yield "name", "value" # pyright: ignore[reportMissingYield]
raise NotImplementedError(f"{self.name} does not implement the run method.")
async def run_once(
self, input_data: BlockSchemaInputType, output: str, **kwargs
) -> Any:
async for item in self.run(input_data, **kwargs):
name, data = item
if name == output:
return data
raise ValueError(f"{self.name} did not produce any output for {output}")
def merge_stats(self, stats: "NodeExecutionStats") -> "NodeExecutionStats":
self.execution_stats += stats
return self.execution_stats
@property
def name(self):
return self.__class__.__name__
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"inputSchema": self.input_schema.jsonschema(),
"outputSchema": self.output_schema.jsonschema(),
"description": self.description,
"categories": [category.dict() for category in self.categories],
"contributors": [
contributor.model_dump() for contributor in self.contributors
],
"staticOutput": self.static_output,
"uiType": self.block_type.value,
}
def get_info(self) -> BlockInfo:
from backend.data.credit import get_block_cost
return BlockInfo(
id=self.id,
name=self.name,
inputSchema=self.input_schema.jsonschema(),
outputSchema=self.output_schema.jsonschema(),
costs=get_block_cost(self),
description=self.description,
categories=[category.dict() for category in self.categories],
contributors=[
contributor.model_dump() for contributor in self.contributors
],
staticOutput=self.static_output,
uiType=self.block_type.value,
)
async def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
try:
async for output_name, output_data in self._execute(input_data, **kwargs):
yield output_name, output_data
except Exception as ex:
if isinstance(ex, BlockError):
raise ex
else:
raise (
BlockExecutionError
if isinstance(ex, ValueError)
else BlockUnknownError
)(
message=str(ex),
block_name=self.name,
block_id=self.id,
) from ex
async def is_block_exec_need_review(
self,
input_data: BlockInput,
*,
user_id: str,
node_id: str,
node_exec_id: str,
graph_exec_id: str,
graph_id: str,
graph_version: int,
execution_context: "ExecutionContext",
**kwargs,
) -> tuple[bool, BlockInput]:
"""
Check if this block execution needs human review and handle the review process.
Returns:
Tuple of (should_pause, input_data_to_use)
- should_pause: True if execution should be paused for review
- input_data_to_use: The input data to use (may be modified by reviewer)
"""
if not (
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
):
return False, input_data
from backend.blocks.helpers.review import HITLReviewHelper
# Handle the review request and get decision
decision = await HITLReviewHelper.handle_review_decision(
input_data=input_data,
user_id=user_id,
node_id=node_id,
node_exec_id=node_exec_id,
graph_exec_id=graph_exec_id,
graph_id=graph_id,
graph_version=graph_version,
block_name=self.name,
editable=True,
)
if decision is None:
# We're awaiting review - pause execution
return True, input_data
if not decision.should_proceed:
# Review was rejected, raise an error to stop execution
raise BlockExecutionError(
message=f"Block execution rejected by reviewer: {decision.message}",
block_name=self.name,
block_id=self.id,
)
# Review was approved - use the potentially modified data
# ReviewResult.data must be a dict for block inputs
reviewed_data = decision.review_result.data
if not isinstance(reviewed_data, dict):
raise BlockExecutionError(
message=f"Review data must be a dict for block input, got {type(reviewed_data).__name__}",
block_name=self.name,
block_id=self.id,
)
return False, reviewed_data
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
# Check for review requirement only if running within a graph execution context
# Direct block execution (e.g., from chat) skips the review process
has_graph_context = all(
key in kwargs
for key in (
"node_exec_id",
"graph_exec_id",
"graph_id",
"execution_context",
)
)
if has_graph_context:
should_pause, input_data = await self.is_block_exec_need_review(
input_data, **kwargs
)
if should_pause:
return
# Validate the input data (original or reviewer-modified) once
if error := self.input_schema.validate_data(input_data):
raise BlockInputError(
message=f"Unable to execute block with invalid input data: {error}",
block_name=self.name,
block_id=self.id,
)
# Use the validated input data
async for output_name, output_data in self.run(
self.input_schema(**{k: v for k, v in input_data.items() if v is not None}),
**kwargs,
):
if output_name == "error":
raise BlockExecutionError(
message=output_data, block_name=self.name, block_id=self.id
)
if self.block_type == BlockType.STANDARD and (
error := self.output_schema.validate_field(output_name, output_data)
):
raise BlockOutputError(
message=f"Block produced an invalid output data: {error}",
block_name=self.name,
block_id=self.id,
)
yield output_name, output_data
def is_triggered_by_event_type(
self, trigger_config: dict[str, Any], event_type: str
) -> bool:
if not self.webhook_config:
raise TypeError("This method can't be used on non-trigger blocks")
if not self.webhook_config.event_filter_input:
return True
event_filter = trigger_config.get(self.webhook_config.event_filter_input)
if not event_filter:
raise ValueError("Event filter is not configured on trigger")
return event_type in [
self.webhook_config.event_format.format(event=k)
for k in event_filter
if event_filter[k] is True
]
# Type alias for any block with standard input/output schemas
AnyBlockSchema: TypeAlias = Block[BlockSchemaInput, BlockSchemaOutput]

View File

@@ -1,122 +0,0 @@
import logging
import os
from backend.integrations.providers import ProviderName
from ._base import AnyBlockSchema
logger = logging.getLogger(__name__)
def is_block_auth_configured(
block_cls: type[AnyBlockSchema],
) -> bool:
"""
Check if a block has a valid authentication method configured at runtime.
For example, if a block is OAuth-only and the required env vars are not set,
do not show it in the UI.
"""
from backend.sdk.registry import AutoRegistry
# Create an instance to access input_schema
try:
block = block_cls()
except Exception as e:
# If we can't create a block instance, assume it's not OAuth-only
logger.error(f"Error creating block instance for {block_cls.__name__}: {e}")
return True
logger.debug(
f"Checking if block {block_cls.__name__} has a valid provider configured"
)
# Get all credential inputs from input schema
credential_inputs = block.input_schema.get_credentials_fields_info()
required_inputs = block.input_schema.get_required_fields()
if not credential_inputs:
logger.debug(
f"Block {block_cls.__name__} has no credential inputs - Treating as valid"
)
return True
# Check credential inputs
if len(required_inputs.intersection(credential_inputs.keys())) == 0:
logger.debug(
f"Block {block_cls.__name__} has only optional credential inputs"
" - will work without credentials configured"
)
# Check if the credential inputs for this block are correctly configured
for field_name, field_info in credential_inputs.items():
provider_names = field_info.provider
if not provider_names:
logger.warning(
f"Block {block_cls.__name__} "
f"has credential input '{field_name}' with no provider options"
" - Disabling"
)
return False
# If a field has multiple possible providers, each one needs to be usable to
# prevent breaking the UX
for _provider_name in provider_names:
provider_name = _provider_name.value
if provider_name in ProviderName.__members__.values():
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' is part of the legacy provider system"
" - Treating as valid"
)
break
provider = AutoRegistry.get_provider(provider_name)
if not provider:
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"refers to unknown provider '{provider_name}' - Disabling"
)
return False
# Check the provider's supported auth types
if field_info.supported_types != provider.supported_auth_types:
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"has mismatched supported auth types (field <> Provider): "
f"{field_info.supported_types} != {provider.supported_auth_types}"
)
if not (supported_auth_types := provider.supported_auth_types):
# No auth methods have been configured for this provider
logger.warning(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' "
"has no authentication methods configured - Disabling"
)
return False
# Check if provider supports OAuth
if "oauth2" in supported_auth_types:
# Check if OAuth environment variables are set
if (oauth_config := provider.oauth_config) and bool(
os.getenv(oauth_config.client_id_env_var)
and os.getenv(oauth_config.client_secret_env_var)
):
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' is configured for OAuth"
)
else:
logger.error(
f"Block {block_cls.__name__} credential input '{field_name}' "
f"provider '{provider_name}' "
"is missing OAuth client ID or secret - Disabling"
)
return False
logger.debug(
f"Block {block_cls.__name__} credential input '{field_name}' is valid; "
f"supported credential types: {', '.join(field_info.supported_types)}"
)
return True
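
A minimal sketch of the OAuth readiness test applied above: a provider is only treated as usable when both of its configured env vars hold values. The env var names below are illustrative:

```python
import os

def oauth_configured(client_id_env_var: str, client_secret_env_var: str) -> bool:
    # Mirrors the check above: both the client ID and the client secret
    # must be present in the environment, otherwise the block is disabled.
    return bool(os.getenv(client_id_env_var) and os.getenv(client_secret_env_var))

print(oauth_configured("GITHUB_CLIENT_ID", "GITHUB_CLIENT_SECRET"))
```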

View File

@@ -1,7 +1,7 @@
import logging import logging
from typing import TYPE_CHECKING, Any, Optional from typing import Any, Optional
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockInput, BlockInput,
@@ -9,15 +9,13 @@ from backend.blocks._base import (
BlockSchema, BlockSchema,
BlockSchemaInput, BlockSchemaInput,
BlockType, BlockType,
get_block,
) )
from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
from backend.data.model import NodeExecutionStats, SchemaField from backend.data.model import NodeExecutionStats, SchemaField
from backend.util.json import validate_with_jsonschema from backend.util.json import validate_with_jsonschema
from backend.util.retry import func_retry from backend.util.retry import func_retry
if TYPE_CHECKING:
from backend.executor.utils import LogMetadata
_logger = logging.getLogger(__name__) _logger = logging.getLogger(__name__)
@@ -126,10 +124,9 @@ class AgentExecutorBlock(Block):
graph_version: int, graph_version: int,
graph_exec_id: str, graph_exec_id: str,
user_id: str, user_id: str,
logger: "LogMetadata", logger,
) -> BlockOutput: ) -> BlockOutput:
from backend.blocks import get_block
from backend.data.execution import ExecutionEventType from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils from backend.executor import utils as execution_utils
@@ -201,7 +198,7 @@ class AgentExecutorBlock(Block):
self, self,
graph_exec_id: str, graph_exec_id: str,
user_id: str, user_id: str,
logger: "LogMetadata", logger,
) -> None: ) -> None:
from backend.executor import utils as execution_utils from backend.executor import utils as execution_utils

View File

@@ -1,11 +1,5 @@
from typing import Any from typing import Any
from backend.blocks._base import (
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.llm import ( from backend.blocks.llm import (
DEFAULT_LLM_MODEL, DEFAULT_LLM_MODEL,
TEST_CREDENTIALS, TEST_CREDENTIALS,
@@ -17,6 +11,12 @@ from backend.blocks.llm import (
LLMResponse, LLMResponse,
llm_call, llm_call,
) )
from backend.data.block import (
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField

View File

@@ -6,7 +6,7 @@ from pydantic import SecretStr
from replicate.client import Client as ReplicateClient from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput from replicate.helpers import FileOutput
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -5,12 +5,7 @@ from pydantic import SecretStr
from replicate.client import Client as ReplicateClient from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput from replicate.helpers import FileOutput
from backend.blocks._base import ( from backend.data.block import Block, BlockCategory, BlockSchemaInput, BlockSchemaOutput
Block,
BlockCategory,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext from backend.data.execution import ExecutionContext
from backend.data.model import ( from backend.data.model import (
APIKeyCredentials, APIKeyCredentials,

View File

@@ -6,7 +6,7 @@ from typing import Literal
from pydantic import SecretStr from pydantic import SecretStr
from replicate.client import Client as ReplicateClient from replicate.client import Client as ReplicateClient
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -6,7 +6,7 @@ from typing import Literal
from pydantic import SecretStr from pydantic import SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,10 +1,3 @@
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import ( from backend.blocks.apollo._auth import (
TEST_CREDENTIALS, TEST_CREDENTIALS,
@@ -17,6 +10,13 @@ from backend.blocks.apollo.models import (
PrimaryPhone, PrimaryPhone,
SearchOrganizationsRequest, SearchOrganizationsRequest,
) )
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField from backend.data.model import CredentialsField, SchemaField

View File

@@ -1,12 +1,5 @@
import asyncio import asyncio
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import ( from backend.blocks.apollo._auth import (
TEST_CREDENTIALS, TEST_CREDENTIALS,
@@ -21,6 +14,13 @@ from backend.blocks.apollo.models import (
SearchPeopleRequest, SearchPeopleRequest,
SenorityLevels, SenorityLevels,
) )
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField from backend.data.model import CredentialsField, SchemaField

View File

@@ -1,10 +1,3 @@
from backend.blocks._base import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.blocks.apollo._api import ApolloClient from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import ( from backend.blocks.apollo._auth import (
TEST_CREDENTIALS, TEST_CREDENTIALS,
@@ -13,6 +6,13 @@ from backend.blocks.apollo._auth import (
ApolloCredentialsInput, ApolloCredentialsInput,
) )
from backend.blocks.apollo.models import Contact, EnrichPersonRequest from backend.blocks.apollo.models import Contact, EnrichPersonRequest
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchemaInput,
BlockSchemaOutput,
)
from backend.data.model import CredentialsField, SchemaField from backend.data.model import CredentialsField, SchemaField

View File

@@ -3,7 +3,7 @@ from typing import Optional
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from backend.blocks._base import BlockSchemaInput from backend.data.block import BlockSchemaInput
from backend.data.model import SchemaField, UserIntegrations from backend.data.model import SchemaField, UserIntegrations
from backend.integrations.ayrshare import AyrshareClient from backend.integrations.ayrshare import AyrshareClient
from backend.util.clients import get_database_manager_async_client from backend.util.clients import get_database_manager_async_client

View File

@@ -1,7 +1,7 @@
import enum import enum
from typing import Any from typing import Any
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -2,7 +2,7 @@ import os
import re import re
from typing import Type from typing import Type
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,7 +1,7 @@
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,12 +1,14 @@
import base64
import json import json
import logging
import shlex import shlex
import uuid import uuid
from typing import TYPE_CHECKING, Literal, Optional from typing import Literal, Optional
from e2b import AsyncSandbox as BaseAsyncSandbox from e2b import AsyncSandbox as BaseAsyncSandbox
from pydantic import SecretStr from pydantic import BaseModel, SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,
@@ -20,13 +22,11 @@ from backend.data.model import (
SchemaField, SchemaField,
) )
from backend.integrations.providers import ProviderName from backend.integrations.providers import ProviderName
from backend.util.sandbox_files import (
SandboxFileOutput,
extract_and_store_sandbox_files,
)
if TYPE_CHECKING: logger = logging.getLogger(__name__)
from backend.executor.utils import ExecutionContext
# Maximum size for binary files to extract (50MB)
MAX_BINARY_FILE_SIZE = 50 * 1024 * 1024
class ClaudeCodeExecutionError(Exception): class ClaudeCodeExecutionError(Exception):
@@ -181,15 +181,27 @@ class ClaudeCodeBlock(Block):
advanced=True, advanced=True,
) )
class FileOutput(BaseModel):
"""A file extracted from the sandbox."""
path: str
relative_path: str # Path relative to working directory (for GitHub, etc.)
name: str
content: str # Text content for text files, empty string for binary files
is_binary: bool = False # True if this is a binary file
content_base64: Optional[str] = None # Base64-encoded content for binary files
class Output(BlockSchemaOutput): class Output(BlockSchemaOutput):
response: str = SchemaField( response: str = SchemaField(
description="The output/response from Claude Code execution" description="The output/response from Claude Code execution"
) )
files: list[SandboxFileOutput] = SchemaField( files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
description=( description=(
"List of text files created/modified by Claude Code during this execution. " "List of files created/modified by Claude Code during this execution. "
"Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. " "Each file has 'path', 'relative_path', 'name', 'content', 'is_binary', "
"workspace_ref contains a workspace:// URI if the file was stored to workspace." "and 'content_base64' fields. For text files, 'content' contains the text "
"and 'is_binary' is False. For binary files (PDFs, images, etc.), "
"'is_binary' is True and 'content_base64' contains the base64-encoded data."
) )
) )
conversation_history: str = SchemaField( conversation_history: str = SchemaField(
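
A sketch of consuming the `files` output under the contract described above: text files carry their content directly, while binary files carry base64 in `content_base64`. The sample dict is illustrative, shaped like a serialized `FileOutput`:

```python
import base64

file = {"name": "report.pdf", "content": "", "is_binary": True,
        "content_base64": base64.b64encode(b"%PDF-1.4 ...").decode("ascii")}

if file["is_binary"]:
    # Binary files (PDFs, images, ...) round-trip through base64.
    data: bytes = base64.b64decode(file["content_base64"])
else:
    data = file["content"].encode("utf-8")
print(file["name"], len(data), "bytes")
```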
@@ -252,7 +264,8 @@ class ClaudeCodeBlock(Block):
"relative_path": "index.html", "relative_path": "index.html",
"name": "index.html", "name": "index.html",
"content": "<html>Hello World</html>", "content": "<html>Hello World</html>",
"workspace_ref": None, "is_binary": False,
"content_base64": None,
} }
], ],
), ),
@@ -268,12 +281,13 @@ class ClaudeCodeBlock(Block):
"execute_claude_code": lambda *args, **kwargs: ( "execute_claude_code": lambda *args, **kwargs: (
"Created index.html with hello world content", # response "Created index.html with hello world content", # response
[ [
SandboxFileOutput( ClaudeCodeBlock.FileOutput(
path="/home/user/index.html", path="/home/user/index.html",
relative_path="index.html", relative_path="index.html",
name="index.html", name="index.html",
content="<html>Hello World</html>", content="<html>Hello World</html>",
workspace_ref=None, is_binary=False,
content_base64=None,
) )
], # files ], # files
"User: Create a hello world HTML file\n" "User: Create a hello world HTML file\n"
@@ -296,8 +310,7 @@ class ClaudeCodeBlock(Block):
existing_sandbox_id: str, existing_sandbox_id: str,
conversation_history: str, conversation_history: str,
dispose_sandbox: bool, dispose_sandbox: bool,
execution_context: "ExecutionContext", ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
) -> tuple[str, list[SandboxFileOutput], str, str, str]:
""" """
Execute Claude Code in an E2B sandbox. Execute Claude Code in an E2B sandbox.
@@ -452,18 +465,14 @@ class ClaudeCodeBlock(Block):
else: else:
new_conversation_history = turn_entry new_conversation_history = turn_entry
# Extract files created/modified during this run and store to workspace # Extract files created/modified during this run
sandbox_files = await extract_and_store_sandbox_files( files = await self._extract_files(
sandbox=sandbox, sandbox, working_directory, start_timestamp
working_directory=working_directory,
execution_context=execution_context,
since_timestamp=start_timestamp,
text_only=True,
) )
return ( return (
response, response,
sandbox_files, # Already SandboxFileOutput objects files,
new_conversation_history, new_conversation_history,
current_session_id, current_session_id,
sandbox_id, sandbox_id,
@@ -478,6 +487,233 @@ class ClaudeCodeBlock(Block):
if dispose_sandbox and sandbox: if dispose_sandbox and sandbox:
await sandbox.kill() await sandbox.kill()
async def _extract_files(
self,
sandbox: BaseAsyncSandbox,
working_directory: str,
since_timestamp: str | None = None,
) -> list["ClaudeCodeBlock.FileOutput"]:
"""
Extract text and binary files created/modified during this Claude Code execution.
Args:
sandbox: The E2B sandbox instance
working_directory: Directory to search for files
since_timestamp: ISO timestamp - only return files modified after this time
Returns:
List of FileOutput objects with path, relative_path, name, content, and the binary metadata fields (is_binary, content_base64)
"""
files: list[ClaudeCodeBlock.FileOutput] = []
# Text file extensions we can safely read as text
text_extensions = {
".txt",
".md",
".html",
".htm",
".css",
".js",
".ts",
".jsx",
".tsx",
".json",
".xml",
".yaml",
".yml",
".toml",
".ini",
".cfg",
".conf",
".py",
".rb",
".php",
".java",
".c",
".cpp",
".h",
".hpp",
".cs",
".go",
".rs",
".swift",
".kt",
".scala",
".sh",
".bash",
".zsh",
".sql",
".graphql",
".env",
".gitignore",
".dockerfile",
".vue",
".svelte",
".astro",
".mdx",
".rst",
".tex",
".csv",
".log",
".svg", # SVG is XML-based text
}
# Binary file extensions we can read and base64-encode
binary_extensions = {
# Images
".png",
".jpg",
".jpeg",
".gif",
".webp",
".ico",
".bmp",
".tiff",
".tif",
# Documents
".pdf",
# Archives (useful for downloads)
".zip",
".tar",
".gz",
".7z",
# Audio/Video (if small enough)
".mp3",
".wav",
".mp4",
".webm",
# Other binary formats
".woff",
".woff2",
".ttf",
".otf",
".eot",
".bin",
".exe",
".dll",
".so",
".dylib",
}
try:
# List files recursively using find command
# Exclude node_modules and .git directories, but allow hidden files
# like .env and .gitignore (they're filtered by text_extensions later)
# Filter by timestamp to only get files created/modified during this run
safe_working_dir = shlex.quote(working_directory)
timestamp_filter = ""
if since_timestamp:
timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
find_result = await sandbox.commands.run(
f"find {safe_working_dir} -type f "
f"{timestamp_filter}"
f"-not -path '*/node_modules/*' "
f"-not -path '*/.git/*' "
f"2>/dev/null"
)
if find_result.stdout:
for file_path in find_result.stdout.strip().split("\n"):
if not file_path:
continue
# Check if it's a text file we can read (case-insensitive)
file_path_lower = file_path.lower()
is_text = any(
file_path_lower.endswith(ext) for ext in text_extensions
) or file_path_lower.endswith("dockerfile")
# Check if it's a binary file we should extract
is_binary = any(
file_path_lower.endswith(ext) for ext in binary_extensions
)
# Helper to extract filename and relative path
def get_file_info(path: str, work_dir: str) -> tuple[str, str]:
name = path.split("/")[-1]
rel_path = path
if path.startswith(work_dir):
rel_path = path[len(work_dir) :]
if rel_path.startswith("/"):
rel_path = rel_path[1:]
return name, rel_path
if is_text:
try:
content = await sandbox.files.read(file_path)
# Handle bytes or string
if isinstance(content, bytes):
content = content.decode("utf-8", errors="replace")
file_name, relative_path = get_file_info(
file_path, working_directory
)
files.append(
ClaudeCodeBlock.FileOutput(
path=file_path,
relative_path=relative_path,
name=file_name,
content=content,
is_binary=False,
content_base64=None,
)
)
except Exception as e:
logger.warning(f"Failed to read text file {file_path}: {e}")
elif is_binary:
try:
# Check file size before reading to avoid OOM
stat_result = await sandbox.commands.run(
f"stat -c %s {shlex.quote(file_path)} 2>/dev/null"
)
if stat_result.exit_code != 0 or not stat_result.stdout:
logger.warning(
f"Skipping binary file {file_path}: "
f"could not determine file size"
)
continue
file_size = int(stat_result.stdout.strip())
if file_size > MAX_BINARY_FILE_SIZE:
logger.warning(
f"Skipping binary file {file_path}: "
f"size {file_size} exceeds limit "
f"{MAX_BINARY_FILE_SIZE}"
)
continue
# Read binary file as bytes using format="bytes"
content_bytes = await sandbox.files.read(
file_path, format="bytes"
)
# Base64 encode the binary content
content_b64 = base64.b64encode(content_bytes).decode(
"ascii"
)
file_name, relative_path = get_file_info(
file_path, working_directory
)
files.append(
ClaudeCodeBlock.FileOutput(
path=file_path,
relative_path=relative_path,
name=file_name,
content="", # Empty for binary files
is_binary=True,
content_base64=content_b64,
)
)
except Exception as e:
logger.warning(
f"Failed to read binary file {file_path}: {e}"
)
except Exception as e:
logger.warning(f"File extraction failed: {e}")
return files
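
A condensed sketch of the extraction flow above with the sandbox calls replaced by placeholders: the timestamp-scoped `find` command, the `stat`-based size guard, and the bytes-to-base64 step. Values such as the working directory and timestamp are illustrative:

```python
import base64
import shlex

working_directory = "/home/user"
since_timestamp = "2026-02-12T11:00:00"

# Same command shape as above: only files touched after the timestamp,
# skipping node_modules and .git.
find_cmd = (
    f"find {shlex.quote(working_directory)} -type f "
    f"-newermt {shlex.quote(since_timestamp)} "
    "-not -path '*/node_modules/*' -not -path '*/.git/*' 2>/dev/null"
)
print(find_cmd)

MAX_BINARY_FILE_SIZE = 50 * 1024 * 1024  # 50MB, matching the guard above

def guard_and_encode(stat_exit_code: int, stat_stdout: str, raw: bytes) -> str | None:
    # Skip the file entirely if stat failed (deleted, permissions, ...) so we
    # never read a file of unknown size, and skip anything over the limit.
    if stat_exit_code != 0 or not stat_stdout:
        return None
    if int(stat_stdout.strip()) > MAX_BINARY_FILE_SIZE:
        return None
    return base64.b64encode(raw).decode("ascii")

print(guard_and_encode(0, "4", b"%PDF"))  # 'JVBERg=='
```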
def _escape_prompt(self, prompt: str) -> str: def _escape_prompt(self, prompt: str) -> str:
"""Escape the prompt for safe shell execution.""" """Escape the prompt for safe shell execution."""
# Use single quotes and escape any single quotes in the prompt # Use single quotes and escape any single quotes in the prompt
@@ -490,7 +726,6 @@ class ClaudeCodeBlock(Block):
*, *,
e2b_credentials: APIKeyCredentials, e2b_credentials: APIKeyCredentials,
anthropic_credentials: APIKeyCredentials, anthropic_credentials: APIKeyCredentials,
execution_context: "ExecutionContext",
**kwargs, **kwargs,
) -> BlockOutput: ) -> BlockOutput:
try: try:
@@ -511,7 +746,6 @@ class ClaudeCodeBlock(Block):
existing_sandbox_id=input_data.sandbox_id, existing_sandbox_id=input_data.sandbox_id,
conversation_history=input_data.conversation_history, conversation_history=input_data.conversation_history,
dispose_sandbox=input_data.dispose_sandbox, dispose_sandbox=input_data.dispose_sandbox,
execution_context=execution_context,
) )
yield "response", response yield "response", response

View File

@@ -1,12 +1,12 @@
from enum import Enum from enum import Enum
from typing import TYPE_CHECKING, Any, Literal, Optional from typing import Any, Literal, Optional
from e2b_code_interpreter import AsyncSandbox from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter import Result as E2BExecutionResult from e2b_code_interpreter import Result as E2BExecutionResult
from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart from e2b_code_interpreter.charts import Chart as E2BExecutionResultChart
from pydantic import BaseModel, Field, JsonValue, SecretStr from pydantic import BaseModel, Field, JsonValue, SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,
@@ -20,13 +20,6 @@ from backend.data.model import (
SchemaField, SchemaField,
) )
from backend.integrations.providers import ProviderName from backend.integrations.providers import ProviderName
from backend.util.sandbox_files import (
SandboxFileOutput,
extract_and_store_sandbox_files,
)
if TYPE_CHECKING:
from backend.executor.utils import ExecutionContext
TEST_CREDENTIALS = APIKeyCredentials( TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef", id="01234567-89ab-cdef-0123-456789abcdef",
@@ -92,9 +85,6 @@ class CodeExecutionResult(MainCodeExecutionResult):
class BaseE2BExecutorMixin: class BaseE2BExecutorMixin:
"""Shared implementation methods for E2B executor blocks.""" """Shared implementation methods for E2B executor blocks."""
# Default working directory in E2B sandboxes
WORKING_DIR = "/home/user"
async def execute_code( async def execute_code(
self, self,
api_key: str, api_key: str,
@@ -105,21 +95,14 @@ class BaseE2BExecutorMixin:
timeout: Optional[int] = None, timeout: Optional[int] = None,
sandbox_id: Optional[str] = None, sandbox_id: Optional[str] = None,
dispose_sandbox: bool = False, dispose_sandbox: bool = False,
execution_context: Optional["ExecutionContext"] = None,
extract_files: bool = False,
): ):
""" """
Unified code execution method that handles all three use cases: Unified code execution method that handles all three use cases:
1. Create new sandbox and execute (ExecuteCodeBlock) 1. Create new sandbox and execute (ExecuteCodeBlock)
2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock) 2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
3. Connect to existing sandbox and execute (ExecuteCodeStepBlock) 3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)
Args:
extract_files: If True and execution_context provided, extract files
created/modified during execution and store to workspace.
""" # noqa """ # noqa
sandbox = None sandbox = None
files: list[SandboxFileOutput] = []
try: try:
if sandbox_id: if sandbox_id:
# Connect to existing sandbox (ExecuteCodeStepBlock case) # Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -135,12 +118,6 @@ class BaseE2BExecutorMixin:
for cmd in setup_commands: for cmd in setup_commands:
await sandbox.commands.run(cmd) await sandbox.commands.run(cmd)
# Capture timestamp before execution to scope file extraction
start_timestamp = None
if extract_files:
ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
# Execute the code # Execute the code
execution = await sandbox.run_code( execution = await sandbox.run_code(
code, code,
@@ -156,24 +133,7 @@ class BaseE2BExecutorMixin:
stdout_logs = "".join(execution.logs.stdout) stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr) stderr_logs = "".join(execution.logs.stderr)
# Extract files created/modified during this execution return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
if extract_files and execution_context:
files = await extract_and_store_sandbox_files(
sandbox=sandbox,
working_directory=self.WORKING_DIR,
execution_context=execution_context,
since_timestamp=start_timestamp,
text_only=False, # Include binary files too
)
return (
results,
text_output,
stdout_logs,
stderr_logs,
sandbox.sandbox_id,
files,
)
finally: finally:
# Dispose of sandbox if requested to reduce usage costs # Dispose of sandbox if requested to reduce usage costs
if dispose_sandbox and sandbox: if dispose_sandbox and sandbox:
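
A runnable toy mirroring the three call shapes unified by `execute_code()` above; the names and return values are illustrative stand-ins for the real E2B-backed method, which returns the same 5-tuple after this revert:

```python
import asyncio

async def execute_code(code: str, sandbox_id: str | None = None,
                       dispose_sandbox: bool = False):
    sandbox_id = sandbox_id or "sbx-123"  # reconnect if an id is given, else "create"
    return [], f"ran: {code}", "stdout\n", "", sandbox_id

async def main():
    # 1. ExecuteCodeBlock: fresh sandbox, disposed after the run
    results, text, stdout, stderr, _ = await execute_code("print(1)", dispose_sandbox=True)
    # 2. InstantiateCodeSandboxBlock: fresh sandbox, id kept for later steps
    *_, sid = await execute_code("setup()")
    # 3. ExecuteCodeStepBlock: reuse the existing sandbox by id
    *_, same_sid = await execute_code("step()", sandbox_id=sid)
    assert sid == same_sid

asyncio.run(main())
```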
@@ -278,12 +238,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
description="Standard output logs from execution" description="Standard output logs from execution"
) )
stderr_logs: str = SchemaField(description="Standard error logs from execution") stderr_logs: str = SchemaField(description="Standard error logs from execution")
files: list[SandboxFileOutput] = SchemaField(
description=(
"Files created or modified during execution. "
"Each file has path, name, content, and workspace_ref (if stored)."
),
)
def __init__(self): def __init__(self):
super().__init__( super().__init__(
@@ -305,30 +259,23 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
("results", []), ("results", []),
("response", "Hello World"), ("response", "Hello World"),
("stdout_logs", "Hello World\n"), ("stdout_logs", "Hello World\n"),
("files", []),
], ],
test_mock={ test_mock={
"execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa
[], # results [], # results
"Hello World", # text_output "Hello World", # text_output
"Hello World\n", # stdout_logs "Hello World\n", # stdout_logs
"", # stderr_logs "", # stderr_logs
"sandbox_id", # sandbox_id "sandbox_id", # sandbox_id
[], # files
), ),
}, },
) )
async def run( async def run(
self, self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
input_data: Input,
*,
credentials: APIKeyCredentials,
execution_context: "ExecutionContext",
**kwargs,
) -> BlockOutput: ) -> BlockOutput:
try: try:
results, text_output, stdout, stderr, _, files = await self.execute_code( results, text_output, stdout, stderr, _ = await self.execute_code(
api_key=credentials.api_key.get_secret_value(), api_key=credentials.api_key.get_secret_value(),
code=input_data.code, code=input_data.code,
language=input_data.language, language=input_data.language,
@@ -336,8 +283,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
setup_commands=input_data.setup_commands, setup_commands=input_data.setup_commands,
timeout=input_data.timeout, timeout=input_data.timeout,
dispose_sandbox=input_data.dispose_sandbox, dispose_sandbox=input_data.dispose_sandbox,
execution_context=execution_context,
extract_files=True,
) )
# Determine result object shape & filter out empty formats # Determine result object shape & filter out empty formats
@@ -351,8 +296,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
yield "stdout_logs", stdout yield "stdout_logs", stdout
if stderr: if stderr:
yield "stderr_logs", stderr yield "stderr_logs", stderr
# Always yield files (empty list if none)
yield "files", [f.model_dump() for f in files]
except Exception as e: except Exception as e:
yield "error", str(e) yield "error", str(e)
@@ -450,7 +393,6 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
"Hello World\n", # stdout_logs "Hello World\n", # stdout_logs
"", # stderr_logs "", # stderr_logs
"sandbox_id", # sandbox_id "sandbox_id", # sandbox_id
[], # files
), ),
}, },
) )
@@ -459,7 +401,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput: ) -> BlockOutput:
try: try:
_, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code( _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
api_key=credentials.api_key.get_secret_value(), api_key=credentials.api_key.get_secret_value(),
code=input_data.setup_code, code=input_data.setup_code,
language=input_data.language, language=input_data.language,
@@ -558,7 +500,6 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
"Hello World\n", # stdout_logs "Hello World\n", # stdout_logs
"", # stderr_logs "", # stderr_logs
sandbox_id, # sandbox_id sandbox_id, # sandbox_id
[], # files
), ),
}, },
) )
@@ -567,7 +508,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput: ) -> BlockOutput:
try: try:
results, text_output, stdout, stderr, _, _ = await self.execute_code( results, text_output, stdout, stderr, _ = await self.execute_code(
api_key=credentials.api_key.get_secret_value(), api_key=credentials.api_key.get_secret_value(),
code=input_data.step_code, code=input_data.step_code,
language=input_data.language, language=input_data.language,

View File

@@ -1,6 +1,6 @@
import re import re
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
from openai.types.responses import Response as OpenAIResponse from openai.types.responses import Response as OpenAIResponse
from pydantic import SecretStr from pydantic import SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,6 +1,6 @@
from pydantic import BaseModel from pydantic import BaseModel
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockManualWebhookConfig, BlockManualWebhookConfig,

View File

@@ -1,4 +1,4 @@
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,6 +1,6 @@
from typing import Any, List from typing import Any, List
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -1,6 +1,6 @@
import codecs import codecs
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -8,7 +8,7 @@ from typing import Any, Literal, cast
import discord import discord
from pydantic import SecretStr from pydantic import SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -2,7 +2,7 @@
Discord OAuth-based blocks. Discord OAuth-based blocks.
""" """
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -7,7 +7,7 @@ from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr from pydantic import BaseModel, ConfigDict, SecretStr
from backend.blocks._base import ( from backend.data.block import (
Block, Block,
BlockCategory, BlockCategory,
BlockOutput, BlockOutput,

View File

@@ -2,7 +2,7 @@
 import codecs

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -8,7 +8,7 @@ which provides access to LinkedIn profile data and related information.
 import logging
 from typing import Optional

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,13 +3,6 @@ import logging
 from enum import Enum
 from typing import Any

-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.fal._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -17,6 +10,13 @@ from backend.blocks.fal._auth import (
     FalCredentialsField,
     FalCredentialsInput,
 )
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file

View File

@@ -5,7 +5,7 @@ from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Optional
 from pydantic import BaseModel

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -5,7 +5,7 @@ from typing import Optional
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from urllib.parse import urlparse
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -2,7 +2,7 @@ import re
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -2,7 +2,7 @@ import base64
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -4,7 +4,7 @@ from typing import Any, List, Optional
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Optional
 from pydantic import BaseModel

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -4,7 +4,7 @@ from pathlib import Path
 from pydantic import BaseModel

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -8,7 +8,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -7,14 +7,14 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from gravitas_md2gdocs import to_requests

-from backend.blocks._base import (
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings

View File

@@ -14,7 +14,7 @@ from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build
 from pydantic import BaseModel, Field

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -7,14 +7,14 @@ from enum import Enum
 from google.oauth2.credentials import Credentials
 from googleapiclient.discovery import build

-from backend.blocks._base import (
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
 from backend.data.model import SchemaField
 from backend.util.settings import Settings

View File

@@ -3,7 +3,7 @@ from typing import Literal
 import googlemaps
 from pydantic import BaseModel, SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -9,7 +9,9 @@ from typing import Any, Optional
 from prisma.enums import ReviewStatus
 from pydantic import BaseModel

+from backend.data.execution import ExecutionStatus
 from backend.data.human_review import ReviewResult
+from backend.executor.manager import async_update_node_execution_status
 from backend.util.clients import get_database_manager_async_client

 logger = logging.getLogger(__name__)
@@ -41,8 +43,6 @@ class HITLReviewHelper:
     @staticmethod
     async def update_node_execution_status(**kwargs) -> None:
         """Update the execution status of a node."""
-        from backend.executor.manager import async_update_node_execution_status
-
         await async_update_node_execution_status(
             db_client=get_database_manager_async_client(), **kwargs
         )
@@ -88,13 +88,12 @@ class HITLReviewHelper:
         Raises:
             Exception: If review creation or status update fails
         """
-        from backend.data.execution import ExecutionStatus
         # Note: Safe mode checks (human_in_the_loop_safe_mode, sensitive_action_safe_mode)
         # are handled by the caller:
         # - HITL blocks check human_in_the_loop_safe_mode in their run() method
         # - Sensitive action blocks check sensitive_action_safe_mode in is_block_exec_need_review()
         # This function only handles checking for existing approvals.

         # Check if this node has already been approved (normal or auto-approval)
         if approval_result := await HITLReviewHelper.check_approval(
             node_exec_id=node_exec_id,
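Aside: the last hunk above documents that safe-mode checks live with the callers, so this helper only short-circuits when a (normal or auto) approval already exists, via the walrus check. A hedged, self-contained sketch of that control flow, with check_approval stubbed and a hypothetical minimal ReviewResult, since neither full definition appears in this diff:

import asyncio
from dataclasses import dataclass
from typing import Optional

@dataclass
class ReviewResult:  # hypothetical minimal shape, for illustration only
    approved: bool

async def check_approval(node_exec_id: str) -> Optional[ReviewResult]:
    # Stub: the real helper looks up an existing (normal or auto) approval
    # for this node execution and returns None when there is none.
    return ReviewResult(approved=True) if node_exec_id == "node-1" else None

async def handle_review(node_exec_id: str) -> str:
    # Walrus pattern from the hunk: skip creating a review when one is resolved.
    if approval_result := await check_approval(node_exec_id):
        return "approved" if approval_result.approved else "rejected"
    return "pending review"

print(asyncio.run(handle_review("node-1")))  # approved
print(asyncio.run(handle_review("node-2")))  # pending review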

View File

@@ -8,7 +8,7 @@ from typing import Literal
 import aiofiles
 from pydantic import SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -1,17 +1,17 @@
 from datetime import datetime, timedelta

-from backend.blocks._base import (
+from backend.blocks.hubspot._auth import (
+    HubSpotCredentials,
+    HubSpotCredentialsField,
+    HubSpotCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.hubspot._auth import (
-    HubSpotCredentials,
-    HubSpotCredentialsField,
-    HubSpotCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -3,7 +3,8 @@ from typing import Any
 from prisma.enums import ReviewStatus

-from backend.blocks._base import (
+from backend.blocks.helpers.review import HITLReviewHelper
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -11,7 +12,6 @@ from backend.blocks._base import (
     BlockSchemaOutput,
     BlockType,
 )
-from backend.blocks.helpers.review import HITLReviewHelper
 from backend.data.execution import ExecutionContext
 from backend.data.human_review import ReviewResult
 from backend.data.model import SchemaField

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, Literal, Optional
 from pydantic import SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -2,7 +2,9 @@ import copy
 from datetime import date, time
 from typing import Any, Optional

-from backend.blocks._base import (
+# Import for Google Drive file input block
+from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
@@ -10,9 +12,6 @@ from backend.blocks._base import (
     BlockSchemaInput,
     BlockType,
 )
-# Import for Google Drive file input block
-from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
 from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField
 from backend.util.file import store_media_file

View File

@@ -1,6 +1,6 @@
 from typing import Any

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -3,18 +3,18 @@ from urllib.parse import quote
 from typing_extensions import TypedDict

-from backend.blocks._base import (
+from backend.blocks.jina._auth import (
+    JinaCredentials,
+    JinaCredentialsField,
+    JinaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.jina._auth import (
-    JinaCredentials,
-    JinaCredentialsField,
-    JinaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests

View File

@@ -1,12 +1,5 @@
 from urllib.parse import quote

-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.jina._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -15,6 +8,13 @@ from backend.blocks.jina._auth import (
     JinaCredentialsInput,
 )
 from backend.blocks.search import GetRequest
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import SchemaField
 from backend.util.exceptions import BlockExecutionError

View File

@@ -15,7 +15,7 @@ from anthropic.types import ToolParam
 from groq import AsyncGroq
 from pydantic import BaseModel, SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -2,7 +2,7 @@ import operator
 from enum import Enum
 from typing import Any

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import List, Literal
 from pydantic import SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Any, Literal, Optional, Union
 from mem0 import MemoryClient
 from pydantic import BaseModel, SecretStr

-from backend.blocks._base import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
+from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
 from backend.data.model import (
     APIKeyCredentials,
     CredentialsField,

View File

@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional
 from pydantic import model_validator

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -2,7 +2,7 @@ from __future__ import annotations
 from typing import Any, Dict, List, Optional

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -4,7 +4,7 @@ from typing import List, Optional
 from pydantic import BaseModel

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,15 +1,15 @@
-from backend.blocks._base import (
+from backend.blocks.nvidia._auth import (
+    NvidiaCredentials,
+    NvidiaCredentialsField,
+    NvidiaCredentialsInput,
+)
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.blocks.nvidia._auth import (
-    NvidiaCredentials,
-    NvidiaCredentialsField,
-    NvidiaCredentialsInput,
-)
 from backend.data.model import SchemaField
 from backend.util.request import Requests
 from backend.util.type import MediaFileType

View File

@@ -6,7 +6,7 @@ from typing import Any, Literal
 import openai
 from pydantic import SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -1,7 +1,7 @@
 import logging
 from typing import Any, Literal

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -3,7 +3,7 @@ from typing import Any, Literal
 from pinecone import Pinecone, ServerlessSpec

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -6,7 +6,7 @@ import praw
 from praw.models import Comment, MoreComments, Submission
 from pydantic import BaseModel, SecretStr

-from backend.blocks._base import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,

View File

@@ -4,19 +4,19 @@ from enum import Enum
 from pydantic import SecretStr
 from replicate.client import Client as ReplicateClient

-from backend.blocks._base import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
 from backend.blocks.replicate._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     ReplicateCredentialsInput,
 )
 from backend.blocks.replicate._helper import ReplicateOutputs, extract_result
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
 from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField

Some files were not shown because too many files have changed in this diff.