mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-13 08:14:58 -05:00
Compare commits: dev ... fix/copilo (3 Commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 5348d97437 |  |
|  | 6573d987ea |  |
|  | ae8ce8b4ca |  |
@@ -5,13 +5,42 @@
 !docs/

 # Platform - Libs
-!autogpt_platform/autogpt_libs/
+!autogpt_platform/autogpt_libs/autogpt_libs/
+!autogpt_platform/autogpt_libs/pyproject.toml
+!autogpt_platform/autogpt_libs/poetry.lock
+!autogpt_platform/autogpt_libs/README.md

 # Platform - Backend
-!autogpt_platform/backend/
+!autogpt_platform/backend/backend/
+!autogpt_platform/backend/test/e2e_test_data.py
+!autogpt_platform/backend/migrations/
+!autogpt_platform/backend/schema.prisma
+!autogpt_platform/backend/pyproject.toml
+!autogpt_platform/backend/poetry.lock
+!autogpt_platform/backend/README.md
+!autogpt_platform/backend/.env
+!autogpt_platform/backend/gen_prisma_types_stub.py
+
+# Platform - Market
+!autogpt_platform/market/market/
+!autogpt_platform/market/scripts.py
+!autogpt_platform/market/schema.prisma
+!autogpt_platform/market/pyproject.toml
+!autogpt_platform/market/poetry.lock
+!autogpt_platform/market/README.md

 # Platform - Frontend
-!autogpt_platform/frontend/
+!autogpt_platform/frontend/src/
+!autogpt_platform/frontend/public/
+!autogpt_platform/frontend/scripts/
+!autogpt_platform/frontend/package.json
+!autogpt_platform/frontend/pnpm-lock.yaml
+!autogpt_platform/frontend/tsconfig.json
+!autogpt_platform/frontend/README.md
+## config
+!autogpt_platform/frontend/*.config.*
+!autogpt_platform/frontend/.env.*
+!autogpt_platform/frontend/.env

 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
@@ -35,38 +64,6 @@
 # Classic - Frontend
 !classic/frontend/build/web/

-# Explicitly re-ignore unwanted files from whitelisted directories
-# Note: These patterns MUST come after the whitelist rules to take effect
-# Hidden files and directories (but keep frontend .env files needed for build)
-**/.*
-!autogpt_platform/frontend/.env
-!autogpt_platform/frontend/.env.default
-!autogpt_platform/frontend/.env.production
-
-# Python artifacts
-**/__pycache__/
-**/*.pyc
-**/*.pyo
-**/.venv/
-**/.ruff_cache/
-**/.pytest_cache/
-**/.coverage
-**/htmlcov/
-
-# Node artifacts
-**/node_modules/
-**/.next/
-**/storybook-static/
-**/playwright-report/
-**/test-results/
-
-# Build artifacts
-**/dist/
-**/build/
-!autogpt_platform/frontend/src/**/build/
-**/target/
-
-# Logs and temp files
-**/*.log
-**/*.tmp
+# Explicitly re-ignore some folders
+.*
+**/__pycache__
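The removed comment above ("These patterns MUST come after the whitelist rules to take effect") reflects how `.dockerignore` is evaluated: the last matching pattern wins, so a trailing `.*` or `**/__pycache__` re-ignores anything an earlier `!` rule let through. A rough Python sketch of that last-match-wins rule (a simplification, not Docker's actual matcher, which follows Go's filepath.Match with extra `**` handling; the rule set below is hypothetical, mirroring the structure of the file):

```python
# Last-match-wins sketch of .dockerignore evaluation (simplified).
from fnmatch import fnmatch

def ignored(path: str, patterns: list[str]) -> bool:
    verdict = False  # paths are included unless a pattern says otherwise
    for pat in patterns:
        negate = pat.startswith("!")
        body = pat.lstrip("!").rstrip("/")
        # Match the path itself, or anything underneath a listed directory.
        if fnmatch(path, body) or fnmatch(path, body + "/*"):
            verdict = not negate  # later patterns override earlier ones
    return verdict

# Hypothetical rules: ignore everything, whitelist a directory,
# then re-ignore hidden files and __pycache__ (order matters).
rules = ["*", "!autogpt_platform/backend/", ".*", "**/__pycache__"]
print(ignored("autogpt_platform/backend/app.py", rules))       # False: whitelisted
print(ignored("autogpt_platform/backend/__pycache__", rules))  # True: re-ignored
print(ignored(".git", rules))                                  # True: re-ignored
```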
249  .github/workflows/platform-frontend-ci.yml  (vendored)
@@ -26,6 +26,7 @@ jobs:
   setup:
     runs-on: ubuntu-latest
     outputs:
+      cache-key: ${{ steps.cache-key.outputs.key }}
       components-changed: ${{ steps.filter.outputs.components }}

     steps:
@@ -40,17 +41,28 @@ jobs:
           components:
             - 'autogpt_platform/frontend/src/components/**'

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

-      - name: Install dependencies to populate cache
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Generate cache key
+        id: cache-key
+        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
+
+      - name: Cache dependencies
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ steps.cache-key.outputs.key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-
+
+      - name: Install dependencies
         run: pnpm install --frozen-lockfile

   lint:
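The `setup` job above now computes a shared pnpm cache key, and each downstream job restores it with `restore-keys` prefixes, as the hunks below repeat per job. A small sketch of the lookup order this relies on (an assumed simplification of how `actions/cache` resolves keys: exact primary-key hit first, then each restore-key treated as a prefix with the newest matching entry winning; the cache names are made up for illustration):

```python
# Assumed simplification of actions/cache key resolution.
def resolve_cache(primary: str, restore_keys: list[str], entries: list[str]) -> str | None:
    if primary in entries:
        return primary  # exact hit: lockfile + package.json unchanged
    for prefix in restore_keys:
        matches = [e for e in entries if e.startswith(prefix)]
        if matches:
            return matches[-1]  # stand-in for "newest matching entry"
    return None

entries = ["Linux-pnpm-1a2b3c", "Linux-pnpm-9f8e7d"]
key = "Linux-pnpm-000000"  # hash changed, so no exact hit
print(resolve_cache(key, ["Linux-pnpm-"], entries))  # -> "Linux-pnpm-9f8e7d"
```

A stale prefix hit still speeds up `pnpm install --frozen-lockfile`, since most of the store is reusable even when the lockfile changes.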
@@ -61,15 +73,22 @@ jobs:
       - name: Checkout repository
        uses: actions/checkout@v6

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -92,15 +111,22 @@ jobs:
         with:
           fetch-depth: 0

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -115,8 +141,10 @@ jobs:
           exitOnceUploaded: true

   e2e_test:
-    name: end-to-end tests
     runs-on: big-boi
+    needs: setup
+    strategy:
+      fail-fast: false

     steps:
       - name: Checkout repository
@@ -124,11 +152,19 @@ jobs:
         with:
           submodules: recursive

-      - name: Set up Platform - Copy default supabase .env
+      - name: Set up Node.js
+        uses: actions/setup-node@v6
+        with:
+          node-version: "22.18.0"
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Copy default supabase .env
         run: |
           cp ../.env.default ../.env

-      - name: Set up Platform - Copy backend .env and set OpenAI API key
+      - name: Copy backend .env and set OpenAI API key
         run: |
           cp ../backend/.env.default ../backend/.env
           echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -136,125 +172,77 @@ jobs:
           # Used by E2E test data script to generate embeddings for approved store agents
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

-      - name: Set up Platform - Set up Docker Buildx
+      - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
-        with:
-          driver: docker-container
-          driver-opts: network=host

-      - name: Set up Platform - Expose GHA cache to docker buildx CLI
-        uses: crazy-max/ghaction-github-runtime@v3
-
-      - name: Set up Platform - Build Docker images (with cache)
-        working-directory: autogpt_platform
-        run: |
-          pip install pyyaml
-
-          # Resolve extends and generate a flat compose file that bake can understand
-          docker compose -f docker-compose.yml config > docker-compose.resolved.yml
-
-          # Add cache configuration to the resolved compose file
-          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
-            --source docker-compose.resolved.yml \
-            --cache-from "type=gha" \
-            --cache-to "type=gha,mode=max" \
-            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
-            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
-            --git-ref "${{ github.ref }}"
-
-          # Build with bake using the resolved compose file (now includes cache config)
-          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
-        env:
-          NEXT_PUBLIC_PW_TEST: true
-
-      - name: Set up tests - Cache E2E test data
-        id: e2e-data-cache
+      - name: Cache Docker layers
         uses: actions/cache@v5
         with:
-          path: /tmp/e2e_test_data.sql
-          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-frontend-test-

-      - name: Set up Platform - Start Supabase DB + Auth
+      - name: Run docker compose
         run: |
-          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
-          echo "Waiting for database to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
-          echo "Waiting for auth service to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
-
-      - name: Set up Platform - Run migrations
-        run: |
-          echo "Running migrations..."
-          docker compose -f ../docker-compose.resolved.yml run --rm migrate
-          echo "✅ Migrations completed"
+          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
         env:
-          NEXT_PUBLIC_PW_TEST: true
+          DOCKER_BUILDKIT: 1
+          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
+          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max

-      - name: Set up tests - Load cached E2E test data
-        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
+      - name: Move cache
         run: |
-          echo "✅ Found cached E2E test data, restoring..."
-          {
-            echo "SET session_replication_role = 'replica';"
-            cat /tmp/e2e_test_data.sql
-            echo "SET session_replication_role = 'origin';"
-          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
-          # Refresh materialized views after restore
-          docker compose -f ../docker-compose.resolved.yml exec -T db \
-            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
-
-          echo "✅ E2E test data restored from cache"
-
-      - name: Set up Platform - Start (all other services)
+          rm -rf /tmp/.buildx-cache
+          if [ -d "/tmp/.buildx-cache-new" ]; then
+            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+          fi
+
+      - name: Wait for services to be ready
         run: |
-          docker compose -f ../docker-compose.resolved.yml up -d --no-build
           echo "Waiting for rest_server to be ready..."
           timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
-        env:
-          NEXT_PUBLIC_PW_TEST: true
+          echo "Waiting for database to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

-      - name: Set up tests - Create E2E test data
-        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
+      - name: Create E2E test data
         run: |
           echo "Creating E2E test data..."
-          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
-          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
-            echo "❌ E2E test data creation failed!"
-            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
-            exit 1
-          }
-
-          # Dump auth.users + platform schema for cache (two separate dumps)
-          echo "Dumping database for cache..."
-          {
-            docker compose -f ../docker-compose.resolved.yml exec -T db \
-              pg_dump -U postgres --data-only --column-inserts \
-              --table='auth.users' postgres
-            docker compose -f ../docker-compose.resolved.yml exec -T db \
-              pg_dump -U postgres --data-only --column-inserts \
-              --schema=platform \
-              --exclude-table='platform._prisma_migrations' \
-              --exclude-table='platform.apscheduler_jobs' \
-              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
-              postgres
-          } > /tmp/e2e_test_data.sql
-
-          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
-
-      - name: Set up tests - Enable corepack
-        run: corepack enable
-
-      - name: Set up tests - Set up Node
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+          # First try to run the script from inside the container
+          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
+            echo "✅ Found e2e_test_data.py in container, running it..."
+            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
+              echo "❌ E2E test data creation failed!"
+              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+              exit 1
+            }
+          else
+            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
+            # Copy the script into the container and run it
+            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
+              echo "❌ Failed to copy script to container"
+              exit 1
+            }
+            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
+              echo "❌ E2E test data creation failed!"
+              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
+              exit 1
+            }
+          fi
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

-      - name: Set up tests - Install dependencies
+      - name: Install dependencies
         run: pnpm install --frozen-lockfile

-      - name: Set up tests - Install browser 'chromium'
+      - name: Install Browser 'chromium'
         run: pnpm playwright install --with-deps chromium

       - name: Run Playwright tests
@@ -281,7 +269,7 @@ jobs:

       - name: Print Final Docker Compose logs
         if: always()
-        run: docker compose -f ../docker-compose.resolved.yml logs
+        run: docker compose -f ../docker-compose.yml logs

   integration_test:
     runs-on: ubuntu-latest
@@ -293,15 +281,22 @@ jobs:
         with:
           submodules: recursive

-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set up Node
+      - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22.18.0"
-          cache: "pnpm"
-          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Restore dependencies cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.pnpm-store
+          key: ${{ needs.setup.outputs.cache-key }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+            ${{ runner.os }}-pnpm-

       - name: Install dependencies
         run: pnpm install --frozen-lockfile
.github/workflows/scripts/docker-ci-fix-compose-build-cache.py  (deleted)

@@ -1,195 +0,0 @@
-#!/usr/bin/env python3
-"""
-Add cache configuration to a resolved docker-compose file for all services
-that have a build key, and ensure image names match what docker compose expects.
-"""
-
-import argparse
-
-import yaml
-
-
-DEFAULT_BRANCH = "dev"
-CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Add cache config to a resolved compose file"
-    )
-    parser.add_argument(
-        "--source",
-        required=True,
-        help="Source compose file to read (should be output of `docker compose config`)",
-    )
-    parser.add_argument(
-        "--cache-from",
-        default="type=gha",
-        help="Cache source configuration",
-    )
-    parser.add_argument(
-        "--cache-to",
-        default="type=gha,mode=max",
-        help="Cache destination configuration",
-    )
-    for component in CACHE_BUILDS_FOR_COMPONENTS:
-        parser.add_argument(
-            f"--{component}-hash",
-            default="",
-            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
-        )
-    parser.add_argument(
-        "--git-ref",
-        default="",
-        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
-    )
-    args = parser.parse_args()
-
-    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
-    git_ref_scope = ""
-    if args.git_ref:
-        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")
-
-    with open(args.source, "r") as f:
-        compose = yaml.safe_load(f)
-
-    # Get project name from compose file or default
-    project_name = compose.get("name", "autogpt_platform")
-
-    def get_image_name(dockerfile: str, target: str) -> str:
-        """Generate image name based on Dockerfile folder and build target."""
-        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
-        if len(dockerfile_parts) >= 2:
-            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
-        else:
-            folder_name = "app"
-        return f"{project_name}-{folder_name}:{target}"
-
-    def get_build_key(dockerfile: str, target: str) -> str:
-        """Generate a unique key for a Dockerfile+target combination."""
-        return f"{dockerfile}:{target}"
-
-    def get_component(dockerfile: str) -> str | None:
-        """Get component name (frontend/backend) from dockerfile path."""
-        for component in CACHE_BUILDS_FOR_COMPONENTS:
-            if component in dockerfile:
-                return component
-        return None
-
-    # First pass: collect all services with build configs and identify duplicates
-    # Track which (dockerfile, target) combinations we've seen
-    build_key_to_first_service: dict[str, str] = {}
-    services_to_build: list[str] = []
-    services_to_dedupe: list[str] = []
-
-    for service_name, service_config in compose.get("services", {}).items():
-        if "build" not in service_config:
-            continue
-
-        build_config = service_config["build"]
-        dockerfile = build_config.get("dockerfile", "Dockerfile")
-        target = build_config.get("target", "default")
-        build_key = get_build_key(dockerfile, target)
-
-        if build_key not in build_key_to_first_service:
-            # First service with this build config - it will do the actual build
-            build_key_to_first_service[build_key] = service_name
-            services_to_build.append(service_name)
-        else:
-            # Duplicate - will just use the image from the first service
-            services_to_dedupe.append(service_name)
-
-    # Second pass: configure builds and deduplicate
-    modified_services = []
-    for service_name, service_config in compose.get("services", {}).items():
-        if "build" not in service_config:
-            continue
-
-        build_config = service_config["build"]
-        dockerfile = build_config.get("dockerfile", "Dockerfile")
-        target = build_config.get("target", "latest")
-        image_name = get_image_name(dockerfile, target)
-
-        # Set image name for all services (needed for both builders and deduped)
-        service_config["image"] = image_name
-
-        if service_name in services_to_dedupe:
-            # Remove build config - this service will use the pre-built image
-            del service_config["build"]
-            continue
-
-        # This service will do the actual build - add cache config
-        cache_from_list = []
-        cache_to_list = []
-
-        component = get_component(dockerfile)
-        if not component:
-            # Skip services that don't clearly match frontend/backend
-            continue
-
-        # Get the hash for this component
-        component_hash = getattr(args, f"{component}_hash")
-
-        # Scope format: platform-{component}-{target}-{hash|ref}
-        # Example: platform-backend-server-abc123
-
-        if "type=gha" in args.cache_from:
-            # 1. Primary: exact hash match (most specific)
-            if component_hash:
-                hash_scope = f"platform-{component}-{target}-{component_hash}"
-                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")
-
-            # 2. Fallback: branch-based cache
-            if git_ref_scope:
-                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
-                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")
-
-            # 3. Fallback: dev branch cache (for PRs/feature branches)
-            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
-                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
-                cache_from_list.append(f"{args.cache_from},scope={master_scope}")
-
-        if "type=gha" in args.cache_to:
-            # Write to both hash-based and branch-based scopes
-            if component_hash:
-                hash_scope = f"platform-{component}-{target}-{component_hash}"
-                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")
-
-            if git_ref_scope:
-                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
-                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")
-
-        # Ensure we have at least one cache source/target
-        if not cache_from_list:
-            cache_from_list.append(args.cache_from)
-        if not cache_to_list:
-            cache_to_list.append(args.cache_to)
-
-        build_config["cache_from"] = cache_from_list
-        build_config["cache_to"] = cache_to_list
-        modified_services.append(service_name)
-
-    # Write back to the same file
-    with open(args.source, "w") as f:
-        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)
-
-    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
-    for svc in modified_services:
-        svc_config = compose["services"][svc]
-        build_cfg = svc_config.get("build", {})
-        cache_from_list = build_cfg.get("cache_from", ["none"])
-        cache_to_list = build_cfg.get("cache_to", ["none"])
-        print(f"  - {svc}")
-        print(f"    image: {svc_config.get('image', 'N/A')}")
-        print(f"    cache_from: {cache_from_list}")
-        print(f"    cache_to: {cache_to_list}")
-    if services_to_dedupe:
-        print(
-            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
-        )
-        for svc in services_to_dedupe:
-            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")
-
-
-if __name__ == "__main__":
-    main()
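The deleted helper above layered up to three `cache_from` scopes per component: the exact content hash first, the current branch next, and the `dev` default branch as the broadest fallback for PRs and feature branches. A standalone sketch of that fallback construction, extracted from the logic in the removed script:

```python
# Scope-fallback construction as implemented by the deleted script:
# most specific (content hash) first, current branch next, default
# branch ("dev") as the broadest fallback.
DEFAULT_BRANCH = "dev"

def cache_from_scopes(component: str, target: str, component_hash: str,
                      git_ref: str, cache_from: str = "type=gha") -> list[str]:
    ref_scope = git_ref.replace("refs/heads/", "").replace("/", "-")
    scopes = []
    if component_hash:
        scopes.append(f"platform-{component}-{target}-{component_hash}")
    if ref_scope:
        scopes.append(f"platform-{component}-{target}-{ref_scope}")
    if ref_scope and ref_scope != DEFAULT_BRANCH:
        scopes.append(f"platform-{component}-{target}-{DEFAULT_BRANCH}")
    return [f"{cache_from},scope={s}" for s in scopes] or [cache_from]

print(cache_from_scopes("backend", "server", "abc123", "refs/heads/feature/x"))
# ['type=gha,scope=platform-backend-server-abc123',
#  'type=gha,scope=platform-backend-server-feature-x',
#  'type=gha,scope=platform-backend-server-dev']
```

Writing to both the hash and branch scopes meant a rebuilt branch could still seed future exact-hash lookups; the replacement workflow trades this for a simpler local buildx cache directory.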
@@ -45,11 +45,6 @@ AutoGPT Platform is a monorepo containing:
 - Backend/Frontend services use YAML anchors for consistent configuration
 - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

-### Branching Strategy
-
-- **`dev`** is the main development branch. All PRs should target `dev`.
-- **`master`** is the production branch. Only used for production releases.
-
 ### Creating Pull Requests

 - Create the PR against the `dev` branch of the repository.
169  autogpt_platform/autogpt_libs/poetry.lock  (generated)
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]

 [[package]]
 name = "cryptography"
-version = "46.0.5"
+version = "46.0.4"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = "!=3.9.0,!=3.9.1,>=3.8"
 groups = ["main"]
 files = [
-    {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
-    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
-    {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
-    {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
-    {file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
-    {file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
-    {file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
-    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
-    {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
-    {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
-    {file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
-    {file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
-    {file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
-    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
-    {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
-    {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
-    {file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
-    {file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
-    {file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
-    {file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
+    {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
+    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
+    {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
+    {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
+    {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
+    {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
+    {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
+    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
+    {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
+    {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
+    {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
+    {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
+    {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
+    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
+    {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
+    {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
+    {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
+    {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
+    {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
+    {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
 ]

 [package.dependencies]
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
 pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
 sdist = ["build (>=1.0.0)"]
 ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
 test-randomorder = ["pytest-randomly"]

 [[package]]
@@ -570,25 +570,24 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]

 [[package]]
 name = "fastapi"
-version = "0.128.7"
+version = "0.128.0"
 description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
-    {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
+    {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
+    {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
 ]

 [package.dependencies]
 annotated-doc = ">=0.0.2"
 pydantic = ">=2.7.0"
-starlette = ">=0.40.0,<1.0.0"
+starlette = ">=0.40.0,<0.51.0"
 typing-extensions = ">=4.8.0"
-typing-inspection = ">=0.4.2"

 [package.extras]
-all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
+all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
 standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
 standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
@@ -1063,14 +1062,14 @@ urllib3 = ">=1.26.0,<3"

 [[package]]
 name = "launchdarkly-server-sdk"
-version = "9.15.0"
+version = "9.14.1"
 description = "LaunchDarkly SDK for Python"
 optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
-    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
+    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
+    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
 ]

 [package.dependencies]
@@ -1479,14 +1478,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]

 [[package]]
 name = "postgrest"
-version = "2.28.0"
+version = "2.27.2"
 description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
-    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
+    {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
+    {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
 ]

 [package.dependencies]
@@ -2249,14 +2248,14 @@ cli = ["click (>=5.0)"]

 [[package]]
 name = "realtime"
-version = "2.28.0"
+version = "2.27.2"
 description = ""
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
-    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
+    {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
+    {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
 ]

 [package.dependencies]
@@ -2437,14 +2436,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart

 [[package]]
 name = "storage3"
-version = "2.28.0"
+version = "2.27.2"
 description = "Supabase Storage client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
-    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
+    {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
+    {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
 ]

 [package.dependencies]
@@ -2488,35 +2487,35 @@ python-dateutil = ">=2.6.0"

 [[package]]
 name = "supabase"
-version = "2.28.0"
+version = "2.27.2"
 description = "Supabase client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
-    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
+    {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
+    {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
 ]

 [package.dependencies]
 httpx = ">=0.26,<0.29"
-postgrest = "2.28.0"
-realtime = "2.28.0"
-storage3 = "2.28.0"
-supabase-auth = "2.28.0"
-supabase-functions = "2.28.0"
+postgrest = "2.27.2"
+realtime = "2.27.2"
+storage3 = "2.27.2"
+supabase-auth = "2.27.2"
+supabase-functions = "2.27.2"
 yarl = ">=1.22.0"

 [[package]]
 name = "supabase-auth"
-version = "2.28.0"
+version = "2.27.2"
 description = "Python Client Library for Supabase Auth"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
-    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
+    {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
+    {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
 ]

 [package.dependencies]
@@ -2526,14 +2525,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "supabase-functions"
|
name = "supabase-functions"
|
||||||
version = "2.28.0"
|
version = "2.27.2"
|
||||||
description = "Library for Supabase Functions"
|
description = "Library for Supabase Functions"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
{file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
|
||||||
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
{file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -2912,4 +2911,4 @@ type = ["pytest-mypy"]
|
|||||||
[metadata]
|
[metadata]
|
||||||
lock-version = "2.1"
|
lock-version = "2.1"
|
||||||
python-versions = ">=3.10,<4.0"
|
python-versions = ">=3.10,<4.0"
|
||||||
content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"
|
content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
|
||||||
|
|||||||
@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
 colorama = "^0.4.6"
 cryptography = "^46.0"
 expiringdict = "^1.2.2"
-fastapi = "^0.128.7"
+fastapi = "^0.128.0"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.15.0"
+launchdarkly-server-sdk = "^9.14.1"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = { version = "^2.11.0", extras = ["crypto"] }
 redis = "^6.2.0"
-supabase = "^2.28.0"
+supabase = "^2.27.2"
 uvicorn = "^0.40.0"
 
 [tool.poetry.group.dev.dependencies]
@@ -1,5 +1,3 @@
-# ============================ DEPENDENCY BUILDER ============================ #
-
 FROM debian:13-slim AS builder
 
 # Set environment variables
@@ -53,9 +51,7 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub
 
-# ============================== BACKEND SERVER ============================== #
-
-FROM debian:13-slim AS server
+FROM debian:13-slim AS server_dependencies
 
 WORKDIR /app
 
@@ -67,14 +63,15 @@ ENV POETRY_HOME=/opt/poetry \
 ENV PATH=/opt/poetry/bin:$PATH
 
 # Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
-# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
-RUN apt-get update && apt-get install -y --no-install-recommends \
+RUN apt-get update && apt-get install -y \
     python3.13 \
    python3-pip \
    ffmpeg \
    imagemagick \
    && rm -rf /var/lib/apt/lists/*
 
+# Copy only necessary files from builder
+COPY --from=builder /app /app
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
 # Copy Node.js installation for Prisma
@@ -84,54 +81,30 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
 COPY --from=builder /usr/bin/npx /usr/bin/npx
 COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
 
-WORKDIR /app/autogpt_platform/backend
 
-# Copy only the .venv from builder (not the entire /app directory)
-# The .venv includes the generated Prisma client
-COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
 ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
 
-# Copy dependency files + autogpt_libs (path dependency)
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
+RUN mkdir -p /app/autogpt_platform/autogpt_libs
+RUN mkdir -p /app/autogpt_platform/backend
 
-# Copy backend code + docs (for Copilot docs search)
-COPY autogpt_platform/backend ./
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
+
+WORKDIR /app/autogpt_platform/backend
+
+FROM server_dependencies AS migrate
+
+# Migration stage only needs schema and migrations - much lighter than full backend
+COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
+COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
+COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
+
+FROM server_dependencies AS server
+
+COPY autogpt_platform/backend /app/autogpt_platform/backend
 COPY docs /app/docs
 RUN poetry install --no-ansi --only-root
 
 ENV PORT=8000
 
 CMD ["poetry", "run", "rest"]
-
-# =============================== DB MIGRATOR =============================== #
-
-# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
-FROM debian:13-slim AS migrate
-
-WORKDIR /app/autogpt_platform/backend
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    python3.13 \
-    python3-pip \
-    ca-certificates \
-    && rm -rf /var/lib/apt/lists/*
-
-# Copy Node.js from builder (needed for Prisma CLI)
-COPY --from=builder /usr/bin/node /usr/bin/node
-COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
-COPY --from=builder /usr/bin/npm /usr/bin/npm
-
-# Copy Prisma binaries
-COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
-
-# Install prisma-client-py directly (much smaller than copying full venv)
-RUN pip3 install prisma>=0.15.0 --break-system-packages
-
-COPY autogpt_platform/backend/schema.prisma ./
-COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
-COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
-COPY autogpt_platform/backend/migrations ./migrations
@@ -24,7 +24,6 @@ from .tools.models import (
     AgentPreviewResponse,
     AgentSavedResponse,
     AgentsFoundResponse,
-    BlockDetailsResponse,
     BlockListResponse,
     BlockOutputResponse,
     ClarificationNeededResponse,
@@ -972,7 +971,6 @@ ToolResponseUnion = (
     | AgentSavedResponse
     | ClarificationNeededResponse
     | BlockListResponse
-    | BlockDetailsResponse
    | BlockOutputResponse
    | DocSearchResultsResponse
    | DocPageResponse
@@ -1,154 +0,0 @@
-"""Dummy Agent Generator for testing.
-
-Returns mock responses matching the format expected from the external service.
-Enable via AGENTGENERATOR_USE_DUMMY=true in settings.
-
-WARNING: This is for testing only. Do not use in production.
-"""
-
-import asyncio
-import logging
-import uuid
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-# Dummy decomposition result (instructions type)
-DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = {
-    "type": "instructions",
-    "steps": [
-        {
-            "description": "Get input from user",
-            "action": "input",
-            "block_name": "AgentInputBlock",
-        },
-        {
-            "description": "Process the input",
-            "action": "process",
-            "block_name": "TextFormatterBlock",
-        },
-        {
-            "description": "Return output to user",
-            "action": "output",
-            "block_name": "AgentOutputBlock",
-        },
-    ],
-}
-
-# Block IDs from backend/blocks/io.py
-AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b"
-AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4"
-
-
-def _generate_dummy_agent_json() -> dict[str, Any]:
-    """Generate a minimal valid agent JSON for testing."""
-    input_node_id = str(uuid.uuid4())
-    output_node_id = str(uuid.uuid4())
-
-    return {
-        "id": str(uuid.uuid4()),
-        "version": 1,
-        "is_active": True,
-        "name": "Dummy Test Agent",
-        "description": "A dummy agent generated for testing purposes",
-        "nodes": [
-            {
-                "id": input_node_id,
-                "block_id": AGENT_INPUT_BLOCK_ID,
-                "input_default": {
-                    "name": "input",
-                    "title": "Input",
-                    "description": "Enter your input",
-                    "placeholder_values": [],
-                },
-                "metadata": {"position": {"x": 0, "y": 0}},
-            },
-            {
-                "id": output_node_id,
-                "block_id": AGENT_OUTPUT_BLOCK_ID,
-                "input_default": {
-                    "name": "output",
-                    "title": "Output",
-                    "description": "Agent output",
-                    "format": "{output}",
-                },
-                "metadata": {"position": {"x": 400, "y": 0}},
-            },
-        ],
-        "links": [
-            {
-                "id": str(uuid.uuid4()),
-                "source_id": input_node_id,
-                "sink_id": output_node_id,
-                "source_name": "result",
-                "sink_name": "value",
-                "is_static": False,
-            },
-        ],
-    }
-
-
-async def decompose_goal_dummy(
-    description: str,
-    context: str = "",
-    library_agents: list[dict[str, Any]] | None = None,
-) -> dict[str, Any]:
-    """Return dummy decomposition result."""
-    logger.info("Using dummy agent generator for decompose_goal")
-    return DUMMY_DECOMPOSITION_RESULT.copy()
-
-
-async def generate_agent_dummy(
-    instructions: dict[str, Any],
-    library_agents: list[dict[str, Any]] | None = None,
-    operation_id: str | None = None,
-    task_id: str | None = None,
-) -> dict[str, Any]:
-    """Return dummy agent JSON after a simulated delay."""
-    logger.info("Using dummy agent generator for generate_agent (30s delay)")
-    await asyncio.sleep(30)
-    return _generate_dummy_agent_json()
-
-
-async def generate_agent_patch_dummy(
-    update_request: str,
-    current_agent: dict[str, Any],
-    library_agents: list[dict[str, Any]] | None = None,
-    operation_id: str | None = None,
-    task_id: str | None = None,
-) -> dict[str, Any]:
-    """Return dummy patched agent (returns the current agent with updated description)."""
-    logger.info("Using dummy agent generator for generate_agent_patch")
-    patched = current_agent.copy()
-    patched["description"] = (
-        f"{current_agent.get('description', '')} (updated: {update_request})"
-    )
-    return patched
-
-
-async def customize_template_dummy(
-    template_agent: dict[str, Any],
-    modification_request: str,
-    context: str = "",
-) -> dict[str, Any]:
-    """Return dummy customized template (returns template with updated description)."""
-    logger.info("Using dummy agent generator for customize_template")
-    customized = template_agent.copy()
-    customized["description"] = (
-        f"{template_agent.get('description', '')} (customized: {modification_request})"
-    )
-    return customized
-
-
-async def get_blocks_dummy() -> list[dict[str, Any]]:
-    """Return dummy blocks list."""
-    logger.info("Using dummy agent generator for get_blocks")
-    return [
-        {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"},
-        {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"},
-    ]
-
-
-async def health_check_dummy() -> bool:
-    """Always returns healthy for dummy service."""
-    return True
@@ -12,19 +12,8 @@ import httpx
 
 from backend.util.settings import Settings
 
-from .dummy import (
-    customize_template_dummy,
-    decompose_goal_dummy,
-    generate_agent_dummy,
-    generate_agent_patch_dummy,
-    get_blocks_dummy,
-    health_check_dummy,
-)
-
 logger = logging.getLogger(__name__)
 
-_dummy_mode_warned = False
-
 
 def _create_error_response(
     error_message: str,
@@ -101,26 +90,10 @@ def _get_settings() -> Settings:
     return _settings
 
 
-def _is_dummy_mode() -> bool:
-    """Check if dummy mode is enabled for testing."""
-    global _dummy_mode_warned
-    settings = _get_settings()
-    is_dummy = bool(settings.config.agentgenerator_use_dummy)
-    if is_dummy and not _dummy_mode_warned:
-        logger.warning(
-            "Agent Generator running in DUMMY MODE - returning mock responses. "
-            "Do not use in production!"
-        )
-        _dummy_mode_warned = True
-    return is_dummy
-
-
 def is_external_service_configured() -> bool:
-    """Check if external Agent Generator service is configured (or dummy mode)."""
+    """Check if external Agent Generator service is configured."""
     settings = _get_settings()
-    return bool(settings.config.agentgenerator_host) or bool(
-        settings.config.agentgenerator_use_dummy
-    )
+    return bool(settings.config.agentgenerator_host)
 
 
 def _get_base_url() -> str:
@@ -164,9 +137,6 @@ async def decompose_goal_external(
       - {"type": "error", "error": "...", "error_type": "..."} on error
       Or None on unexpected error
     """
-    if _is_dummy_mode():
-        return await decompose_goal_dummy(description, context, library_agents)
-
     client = _get_client()
 
     if context:
@@ -256,11 +226,6 @@ async def generate_agent_external(
     Returns:
         Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error
     """
-    if _is_dummy_mode():
-        return await generate_agent_dummy(
-            instructions, library_agents, operation_id, task_id
-        )
-
     client = _get_client()
 
     # Build request payload
@@ -332,11 +297,6 @@ async def generate_agent_patch_external(
     Returns:
         Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
     """
-    if _is_dummy_mode():
-        return await generate_agent_patch_dummy(
-            update_request, current_agent, library_agents, operation_id, task_id
-        )
-
     client = _get_client()
 
     # Build request payload
@@ -423,11 +383,6 @@ async def customize_template_external(
     Returns:
         Customized agent JSON, clarifying questions dict, or error dict on error
     """
-    if _is_dummy_mode():
-        return await customize_template_dummy(
-            template_agent, modification_request, context
-        )
-
     client = _get_client()
 
     request = modification_request
@@ -490,9 +445,6 @@ async def get_blocks_external() -> list[dict[str, Any]] | None:
     Returns:
         List of block info dicts or None on error
     """
-    if _is_dummy_mode():
-        return await get_blocks_dummy()
-
     client = _get_client()
 
     try:
@@ -526,9 +478,6 @@ async def health_check() -> bool:
     if not is_external_service_configured():
         return False
 
-    if _is_dummy_mode():
-        return await health_check_dummy()
-
     client = _get_client()
 
     try:
@@ -7,6 +7,7 @@ from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
 from backend.api.features.chat.tools.models import (
     BlockInfoSummary,
+    BlockInputFieldInfo,
     BlockListResponse,
     ErrorResponse,
     NoResultsResponse,
@@ -54,8 +55,7 @@ class FindBlockTool(BaseTool):
             "Blocks are reusable components that perform specific tasks like "
             "sending emails, making API calls, processing text, etc. "
             "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
-            "The response includes each block's id, name, and description. "
-            "Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
+            "The response includes each block's id, required_inputs, and input_schema."
         )
 
     @property
@@ -124,7 +124,7 @@ class FindBlockTool(BaseTool):
                 session_id=session_id,
             )
 
-        # Enrich results with block information
+        # Enrich results with full block information
         blocks: list[BlockInfoSummary] = []
         for result in results:
             block_id = result["content_id"]
@@ -141,11 +141,65 @@ class FindBlockTool(BaseTool):
             ):
                 continue
 
+            # Get input/output schemas
+            input_schema = {}
+            output_schema = {}
+            try:
+                input_schema = block.input_schema.jsonschema()
+            except Exception as e:
+                logger.debug(
+                    "Failed to generate input schema for block %s: %s",
+                    block_id,
+                    e,
+                )
+            try:
+                output_schema = block.output_schema.jsonschema()
+            except Exception as e:
+                logger.debug(
+                    "Failed to generate output schema for block %s: %s",
+                    block_id,
+                    e,
+                )
+
+            # Get categories from block instance
+            categories = []
+            if hasattr(block, "categories") and block.categories:
+                categories = [cat.value for cat in block.categories]
+
+            # Extract required inputs for easier use
+            required_inputs: list[BlockInputFieldInfo] = []
+            if input_schema:
+                properties = input_schema.get("properties", {})
+                required_fields = set(input_schema.get("required", []))
+                # Get credential field names to exclude from required inputs
+                credentials_fields = set(
+                    block.input_schema.get_credentials_fields().keys()
+                )
+
+                for field_name, field_schema in properties.items():
+                    # Skip credential fields - they're handled separately
+                    if field_name in credentials_fields:
+                        continue
+
+                    required_inputs.append(
+                        BlockInputFieldInfo(
+                            name=field_name,
+                            type=field_schema.get("type", "string"),
+                            description=field_schema.get("description", ""),
+                            required=field_name in required_fields,
+                            default=field_schema.get("default"),
+                        )
+                    )
+
             blocks.append(
                 BlockInfoSummary(
                     id=block_id,
                     name=block.name,
                     description=block.description or "",
+                    categories=categories,
+                    input_schema=input_schema,
+                    output_schema=output_schema,
+                    required_inputs=required_inputs,
                 )
            )
@@ -174,7 +228,8 @@ class FindBlockTool(BaseTool):
         return BlockListResponse(
             message=(
                 f"Found {len(blocks)} block(s) matching '{query}'. "
-                "To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
+                "To execute a block, use run_block with the block's 'id' field "
+                "and provide 'input_data' matching the block's input_schema."
             ),
             blocks=blocks,
             count=len(blocks),
@@ -18,13 +18,7 @@ _TEST_USER_ID = "test-user-find-block"
 
 
 def make_mock_block(
-    block_id: str,
-    name: str,
-    block_type: BlockType,
-    disabled: bool = False,
-    input_schema: dict | None = None,
-    output_schema: dict | None = None,
-    credentials_fields: dict | None = None,
+    block_id: str, name: str, block_type: BlockType, disabled: bool = False
 ):
     """Create a mock block for testing."""
     mock = MagicMock()
@@ -34,13 +28,10 @@ def make_mock_block(
     mock.block_type = block_type
     mock.disabled = disabled
     mock.input_schema = MagicMock()
-    mock.input_schema.jsonschema.return_value = input_schema or {
-        "properties": {},
-        "required": [],
-    }
-    mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
+    mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
+    mock.input_schema.get_credentials_fields.return_value = {}
     mock.output_schema = MagicMock()
-    mock.output_schema.jsonschema.return_value = output_schema or {}
+    mock.output_schema.jsonschema.return_value = {}
     mock.categories = []
     return mock
 
@@ -146,241 +137,3 @@ class TestFindBlockFiltering:
         assert isinstance(response, BlockListResponse)
         assert len(response.blocks) == 1
         assert response.blocks[0].id == "normal-block-id"
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_response_size_average_chars_per_block(self):
-        """Measure average chars per block in the serialized response."""
-        session = make_session(user_id=_TEST_USER_ID)
-
-        # Realistic block definitions modeled after real blocks
-        block_defs = [
-            {
-                "id": "http-block-id",
-                "name": "Send Web Request",
-                "input_schema": {
-                    "properties": {
-                        "url": {
-                            "type": "string",
-                            "description": "The URL to send the request to",
-                        },
-                        "method": {
-                            "type": "string",
-                            "description": "The HTTP method to use",
-                        },
-                        "headers": {
-                            "type": "object",
-                            "description": "Headers to include in the request",
-                        },
-                        "json_format": {
-                            "type": "boolean",
-                            "description": "If true, send the body as JSON",
-                        },
-                        "body": {
-                            "type": "object",
-                            "description": "Form/JSON body payload",
-                        },
-                        "credentials": {
-                            "type": "object",
-                            "description": "HTTP credentials",
-                        },
-                    },
-                    "required": ["url", "method"],
-                },
-                "output_schema": {
-                    "properties": {
-                        "response": {
-                            "type": "object",
-                            "description": "The response from the server",
-                        },
-                        "client_error": {
-                            "type": "object",
-                            "description": "Errors on 4xx status codes",
-                        },
-                        "server_error": {
-                            "type": "object",
-                            "description": "Errors on 5xx status codes",
-                        },
-                        "error": {
-                            "type": "string",
-                            "description": "Errors for all other exceptions",
-                        },
-                    },
-                },
-                "credentials_fields": {"credentials": True},
-            },
-            {
-                "id": "email-block-id",
-                "name": "Send Email",
-                "input_schema": {
-                    "properties": {
-                        "to_email": {
-                            "type": "string",
-                            "description": "Recipient email address",
-                        },
-                        "subject": {
-                            "type": "string",
-                            "description": "Subject of the email",
-                        },
-                        "body": {
-                            "type": "string",
-                            "description": "Body of the email",
-                        },
-                        "config": {
-                            "type": "object",
-                            "description": "SMTP Config",
-                        },
-                        "credentials": {
-                            "type": "object",
-                            "description": "SMTP credentials",
-                        },
-                    },
-                    "required": ["to_email", "subject", "body", "credentials"],
-                },
-                "output_schema": {
-                    "properties": {
-                        "status": {
-                            "type": "string",
-                            "description": "Status of the email sending operation",
-                        },
-                        "error": {
-                            "type": "string",
-                            "description": "Error message if sending failed",
-                        },
-                    },
-                },
-                "credentials_fields": {"credentials": True},
-            },
-            {
-                "id": "claude-code-block-id",
-                "name": "Claude Code",
-                "input_schema": {
-                    "properties": {
-                        "e2b_credentials": {
-                            "type": "object",
-                            "description": "API key for E2B platform",
-                        },
-                        "anthropic_credentials": {
-                            "type": "object",
-                            "description": "API key for Anthropic",
-                        },
-                        "prompt": {
-                            "type": "string",
-                            "description": "Task or instruction for Claude Code",
-                        },
-                        "timeout": {
-                            "type": "integer",
-                            "description": "Sandbox timeout in seconds",
-                        },
-                        "setup_commands": {
-                            "type": "array",
-                            "description": "Shell commands to run before execution",
-                        },
-                        "working_directory": {
-                            "type": "string",
-                            "description": "Working directory for Claude Code",
-                        },
-                        "session_id": {
-                            "type": "string",
-                            "description": "Session ID to resume a conversation",
-                        },
-                        "sandbox_id": {
-                            "type": "string",
-                            "description": "Sandbox ID to reconnect to",
-                        },
-                        "conversation_history": {
-                            "type": "string",
-                            "description": "Previous conversation history",
-                        },
-                        "dispose_sandbox": {
-                            "type": "boolean",
-                            "description": "Whether to dispose sandbox after execution",
-                        },
-                    },
-                    "required": [
-                        "e2b_credentials",
-                        "anthropic_credentials",
-                        "prompt",
-                    ],
-                },
-                "output_schema": {
-                    "properties": {
-                        "response": {
-                            "type": "string",
-                            "description": "Output from Claude Code execution",
-                        },
-                        "files": {
-                            "type": "array",
-                            "description": "Files created/modified by Claude Code",
-                        },
-                        "conversation_history": {
-                            "type": "string",
-                            "description": "Full conversation history",
-                        },
-                        "session_id": {
-                            "type": "string",
-                            "description": "Session ID for this conversation",
-                        },
-                        "sandbox_id": {
-                            "type": "string",
-                            "description": "ID of the sandbox instance",
-                        },
-                        "error": {
-                            "type": "string",
-                            "description": "Error message if execution failed",
-                        },
-                    },
-                },
-                "credentials_fields": {
-                    "e2b_credentials": True,
-                    "anthropic_credentials": True,
-                },
-            },
-        ]
-
-        search_results = [
-            {"content_id": d["id"], "score": 0.9 - i * 0.1}
-            for i, d in enumerate(block_defs)
-        ]
-        mock_blocks = {
-            d["id"]: make_mock_block(
-                block_id=d["id"],
-                name=d["name"],
-                block_type=BlockType.STANDARD,
-                input_schema=d["input_schema"],
-                output_schema=d["output_schema"],
-                credentials_fields=d["credentials_fields"],
-            )
-            for d in block_defs
-        }
-
-        with patch(
-            "backend.api.features.chat.tools.find_block.unified_hybrid_search",
-            new_callable=AsyncMock,
-            return_value=(search_results, len(search_results)),
-        ), patch(
-            "backend.api.features.chat.tools.find_block.get_block",
-            side_effect=lambda bid: mock_blocks.get(bid),
-        ):
-            tool = FindBlockTool()
-            response = await tool._execute(
-                user_id=_TEST_USER_ID, session=session, query="test"
-            )
-
-        assert isinstance(response, BlockListResponse)
-        assert response.count == len(block_defs)
-
-        total_chars = len(response.model_dump_json())
-        avg_chars = total_chars // response.count
-
-        # Print for visibility in test output
-        print(f"\nTotal response size: {total_chars} chars")
-        print(f"Number of blocks: {response.count}")
-        print(f"Average chars per block: {avg_chars}")
-
-        # The old response was ~90K for 10 blocks (~9K per block).
-        # Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
-        # Now with only id/name/description, we expect ~300 chars per block.
-        assert avg_chars < 500, (
-            f"Average chars per block ({avg_chars}) exceeds 500. "
-            f"Total response: {total_chars} chars for {response.count} blocks."
-        )
@@ -25,7 +25,6 @@ class ResponseType(str, Enum):
     AGENT_SAVED = "agent_saved"
     CLARIFICATION_NEEDED = "clarification_needed"
     BLOCK_LIST = "block_list"
-    BLOCK_DETAILS = "block_details"
     BLOCK_OUTPUT = "block_output"
     DOC_SEARCH_RESULTS = "doc_search_results"
     DOC_PAGE = "doc_page"
@@ -335,6 +334,13 @@ class BlockInfoSummary(BaseModel):
     id: str
     name: str
     description: str
+    categories: list[str]
+    input_schema: dict[str, Any]
+    output_schema: dict[str, Any]
+    required_inputs: list[BlockInputFieldInfo] = Field(
+        default_factory=list,
+        description="List of required input fields for this block",
+    )
 
 
 class BlockListResponse(ToolResponseBase):
@@ -344,25 +350,10 @@ class BlockListResponse(ToolResponseBase):
     blocks: list[BlockInfoSummary]
     count: int
     query: str
-
-
-class BlockDetails(BaseModel):
-    """Detailed block information."""
+    usage_hint: str = Field(
+        default="To execute a block, call run_block with block_id set to the block's "
+        "'id' field and input_data containing the required fields from input_schema."
+    )
 
-    id: str
-    name: str
-    description: str
-    inputs: dict[str, Any] = {}
-    outputs: dict[str, Any] = {}
-    credentials: list[CredentialsMetaInput] = []
-
 
-class BlockDetailsResponse(ToolResponseBase):
-    """Response for block details (first run_block attempt)."""
-
-    type: ResponseType = ResponseType.BLOCK_DETAILS
-    block: BlockDetails
-    user_authenticated: bool = False
-
-
 class BlockOutputResponse(ToolResponseBase):
|
|||||||
from .base import BaseTool
|
from .base import BaseTool
|
||||||
from .helpers import get_inputs_from_schema
|
from .helpers import get_inputs_from_schema
|
||||||
from .models import (
|
from .models import (
|
||||||
BlockDetails,
|
|
||||||
BlockDetailsResponse,
|
|
||||||
BlockOutputResponse,
|
BlockOutputResponse,
|
||||||
ErrorResponse,
|
ErrorResponse,
|
||||||
InputValidationErrorResponse,
|
|
||||||
SetupInfo,
|
SetupInfo,
|
||||||
SetupRequirementsResponse,
|
SetupRequirementsResponse,
|
||||||
ToolResponseBase,
|
ToolResponseBase,
|
||||||
@@ -54,8 +51,8 @@ class RunBlockTool(BaseTool):
|
|||||||
"Execute a specific block with the provided input data. "
|
"Execute a specific block with the provided input data. "
|
||||||
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
|
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
|
||||||
"do NOT guess or make up block IDs. "
|
"do NOT guess or make up block IDs. "
|
||||||
"On first attempt (without input_data), returns detailed schema showing "
|
"Use the 'id' from find_block results and provide input_data "
|
||||||
"required inputs and outputs. Then call again with proper input_data to execute."
|
"matching the block's required_inputs."
|
||||||
)
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@@ -70,19 +67,11 @@ class RunBlockTool(BaseTool):
|
|||||||
"NEVER guess this - always get it from find_block first."
|
"NEVER guess this - always get it from find_block first."
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
"block_name": {
|
|
||||||
"type": "string",
|
|
||||||
"description": (
|
|
||||||
"The block's human-readable name from find_block results. "
|
|
||||||
"Used for display purposes in the UI."
|
|
||||||
),
|
|
||||||
},
|
|
||||||
"input_data": {
|
"input_data": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": (
|
"description": (
|
||||||
"Input values for the block. "
|
"Input values for the block. Use the 'required_inputs' field "
|
||||||
"First call with empty {} to see the block's schema, "
|
"from find_block to see what fields are needed."
|
||||||
"then call again with proper values to execute."
|
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -167,34 +156,6 @@ class RunBlockTool(BaseTool):
|
|||||||
await self._resolve_block_credentials(user_id, block, input_data)
|
await self._resolve_block_credentials(user_id, block, input_data)
|
||||||
)
|
)
|
||||||
|
|
||||||
# Get block schemas for details/validation
|
|
||||||
try:
|
|
||||||
input_schema: dict[str, Any] = block.input_schema.jsonschema()
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to generate input schema for block %s: %s",
|
|
||||||
block_id,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
return ErrorResponse(
|
|
||||||
message=f"Block '{block.name}' has an invalid input schema",
|
|
||||||
error=str(e),
|
|
||||||
session_id=session_id,
|
|
||||||
)
|
|
||||||
try:
|
|
||||||
output_schema: dict[str, Any] = block.output_schema.jsonschema()
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to generate output schema for block %s: %s",
|
|
||||||
block_id,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
return ErrorResponse(
|
|
||||||
message=f"Block '{block.name}' has an invalid output schema",
|
|
||||||
error=str(e),
|
|
||||||
session_id=session_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
if missing_credentials:
|
if missing_credentials:
|
||||||
# Return setup requirements response with missing credentials
|
# Return setup requirements response with missing credentials
|
||||||
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
credentials_fields_info = block.input_schema.get_credentials_fields_info()
|
||||||
@@ -227,53 +188,6 @@ class RunBlockTool(BaseTool):
|
|||||||
graph_version=None,
|
graph_version=None,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Check if this is a first attempt (required inputs missing)
|
|
||||||
# Return block details so user can see what inputs are needed
|
|
||||||
credentials_fields = set(block.input_schema.get_credentials_fields().keys())
|
|
||||||
required_keys = set(input_schema.get("required", []))
|
|
||||||
required_non_credential_keys = required_keys - credentials_fields
|
|
||||||
provided_input_keys = set(input_data.keys()) - credentials_fields
|
|
||||||
|
|
||||||
# Check for unknown input fields
|
|
||||||
valid_fields = (
|
|
||||||
set(input_schema.get("properties", {}).keys()) - credentials_fields
|
|
||||||
)
|
|
||||||
unrecognized_fields = provided_input_keys - valid_fields
|
|
||||||
if unrecognized_fields:
|
|
||||||
return InputValidationErrorResponse(
|
|
||||||
message=(
|
|
||||||
f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
|
|
||||||
f"Block was not executed. Please use the correct field names from the schema."
|
|
||||||
),
|
|
||||||
session_id=session_id,
|
|
||||||
unrecognized_fields=sorted(unrecognized_fields),
|
|
||||||
inputs=input_schema,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Show details when not all required non-credential inputs are provided
|
|
||||||
if not (required_non_credential_keys <= provided_input_keys):
|
|
||||||
# Get credentials info for the response
|
|
||||||
credentials_meta = []
|
|
||||||
for field_name, cred_meta in matched_credentials.items():
|
|
||||||
credentials_meta.append(cred_meta)
|
|
||||||
|
|
||||||
return BlockDetailsResponse(
|
|
||||||
message=(
|
|
||||||
f"Block '{block.name}' details. "
|
|
||||||
"Provide input_data matching the inputs schema to execute the block."
|
|
||||||
),
|
|
||||||
session_id=session_id,
|
|
||||||
block=BlockDetails(
|
|
||||||
id=block_id,
|
|
||||||
name=block.name,
|
|
||||||
description=block.description or "",
|
|
||||||
inputs=input_schema,
|
|
||||||
outputs=output_schema,
|
|
||||||
credentials=credentials_meta,
|
|
||||||
),
|
|
||||||
user_authenticated=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Get or create user's workspace for CoPilot file operations
|
# Get or create user's workspace for CoPilot file operations
|
||||||
workspace = await get_or_create_workspace(user_id)
|
workspace = await get_or_create_workspace(user_id)
|
||||||
|
|||||||
@@ -1,15 +1,10 @@
-"""Tests for block execution guards and input validation in RunBlockTool."""
+"""Tests for block execution guards in RunBlockTool."""
 
-from unittest.mock import AsyncMock, MagicMock, patch
+from unittest.mock import MagicMock, patch
 
 import pytest
 
-from backend.api.features.chat.tools.models import (
-    BlockDetailsResponse,
-    BlockOutputResponse,
-    ErrorResponse,
-    InputValidationErrorResponse,
-)
+from backend.api.features.chat.tools.models import ErrorResponse
 from backend.api.features.chat.tools.run_block import RunBlockTool
 from backend.blocks._base import BlockType
 
@@ -33,39 +28,6 @@ def make_mock_block(
     return mock
 
 
-def make_mock_block_with_schema(
-    block_id: str,
-    name: str,
-    input_properties: dict,
-    required_fields: list[str],
-    output_properties: dict | None = None,
-):
-    """Create a mock block with a defined input/output schema for validation tests."""
-    mock = MagicMock()
-    mock.id = block_id
-    mock.name = name
-    mock.block_type = BlockType.STANDARD
-    mock.disabled = False
-    mock.description = f"Test block: {name}"
-
-    input_schema = {
-        "properties": input_properties,
-        "required": required_fields,
-    }
-    mock.input_schema = MagicMock()
-    mock.input_schema.jsonschema.return_value = input_schema
-    mock.input_schema.get_credentials_fields_info.return_value = {}
-    mock.input_schema.get_credentials_fields.return_value = {}
-
-    output_schema = {
-        "properties": output_properties or {"result": {"type": "string"}},
-    }
-    mock.output_schema = MagicMock()
-    mock.output_schema.jsonschema.return_value = output_schema
-
-    return mock
-
-
 class TestRunBlockFiltering:
     """Tests for block execution guards in RunBlockTool."""
 
@@ -142,221 +104,3 @@ class TestRunBlockFiltering:
         # (may be other errors like missing credentials, but not the exclusion guard)
         if isinstance(response, ErrorResponse):
             assert "cannot be run directly in CoPilot" not in response.message
-
-
-class TestRunBlockInputValidation:
-    """Tests for input field validation in RunBlockTool.
-
-    run_block rejects unknown input field names with InputValidationErrorResponse,
-    preventing silent failures where incorrect keys would be ignored and the block
-    would execute with default values instead of the caller's intended values.
-    """
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_unknown_input_fields_are_rejected(self):
-        """run_block rejects unknown input fields instead of silently ignoring them.
-
-        Scenario: The AI Text Generator block has a field called 'model' (for LLM model
-        selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
-        instead. The block should reject the request and return the valid schema.
-        """
-        session = make_session(user_id=_TEST_USER_ID)
-
-        mock_block = make_mock_block_with_schema(
-            block_id="ai-text-gen-id",
-            name="AI Text Generator",
-            input_properties={
-                "prompt": {"type": "string", "description": "The prompt to send"},
-                "model": {
-                    "type": "string",
-                    "description": "The LLM model to use",
-                    "default": "gpt-4o-mini",
-                },
-                "sys_prompt": {
-                    "type": "string",
-                    "description": "System prompt",
-                    "default": "",
-                },
-            },
-            required_fields=["prompt"],
-            output_properties={"response": {"type": "string"}},
-        )
-
-        with patch(
-            "backend.api.features.chat.tools.run_block.get_block",
-            return_value=mock_block,
-        ):
-            tool = RunBlockTool()
-
-            # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="ai-text-gen-id",
-                input_data={
-                    "prompt": "Write a haiku about coding",
-                    "LLM_Model": "claude-opus-4-6",  # WRONG KEY - should be 'model'
-                },
-            )
-
-        assert isinstance(response, InputValidationErrorResponse)
-        assert "LLM_Model" in response.unrecognized_fields
-        assert "Block was not executed" in response.message
-        assert "inputs" in response.model_dump()  # valid schema included
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_multiple_wrong_keys_are_all_reported(self):
-        """All unrecognized field names are reported in a single error response."""
-        session = make_session(user_id=_TEST_USER_ID)
-
-        mock_block = make_mock_block_with_schema(
-            block_id="ai-text-gen-id",
-            name="AI Text Generator",
-            input_properties={
-                "prompt": {"type": "string"},
-                "model": {"type": "string", "default": "gpt-4o-mini"},
-                "sys_prompt": {"type": "string", "default": ""},
-                "retry": {"type": "integer", "default": 3},
-            },
-            required_fields=["prompt"],
-        )
-
-        with patch(
-            "backend.api.features.chat.tools.run_block.get_block",
-            return_value=mock_block,
-        ):
-            tool = RunBlockTool()
-
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="ai-text-gen-id",
-                input_data={
-                    "prompt": "Hello",  # correct
-                    "llm_model": "claude-opus-4-6",  # WRONG - should be 'model'
-                    "system_prompt": "Be helpful",  # WRONG - should be 'sys_prompt'
-                    "retries": 5,  # WRONG - should be 'retry'
-                },
-            )
-
-        assert isinstance(response, InputValidationErrorResponse)
-        assert set(response.unrecognized_fields) == {
-            "llm_model",
-            "system_prompt",
-            "retries",
-        }
-        assert "Block was not executed" in response.message
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_unknown_fields_rejected_even_with_missing_required(self):
-        """Unknown fields are caught before the missing-required-fields check."""
-        session = make_session(user_id=_TEST_USER_ID)
-
-        mock_block = make_mock_block_with_schema(
-            block_id="ai-text-gen-id",
-            name="AI Text Generator",
-            input_properties={
-                "prompt": {"type": "string"},
-                "model": {"type": "string", "default": "gpt-4o-mini"},
-            },
-            required_fields=["prompt"],
-        )
-
-        with patch(
-            "backend.api.features.chat.tools.run_block.get_block",
-            return_value=mock_block,
-        ):
-            tool = RunBlockTool()
-
-            # 'prompt' is missing AND 'LLM_Model' is an unknown field
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="ai-text-gen-id",
-                input_data={
-                    "LLM_Model": "claude-opus-4-6",  # wrong key, and 'prompt' is missing
-                },
-            )
-
-        # Unknown fields are caught first
-        assert isinstance(response, InputValidationErrorResponse)
-        assert "LLM_Model" in response.unrecognized_fields
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_correct_inputs_still_execute(self):
-        """Correct input field names pass validation and the block executes."""
-        session = make_session(user_id=_TEST_USER_ID)
-
-        mock_block = make_mock_block_with_schema(
-            block_id="ai-text-gen-id",
-            name="AI Text Generator",
-            input_properties={
-                "prompt": {"type": "string"},
-                "model": {"type": "string", "default": "gpt-4o-mini"},
-            },
-            required_fields=["prompt"],
-        )
-
-        async def mock_execute(input_data, **kwargs):
-            yield "response", "Generated text"
-
-        mock_block.execute = mock_execute
-
-        with (
-            patch(
-                "backend.api.features.chat.tools.run_block.get_block",
-                return_value=mock_block,
-            ),
-            patch(
-                "backend.api.features.chat.tools.run_block.get_or_create_workspace",
-                new_callable=AsyncMock,
-                return_value=MagicMock(id="test-workspace-id"),
-            ),
-        ):
-            tool = RunBlockTool()
-
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="ai-text-gen-id",
-                input_data={
-                    "prompt": "Write a haiku",
-                    "model": "gpt-4o-mini",  # correct field name
-                },
-            )
-
-        assert isinstance(response, BlockOutputResponse)
-        assert response.success is True
-
-    @pytest.mark.asyncio(loop_scope="session")
-    async def test_missing_required_fields_returns_details(self):
-        """Missing required fields returns BlockDetailsResponse with schema."""
-        session = make_session(user_id=_TEST_USER_ID)
-
-        mock_block = make_mock_block_with_schema(
-            block_id="ai-text-gen-id",
-            name="AI Text Generator",
-            input_properties={
-                "prompt": {"type": "string"},
-                "model": {"type": "string", "default": "gpt-4o-mini"},
-            },
-            required_fields=["prompt"],
-        )
-
-        with patch(
-            "backend.api.features.chat.tools.run_block.get_block",
-            return_value=mock_block,
-        ):
-            tool = RunBlockTool()
-
-            # Only provide valid optional field, missing required 'prompt'
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="ai-text-gen-id",
-                input_data={
-                    "model": "gpt-4o-mini",  # valid but optional
-                },
-            )
-
-        assert isinstance(response, BlockDetailsResponse)
@@ -1,153 +0,0 @@
-"""Tests for BlockDetailsResponse in RunBlockTool."""
-
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-
-from backend.api.features.chat.tools.models import BlockDetailsResponse
-from backend.api.features.chat.tools.run_block import RunBlockTool
-from backend.blocks._base import BlockType
-from backend.data.model import CredentialsMetaInput
-from backend.integrations.providers import ProviderName
-
-from ._test_data import make_session
-
-_TEST_USER_ID = "test-user-run-block-details"
-
-
-def make_mock_block_with_inputs(
-    block_id: str, name: str, description: str = "Test description"
-):
-    """Create a mock block with input/output schemas for testing."""
-    mock = MagicMock()
-    mock.id = block_id
-    mock.name = name
-    mock.description = description
-    mock.block_type = BlockType.STANDARD
-    mock.disabled = False
-
-    # Input schema with non-credential fields
-    mock.input_schema = MagicMock()
-    mock.input_schema.jsonschema.return_value = {
-        "properties": {
-            "url": {"type": "string", "description": "URL to fetch"},
-            "method": {"type": "string", "description": "HTTP method"},
-        },
-        "required": ["url"],
-    }
-    mock.input_schema.get_credentials_fields.return_value = {}
-    mock.input_schema.get_credentials_fields_info.return_value = {}
-
-    # Output schema
-    mock.output_schema = MagicMock()
-    mock.output_schema.jsonschema.return_value = {
-        "properties": {
-            "response": {"type": "object", "description": "HTTP response"},
-            "error": {"type": "string", "description": "Error message"},
-        }
-    }
-
-    return mock
-
-
-@pytest.mark.asyncio(loop_scope="session")
-async def test_run_block_returns_details_when_no_input_provided():
-    """When run_block is called without input_data, it should return BlockDetailsResponse."""
-    session = make_session(user_id=_TEST_USER_ID)
-
-    # Create a block with inputs
-    http_block = make_mock_block_with_inputs(
-        "http-block-id", "HTTP Request", "Send HTTP requests"
-    )
-
-    with patch(
-        "backend.api.features.chat.tools.run_block.get_block",
-        return_value=http_block,
-    ):
-        # Mock credentials check to return no missing credentials
-        with patch.object(
-            RunBlockTool,
-            "_resolve_block_credentials",
-            new_callable=AsyncMock,
-            return_value=({}, []),  # (matched_credentials, missing_credentials)
-        ):
-            tool = RunBlockTool()
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="http-block-id",
-                input_data={},  # Empty input data
-            )
-
-    # Should return BlockDetailsResponse showing the schema
-    assert isinstance(response, BlockDetailsResponse)
-    assert response.block.id == "http-block-id"
-    assert response.block.name == "HTTP Request"
-    assert response.block.description == "Send HTTP requests"
-    assert "url" in response.block.inputs["properties"]
-    assert "method" in response.block.inputs["properties"]
-    assert "response" in response.block.outputs["properties"]
-    assert response.user_authenticated is True
-
-
-@pytest.mark.asyncio(loop_scope="session")
-async def test_run_block_returns_details_when_only_credentials_provided():
-    """When only credentials are provided (no actual input), should return details."""
-    session = make_session(user_id=_TEST_USER_ID)
-
-    # Create a block with both credential and non-credential inputs
-    mock = MagicMock()
-    mock.id = "api-block-id"
-    mock.name = "API Call"
-    mock.description = "Make API calls"
-    mock.block_type = BlockType.STANDARD
-    mock.disabled = False
-
-    mock.input_schema = MagicMock()
-    mock.input_schema.jsonschema.return_value = {
-        "properties": {
-            "credentials": {"type": "object", "description": "API credentials"},
-            "endpoint": {"type": "string", "description": "API endpoint"},
-        },
-        "required": ["credentials", "endpoint"],
-    }
-    mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
-    mock.input_schema.get_credentials_fields_info.return_value = {}
-
-    mock.output_schema = MagicMock()
-    mock.output_schema.jsonschema.return_value = {
-        "properties": {"result": {"type": "object"}}
-    }
-
-    with patch(
-        "backend.api.features.chat.tools.run_block.get_block",
-        return_value=mock,
-    ):
-        with patch.object(
-            RunBlockTool,
-            "_resolve_block_credentials",
-            new_callable=AsyncMock,
-            return_value=(
-                {
-                    "credentials": CredentialsMetaInput(
-                        id="cred-id",
-                        provider=ProviderName("test_provider"),
-                        type="api_key",
-                        title="Test Credential",
-                    )
-                },
-                [],
-            ),
-        ):
-            tool = RunBlockTool()
-            response = await tool._execute(
-                user_id=_TEST_USER_ID,
-                session=session,
-                block_id="api-block-id",
-                input_data={"credentials": {"some": "cred"}},  # Only credential
-            )
-
-    # Should return details because no non-credential inputs provided
-    assert isinstance(response, BlockDetailsResponse)
-    assert response.block.id == "api-block-id"
-    assert response.block.name == "API Call"
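
The tests removed above all follow one mocking pattern: swap the block-registry lookup for a MagicMock that carries a JSON schema, and stub the awaited credential resolver with AsyncMock so _execute never touches real integrations. A minimal, self-contained sketch of that pattern (the make_block_stub helper and the asserted shapes are illustrative, not part of the repository):

import asyncio
from unittest.mock import AsyncMock, MagicMock


def make_block_stub(block_id: str) -> MagicMock:
    # Schema-bearing stub, same shape as make_mock_block_with_inputs above.
    block = MagicMock()
    block.id = block_id
    block.disabled = False
    block.input_schema.jsonschema.return_value = {
        "properties": {"url": {"type": "string"}},
        "required": ["url"],
    }
    return block


async def main() -> None:
    block = make_block_stub("http-block-id")
    # AsyncMock stands in for the awaited credential resolver; the
    # (matched, missing) return mirrors _resolve_block_credentials above.
    resolver = AsyncMock(return_value=({}, []))
    matched, missing = await resolver(block.id)
    assert matched == {} and missing == []
    assert "url" in block.input_schema.jsonschema()["properties"]


asyncio.run(main())
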
@@ -1,10 +1,10 @@
 import json
 import shlex
 import uuid
-from typing import TYPE_CHECKING, Literal, Optional
+from typing import Literal, Optional

 from e2b import AsyncSandbox as BaseAsyncSandbox
-from pydantic import SecretStr
+from pydantic import BaseModel, SecretStr

 from backend.blocks._base import (
     Block,
@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext


 class ClaudeCodeExecutionError(Exception):
@@ -181,15 +174,22 @@ class ClaudeCodeBlock(Block):
         advanced=True,
     )

+    class FileOutput(BaseModel):
+        """A file extracted from the sandbox."""
+
+        path: str
+        relative_path: str  # Path relative to working directory (for GitHub, etc.)
+        name: str
+        content: str
+
     class Output(BlockSchemaOutput):
         response: str = SchemaField(
             description="The output/response from Claude Code execution"
         )
-        files: list[SandboxFileOutput] = SchemaField(
+        files: list["ClaudeCodeBlock.FileOutput"] = SchemaField(
             description=(
                 "List of text files created/modified by Claude Code during this execution. "
-                "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. "
-                "workspace_ref contains a workspace:// URI if the file was stored to workspace."
+                "Each file has 'path', 'relative_path', 'name', and 'content' fields."
             )
         )
         conversation_history: str = SchemaField(
@@ -252,7 +252,6 @@ class ClaudeCodeBlock(Block):
                     "relative_path": "index.html",
                     "name": "index.html",
                     "content": "<html>Hello World</html>",
-                    "workspace_ref": None,
                 }
             ],
         ),
@@ -268,12 +267,11 @@ class ClaudeCodeBlock(Block):
            "execute_claude_code": lambda *args, **kwargs: (
                "Created index.html with hello world content",  # response
                [
-                    SandboxFileOutput(
+                    ClaudeCodeBlock.FileOutput(
                        path="/home/user/index.html",
                        relative_path="index.html",
                        name="index.html",
                        content="<html>Hello World</html>",
-                        workspace_ref=None,
                    )
                ],  # files
                "User: Create a hello world HTML file\n"
@@ -296,8 +294,7 @@ class ClaudeCodeBlock(Block):
         existing_sandbox_id: str,
         conversation_history: str,
         dispose_sandbox: bool,
-        execution_context: "ExecutionContext",
-    ) -> tuple[str, list[SandboxFileOutput], str, str, str]:
+    ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]:
         """
         Execute Claude Code in an E2B sandbox.

@@ -452,18 +449,14 @@ class ClaudeCodeBlock(Block):
         else:
             new_conversation_history = turn_entry

-        # Extract files created/modified during this run and store to workspace
-        sandbox_files = await extract_and_store_sandbox_files(
-            sandbox=sandbox,
-            working_directory=working_directory,
-            execution_context=execution_context,
-            since_timestamp=start_timestamp,
-            text_only=True,
-        )
+        # Extract files created/modified during this run
+        files = await self._extract_files(
+            sandbox, working_directory, start_timestamp
+        )

         return (
             response,
-            sandbox_files,  # Already SandboxFileOutput objects
+            files,
             new_conversation_history,
             current_session_id,
             sandbox_id,
@@ -478,6 +471,140 @@ class ClaudeCodeBlock(Block):
             if dispose_sandbox and sandbox:
                 await sandbox.kill()

+    async def _extract_files(
+        self,
+        sandbox: BaseAsyncSandbox,
+        working_directory: str,
+        since_timestamp: str | None = None,
+    ) -> list["ClaudeCodeBlock.FileOutput"]:
+        """
+        Extract text files created/modified during this Claude Code execution.
+
+        Args:
+            sandbox: The E2B sandbox instance
+            working_directory: Directory to search for files
+            since_timestamp: ISO timestamp - only return files modified after this time
+
+        Returns:
+            List of FileOutput objects with path, relative_path, name, and content
+        """
+        files: list[ClaudeCodeBlock.FileOutput] = []
+
+        # Text file extensions we can safely read as text
+        text_extensions = {
+            ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
+            ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
+            ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
+            ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
+            ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
+            ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
+        }
+
+        try:
+            # List files recursively using find command.
+            # Exclude node_modules and .git directories, but allow hidden files
+            # like .env and .gitignore (they're filtered by text_extensions later).
+            # Filter by timestamp to only get files created/modified during this run.
+            safe_working_dir = shlex.quote(working_directory)
+            timestamp_filter = ""
+            if since_timestamp:
+                timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
+            find_result = await sandbox.commands.run(
+                f"find {safe_working_dir} -type f "
+                f"{timestamp_filter}"
+                f"-not -path '*/node_modules/*' "
+                f"-not -path '*/.git/*' "
+                f"2>/dev/null"
+            )
+
+            if find_result.stdout:
+                for file_path in find_result.stdout.strip().split("\n"):
+                    if not file_path:
+                        continue
+
+                    # Check if it's a text file we can read
+                    is_text = any(
+                        file_path.endswith(ext) for ext in text_extensions
+                    ) or file_path.endswith("Dockerfile")
+
+                    if is_text:
+                        try:
+                            content = await sandbox.files.read(file_path)
+                            # Handle bytes or string
+                            if isinstance(content, bytes):
+                                content = content.decode("utf-8", errors="replace")
+
+                            # Extract filename from path
+                            file_name = file_path.split("/")[-1]
+
+                            # Calculate relative path by stripping working directory
+                            relative_path = file_path
+                            if file_path.startswith(working_directory):
+                                relative_path = file_path[len(working_directory) :]
+                                # Remove leading slash if present
+                                if relative_path.startswith("/"):
+                                    relative_path = relative_path[1:]
+
+                            files.append(
+                                ClaudeCodeBlock.FileOutput(
+                                    path=file_path,
+                                    relative_path=relative_path,
+                                    name=file_name,
+                                    content=content,
+                                )
+                            )
+                        except Exception:
+                            # Skip files that can't be read
+                            pass
+
+        except Exception:
+            # If file extraction fails, return empty results
+            pass
+
+        return files
+
     def _escape_prompt(self, prompt: str) -> str:
         """Escape the prompt for safe shell execution."""
         # Use single quotes and escape any single quotes in the prompt
@@ -490,7 +617,6 @@ class ClaudeCodeBlock(Block):
         *,
         e2b_credentials: APIKeyCredentials,
         anthropic_credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
         **kwargs,
     ) -> BlockOutput:
         try:
@@ -511,7 +637,6 @@ class ClaudeCodeBlock(Block):
                 existing_sandbox_id=input_data.sandbox_id,
                 conversation_history=input_data.conversation_history,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
             )

             yield "response", response
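
Net effect of the claude_code.py hunks above: file metadata moves back from the shared SandboxFileOutput (which carried a workspace_ref) to a nested FileOutput with only four fields, so anything consuming the block's files output should stop expecting workspace_ref. A small sketch of the reverted shape (a standalone model mirroring the diff, not imported from the repo):

from pydantic import BaseModel


class FileOutput(BaseModel):
    # Mirrors the nested ClaudeCodeBlock.FileOutput reintroduced above.
    path: str
    relative_path: str
    name: str
    content: str


f = FileOutput(
    path="/home/user/index.html",
    relative_path="index.html",
    name="index.html",
    content="<html>Hello World</html>",
)
# After the revert there is no workspace_ref field to key off.
assert "workspace_ref" not in f.model_dump()
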
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import TYPE_CHECKING, Any, Literal, Optional
+from typing import Any, Literal, Optional

 from e2b_code_interpreter import AsyncSandbox
 from e2b_code_interpreter import Result as E2BExecutionResult
@@ -20,13 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.sandbox_files import (
-    SandboxFileOutput,
-    extract_and_store_sandbox_files,
-)
-
-if TYPE_CHECKING:
-    from backend.executor.utils import ExecutionContext

 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -92,9 +85,6 @@ class CodeExecutionResult(MainCodeExecutionResult):
 class BaseE2BExecutorMixin:
     """Shared implementation methods for E2B executor blocks."""

-    # Default working directory in E2B sandboxes
-    WORKING_DIR = "/home/user"
-
     async def execute_code(
         self,
         api_key: str,
@@ -105,21 +95,14 @@ class BaseE2BExecutorMixin:
         timeout: Optional[int] = None,
         sandbox_id: Optional[str] = None,
         dispose_sandbox: bool = False,
-        execution_context: Optional["ExecutionContext"] = None,
-        extract_files: bool = False,
     ):
         """
         Unified code execution method that handles all three use cases:
         1. Create new sandbox and execute (ExecuteCodeBlock)
         2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock)
         3. Connect to existing sandbox and execute (ExecuteCodeStepBlock)

-        Args:
-            extract_files: If True and execution_context provided, extract files
-                created/modified during execution and store to workspace.
         """  # noqa
         sandbox = None
-        files: list[SandboxFileOutput] = []
         try:
             if sandbox_id:
                 # Connect to existing sandbox (ExecuteCodeStepBlock case)
@@ -135,12 +118,6 @@ class BaseE2BExecutorMixin:
             for cmd in setup_commands:
                 await sandbox.commands.run(cmd)

-            # Capture timestamp before execution to scope file extraction
-            start_timestamp = None
-            if extract_files:
-                ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S")
-                start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None
-
             # Execute the code
             execution = await sandbox.run_code(
                 code,
@@ -156,24 +133,7 @@ class BaseE2BExecutorMixin:
             stdout_logs = "".join(execution.logs.stdout)
             stderr_logs = "".join(execution.logs.stderr)

-            # Extract files created/modified during this execution
-            if extract_files and execution_context:
-                files = await extract_and_store_sandbox_files(
-                    sandbox=sandbox,
-                    working_directory=self.WORKING_DIR,
-                    execution_context=execution_context,
-                    since_timestamp=start_timestamp,
-                    text_only=False,  # Include binary files too
-                )
-
-            return (
-                results,
-                text_output,
-                stdout_logs,
-                stderr_logs,
-                sandbox.sandbox_id,
-                files,
-            )
+            return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id
         finally:
             # Dispose of sandbox if requested to reduce usage costs
             if dispose_sandbox and sandbox:
@@ -278,12 +238,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             description="Standard output logs from execution"
         )
         stderr_logs: str = SchemaField(description="Standard error logs from execution")
-        files: list[SandboxFileOutput] = SchemaField(
-            description=(
-                "Files created or modified during execution. "
-                "Each file has path, name, content, and workspace_ref (if stored)."
-            ),
-        )

     def __init__(self):
         super().__init__(
@@ -305,30 +259,23 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 ("results", []),
                 ("response", "Hello World"),
                 ("stdout_logs", "Hello World\n"),
-                ("files", []),
             ],
             test_mock={
-                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: (  # noqa
+                "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: (  # noqa
                     [],  # results
                     "Hello World",  # text_output
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
-                    [],  # files
                 ),
             },
         )

     async def run(
-        self,
-        input_data: Input,
-        *,
-        credentials: APIKeyCredentials,
-        execution_context: "ExecutionContext",
-        **kwargs,
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, files = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.code,
                 language=input_data.language,
@@ -336,8 +283,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
                 setup_commands=input_data.setup_commands,
                 timeout=input_data.timeout,
                 dispose_sandbox=input_data.dispose_sandbox,
-                execution_context=execution_context,
-                extract_files=True,
             )

             # Determine result object shape & filter out empty formats
@@ -351,8 +296,6 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin):
             yield "stdout_logs", stdout
             if stderr:
                 yield "stderr_logs", stderr
-            # Always yield files (empty list if none)
-            yield "files", [f.model_dump() for f in files]
         except Exception as e:
             yield "error", str(e)

@@ -450,7 +393,6 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     "sandbox_id",  # sandbox_id
-                    [],  # files
                 ),
             },
         )
@@ -459,7 +401,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code(
+            _, text_output, stdout, stderr, sandbox_id = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.setup_code,
                 language=input_data.language,
@@ -558,7 +500,6 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
                     "Hello World\n",  # stdout_logs
                     "",  # stderr_logs
                     sandbox_id,  # sandbox_id
-                    [],  # files
                 ),
             },
         )
@@ -567,7 +508,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin):
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            results, text_output, stdout, stderr, _, _ = await self.execute_code(
+            results, text_output, stdout, stderr, _ = await self.execute_code(
                 api_key=credentials.api_key.get_secret_value(),
                 code=input_data.step_code,
                 language=input_data.language,
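
The code_executor.py hunks above shrink execute_code from a six-element return (with a trailing files list) back to five, and every caller unpacks accordingly. A toy stand-in showing the caller-side change, with a stub in place of a real E2B sandbox:

import asyncio


async def execute_code_stub():
    # Stub with the restored five-element shape from the diff above:
    # (results, text_output, stdout_logs, stderr_logs, sandbox_id).
    results: list = []
    return results, "Hello World", "Hello World\n", "", "sandbox_id"


results, text_output, stdout, stderr, sandbox_id = asyncio.run(execute_code_stub())
assert sandbox_id == "sandbox_id"  # no trailing `files` element to discard
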
@@ -1,288 +0,0 @@
-"""
-Shared utilities for extracting and storing files from E2B sandboxes.
-
-This module provides common file extraction and workspace storage functionality
-for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.).
-"""
-
-import base64
-import logging
-import mimetypes
-import shlex
-from dataclasses import dataclass
-from typing import TYPE_CHECKING
-
-from pydantic import BaseModel
-
-from backend.util.file import store_media_file
-from backend.util.type import MediaFileType
-
-if TYPE_CHECKING:
-    from e2b import AsyncSandbox as BaseAsyncSandbox
-
-    from backend.executor.utils import ExecutionContext
-
-logger = logging.getLogger(__name__)
-
-# Text file extensions that can be safely read and stored as text
-TEXT_EXTENSIONS = {
-    ".txt", ".md", ".html", ".htm", ".css", ".js", ".ts", ".jsx", ".tsx",
-    ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
-    ".py", ".rb", ".php", ".java", ".c", ".cpp", ".h", ".hpp", ".cs",
-    ".go", ".rs", ".swift", ".kt", ".scala", ".sh", ".bash", ".zsh",
-    ".sql", ".graphql", ".env", ".gitignore", ".dockerfile", "Dockerfile",
-    ".vue", ".svelte", ".astro", ".mdx", ".rst", ".tex", ".csv", ".log",
-}
-
-
-class SandboxFileOutput(BaseModel):
-    """A file extracted from a sandbox and optionally stored in workspace."""
-
-    path: str
-    """Full path in the sandbox."""
-
-    relative_path: str
-    """Path relative to the working directory."""
-
-    name: str
-    """Filename only."""
-
-    content: str
-    """File content as text (for backward compatibility)."""
-
-    workspace_ref: str | None = None
-    """Workspace reference (workspace://{id}#mime) if stored, None otherwise."""
-
-
-@dataclass
-class ExtractedFile:
-    """Internal representation of an extracted file before storage."""
-
-    path: str
-    relative_path: str
-    name: str
-    content: bytes
-    is_text: bool
-
-
-async def extract_sandbox_files(
-    sandbox: "BaseAsyncSandbox",
-    working_directory: str,
-    since_timestamp: str | None = None,
-    text_only: bool = True,
-) -> list[ExtractedFile]:
-    """
-    Extract files from an E2B sandbox.
-
-    Args:
-        sandbox: The E2B sandbox instance
-        working_directory: Directory to search for files
-        since_timestamp: ISO timestamp - only return files modified after this time
-        text_only: If True, only extract text files (default). If False, extract all files.
-
-    Returns:
-        List of ExtractedFile objects with path, content, and metadata
-    """
-    files: list[ExtractedFile] = []
-
-    try:
-        # Build find command
-        safe_working_dir = shlex.quote(working_directory)
-        timestamp_filter = ""
-        if since_timestamp:
-            timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} "
-
-        find_result = await sandbox.commands.run(
-            f"find {safe_working_dir} -type f "
-            f"{timestamp_filter}"
-            f"-not -path '*/node_modules/*' "
-            f"-not -path '*/.git/*' "
-            f"2>/dev/null"
-        )
-
-        if not find_result.stdout:
-            return files
-
-        for file_path in find_result.stdout.strip().split("\n"):
-            if not file_path:
-                continue
-
-            # Check if it's a text file
-            is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS)
-
-            # Skip non-text files if text_only mode
-            if text_only and not is_text:
-                continue
-
-            try:
-                # Read file content as bytes
-                content = await sandbox.files.read(file_path, format="bytes")
-                if isinstance(content, str):
-                    content = content.encode("utf-8")
-                elif isinstance(content, bytearray):
-                    content = bytes(content)
-
-                # Extract filename from path
-                file_name = file_path.split("/")[-1]
-
-                # Calculate relative path
-                relative_path = file_path
-                if file_path.startswith(working_directory):
-                    relative_path = file_path[len(working_directory) :]
-                    if relative_path.startswith("/"):
-                        relative_path = relative_path[1:]
-
-                files.append(
-                    ExtractedFile(
-                        path=file_path,
-                        relative_path=relative_path,
-                        name=file_name,
-                        content=content,
-                        is_text=is_text,
-                    )
-                )
-            except Exception as e:
-                logger.debug(f"Failed to read file {file_path}: {e}")
-                continue
-
-    except Exception as e:
-        logger.warning(f"File extraction failed: {e}")
-
-    return files
-
-
-async def store_sandbox_files(
-    extracted_files: list[ExtractedFile],
-    execution_context: "ExecutionContext",
-) -> list[SandboxFileOutput]:
-    """
-    Store extracted sandbox files to workspace and return output objects.
-
-    Args:
-        extracted_files: List of files extracted from sandbox
-        execution_context: Execution context for workspace storage
-
-    Returns:
-        List of SandboxFileOutput objects with workspace refs
-    """
-    outputs: list[SandboxFileOutput] = []
-
-    for file in extracted_files:
-        # Decode content for text files (for backward compat content field)
-        if file.is_text:
-            try:
-                content_str = file.content.decode("utf-8", errors="replace")
-            except Exception:
-                content_str = ""
-        else:
-            content_str = f"[Binary file: {len(file.content)} bytes]"
-
-        # Build data URI (needed for storage and as binary fallback)
-        mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream"
-        data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}"
-
-        # Try to store in workspace
-        workspace_ref: str | None = None
-        try:
-            result = await store_media_file(
-                file=MediaFileType(data_uri),
-                execution_context=execution_context,
-                return_format="for_block_output",
-            )
-            if result.startswith("workspace://"):
-                workspace_ref = result
-            elif not file.is_text:
-                # Non-workspace context (graph execution): store_media_file
-                # returned a data URI — use it as content so binary data isn't lost.
-                content_str = result
-        except Exception as e:
-            logger.warning(f"Failed to store file {file.name} to workspace: {e}")
-            # For binary files, fall back to data URI to prevent data loss
-            if not file.is_text:
-                content_str = data_uri
-
-        outputs.append(
-            SandboxFileOutput(
-                path=file.path,
-                relative_path=file.relative_path,
-                name=file.name,
-                content=content_str,
-                workspace_ref=workspace_ref,
-            )
-        )
-
-    return outputs
-
-
-async def extract_and_store_sandbox_files(
-    sandbox: "BaseAsyncSandbox",
-    working_directory: str,
-    execution_context: "ExecutionContext",
-    since_timestamp: str | None = None,
-    text_only: bool = True,
-) -> list[SandboxFileOutput]:
-    """
-    Extract files from sandbox and store them in workspace.
-
-    This is the main entry point combining extraction and storage.
-
-    Args:
-        sandbox: The E2B sandbox instance
-        working_directory: Directory to search for files
-        execution_context: Execution context for workspace storage
-        since_timestamp: ISO timestamp - only return files modified after this time
-        text_only: If True, only extract text files
-
-    Returns:
-        List of SandboxFileOutput objects with content and workspace refs
-    """
-    extracted = await extract_sandbox_files(
-        sandbox=sandbox,
-        working_directory=working_directory,
-        since_timestamp=since_timestamp,
-        text_only=text_only,
-    )
-
-    return await store_sandbox_files(extracted, execution_context)
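
Before its deletion, sandbox_files.py funneled every extracted file through a base64 data URI so store_media_file could persist it; that encoding step is easy to isolate. A minimal reimplementation of just that step (the to_data_uri name is illustrative):

import base64
import mimetypes


def to_data_uri(name: str, content: bytes) -> str:
    # Same encoding the deleted store_sandbox_files built before calling
    # store_media_file: guess the MIME type, fall back to octet-stream.
    mime_type = mimetypes.guess_type(name)[0] or "application/octet-stream"
    return f"data:{mime_type};base64,{base64.b64encode(content).decode()}"


uri = to_data_uri("index.html", b"<html>Hello World</html>")
assert uri.startswith("data:text/html;base64,")
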
@@ -368,10 +368,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
         default=600,
         description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)",
     )
-    agentgenerator_use_dummy: bool = Field(
-        default=False,
-        description="Use dummy agent generator responses for testing (bypasses external service)",
-    )

     enable_example_blocks: bool = Field(
         default=False,
autogpt_platform/backend/poetry.lock (generated, 68 changed lines)
@@ -441,14 +441,14 @@ develop = true
 colorama = "^0.4.6"
 cryptography = "^46.0"
 expiringdict = "^1.2.2"
-fastapi = "^0.128.7"
+fastapi = "^0.128.0"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.15.0"
+launchdarkly-server-sdk = "^9.14.1"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = {version = "^2.11.0", extras = ["crypto"]}
 redis = "^6.2.0"
-supabase = "^2.28.0"
+supabase = "^2.27.2"
 uvicorn = "^0.40.0"

 [package.source]
@@ -1382,14 +1382,14 @@ tzdata = "*"

 [[package]]
 name = "fastapi"
-version = "0.128.7"
+version = "0.128.6"
 description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
-    {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
+    {file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
+    {file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
 ]

 [package.dependencies]
@@ -3117,14 +3117,14 @@ urllib3 = ">=1.26.0,<3"

 [[package]]
 name = "launchdarkly-server-sdk"
-version = "9.15.0"
+version = "9.14.1"
 description = "LaunchDarkly SDK for Python"
 optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
-    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
+    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
+    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
 ]

 [package.dependencies]
@@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest

 [[package]]
 name = "postgrest"
-version = "2.28.0"
+version = "2.27.3"
 description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
-    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
+    {file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
+    {file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
 ]

 [package.dependencies]
@@ -6260,14 +6260,14 @@ all = ["numpy"]

 [[package]]
 name = "realtime"
-version = "2.28.0"
+version = "2.27.3"
 description = ""
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
-    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
+    {file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
+    {file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
 ]

 [package.dependencies]
@@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart

 [[package]]
 name = "storage3"
-version = "2.28.0"
+version = "2.27.3"
 description = "Supabase Storage client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
-    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
+    {file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
+    {file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
 ]

 [package.dependencies]
@@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}

 [[package]]
 name = "supabase"
-version = "2.28.0"
+version = "2.27.3"
 description = "Supabase client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
-    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
+    {file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
+    {file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
 ]

 [package.dependencies]
 httpx = ">=0.26,<0.29"
-postgrest = "2.28.0"
-realtime = "2.28.0"
-storage3 = "2.28.0"
-supabase-auth = "2.28.0"
-supabase-functions = "2.28.0"
+postgrest = "2.27.3"
+realtime = "2.27.3"
+storage3 = "2.27.3"
+supabase-auth = "2.27.3"
+supabase-functions = "2.27.3"
 yarl = ">=1.22.0"

 [[package]]
 name = "supabase-auth"
-version = "2.28.0"
+version = "2.27.3"
 description = "Python Client Library for Supabase Auth"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
-    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
+    {file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
+    {file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
 ]

 [package.dependencies]
@@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}

 [[package]]
 name = "supabase-functions"
-version = "2.28.0"
+version = "2.27.3"
 description = "Library for Supabase Functions"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
-    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
+    {file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
+    {file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
 ]

 [package.dependencies]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f"
+content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
@@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
 sqlalchemy = "^2.0.40"
 strenum = "^0.4.9"
 stripe = "^11.5.0"
-supabase = "2.28.0"
+supabase = "2.27.3"
 tenacity = "^9.1.4"
 todoist-api-python = "^2.1.7"
 tweepy = "^4.16.0"
@@ -25,7 +25,6 @@ class TestServiceConfiguration:
         """Test that external service is not configured when host is empty."""
         mock_settings = MagicMock()
         mock_settings.config.agentgenerator_host = ""
-        mock_settings.config.agentgenerator_use_dummy = False

         with patch.object(service, "_get_settings", return_value=mock_settings):
             assert service.is_external_service_configured() is False
@@ -37,7 +37,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: migrate
-    command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"]
+    command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
     develop:
       watch:
         - path: ./
@@ -56,7 +56,7 @@ services:
       test:
         [
          "CMD-SHELL",
-          "prisma migrate status | grep -q 'No pending migrations' || exit 1",
+          "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1",
        ]
      interval: 30s
      timeout: 10s
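
The docker-compose hunks above wrap every Prisma invocation in `poetry run` so the CLI resolves from the project's virtualenv rather than the container's global PATH. A rough Python equivalent of the healthcheck predicate, assuming poetry and prisma are available on PATH:

import subprocess


def migrations_applied() -> bool:
    # Mirrors the CMD-SHELL healthcheck above: healthy only when
    # `prisma migrate status` reports no pending migrations.
    proc = subprocess.run(
        ["poetry", "run", "prisma", "migrate", "status"],
        capture_output=True,
        text=True,
    )
    return "No pending migrations" in proc.stdout
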
@@ -22,11 +22,6 @@ Sentry.init({

   enabled: shouldEnable,

-  // Suppress cross-origin stylesheet errors from Sentry Replay (rrweb)
-  // serializing DOM snapshots with cross-origin stylesheets
-  // (e.g., from browser extensions or CDN-loaded CSS)
-  ignoreErrors: [/Not allowed to access cross-origin stylesheet/],
-
   // Add optional integrations for additional features
   integrations: [
     Sentry.captureConsoleIntegration(),
@@ -159,7 +159,7 @@ export const ChatMessagesContainer = ({

   return (
     <Conversation className="min-h-0 flex-1">
-      <ConversationContent className="flex flex-1 flex-col gap-6 px-3 py-6">
+      <ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
         {isLoading && messages.length === 0 && (
           <div className="flex min-h-full flex-1 items-center justify-center">
             <LoadingSpinner className="text-neutral-600" />
@@ -0,0 +1,10 @@
+import { parseAsString, useQueryState } from "nuqs";
+
+export function useCopilotSessionId() {
+  const [urlSessionId, setUrlSessionId] = useQueryState(
+    "sessionId",
+    parseAsString,
+  );
+
+  return { urlSessionId, setUrlSessionId };
+}
@@ -1,126 +0,0 @@
-import { getGetV2GetSessionQueryKey } from "@/app/api/__generated__/endpoints/chat/chat";
-import { useQueryClient } from "@tanstack/react-query";
-import type { UIDataTypes, UIMessage, UITools } from "ai";
-import { useCallback, useEffect, useRef } from "react";
-import { convertChatSessionMessagesToUiMessages } from "../helpers/convertChatSessionToUiMessages";
-
-const OPERATING_TYPES = new Set([
-  "operation_started",
-  "operation_pending",
-  "operation_in_progress",
-]);
-
-const POLL_INTERVAL_MS = 1_500;
-
-/**
- * Detects whether any message contains a tool part whose output indicates
- * a long-running operation is still in progress.
- */
-function hasOperatingTool(
-  messages: UIMessage<unknown, UIDataTypes, UITools>[],
-) {
-  for (const msg of messages) {
-    for (const part of msg.parts) {
-      if (!part.type.startsWith("tool-")) continue;
-      const toolPart = part as { output?: unknown };
-      if (!toolPart.output) continue;
-      const output =
-        typeof toolPart.output === "string"
-          ? safeParse(toolPart.output)
-          : toolPart.output;
-      if (
-        output &&
-        typeof output === "object" &&
-        "type" in output &&
-        OPERATING_TYPES.has((output as { type: string }).type)
-      ) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-function safeParse(value: string): unknown {
-  try {
-    return JSON.parse(value);
-  } catch {
-    return null;
-  }
-}
-
-/**
- * Polls the session endpoint while any tool is in an "operating" state
- * (operation_started / operation_pending / operation_in_progress).
- *
- * When the session data shows the tool output has changed (e.g. to
- * agent_saved), it calls `setMessages` with the updated messages.
- */
-export function useLongRunningToolPolling(
-  sessionId: string | null,
-  messages: UIMessage<unknown, UIDataTypes, UITools>[],
-  setMessages: (
-    updater: (
-      prev: UIMessage<unknown, UIDataTypes, UITools>[],
-    ) => UIMessage<unknown, UIDataTypes, UITools>[],
-  ) => void,
-) {
-  const queryClient = useQueryClient();
-  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
-
-  const stopPolling = useCallback(() => {
-    if (intervalRef.current) {
-      clearInterval(intervalRef.current);
-      intervalRef.current = null;
-    }
-  }, []);
-
-  const poll = useCallback(async () => {
-    if (!sessionId) return;
-
-    // Invalidate the query cache so the next fetch gets fresh data
-    await queryClient.invalidateQueries({
-      queryKey: getGetV2GetSessionQueryKey(sessionId),
-    });
-
-    // Fetch fresh session data
-    const data = queryClient.getQueryData<{
-      status: number;
-      data: { messages?: unknown[] };
-    }>(getGetV2GetSessionQueryKey(sessionId));
-
-    if (data?.status !== 200 || !data.data.messages) return;
-
-    const freshMessages = convertChatSessionMessagesToUiMessages(
-      sessionId,
-      data.data.messages,
-    );
-
-    if (!freshMessages || freshMessages.length === 0) return;
-
-    // Update when the long-running tool completed
-    if (!hasOperatingTool(freshMessages)) {
-      setMessages(() => freshMessages);
-      stopPolling();
-    }
-  }, [sessionId, queryClient, setMessages, stopPolling]);
-
-  useEffect(() => {
-    const shouldPoll = hasOperatingTool(messages);
-
-    // Always clear any previous interval first so we never leak timers
-    // when the effect re-runs due to dependency changes (e.g. messages
-    // updating as the LLM streams text after the tool call).
-    stopPolling();
-
-    if (shouldPoll && sessionId) {
-      intervalRef.current = setInterval(() => {
-        poll();
-      }, POLL_INTERVAL_MS);
-    }
-
-    return () => {
-      stopPolling();
-    };
-  }, [messages, sessionId, poll, stopPolling]);
-}
@@ -1,30 +1,24 @@
 "use client";

-import { Button } from "@/components/atoms/Button/Button";
-import { Text } from "@/components/atoms/Text/Text";
-import {
-  BookOpenIcon,
-  CheckFatIcon,
-  PencilSimpleIcon,
-  WarningDiamondIcon,
-} from "@phosphor-icons/react";
+import { WarningDiamondIcon } from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
-import NextLink from "next/link";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
+import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,
   ContentCodeBlock,
   ContentGrid,
   ContentHint,
+  ContentLink,
   ContentMessage,
 } from "../../components/ToolAccordion/AccordionContent";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
+import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress";
 import {
   ClarificationQuestionsCard,
   ClarifyingQuestion,
 } from "./components/ClarificationQuestionsCard";
-import { MiniGame } from "./components/MiniGame/MiniGame";
 import {
   AccordionIcon,
   formatMaybeJson,
@@ -58,7 +52,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon = <AccordionIcon />;

   if (isAgentSavedOutput(output)) {
-    return { icon, title: output.agent_name, expanded: true };
+    return { icon, title: output.agent_name };
   }
   if (isAgentPreviewOutput(output)) {
     return {
@@ -84,7 +78,6 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     return {
       icon,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
-      expanded: true,
     };
   }
   return {
@@ -114,6 +107,8 @@ export function CreateAgentTool({ part }: Props) {
     isOperationPendingOutput(output) ||
     isOperationInProgressOutput(output));

+  const progress = useAsymptoticProgress(isOperating);
+
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&
@@ -157,53 +152,31 @@ export function CreateAgentTool({ part }: Props) {
       <ToolAccordion {...getAccordionMeta(output)}>
         {isOperating && (
           <ContentGrid>
-            <MiniGame />
+            <ProgressBar value={progress} />
             <ContentHint>
-              This could take a few minutes — play while you wait!
+              This could take a few minutes, grab a coffee ☕
             </ContentHint>
           </ContentGrid>
         )}

         {isAgentSavedOutput(output) && (
-          <div className="rounded-xl border border-border/60 bg-card p-4 shadow-sm">
-            <div className="flex items-baseline gap-2">
-              <CheckFatIcon
-                size={18}
-                weight="regular"
-                className="relative top-1 text-green-500"
-              />
-              <Text
-                variant="body-medium"
-                className="text-blacks mb-2 text-[16px]"
-              >
-                {output.message}
-              </Text>
+          <ContentGrid>
+            <ContentMessage>{output.message}</ContentMessage>
+            <div className="flex flex-wrap gap-2">
+              <ContentLink href={output.library_agent_link}>
+                Open in library
+              </ContentLink>
+              <ContentLink href={output.agent_page_link}>
+                Open in builder
+              </ContentLink>
             </div>
-            <div className="mt-3 flex flex-wrap gap-4">
-              <Button variant="outline" size="small">
-                <NextLink
-                  href={output.library_agent_link}
-                  className="inline-flex items-center gap-1.5"
-                  target="_blank"
-                  rel="noopener noreferrer"
-                >
-                  <BookOpenIcon size={14} weight="regular" />
-                  Open in library
-                </NextLink>
-              </Button>
-              <Button variant="outline" size="small">
-                <NextLink
-                  href={output.agent_page_link}
-                  target="_blank"
-                  rel="noopener noreferrer"
-                  className="inline-flex items-center gap-1.5"
-                >
-                  <PencilSimpleIcon size={14} weight="regular" />
-                  Open in builder
-                </NextLink>
-              </Button>
-            </div>
-          </div>
+            <ContentCodeBlock>
+              {truncateText(
+                formatMaybeJson({ agent_id: output.agent_id }),
+                800,
+              )}
+            </ContentCodeBlock>
+          </ContentGrid>
         )}

         {isAgentPreviewOutput(output) && (
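Note: `useAsymptoticProgress(isOperating)` is imported above but its implementation is not part of this diff. Judging by the name and its use as `<ProgressBar value={progress} />`, it presumably yields a value that climbs quickly at first and then flattens out without ever reaching 100 while the operation runs. A purely hypothetical sketch of such a hook, for orientation only:

```tsx
import { useEffect, useState } from "react";

// Hypothetical: progress follows 100 * (1 - e^(-t / tau)), capped below 100.
// The real hook in hooks/useAsymptoticProgress may differ.
export function useAsymptoticProgress(active: boolean, tauMs = 30_000): number {
  const [progress, setProgress] = useState(0);

  useEffect(() => {
    if (!active) {
      setProgress(0); // reset when the operation ends
      return;
    }
    const start = Date.now();
    const id = setInterval(() => {
      const t = Date.now() - start;
      setProgress(Math.min(99, 100 * (1 - Math.exp(-t / tauMs))));
    }, 250);
    return () => clearInterval(id);
  }, [active, tauMs]);

  return progress;
}
```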
@@ -1,21 +0,0 @@
-"use client";
-
-import { useMiniGame } from "./useMiniGame";
-
-export function MiniGame() {
-  const { canvasRef } = useMiniGame();
-
-  return (
-    <div
-      className="w-full overflow-hidden rounded-md bg-background text-foreground"
-      style={{ border: "1px solid #d17fff" }}
-    >
-      <canvas
-        ref={canvasRef}
-        tabIndex={0}
-        className="block w-full outline-none"
-        style={{ imageRendering: "pixelated" }}
-      />
-    </div>
-  );
-}
@@ -1,579 +0,0 @@
-import { useEffect, useRef } from "react";
-
-/* ------------------------------------------------------------------ */
-/* Constants                                                           */
-/* ------------------------------------------------------------------ */
-
-const CANVAS_HEIGHT = 150;
-const GRAVITY = 0.55;
-const JUMP_FORCE = -9.5;
-const BASE_SPEED = 3;
-const SPEED_INCREMENT = 0.0008;
-const SPAWN_MIN = 70;
-const SPAWN_MAX = 130;
-const CHAR_SIZE = 18;
-const CHAR_X = 50;
-const GROUND_PAD = 20;
-const STORAGE_KEY = "copilot-minigame-highscore";
-
-// Colors
-const COLOR_BG = "#E8EAF6";
-const COLOR_CHAR = "#263238";
-const COLOR_BOSS = "#F50057";
-
-// Boss
-const BOSS_SIZE = 36;
-const BOSS_ENTER_SPEED = 2;
-const BOSS_LEAVE_SPEED = 3;
-const BOSS_SHOOT_COOLDOWN = 90;
-const BOSS_SHOTS_TO_EVADE = 5;
-const BOSS_INTERVAL = 20; // every N score
-const PROJ_SPEED = 4.5;
-const PROJ_SIZE = 12;
-
-/* ------------------------------------------------------------------ */
-/* Types                                                               */
-/* ------------------------------------------------------------------ */
-
-interface Obstacle {
-  x: number;
-  width: number;
-  height: number;
-  scored: boolean;
-}
-
-interface Projectile {
-  x: number;
-  y: number;
-  speed: number;
-  evaded: boolean;
-  type: "low" | "high";
-}
-
-interface BossState {
-  phase: "inactive" | "entering" | "fighting" | "leaving";
-  x: number;
-  targetX: number;
-  shotsEvaded: number;
-  cooldown: number;
-  projectiles: Projectile[];
-  bob: number;
-}
-
-interface GameState {
-  charY: number;
-  vy: number;
-  obstacles: Obstacle[];
-  score: number;
-  highScore: number;
-  speed: number;
-  frame: number;
-  nextSpawn: number;
-  running: boolean;
-  over: boolean;
-  groundY: number;
-  boss: BossState;
-  bossThreshold: number;
-}
-
-/* ------------------------------------------------------------------ */
-/* Helpers                                                             */
-/* ------------------------------------------------------------------ */
-
-function randInt(min: number, max: number) {
-  return Math.floor(Math.random() * (max - min + 1)) + min;
-}
-
-function readHighScore(): number {
-  try {
-    return parseInt(localStorage.getItem(STORAGE_KEY) || "0", 10) || 0;
-  } catch {
-    return 0;
-  }
-}
-
-function writeHighScore(score: number) {
-  try {
-    localStorage.setItem(STORAGE_KEY, String(score));
-  } catch {
-    /* noop */
-  }
-}
-
-function makeBoss(): BossState {
-  return {
-    phase: "inactive",
-    x: 0,
-    targetX: 0,
-    shotsEvaded: 0,
-    cooldown: 0,
-    projectiles: [],
-    bob: 0,
-  };
-}
-
-function makeState(groundY: number): GameState {
-  return {
-    charY: groundY - CHAR_SIZE,
-    vy: 0,
-    obstacles: [],
-    score: 0,
-    highScore: readHighScore(),
-    speed: BASE_SPEED,
-    frame: 0,
-    nextSpawn: randInt(SPAWN_MIN, SPAWN_MAX),
-    running: false,
-    over: false,
-    groundY,
-    boss: makeBoss(),
-    bossThreshold: BOSS_INTERVAL,
-  };
-}
-
-function gameOver(s: GameState) {
-  s.running = false;
-  s.over = true;
-  if (s.score > s.highScore) {
-    s.highScore = s.score;
-    writeHighScore(s.score);
-  }
-}
-
-/* ------------------------------------------------------------------ */
-/* Projectile collision — shared between fighting & leaving phases     */
-/* ------------------------------------------------------------------ */
-
-/** Returns true if the player died. */
-function tickProjectiles(s: GameState): boolean {
-  const boss = s.boss;
-
-  for (const p of boss.projectiles) {
-    p.x -= p.speed;
-
-    if (!p.evaded && p.x + PROJ_SIZE < CHAR_X) {
-      p.evaded = true;
-      boss.shotsEvaded++;
-    }
-
-    // Collision
-    if (
-      !p.evaded &&
-      CHAR_X + CHAR_SIZE > p.x &&
-      CHAR_X < p.x + PROJ_SIZE &&
-      s.charY + CHAR_SIZE > p.y &&
-      s.charY < p.y + PROJ_SIZE
-    ) {
-      gameOver(s);
-      return true;
-    }
-  }
-
-  boss.projectiles = boss.projectiles.filter((p) => p.x + PROJ_SIZE > -20);
-  return false;
-}
-
-/* ------------------------------------------------------------------ */
-/* Update                                                              */
-/* ------------------------------------------------------------------ */
-
-function update(s: GameState, canvasWidth: number) {
-  if (!s.running) return;
-
-  s.frame++;
-
-  // Speed only ramps during regular play
-  if (s.boss.phase === "inactive") {
-    s.speed = BASE_SPEED + s.frame * SPEED_INCREMENT;
-  }
-
-  // ---- Character physics (always active) ---- //
-  s.vy += GRAVITY;
-  s.charY += s.vy;
-  if (s.charY + CHAR_SIZE >= s.groundY) {
-    s.charY = s.groundY - CHAR_SIZE;
-    s.vy = 0;
-  }
-
-  // ---- Trigger boss ---- //
-  if (s.boss.phase === "inactive" && s.score >= s.bossThreshold) {
-    s.boss.phase = "entering";
-    s.boss.x = canvasWidth + 10;
-    s.boss.targetX = canvasWidth - BOSS_SIZE - 40;
-    s.boss.shotsEvaded = 0;
-    s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
-    s.boss.projectiles = [];
-    s.obstacles = [];
-  }
-
-  // ---- Boss: entering ---- //
-  if (s.boss.phase === "entering") {
-    s.boss.bob = Math.sin(s.frame * 0.05) * 3;
-    s.boss.x -= BOSS_ENTER_SPEED;
-    if (s.boss.x <= s.boss.targetX) {
-      s.boss.x = s.boss.targetX;
-      s.boss.phase = "fighting";
-    }
-    return; // no obstacles while entering
-  }
-
-  // ---- Boss: fighting ---- //
-  if (s.boss.phase === "fighting") {
-    s.boss.bob = Math.sin(s.frame * 0.05) * 3;
-
-    // Shoot
-    s.boss.cooldown--;
-    if (s.boss.cooldown <= 0) {
-      const isLow = Math.random() < 0.5;
-      s.boss.projectiles.push({
-        x: s.boss.x - PROJ_SIZE,
-        y: isLow ? s.groundY - 14 : s.groundY - 70,
-        speed: PROJ_SPEED,
-        evaded: false,
-        type: isLow ? "low" : "high",
-      });
-      s.boss.cooldown = BOSS_SHOOT_COOLDOWN;
-    }
-
-    if (tickProjectiles(s)) return;
-
-    // Boss defeated?
-    if (s.boss.shotsEvaded >= BOSS_SHOTS_TO_EVADE) {
-      s.boss.phase = "leaving";
-      s.score += 5; // bonus
-      s.bossThreshold = s.score + BOSS_INTERVAL;
-    }
-    return;
-  }
-
-  // ---- Boss: leaving ---- //
-  if (s.boss.phase === "leaving") {
-    s.boss.bob = Math.sin(s.frame * 0.05) * 3;
-    s.boss.x += BOSS_LEAVE_SPEED;
-
-    // Still check in-flight projectiles
-    if (tickProjectiles(s)) return;
-
-    if (s.boss.x > canvasWidth + 50) {
-      s.boss = makeBoss();
-      s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2);
-    }
-    return;
-  }
-
-  // ---- Regular obstacle play ---- //
-  if (s.frame >= s.nextSpawn) {
-    s.obstacles.push({
-      x: canvasWidth + 10,
-      width: randInt(10, 16),
-      height: randInt(20, 48),
-      scored: false,
-    });
-    s.nextSpawn = s.frame + randInt(SPAWN_MIN, SPAWN_MAX);
-  }
-
-  for (const o of s.obstacles) {
-    o.x -= s.speed;
-    if (!o.scored && o.x + o.width < CHAR_X) {
-      o.scored = true;
-      s.score++;
-    }
-  }
-
-  s.obstacles = s.obstacles.filter((o) => o.x + o.width > -20);
-
-  for (const o of s.obstacles) {
-    const oY = s.groundY - o.height;
-    if (
-      CHAR_X + CHAR_SIZE > o.x &&
-      CHAR_X < o.x + o.width &&
-      s.charY + CHAR_SIZE > oY
-    ) {
-      gameOver(s);
-      return;
-    }
-  }
-}
-
-/* ------------------------------------------------------------------ */
-/* Drawing                                                             */
-/* ------------------------------------------------------------------ */
-
-function drawBoss(ctx: CanvasRenderingContext2D, s: GameState, bg: string) {
-  const bx = s.boss.x;
-  const by = s.groundY - BOSS_SIZE + s.boss.bob;
-
-  // Body
-  ctx.save();
-  ctx.fillStyle = COLOR_BOSS;
-  ctx.globalAlpha = 0.9;
-  ctx.beginPath();
-  ctx.roundRect(bx, by, BOSS_SIZE, BOSS_SIZE, 4);
-  ctx.fill();
-  ctx.restore();
-
-  // Eyes
-  ctx.save();
-  ctx.fillStyle = bg;
-  const eyeY = by + 13;
-  ctx.beginPath();
-  ctx.arc(bx + 10, eyeY, 4, 0, Math.PI * 2);
-  ctx.fill();
-  ctx.beginPath();
-  ctx.arc(bx + 26, eyeY, 4, 0, Math.PI * 2);
-  ctx.fill();
-  ctx.restore();
-
-  // Angry eyebrows
-  ctx.save();
-  ctx.strokeStyle = bg;
-  ctx.lineWidth = 2;
-  ctx.beginPath();
-  ctx.moveTo(bx + 5, eyeY - 7);
-  ctx.lineTo(bx + 14, eyeY - 4);
-  ctx.stroke();
-  ctx.beginPath();
-  ctx.moveTo(bx + 31, eyeY - 7);
-  ctx.lineTo(bx + 22, eyeY - 4);
-  ctx.stroke();
-  ctx.restore();
-
-  // Zigzag mouth
-  ctx.save();
-  ctx.strokeStyle = bg;
-  ctx.lineWidth = 1.5;
-  ctx.beginPath();
-  ctx.moveTo(bx + 10, by + 27);
-  ctx.lineTo(bx + 14, by + 24);
-  ctx.lineTo(bx + 18, by + 27);
-  ctx.lineTo(bx + 22, by + 24);
-  ctx.lineTo(bx + 26, by + 27);
-  ctx.stroke();
-  ctx.restore();
-}
-
-function drawProjectiles(ctx: CanvasRenderingContext2D, boss: BossState) {
-  ctx.save();
-  ctx.fillStyle = COLOR_BOSS;
-  ctx.globalAlpha = 0.8;
-  for (const p of boss.projectiles) {
-    if (p.evaded) continue;
-    ctx.beginPath();
-    ctx.arc(
-      p.x + PROJ_SIZE / 2,
-      p.y + PROJ_SIZE / 2,
-      PROJ_SIZE / 2,
-      0,
-      Math.PI * 2,
-    );
-    ctx.fill();
-  }
-  ctx.restore();
-}
-
-function draw(
-  ctx: CanvasRenderingContext2D,
-  s: GameState,
-  w: number,
-  h: number,
-  fg: string,
-  started: boolean,
-) {
-  ctx.fillStyle = COLOR_BG;
-  ctx.fillRect(0, 0, w, h);
-
-  // Ground
-  ctx.save();
-  ctx.strokeStyle = fg;
-  ctx.globalAlpha = 0.15;
-  ctx.setLineDash([4, 4]);
-  ctx.beginPath();
-  ctx.moveTo(0, s.groundY);
-  ctx.lineTo(w, s.groundY);
-  ctx.stroke();
-  ctx.restore();
-
-  // Character
-  ctx.save();
-  ctx.fillStyle = COLOR_CHAR;
-  ctx.globalAlpha = 0.85;
-  ctx.beginPath();
-  ctx.roundRect(CHAR_X, s.charY, CHAR_SIZE, CHAR_SIZE, 3);
-  ctx.fill();
-  ctx.restore();
-
-  // Eyes
-  ctx.save();
-  ctx.fillStyle = COLOR_BG;
-  ctx.beginPath();
-  ctx.arc(CHAR_X + 6, s.charY + 7, 2.5, 0, Math.PI * 2);
-  ctx.fill();
-  ctx.beginPath();
-  ctx.arc(CHAR_X + 12, s.charY + 7, 2.5, 0, Math.PI * 2);
-  ctx.fill();
-  ctx.restore();
-
-  // Obstacles
-  ctx.save();
-  ctx.fillStyle = fg;
-  ctx.globalAlpha = 0.55;
-  for (const o of s.obstacles) {
-    ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height);
-  }
-  ctx.restore();
-
-  // Boss + projectiles
-  if (s.boss.phase !== "inactive") {
-    drawBoss(ctx, s, COLOR_BG);
-    drawProjectiles(ctx, s.boss);
-  }
-
-  // Score HUD
-  ctx.save();
-  ctx.fillStyle = fg;
-  ctx.globalAlpha = 0.5;
-  ctx.font = "bold 11px monospace";
-  ctx.textAlign = "right";
-  ctx.fillText(`Score: ${s.score}`, w - 12, 20);
-  ctx.fillText(`Best: ${s.highScore}`, w - 12, 34);
-  if (s.boss.phase === "fighting") {
-    ctx.fillText(
-      `Evade: ${s.boss.shotsEvaded}/${BOSS_SHOTS_TO_EVADE}`,
-      w - 12,
-      48,
-    );
-  }
-  ctx.restore();
-
-  // Prompts
-  if (!started && !s.running && !s.over) {
-    ctx.save();
-    ctx.fillStyle = fg;
-    ctx.globalAlpha = 0.5;
-    ctx.font = "12px sans-serif";
-    ctx.textAlign = "center";
-    ctx.fillText("Click or press Space to play while you wait", w / 2, h / 2);
-    ctx.restore();
-  }
-
-  if (s.over) {
-    ctx.save();
-    ctx.fillStyle = fg;
-    ctx.globalAlpha = 0.7;
-    ctx.font = "bold 13px sans-serif";
-    ctx.textAlign = "center";
-    ctx.fillText("Game Over", w / 2, h / 2 - 8);
-    ctx.font = "11px sans-serif";
-    ctx.fillText("Click or Space to restart", w / 2, h / 2 + 10);
-    ctx.restore();
-  }
-}
-
-/* ------------------------------------------------------------------ */
-/* Hook                                                                */
-/* ------------------------------------------------------------------ */
-
-export function useMiniGame() {
-  const canvasRef = useRef<HTMLCanvasElement>(null);
-  const stateRef = useRef<GameState | null>(null);
-  const rafRef = useRef(0);
-  const startedRef = useRef(false);
-
-  useEffect(() => {
-    const canvas = canvasRef.current;
-    if (!canvas) return;
-
-    const container = canvas.parentElement;
-    if (container) {
-      canvas.width = container.clientWidth;
-      canvas.height = CANVAS_HEIGHT;
-    }
-
-    const groundY = canvas.height - GROUND_PAD;
-    stateRef.current = makeState(groundY);
-
-    const style = getComputedStyle(canvas);
-    let fg = style.color || "#71717a";
-
-    // -------------------------------------------------------------- //
-    // Jump                                                            //
-    // -------------------------------------------------------------- //
-    function jump() {
-      const s = stateRef.current;
-      if (!s) return;
-
-      if (s.over) {
-        const hs = s.highScore;
-        const gy = s.groundY;
-        stateRef.current = makeState(gy);
-        stateRef.current.highScore = hs;
-        stateRef.current.running = true;
-        startedRef.current = true;
-        return;
-      }
-
-      if (!s.running) {
-        s.running = true;
-        startedRef.current = true;
-        return;
-      }
-
-      // Only jump when on the ground
-      if (s.charY + CHAR_SIZE >= s.groundY) {
-        s.vy = JUMP_FORCE;
-      }
-    }
-
-    function onKey(e: KeyboardEvent) {
-      if (e.code === "Space" || e.key === " ") {
-        e.preventDefault();
-        jump();
-      }
-    }
-
-    function onClick() {
-      canvas?.focus();
-      jump();
-    }
-
-    // -------------------------------------------------------------- //
-    // Loop                                                            //
-    // -------------------------------------------------------------- //
-    function loop() {
-      const s = stateRef.current;
-      if (!canvas || !s) return;
-      const ctx = canvas.getContext("2d");
-      if (!ctx) return;
-
-      update(s, canvas.width);
-      draw(ctx, s, canvas.width, canvas.height, fg, startedRef.current);
-      rafRef.current = requestAnimationFrame(loop);
-    }
-
-    rafRef.current = requestAnimationFrame(loop);
-
-    canvas.addEventListener("click", onClick);
-    canvas.addEventListener("keydown", onKey);
-
-    const observer = new ResizeObserver((entries) => {
-      for (const entry of entries) {
-        canvas.width = entry.contentRect.width;
-        canvas.height = CANVAS_HEIGHT;
-        if (stateRef.current) {
-          stateRef.current.groundY = canvas.height - GROUND_PAD;
-        }
-        const cs = getComputedStyle(canvas);
-        fg = cs.color || fg;
-      }
-    });
-    if (container) observer.observe(container);

-    return () => {
-      cancelAnimationFrame(rafRef.current);
-      canvas.removeEventListener("click", onClick);
-      canvas.removeEventListener("keydown", onKey);
-      observer.disconnect();
-    };
-  }, []);
-
-  return { canvasRef };
-}
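Aside: both collision checks in the deleted game (player vs. projectile, player vs. obstacle) are standard axis-aligned bounding-box (AABB) overlap tests: two rectangles intersect exactly when they overlap on both axes. As a standalone helper, a sketch for clarity rather than code from the repo:

```ts
interface Rect {
  x: number;
  y: number;
  w: number;
  h: number;
}

// Two axis-aligned rectangles overlap iff they overlap on x AND y.
function aabbOverlap(a: Rect, b: Rect): boolean {
  return (
    a.x + a.w > b.x && a.x < b.x + b.w && a.y + a.h > b.y && a.y < b.y + b.h
  );
}

// The projectile check above is aabbOverlap with
//   a = { x: CHAR_X, y: s.charY, w: CHAR_SIZE, h: CHAR_SIZE }
//   b = { x: p.x,    y: p.y,     w: PROJ_SIZE, h: PROJ_SIZE }
// The obstacle check drops the top-edge comparison because obstacles stand
// on the ground, so the vertical test reduces to charY + CHAR_SIZE > oY.
```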
@@ -3,7 +3,6 @@
 import type { ToolUIPart } from "ai";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
-import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
 import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
 import { ErrorCard } from "./components/ErrorCard/ErrorCard";
 import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
@@ -12,7 +11,6 @@ import {
   getAnimationText,
   getRunBlockToolOutput,
   isRunBlockBlockOutput,
-  isRunBlockDetailsOutput,
   isRunBlockErrorOutput,
   isRunBlockSetupRequirementsOutput,
   ToolIcon,
@@ -43,7 +41,6 @@ export function RunBlockTool({ part }: Props) {
     part.state === "output-available" &&
     !!output &&
     (isRunBlockBlockOutput(output) ||
-      isRunBlockDetailsOutput(output) ||
       isRunBlockSetupRequirementsOutput(output) ||
       isRunBlockErrorOutput(output));

@@ -61,10 +58,6 @@ export function RunBlockTool({ part }: Props) {
       <ToolAccordion {...getAccordionMeta(output)}>
         {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}

-        {isRunBlockDetailsOutput(output) && (
-          <BlockDetailsCard output={output} />
-        )}
-
         {isRunBlockSetupRequirementsOutput(output) && (
           <SetupRequirementsCard output={output} />
         )}
@@ -1,188 +0,0 @@
-import type { Meta, StoryObj } from "@storybook/nextjs";
-import { ResponseType } from "@/app/api/__generated__/models/responseType";
-import type { BlockDetailsResponse } from "../../helpers";
-import { BlockDetailsCard } from "./BlockDetailsCard";
-
-const meta: Meta<typeof BlockDetailsCard> = {
-  title: "Copilot/RunBlock/BlockDetailsCard",
-  component: BlockDetailsCard,
-  parameters: {
-    layout: "centered",
-  },
-  tags: ["autodocs"],
-  decorators: [
-    (Story) => (
-      <div style={{ maxWidth: 480 }}>
-        <Story />
-      </div>
-    ),
-  ],
-};
-
-export default meta;
-type Story = StoryObj<typeof meta>;
-
-const baseBlock: BlockDetailsResponse = {
-  type: ResponseType.block_details,
-  message:
-    "Here are the details for the GetWeather block. Provide the required inputs to run it.",
-  session_id: "session-123",
-  user_authenticated: true,
-  block: {
-    id: "block-abc-123",
-    name: "GetWeather",
-    description: "Fetches current weather data for a given location.",
-    inputs: {
-      type: "object",
-      properties: {
-        location: {
-          title: "Location",
-          type: "string",
-          description:
-            "City name or coordinates (e.g. 'London' or '51.5,-0.1')",
-        },
-        units: {
-          title: "Units",
-          type: "string",
-          description: "Temperature units: 'metric' or 'imperial'",
-        },
-      },
-      required: ["location"],
-    },
-    outputs: {
-      type: "object",
-      properties: {
-        temperature: {
-          title: "Temperature",
-          type: "number",
-          description: "Current temperature in the requested units",
-        },
-        condition: {
-          title: "Condition",
-          type: "string",
-          description: "Weather condition description (e.g. 'Sunny', 'Rain')",
-        },
-      },
-    },
-    credentials: [],
-  },
-};
-
-export const Default: Story = {
-  args: {
-    output: baseBlock,
-  },
-};
-
-export const InputsOnly: Story = {
-  args: {
-    output: {
-      ...baseBlock,
-      message: "This block requires inputs. No outputs are defined.",
-      block: {
-        ...baseBlock.block,
-        outputs: {},
-      },
-    },
-  },
-};
-
-export const OutputsOnly: Story = {
-  args: {
-    output: {
-      ...baseBlock,
-      message: "This block has no required inputs.",
-      block: {
-        ...baseBlock.block,
-        inputs: {},
-      },
-    },
-  },
-};
-
-export const ManyFields: Story = {
-  args: {
-    output: {
-      ...baseBlock,
-      message: "Block with many input and output fields.",
-      block: {
-        ...baseBlock.block,
-        name: "SendEmail",
-        description: "Sends an email via SMTP.",
-        inputs: {
-          type: "object",
-          properties: {
-            to: {
-              title: "To",
-              type: "string",
-              description: "Recipient email address",
-            },
-            subject: {
-              title: "Subject",
-              type: "string",
-              description: "Email subject line",
-            },
-            body: {
-              title: "Body",
-              type: "string",
-              description: "Email body content",
-            },
-            cc: {
-              title: "CC",
-              type: "string",
-              description: "CC recipients (comma-separated)",
-            },
-            bcc: {
-              title: "BCC",
-              type: "string",
-              description: "BCC recipients (comma-separated)",
-            },
-          },
-          required: ["to", "subject", "body"],
-        },
-        outputs: {
-          type: "object",
-          properties: {
-            message_id: {
-              title: "Message ID",
-              type: "string",
-              description: "Unique ID of the sent email",
-            },
-            status: {
-              title: "Status",
-              type: "string",
-              description: "Delivery status",
-            },
-          },
-        },
-      },
-    },
-  },
-};
-
-export const NoFieldDescriptions: Story = {
-  args: {
-    output: {
-      ...baseBlock,
-      message: "Fields without descriptions.",
-      block: {
-        ...baseBlock.block,
-        name: "SimpleBlock",
-        inputs: {
-          type: "object",
-          properties: {
-            input_a: { title: "Input A", type: "string" },
-            input_b: { title: "Input B", type: "number" },
-          },
-          required: ["input_a"],
-        },
-        outputs: {
-          type: "object",
-          properties: {
-            result: { title: "Result", type: "string" },
-          },
-        },
-      },
-    },
-  },
-};
@@ -1,103 +0,0 @@
-"use client";
-
-import type { BlockDetailsResponse } from "../../helpers";
-import {
-  ContentBadge,
-  ContentCard,
-  ContentCardDescription,
-  ContentCardTitle,
-  ContentGrid,
-  ContentMessage,
-} from "../../../../components/ToolAccordion/AccordionContent";
-
-interface Props {
-  output: BlockDetailsResponse;
-}
-
-function SchemaFieldList({
-  title,
-  properties,
-  required,
-}: {
-  title: string;
-  properties: Record<string, unknown>;
-  required?: string[];
-}) {
-  const entries = Object.entries(properties);
-  if (entries.length === 0) return null;
-
-  const requiredSet = new Set(required ?? []);
-
-  return (
-    <ContentCard>
-      <ContentCardTitle className="text-xs">{title}</ContentCardTitle>
-      <div className="mt-2 grid gap-2">
-        {entries.map(([name, schema]) => {
-          const field = schema as Record<string, unknown> | undefined;
-          const fieldTitle =
-            typeof field?.title === "string" ? field.title : name;
-          const fieldType =
-            typeof field?.type === "string" ? field.type : "unknown";
-          const description =
-            typeof field?.description === "string"
-              ? field.description
-              : undefined;
-
-          return (
-            <div key={name} className="rounded-xl border p-2">
-              <div className="flex items-center justify-between gap-2">
-                <ContentCardTitle className="text-xs">
-                  {fieldTitle}
-                </ContentCardTitle>
-                <div className="flex gap-1">
-                  <ContentBadge>{fieldType}</ContentBadge>
-                  {requiredSet.has(name) && (
-                    <ContentBadge>Required</ContentBadge>
-                  )}
-                </div>
-              </div>
-              {description && (
-                <ContentCardDescription className="mt-1 text-xs">
-                  {description}
-                </ContentCardDescription>
-              )}
-            </div>
-          );
-        })}
-      </div>
-    </ContentCard>
-  );
-}
-
-export function BlockDetailsCard({ output }: Props) {
-  const inputs = output.block.inputs as {
-    properties?: Record<string, unknown>;
-    required?: string[];
-  } | null;
-  const outputs = output.block.outputs as {
-    properties?: Record<string, unknown>;
-    required?: string[];
-  } | null;
-
-  return (
-    <ContentGrid>
-      <ContentMessage>{output.message}</ContentMessage>
-
-      {inputs?.properties && Object.keys(inputs.properties).length > 0 && (
-        <SchemaFieldList
-          title="Inputs"
-          properties={inputs.properties}
-          required={inputs.required}
-        />
-      )}
-
-      {outputs?.properties && Object.keys(outputs.properties).length > 0 && (
-        <SchemaFieldList
-          title="Outputs"
-          properties={outputs.properties}
-          required={outputs.required}
-        />
-      )}
-    </ContentGrid>
-  );
-}
@@ -10,37 +10,18 @@ import {
 import type { ToolUIPart } from "ai";
 import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

-/** Block details returned on first run_block attempt (before input_data provided). */
-export interface BlockDetailsResponse {
-  type: typeof ResponseType.block_details;
-  message: string;
-  session_id?: string | null;
-  block: {
-    id: string;
-    name: string;
-    description: string;
-    inputs: Record<string, unknown>;
-    outputs: Record<string, unknown>;
-    credentials: unknown[];
-  };
-  user_authenticated: boolean;
-}
-
 export interface RunBlockInput {
   block_id?: string;
-  block_name?: string;
   input_data?: Record<string, unknown>;
 }

 export type RunBlockToolOutput =
   | SetupRequirementsResponse
-  | BlockDetailsResponse
   | BlockOutputResponse
   | ErrorResponse;

 const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
   ResponseType.setup_requirements,
-  ResponseType.block_details,
   ResponseType.block_output,
   ResponseType.error,
 ]);
@@ -54,15 +35,6 @@ export function isRunBlockSetupRequirementsOutput(
   );
 }

-export function isRunBlockDetailsOutput(
-  output: RunBlockToolOutput,
-): output is BlockDetailsResponse {
-  return (
-    output.type === ResponseType.block_details ||
-    ("block" in output && typeof output.block === "object")
-  );
-}
-
 export function isRunBlockBlockOutput(
   output: RunBlockToolOutput,
 ): output is BlockOutputResponse {
@@ -92,7 +64,6 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
     return output as RunBlockToolOutput;
   }
   if ("block_id" in output) return output as BlockOutputResponse;
-  if ("block" in output) return output as BlockDetailsResponse;
   if ("setup_info" in output) return output as SetupRequirementsResponse;
   if ("error" in output || "details" in output)
     return output as ErrorResponse;
@@ -113,25 +84,17 @@ export function getAnimationText(part: {
   output?: unknown;
 }): string {
   const input = part.input as RunBlockInput | undefined;
-  const blockName = input?.block_name?.trim();
   const blockId = input?.block_id?.trim();
-  // Prefer block_name if available, otherwise fall back to block_id
-  const blockText = blockName
-    ? ` "${blockName}"`
-    : blockId
-      ? ` "${blockId}"`
-      : "";
+  const blockText = blockId ? ` "${blockId}"` : "";

   switch (part.state) {
     case "input-streaming":
     case "input-available":
-      return `Running${blockText}`;
+      return `Running the block${blockText}`;
     case "output-available": {
       const output = parseOutput(part.output);
-      if (!output) return `Running${blockText}`;
+      if (!output) return `Running the block${blockText}`;
       if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
-      if (isRunBlockDetailsOutput(output))
-        return `Details for "${output.block.name}"`;
       if (isRunBlockSetupRequirementsOutput(output)) {
         return `Setup needed for "${output.setup_info.agent_name}"`;
       }
@@ -195,21 +158,6 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
     };
   }

-  if (isRunBlockDetailsOutput(output)) {
-    const inputKeys = Object.keys(
-      (output.block.inputs as { properties?: Record<string, unknown> })
-        ?.properties ?? {},
-    );
-    return {
-      icon,
-      title: output.block.name,
-      description:
-        inputKeys.length > 0
-          ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
-          : output.message,
-    };
-  }
-
   if (isRunBlockSetupRequirementsOutput(output)) {
     const missingCredsCount = Object.keys(
       (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
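Note on the deleted `isRunBlockDetailsOutput`: like the guards that remain, it was a TypeScript user-defined type guard, whose `output is X` return type lets the compiler narrow the `RunBlockToolOutput` union after a runtime check. A minimal sketch of the pattern, with simplified types rather than the project's real ones:

```ts
type Output =
  | { type: "block_output"; block_name: string }
  | { type: "error"; error: string };

// The `o is ...` predicate narrows the union wherever the guard returns true.
function isBlockOutput(
  o: Output,
): o is Extract<Output, { type: "block_output" }> {
  return o.type === "block_output";
}

function describe(o: Output): string {
  if (isBlockOutput(o)) return `Ran "${o.block_name}"`; // narrowed here
  return o.error; // and narrowed to the error member here
}
```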
@@ -1,14 +1,10 @@
 import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
-import { toast } from "@/components/molecules/Toast/use-toast";
 import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
 import { DefaultChatTransport } from "ai";
-import { useEffect, useMemo, useRef, useState } from "react";
+import { useEffect, useMemo, useState } from "react";
 import { useChatSession } from "./useChatSession";
-import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling";
-
-const STREAM_START_TIMEOUT_MS = 12_000;

 export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
@@ -56,24 +52,6 @@ export function useCopilotPage() {
     transport: transport ?? undefined,
   });

-  // Abort the stream if the backend doesn't start sending data within 12s.
-  const stopRef = useRef(stop);
-  stopRef.current = stop;
-  useEffect(() => {
-    if (status !== "submitted") return;
-
-    const timer = setTimeout(() => {
-      stopRef.current();
-      toast({
-        title: "Stream timed out",
-        description: "The server took too long to respond. Please try again.",
-        variant: "destructive",
-      });
-    }, STREAM_START_TIMEOUT_MS);
-
-    return () => clearTimeout(timer);
-  }, [status]);
-
   useEffect(() => {
     if (!hydratedMessages || hydratedMessages.length === 0) return;
     setMessages((prev) => {
@@ -82,11 +60,6 @@ export function useCopilotPage() {
     });
   }, [hydratedMessages, setMessages]);

-  // Poll session endpoint when a long-running tool (create_agent, edit_agent)
-  // is in progress. When the backend completes, the session data will contain
-  // the final tool output — this hook detects the change and updates messages.
-  useLongRunningToolPolling(sessionId, messages, setMessages);
-
   // Clear messages when session is null
   useEffect(() => {
     if (!sessionId) setMessages([]);
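The timeout effect removed above is a common watchdog shape: arm a timer on entering a waiting state, and let the effect cleanup disarm it as soon as the state changes. Stripped to its essentials, an illustrative sketch rather than the removed code verbatim:

```ts
import { useEffect, useRef } from "react";

function useStallWatchdog(waiting: boolean, onStall: () => void, ms = 12_000) {
  const cb = useRef(onStall);
  cb.current = onStall; // keep the latest handler without re-arming the timer

  useEffect(() => {
    if (!waiting) return; // only watch while in the waiting state
    const timer = setTimeout(() => cb.current(), ms);
    return () => clearTimeout(timer); // state changed or unmounted: disarm
  }, [waiting, ms]);
}
```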
@@ -29,7 +29,6 @@ export function ScheduleListItem({
       description={formatDistanceToNow(schedule.next_run_time, {
         addSuffix: true,
       })}
-      descriptionTitle={new Date(schedule.next_run_time).toString()}
       onClick={onClick}
       selected={selected}
       icon={
@@ -7,7 +7,6 @@ import React from "react";
 interface Props {
   title: string;
   description?: string;
-  descriptionTitle?: string;
   icon?: React.ReactNode;
   selected?: boolean;
   onClick?: () => void;
@@ -17,7 +16,6 @@ interface Props {
 export function SidebarItemCard({
   title,
   description,
-  descriptionTitle,
   icon,
   selected,
   onClick,
@@ -40,11 +38,7 @@ export function SidebarItemCard({
         >
           {title}
         </Text>
-        <Text
-          variant="body"
-          className="leading-tight !text-zinc-500"
-          title={descriptionTitle}
-        >
+        <Text variant="body" className="leading-tight !text-zinc-500">
           {description}
         </Text>
       </div>
@@ -81,9 +81,6 @@ export function TaskListItem({
           ? formatDistanceToNow(run.started_at, { addSuffix: true })
           : "—"
       }
-      descriptionTitle={
-        run.started_at ? new Date(run.started_at).toString() : undefined
-      }
       onClick={onClick}
       selected={selected}
       actions={
@@ -1053,7 +1053,6 @@
             "$ref": "#/components/schemas/ClarificationNeededResponse"
           },
           { "$ref": "#/components/schemas/BlockListResponse" },
-          { "$ref": "#/components/schemas/BlockDetailsResponse" },
           { "$ref": "#/components/schemas/BlockOutputResponse" },
           { "$ref": "#/components/schemas/DocSearchResultsResponse" },
           { "$ref": "#/components/schemas/DocPageResponse" },
@@ -6959,58 +6958,6 @@
         "enum": ["run", "byte", "second"],
         "title": "BlockCostType"
       },
-      "BlockDetails": {
-        "properties": {
-          "id": { "type": "string", "title": "Id" },
-          "name": { "type": "string", "title": "Name" },
-          "description": { "type": "string", "title": "Description" },
-          "inputs": {
-            "additionalProperties": true,
-            "type": "object",
-            "title": "Inputs",
-            "default": {}
-          },
-          "outputs": {
-            "additionalProperties": true,
-            "type": "object",
-            "title": "Outputs",
-            "default": {}
-          },
-          "credentials": {
-            "items": { "$ref": "#/components/schemas/CredentialsMetaInput" },
-            "type": "array",
-            "title": "Credentials",
-            "default": []
-          }
-        },
-        "type": "object",
-        "required": ["id", "name", "description"],
-        "title": "BlockDetails",
-        "description": "Detailed block information."
-      },
-      "BlockDetailsResponse": {
-        "properties": {
-          "type": {
-            "$ref": "#/components/schemas/ResponseType",
-            "default": "block_details"
-          },
-          "message": { "type": "string", "title": "Message" },
-          "session_id": {
-            "anyOf": [{ "type": "string" }, { "type": "null" }],
-            "title": "Session Id"
-          },
-          "block": { "$ref": "#/components/schemas/BlockDetails" },
-          "user_authenticated": {
-            "type": "boolean",
-            "title": "User Authenticated",
-            "default": false
-          }
-        },
-        "type": "object",
-        "required": ["message", "block"],
-        "title": "BlockDetailsResponse",
-        "description": "Response for block details (first run_block attempt)."
-      },
       "BlockInfo": {
         "properties": {
           "id": { "type": "string", "title": "Id" },
@@ -7066,13 +7013,62 @@
         "properties": {
           "id": { "type": "string", "title": "Id" },
           "name": { "type": "string", "title": "Name" },
-          "description": { "type": "string", "title": "Description" }
+          "description": { "type": "string", "title": "Description" },
+          "categories": {
+            "items": { "type": "string" },
+            "type": "array",
+            "title": "Categories"
+          },
+          "input_schema": {
+            "additionalProperties": true,
+            "type": "object",
+            "title": "Input Schema"
+          },
+          "output_schema": {
+            "additionalProperties": true,
+            "type": "object",
+            "title": "Output Schema"
+          },
+          "required_inputs": {
+            "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
+            "type": "array",
+            "title": "Required Inputs",
+            "description": "List of required input fields for this block"
+          }
         },
         "type": "object",
-        "required": ["id", "name", "description"],
+        "required": [
+          "id",
+          "name",
+          "description",
+          "categories",
+          "input_schema",
+          "output_schema"
+        ],
         "title": "BlockInfoSummary",
         "description": "Summary of a block for search results."
       },
+      "BlockInputFieldInfo": {
+        "properties": {
+          "name": { "type": "string", "title": "Name" },
+          "type": { "type": "string", "title": "Type" },
+          "description": {
+            "type": "string",
+            "title": "Description",
+            "default": ""
+          },
+          "required": {
+            "type": "boolean",
+            "title": "Required",
+            "default": false
+          },
+          "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" }
+        },
+        "type": "object",
+        "required": ["name", "type"],
+        "title": "BlockInputFieldInfo",
+        "description": "Information about a block input field."
+      },
       "BlockListResponse": {
         "properties": {
           "type": {
@@ -7090,7 +7086,12 @@
             "title": "Blocks"
           },
           "count": { "type": "integer", "title": "Count" },
-          "query": { "type": "string", "title": "Query" }
+          "query": { "type": "string", "title": "Query" },
+          "usage_hint": {
+            "type": "string",
+            "title": "Usage Hint",
+            "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
+          }
         },
         "type": "object",
         "required": ["message", "blocks", "count", "query"],
@@ -10483,7 +10484,6 @@
             "agent_saved",
             "clarification_needed",
             "block_list",
-            "block_details",
             "block_output",
             "doc_search_results",
             "doc_page",
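For orientation, the new `BlockInputFieldInfo` schema corresponds to a client-side shape roughly like the one below. This is a hand-written illustration; the frontend generates its API types, so the generated names and optionality may differ:

```ts
interface BlockInputFieldInfo {
  name: string; // required by the schema
  type: string; // required by the schema
  description?: string; // schema default: ""
  required?: boolean; // schema default: false
  default?: unknown; // schema allows anyOf [{}, null]
}
```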
@@ -180,14 +180,3 @@ body[data-google-picker-open="true"] [data-dialog-content] {
   z-index: 1 !important;
   pointer-events: none !important;
 }
-
-/* CoPilot chat table styling — remove left/right borders, increase padding */
-[data-streamdown="table-wrapper"] table {
-  border-left: none;
-  border-right: none;
-}
-
-[data-streamdown="table-wrapper"] th,
-[data-streamdown="table-wrapper"] td {
-  padding: 0.875rem 1rem; /* py-3.5 px-4 */
-}
@@ -30,7 +30,6 @@ export function APIKeyCredentialsModal({
   const {
     form,
     isLoading,
-    isSubmitting,
     supportsApiKey,
     providerName,
     schemaDescription,
@@ -139,12 +138,7 @@ export function APIKeyCredentialsModal({
             />
           )}
         />
-        <Button
-          type="submit"
-          className="min-w-68"
-          loading={isSubmitting}
-          disabled={isSubmitting}
-        >
+        <Button type="submit" className="min-w-68">
           Add API Key
         </Button>
       </form>
@@ -4,7 +4,6 @@ import {
   CredentialsMetaInput,
 } from "@/lib/autogpt-server-api/types";
 import { zodResolver } from "@hookform/resolvers/zod";
-import { useState } from "react";
 import { useForm, type UseFormReturn } from "react-hook-form";
 import { z } from "zod";

@@ -27,7 +26,6 @@ export function useAPIKeyCredentialsModal({
 }: Args): {
   form: UseFormReturn<APIKeyFormValues>;
   isLoading: boolean;
-  isSubmitting: boolean;
   supportsApiKey: boolean;
   provider?: string;
   providerName?: string;
@@ -35,7 +33,6 @@ export function useAPIKeyCredentialsModal({
   onSubmit: (values: APIKeyFormValues) => Promise<void>;
 } {
   const credentials = useCredentials(schema, siblingInputs);
-  const [isSubmitting, setIsSubmitting] = useState(false);

   const formSchema = z.object({
     apiKey: z.string().min(1, "API Key is required"),
@@ -43,42 +40,48 @@ export function useAPIKeyCredentialsModal({
     expiresAt: z.string().optional(),
   });

+  function getDefaultExpirationDate(): string {
+    const tomorrow = new Date();
+    tomorrow.setDate(tomorrow.getDate() + 1);
+    tomorrow.setHours(0, 0, 0, 0);
+    const year = tomorrow.getFullYear();
+    const month = String(tomorrow.getMonth() + 1).padStart(2, "0");
+    const day = String(tomorrow.getDate()).padStart(2, "0");
+    const hours = String(tomorrow.getHours()).padStart(2, "0");
+    const minutes = String(tomorrow.getMinutes()).padStart(2, "0");
+    return `${year}-${month}-${day}T${hours}:${minutes}`;
+  }
+
   const form = useForm<APIKeyFormValues>({
     resolver: zodResolver(formSchema),
     defaultValues: {
       apiKey: "",
       title: "",
-      expiresAt: "",
+      expiresAt: getDefaultExpirationDate(),
     },
   });

   async function onSubmit(values: APIKeyFormValues) {
     if (!credentials || credentials.isLoading) return;
-    setIsSubmitting(true);
-    try {
-      const expiresAt = values.expiresAt
-        ? new Date(values.expiresAt).getTime() / 1000
-        : undefined;
-      const newCredentials = await credentials.createAPIKeyCredentials({
-        api_key: values.apiKey,
-        title: values.title,
-        expires_at: expiresAt,
-      });
-      onCredentialsCreate({
-        provider: credentials.provider,
-        id: newCredentials.id,
-        type: "api_key",
-        title: newCredentials.title,
-      });
-    } finally {
-      setIsSubmitting(false);
-    }
+    const expiresAt = values.expiresAt
+      ? new Date(values.expiresAt).getTime() / 1000
+      : undefined;
+    const newCredentials = await credentials.createAPIKeyCredentials({
+      api_key: values.apiKey,
+      title: values.title,
+      expires_at: expiresAt,
+    });
+    onCredentialsCreate({
+      provider: credentials.provider,
+      id: newCredentials.id,
+      type: "api_key",
+      title: newCredentials.title,
+    });
   }

   return {
     form,
     isLoading: !credentials || credentials.isLoading,
-    isSubmitting,
     supportsApiKey: !!credentials?.supportsApiKey,
     provider: credentials?.provider,
     providerName:
|||||||
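The `getDefaultExpirationDate()` added above builds a `YYYY-MM-DDTHH:mm` string from local date parts, which is the value format a `<input type="datetime-local">` field expects; assembling it manually rather than via `toISOString()` keeps the prefill at local midnight instead of UTC, and `new Date(values.expiresAt)` in `onSubmit` parses it back in local time consistently. A quick standalone check of that shape, copying the function as defined in the diff (the example output date is illustrative):

```ts
// Builds "midnight tomorrow, local time" in datetime-local input format.
function getDefaultExpirationDate(): string {
  const tomorrow = new Date();
  tomorrow.setDate(tomorrow.getDate() + 1);
  tomorrow.setHours(0, 0, 0, 0);
  const year = tomorrow.getFullYear();
  const month = String(tomorrow.getMonth() + 1).padStart(2, "0");
  const day = String(tomorrow.getDate()).padStart(2, "0");
  const hours = String(tomorrow.getHours()).padStart(2, "0");
  const minutes = String(tomorrow.getMinutes()).padStart(2, "0");
  return `${year}-${month}-${day}T${hours}:${minutes}`;
}

console.log(getDefaultExpirationDate()); // e.g. "2026-02-14T00:00"
// Matches the value format of <input type="datetime-local">:
console.log(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}$/.test(getDefaultExpirationDate())); // true
```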
@@ -226,7 +226,7 @@ function renderMarkdown(
     table: ({ children, ...props }) => (
       <div className="my-4 overflow-x-auto">
         <table
-          className="min-w-full divide-y divide-gray-200 border-y border-gray-200 dark:divide-gray-700 dark:border-gray-700"
+          className="min-w-full divide-y divide-gray-200 rounded-lg border border-gray-200 dark:divide-gray-700 dark:border-gray-700"
           {...props}
         >
           {children}
@@ -235,7 +235,7 @@ function renderMarkdown(
     ),
     th: ({ children, ...props }) => (
       <th
-        className="bg-gray-50 px-4 py-3.5 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
+        className="bg-gray-50 px-4 py-3 text-left text-xs font-semibold uppercase tracking-wider text-gray-700 dark:bg-gray-800 dark:text-gray-300"
         {...props}
       >
         {children}
@@ -243,7 +243,7 @@ function renderMarkdown(
     ),
     td: ({ children, ...props }) => (
       <td
-        className="border-t border-gray-200 px-4 py-3.5 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
+        className="border-t border-gray-200 px-4 py-3 text-sm text-gray-600 dark:border-gray-700 dark:text-gray-400"
         {...props}
       >
         {children}
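For context, the `table`/`th`/`td` entries edited in the three hunks above form a component-override map: the renderer maps HTML tag names to custom React components, and the diff only tweaks their Tailwind classes. A minimal sketch of that pattern using react-markdown with remark-gfm, chosen here purely for illustration (the renderer `renderMarkdown` actually wraps may differ):

```tsx
// Illustrative only: the `components` prop maps tag names to renderers,
// which is the same override pattern the hunks above are editing.
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm"; // GFM plugin enables pipe tables

const markdown = "| a | b |\n|---|---|\n| 1 | 2 |";

export function TableDemo() {
  return (
    <ReactMarkdown
      remarkPlugins={[remarkGfm]}
      components={{
        // `node` is the mdast node; destructure it out so it isn't
        // spread onto the DOM element.
        table: ({ node, children, ...props }) => (
          <table className="min-w-full rounded-lg border" {...props}>
            {children}
          </table>
        ),
        td: ({ node, children, ...props }) => (
          <td className="px-4 py-3 text-sm" {...props}>
            {children}
          </td>
        ),
      }}
    >
      {markdown}
    </ReactMarkdown>
  );
}
```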
@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
 |--------|-------------|------|
 | error | Error message if execution failed | str |
 | response | The output/response from Claude Code execution | str |
-| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] |
+| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
 | conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
 | session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
 | sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |
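The outputs table above implies a simple decision for continuing a conversation: reuse `session_id` plus `sandbox_id` while the sandbox is still alive, and fall back to replaying `conversation_history` on a fresh sandbox when it was disposed or timed out. A hedged sketch of that decision; the type and helper names are illustrative, not the block's actual API:

```ts
// Illustrative shapes only; field names follow the outputs table above.
type ClaudeCodeOutput = {
  response: string;
  conversation_history: string;
  session_id: string;
  sandbox_id: string | null; // null when dispose_sandbox was true
};

// Decide which inputs to feed the next block run to continue the chat.
function nextTurnInputs(prev: ClaudeCodeOutput) {
  if (prev.sandbox_id) {
    // Sandbox still alive: resume the existing session in place.
    return { session_id: prev.session_id, sandbox_id: prev.sandbox_id };
  }
  // Sandbox disposed or timed out: replay history on a fresh sandbox.
  return { conversation_history: prev.conversation_history };
}
```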
@@ -215,7 +215,6 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t
 | response | Text output (if any) of the main execution result | str |
 | stdout_logs | Standard output logs from execution | str |
 | stderr_logs | Standard error logs from execution | str |
-| files | Files created or modified during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] |
 
 ### Possible use case
 <!-- MANUAL: use_case -->