mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-13 08:14:58 -05:00

Compare commits: fix/copilo...swiftyos/s (11 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 524439965c |  |
|  | c8f08b4659 |  |
|  | 16061aefc6 |  |
|  | 0002cf9249 |  |
|  | a1ff1a109e |  |
|  | 43b25b5e2f |  |
|  | ab0b537cc7 |  |
|  | 5baa04f90e |  |
|  | 9a8c6ad609 |  |
|  | af2e09372f |  |
|  | f67b5bdb91 |  |
@@ -5,42 +5,13 @@
!docs/

# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md
!autogpt_platform/autogpt_libs/

# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
!autogpt_platform/backend/gen_prisma_types_stub.py

# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md
!autogpt_platform/backend/

# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env
!autogpt_platform/frontend/

# Classic - AutoGPT
!classic/original_autogpt/autogpt/
@@ -64,6 +35,38 @@
# Classic - Frontend
!classic/frontend/build/web/

# Explicitly re-ignore some folders
.*
**/__pycache__
# Explicitly re-ignore unwanted files from whitelisted directories
# Note: These patterns MUST come after the whitelist rules to take effect

# Hidden files and directories (but keep frontend .env files needed for build)
**/.*
!autogpt_platform/frontend/.env
!autogpt_platform/frontend/.env.default
!autogpt_platform/frontend/.env.production

# Python artifacts
**/__pycache__/
**/*.pyc
**/*.pyo
**/.venv/
**/.ruff_cache/
**/.pytest_cache/
**/.coverage
**/htmlcov/

# Node artifacts
**/node_modules/
**/.next/
**/storybook-static/
**/playwright-report/
**/test-results/

# Build artifacts
**/dist/
**/build/
!autogpt_platform/frontend/src/**/build/
**/target/

# Logs and temp files
**/*.log
**/*.tmp
||||
241
.github/workflows/platform-frontend-ci.yml
vendored
241
.github/workflows/platform-frontend-ci.yml
vendored
@@ -26,7 +26,6 @@ jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}
      components-changed: ${{ steps.filter.outputs.components }}

    steps:
@@ -41,28 +40,17 @@ jobs:
          components:
            - 'autogpt_platform/frontend/src/components/**'

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v5
      - name: Set up Node
        uses: actions/setup-node@v6
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
      - name: Install dependencies to populate cache
        run: pnpm install --frozen-lockfile

  lint:
@@ -73,22 +61,15 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v5
      - name: Set up Node
        uses: actions/setup-node@v6
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
@@ -111,22 +92,15 @@ jobs:
        with:
          fetch-depth: 0

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v5
      - name: Set up Node
        uses: actions/setup-node@v6
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
@@ -141,10 +115,8 @@ jobs:
          exitOnceUploaded: true

  e2e_test:
    name: end-to-end tests
    runs-on: big-boi
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
@@ -152,19 +124,11 @@ jobs:
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
      - name: Set up Platform - Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Copy backend .env and set OpenAI API key
      - name: Set up Platform - Copy backend .env and set OpenAI API key
        run: |
          cp ../backend/.env.default ../backend/.env
          echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -172,77 +136,125 @@ jobs:
          # Used by E2E test data script to generate embeddings for approved store agents
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Set up Docker Buildx
      - name: Set up Platform - Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: docker-container
          driver-opts: network=host

      - name: Cache Docker layers
      - name: Set up Platform - Expose GHA cache to docker buildx CLI
        uses: crazy-max/ghaction-github-runtime@v3

      - name: Set up Platform - Build Docker images (with cache)
        working-directory: autogpt_platform
        run: |
          pip install pyyaml

          # Resolve extends and generate a flat compose file that bake can understand
          docker compose -f docker-compose.yml config > docker-compose.resolved.yml

          # Add cache configuration to the resolved compose file
          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
            --source docker-compose.resolved.yml \
            --cache-from "type=gha" \
            --cache-to "type=gha,mode=max" \
            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
            --git-ref "${{ github.ref }}"

          # Build with bake using the resolved compose file (now includes cache config)
          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Cache E2E test data
        id: e2e-data-cache
        uses: actions/cache@v5
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
          restore-keys: |
            ${{ runner.os }}-buildx-frontend-test-
          path: /tmp/e2e_test_data.sql
          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}

      - name: Run docker compose
      - name: Set up Platform - Start Supabase DB + Auth
        run: |
          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
          echo "Waiting for auth service to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."

      - name: Set up Platform - Run migrations
        run: |
          echo "Running migrations..."
          docker compose -f ../docker-compose.resolved.yml run --rm migrate
          echo "✅ Migrations completed"
        env:
          DOCKER_BUILDKIT: 1
          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
          NEXT_PUBLIC_PW_TEST: true

      - name: Move cache
      - name: Set up tests - Load cached E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
        run: |
          rm -rf /tmp/.buildx-cache
          if [ -d "/tmp/.buildx-cache-new" ]; then
            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
          fi
          echo "✅ Found cached E2E test data, restoring..."
          {
            echo "SET session_replication_role = 'replica';"
            cat /tmp/e2e_test_data.sql
            echo "SET session_replication_role = 'origin';"
          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
          # Refresh materialized views after restore
          docker compose -f ../docker-compose.resolved.yml exec -T db \
            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true

      - name: Wait for services to be ready
          echo "✅ E2E test data restored from cache"

      - name: Set up Platform - Start (all other services)
        run: |
          docker compose -f ../docker-compose.resolved.yml up -d --no-build
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Create E2E test data
      - name: Set up tests - Create E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
        run: |
          echo "Creating E2E test data..."
          # First try to run the script from inside the container
          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
            echo "✅ Found e2e_test_data.py in container, running it..."
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          else
            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
            # Copy the script into the container and run it
            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
              echo "❌ Failed to copy script to container"
              exit 1
            }
            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
              echo "❌ E2E test data creation failed!"
              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
              exit 1
            }
          fi
          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
            echo "❌ E2E test data creation failed!"
            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
            exit 1
          }

      - name: Restore dependencies cache
        uses: actions/cache@v5
          # Dump auth.users + platform schema for cache (two separate dumps)
          echo "Dumping database for cache..."
          {
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --table='auth.users' postgres
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --schema=platform \
              --exclude-table='platform._prisma_migrations' \
              --exclude-table='platform.apscheduler_jobs' \
              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
              postgres
          } > /tmp/e2e_test_data.sql

          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"

      - name: Set up tests - Enable corepack
        run: corepack enable

      - name: Set up tests - Set up Node
        uses: actions/setup-node@v6
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
      - name: Set up tests - Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Install Browser 'chromium'
      - name: Set up tests - Install browser 'chromium'
        run: pnpm playwright install --with-deps chromium

      - name: Run Playwright tests
@@ -269,7 +281,7 @@ jobs:

      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.yml logs
        run: docker compose -f ../docker-compose.resolved.yml logs

  integration_test:
    runs-on: ubuntu-latest
@@ -281,22 +293,15 @@ jobs:
        with:
          submodules: recursive

      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"

      - name: Enable corepack
        run: corepack enable

      - name: Restore dependencies cache
        uses: actions/cache@v5
      - name: Set up Node
        uses: actions/setup-node@v6
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile
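The "Build Docker images (with cache)" step above leans on a new helper script (shown in full in the next file) that rewrites the resolved compose file so each buildable service carries explicit buildx cache settings. A minimal sketch of the config shape the script injects for one service; the hash and branch values are illustrative, not taken from the PR:

```python
# Illustrative only: shape of the cache config injected into the resolved
# compose file for a backend service built with target "server".
service = {
    "build": {
        "dockerfile": "backend/Dockerfile",
        "target": "server",
        "cache_from": [
            # 1. exact content-hash scope (most specific)
            "type=gha,scope=platform-backend-server-abc123",
            # 2. current-branch scope
            "type=gha,scope=platform-backend-server-my-feature",
            # 3. default-branch ("dev") fallback for PRs/feature branches
            "type=gha,scope=platform-backend-server-dev",
        ],
        "cache_to": [
            "type=gha,mode=max,scope=platform-backend-server-abc123",
            "type=gha,mode=max,scope=platform-backend-server-my-feature",
        ],
    },
    # The image name is pinned so duplicate services can reuse the built image.
    "image": "autogpt_platform-backend:server",
}
```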
.github/workflows/scripts/docker-ci-fix-compose-build-cache.py (vendored, new file, 195 changes)
@@ -0,0 +1,195 @@
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""

import argparse

import yaml

DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []

    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)

        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)

        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name

        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []

        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue

        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123

        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")

            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")

            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")

        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")

            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)

        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f"  - {svc}")
        print(f"    image: {svc_config.get('image', 'N/A')}")
        print(f"    cache_from: {cache_from_list}")
        print(f"    cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()
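A hypothetical harness for trying the script above locally; the service names, hash, and branch are made up, and it assumes the script path from this PR plus PyYAML installed:

```python
# Run docker-ci-fix-compose-build-cache.py against a toy resolved compose file.
import os
import subprocess
import tempfile

import yaml

compose = {
    "name": "autogpt_platform",
    "services": {
        # First service with this Dockerfile+target does the real build...
        "rest_server": {
            "build": {"context": "..", "dockerfile": "backend/Dockerfile", "target": "server"}
        },
        # ...and this duplicate should be deduplicated to reuse its image.
        "scheduler": {
            "build": {"context": "..", "dockerfile": "backend/Dockerfile", "target": "server"}
        },
    },
}

with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as f:
    yaml.dump(compose, f)
    path = f.name

subprocess.run(
    [
        "python", ".github/workflows/scripts/docker-ci-fix-compose-build-cache.py",
        "--source", path,
        "--backend-hash", "abc123",            # stand-in for hashFiles(...)
        "--git-ref", "refs/heads/my-feature",  # stand-in for github.ref
    ],
    check=True,
)

# rest_server now carries cache_from/cache_to; scheduler lost its build key
# and points at the image "autogpt_platform-backend:server".
print(open(path).read())
os.unlink(path)
```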
@@ -45,6 +45,11 @@ AutoGPT Platform is a monorepo containing:
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

### Branching Strategy

- **`dev`** is the main development branch. All PRs should target `dev`.
- **`master`** is the production branch. Only used for production releases.

### Creating Pull Requests

- Create the PR against the `dev` branch of the repository.
autogpt_platform/autogpt_libs/poetry.lock (generated, 169 changes)
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]

[[package]]
name = "cryptography"
version = "46.0.4"
version = "46.0.5"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main"]
files = [
    {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
    {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
    {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
    {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
    {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
    {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
    {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
    {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
    {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
    {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
    {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
    {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
    {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
    {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
    {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
    {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
    {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
    {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
    {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
    {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
    {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
    {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
    {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
    {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
    {file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
    {file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
    {file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
    {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
    {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
    {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
    {file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
    {file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
    {file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
    {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
    {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
    {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
    {file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
    {file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
    {file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
    {file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
]

[package.dependencies]
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]

[[package]]
@@ -570,24 +570,25 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]

[[package]]
name = "fastapi"
version = "0.128.0"
version = "0.128.7"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
    {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
    {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
    {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
]

[package.dependencies]
annotated-doc = ">=0.0.2"
pydantic = ">=2.7.0"
starlette = ">=0.40.0,<0.51.0"
starlette = ">=0.40.0,<1.0.0"
typing-extensions = ">=4.8.0"
typing-inspection = ">=0.4.2"

[package.extras]
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]

@@ -1062,14 +1063,14 @@ urllib3 = ">=1.26.0,<3"

[[package]]
name = "launchdarkly-server-sdk"
version = "9.14.1"
version = "9.15.0"
description = "LaunchDarkly SDK for Python"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
]

[package.dependencies]
@@ -1478,14 +1479,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]

[[package]]
name = "postgrest"
version = "2.27.2"
version = "2.28.0"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
    {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
]

[package.dependencies]
@@ -2248,14 +2249,14 @@ cli = ["click (>=5.0)"]

[[package]]
name = "realtime"
version = "2.27.2"
version = "2.28.0"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
    {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
]

[package.dependencies]
@@ -2436,14 +2437,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart

[[package]]
name = "storage3"
version = "2.27.2"
version = "2.28.0"
description = "Supabase Storage client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
    {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
]

[package.dependencies]
@@ -2487,35 +2488,35 @@ python-dateutil = ">=2.6.0"

[[package]]
name = "supabase"
version = "2.27.2"
version = "2.28.0"
description = "Supabase client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
    {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
]

[package.dependencies]
httpx = ">=0.26,<0.29"
postgrest = "2.27.2"
realtime = "2.27.2"
storage3 = "2.27.2"
supabase-auth = "2.27.2"
supabase-functions = "2.27.2"
postgrest = "2.28.0"
realtime = "2.28.0"
storage3 = "2.28.0"
supabase-auth = "2.28.0"
supabase-functions = "2.28.0"
yarl = ">=1.22.0"

[[package]]
name = "supabase-auth"
version = "2.27.2"
version = "2.28.0"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
    {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
]

[package.dependencies]
@@ -2525,14 +2526,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}

[[package]]
name = "supabase-functions"
version = "2.27.2"
version = "2.28.0"
description = "Library for Supabase Functions"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
    {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
]

[package.dependencies]
@@ -2911,4 +2912,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"
@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.0"
fastapi = "^0.128.7"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.14.1"
launchdarkly-server-sdk = "^9.15.0"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.27.2"
supabase = "^2.28.0"
uvicorn = "^0.40.0"

[tool.poetry.group.dev.dependencies]
@@ -104,6 +104,12 @@ TWITTER_CLIENT_SECRET=
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_API_KEY=
# Linear project and team IDs for the feature request tracker.
# Find these in your Linear workspace URL: linear.app/<workspace>/project/<project-id>
# and in team settings. Used by the chat copilot to file and search feature requests.
LINEAR_FEATURE_REQUEST_PROJECT_ID=
LINEAR_FEATURE_REQUEST_TEAM_ID=
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=
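A small, hypothetical sketch of consuming the new variables; the variable names match the .env entries above, but the validation logic is illustrative, not code from this PR:

```python
# Read the Linear feature-request settings added above from the environment.
import os

project_id = os.environ.get("LINEAR_FEATURE_REQUEST_PROJECT_ID", "")
team_id = os.environ.get("LINEAR_FEATURE_REQUEST_TEAM_ID", "")

if not (project_id and team_id):
    # Without both IDs the copilot cannot file or search feature requests.
    raise RuntimeError(
        "Set LINEAR_FEATURE_REQUEST_PROJECT_ID and LINEAR_FEATURE_REQUEST_TEAM_ID"
    )
```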
@@ -1,3 +1,5 @@
|
||||
# ============================ DEPENDENCY BUILDER ============================ #
|
||||
|
||||
FROM debian:13-slim AS builder
|
||||
|
||||
# Set environment variables
|
||||
@@ -51,7 +53,9 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
|
||||
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
|
||||
RUN poetry run prisma generate && poetry run gen-prisma-stub
|
||||
|
||||
FROM debian:13-slim AS server_dependencies
|
||||
# ============================== BACKEND SERVER ============================== #
|
||||
|
||||
FROM debian:13-slim AS server
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
@@ -63,15 +67,14 @@ ENV POETRY_HOME=/opt/poetry \
|
||||
ENV PATH=/opt/poetry/bin:$PATH
|
||||
|
||||
# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
|
||||
RUN apt-get update && apt-get install -y \
|
||||
# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3.13 \
|
||||
python3-pip \
|
||||
ffmpeg \
|
||||
imagemagick \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy only necessary files from builder
|
||||
COPY --from=builder /app /app
|
||||
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
|
||||
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
|
||||
# Copy Node.js installation for Prisma
|
||||
@@ -81,30 +84,54 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
|
||||
COPY --from=builder /usr/bin/npx /usr/bin/npx
|
||||
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
|
||||
|
||||
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
|
||||
|
||||
RUN mkdir -p /app/autogpt_platform/autogpt_libs
|
||||
RUN mkdir -p /app/autogpt_platform/backend
|
||||
|
||||
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
|
||||
|
||||
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
|
||||
|
||||
WORKDIR /app/autogpt_platform/backend
|
||||
|
||||
FROM server_dependencies AS migrate
|
||||
# Copy only the .venv from builder (not the entire /app directory)
|
||||
# The .venv includes the generated Prisma client
|
||||
COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
|
||||
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
|
||||
|
||||
# Migration stage only needs schema and migrations - much lighter than full backend
|
||||
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
|
||||
COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
|
||||
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
|
||||
# Copy dependency files + autogpt_libs (path dependency)
|
||||
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
|
||||
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
|
||||
|
||||
FROM server_dependencies AS server
|
||||
|
||||
COPY autogpt_platform/backend /app/autogpt_platform/backend
|
||||
# Copy backend code + docs (for Copilot docs search)
|
||||
COPY autogpt_platform/backend ./
|
||||
COPY docs /app/docs
|
||||
RUN poetry install --no-ansi --only-root
|
||||
|
||||
ENV PORT=8000
|
||||
|
||||
CMD ["poetry", "run", "rest"]
|
||||
|
||||
# =============================== DB MIGRATOR =============================== #
|
||||
|
||||
# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
|
||||
FROM debian:13-slim AS migrate
|
||||
|
||||
WORKDIR /app/autogpt_platform/backend
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3.13 \
|
||||
python3-pip \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy Node.js from builder (needed for Prisma CLI)
|
||||
COPY --from=builder /usr/bin/node /usr/bin/node
|
||||
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
|
||||
COPY --from=builder /usr/bin/npm /usr/bin/npm
|
||||
|
||||
# Copy Prisma binaries
|
||||
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
|
||||
|
||||
# Install prisma-client-py directly (much smaller than copying full venv)
|
||||
RUN pip3 install "prisma>=0.15.0" --break-system-packages

COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
COPY autogpt_platform/backend/migrations ./migrations

@@ -24,6 +24,7 @@ from .tools.models import (
    AgentPreviewResponse,
    AgentSavedResponse,
    AgentsFoundResponse,
    BlockDetailsResponse,
    BlockListResponse,
    BlockOutputResponse,
    ClarificationNeededResponse,
@@ -971,6 +972,7 @@ ToolResponseUnion = (
    | AgentSavedResponse
    | ClarificationNeededResponse
    | BlockListResponse
    | BlockDetailsResponse
    | BlockOutputResponse
    | DocSearchResultsResponse
    | DocPageResponse

@@ -12,6 +12,7 @@ from .base import BaseTool
from .create_agent import CreateAgentTool
from .customize_agent import CustomizeAgentTool
from .edit_agent import EditAgentTool
from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool
from .find_agent import FindAgentTool
from .find_block import FindBlockTool
from .find_library_agent import FindLibraryAgentTool
@@ -45,6 +46,9 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
    "view_agent_output": AgentOutputTool(),
    "search_docs": SearchDocsTool(),
    "get_doc_page": GetDocPageTool(),
    # Feature request tools
    "search_feature_requests": SearchFeatureRequestsTool(),
    "create_feature_request": CreateFeatureRequestTool(),
    # Workspace tools for CoPilot file operations
    "list_workspace_files": ListWorkspaceFilesTool(),
    "read_workspace_file": ReadWorkspaceFileTool(),

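For orientation, a minimal dispatch sketch (not part of the diff) of how a chat handler can resolve one of the newly registered names against TOOL_REGISTRY; `call_name` and `arguments` are illustrative assumptions:

from backend.api.features.chat.tools import TOOL_REGISTRY


async def dispatch(call_name: str, arguments: dict, user_id: str, session):
    # Illustrative only: look up a registered tool by name and run it.
    tool = TOOL_REGISTRY.get(call_name)  # e.g. "search_feature_requests"
    if tool is None:
        raise ValueError(f"Unknown tool: {call_name}")
    return await tool._execute(user_id=user_id, session=session, **arguments)
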
@@ -0,0 +1,448 @@
"""Feature request tools - search and create feature requests via Linear."""

import logging
from typing import Any

from pydantic import SecretStr

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    FeatureRequestCreatedResponse,
    FeatureRequestInfo,
    FeatureRequestSearchResponse,
    NoResultsResponse,
    ToolResponseBase,
)
from backend.blocks.linear._api import LinearClient
from backend.data.model import APIKeyCredentials
from backend.data.user import get_user_email_by_id
from backend.util.settings import Settings

logger = logging.getLogger(__name__)

MAX_SEARCH_RESULTS = 10

# GraphQL queries/mutations
SEARCH_ISSUES_QUERY = """
query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) {
  searchIssues(term: $term, filter: $filter, first: $first) {
    nodes {
      id
      identifier
      title
      description
    }
  }
}
"""

CUSTOMER_UPSERT_MUTATION = """
mutation CustomerUpsert($input: CustomerUpsertInput!) {
  customerUpsert(input: $input) {
    success
    customer {
      id
      name
      externalIds
    }
  }
}
"""

ISSUE_CREATE_MUTATION = """
mutation IssueCreate($input: IssueCreateInput!) {
  issueCreate(input: $input) {
    success
    issue {
      id
      identifier
      title
      url
    }
  }
}
"""

CUSTOMER_NEED_CREATE_MUTATION = """
mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) {
  customerNeedCreate(input: $input) {
    success
    need {
      id
      body
      customer {
        id
        name
      }
      issue {
        id
        identifier
        title
        url
      }
    }
  }
}
"""


_settings: Settings | None = None


def _get_settings() -> Settings:
    global _settings
    if _settings is None:
        _settings = Settings()
    return _settings


def _get_linear_config() -> tuple[LinearClient, str, str]:
    """Return a configured Linear client, project ID, and team ID.

    Raises RuntimeError if any required setting is missing.
    """
    secrets = _get_settings().secrets
    if not secrets.linear_api_key:
        raise RuntimeError("LINEAR_API_KEY is not configured")
    if not secrets.linear_feature_request_project_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_PROJECT_ID is not configured")
    if not secrets.linear_feature_request_team_id:
        raise RuntimeError("LINEAR_FEATURE_REQUEST_TEAM_ID is not configured")

    credentials = APIKeyCredentials(
        id="system-linear",
        provider="linear",
        api_key=SecretStr(secrets.linear_api_key),
        title="System Linear API Key",
    )
    client = LinearClient(credentials=credentials)
    return (
        client,
        secrets.linear_feature_request_project_id,
        secrets.linear_feature_request_team_id,
    )


class SearchFeatureRequestsTool(BaseTool):
    """Tool for searching existing feature requests in Linear."""

    @property
    def name(self) -> str:
        return "search_feature_requests"

    @property
    def description(self) -> str:
        return (
            "Search existing feature requests to check if a similar request "
            "already exists before creating a new one. Returns matching feature "
            "requests with their ID, title, and description."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search term to find matching feature requests.",
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        query = kwargs.get("query", "").strip()
        session_id = session.session_id if session else None

        if not query:
            return ErrorResponse(
                message="Please provide a search query.",
                error="Missing query parameter",
                session_id=session_id,
            )

        try:
            client, project_id, _team_id = _get_linear_config()
            data = await client.query(
                SEARCH_ISSUES_QUERY,
                {
                    "term": query,
                    "filter": {
                        "project": {"id": {"eq": project_id}},
                    },
                    "first": MAX_SEARCH_RESULTS,
                },
            )

            nodes = data.get("searchIssues", {}).get("nodes", [])

            if not nodes:
                return NoResultsResponse(
                    message=f"No feature requests found matching '{query}'.",
                    suggestions=[
                        "Try different keywords",
                        "Use broader search terms",
                        "You can create a new feature request if none exists",
                    ],
                    session_id=session_id,
                )

            results = [
                FeatureRequestInfo(
                    id=node["id"],
                    identifier=node["identifier"],
                    title=node["title"],
                    description=node.get("description"),
                )
                for node in nodes
            ]

            return FeatureRequestSearchResponse(
                message=f"Found {len(results)} feature request(s) matching '{query}'.",
                results=results,
                count=len(results),
                query=query,
                session_id=session_id,
            )
        except Exception as e:
            logger.exception("Failed to search feature requests")
            return ErrorResponse(
                message="Failed to search feature requests.",
                error=str(e),
                session_id=session_id,
            )


class CreateFeatureRequestTool(BaseTool):
    """Tool for creating feature requests (or adding needs to existing ones)."""

    @property
    def name(self) -> str:
        return "create_feature_request"

    @property
    def description(self) -> str:
        return (
            "Create a new feature request or add a customer need to an existing one. "
            "Always search first with search_feature_requests to avoid duplicates. "
            "If a matching request exists, pass its ID as existing_issue_id to add "
            "the user's need to it instead of creating a duplicate."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "title": {
                    "type": "string",
                    "description": "Title for the feature request.",
                },
                "description": {
                    "type": "string",
                    "description": "Detailed description of what the user wants and why.",
                },
                "existing_issue_id": {
                    "type": "string",
                    "description": (
                        "If adding a need to an existing feature request, "
                        "provide its Linear issue ID (from search results). "
                        "Omit to create a new feature request."
                    ),
                },
            },
            "required": ["title", "description"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _find_or_create_customer(
        self, client: LinearClient, user_id: str, name: str
    ) -> dict:
        """Find existing customer by user_id or create a new one via upsert.

        Args:
            client: Linear API client.
            user_id: Stable external ID used to deduplicate customers.
            name: Human-readable display name (e.g. the user's email).
        """
        data = await client.mutate(
            CUSTOMER_UPSERT_MUTATION,
            {
                "input": {
                    "name": name,
                    "externalId": user_id,
                },
            },
        )
        result = data.get("customerUpsert", {})
        if not result.get("success"):
            raise RuntimeError(f"Failed to upsert customer: {data}")
        return result["customer"]

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        title = kwargs.get("title", "").strip()
        description = kwargs.get("description", "").strip()
        existing_issue_id = kwargs.get("existing_issue_id")
        session_id = session.session_id if session else None

        if not title or not description:
            return ErrorResponse(
                message="Both title and description are required.",
                error="Missing required parameters",
                session_id=session_id,
            )

        if not user_id:
            return ErrorResponse(
                message="Authentication required to create feature requests.",
                error="Missing user_id",
                session_id=session_id,
            )

        try:
            client, project_id, team_id = _get_linear_config()
        except Exception as e:
            logger.exception("Failed to initialize Linear client")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Resolve a human-readable name (email) for the Linear customer record.
        # Fall back to user_id if the lookup fails or returns None.
        try:
            customer_display_name = await get_user_email_by_id(user_id) or user_id
        except Exception:
            customer_display_name = user_id

        # Step 1: Find or create customer for this user
        try:
            customer = await self._find_or_create_customer(
                client, user_id, customer_display_name
            )
            customer_id = customer["id"]
            customer_name = customer["name"]
        except Exception as e:
            logger.exception("Failed to upsert customer in Linear")
            return ErrorResponse(
                message="Failed to create feature request.",
                error=str(e),
                session_id=session_id,
            )

        # Step 2: Create or reuse issue
        issue_id: str | None = None
        issue_identifier: str | None = None
        if existing_issue_id:
            # Add need to existing issue - we still need the issue details for response
            is_new_issue = False
            issue_id = existing_issue_id
        else:
            # Create new issue in the feature requests project
            try:
                data = await client.mutate(
                    ISSUE_CREATE_MUTATION,
                    {
                        "input": {
                            "title": title,
                            "description": description,
                            "teamId": team_id,
                            "projectId": project_id,
                        },
                    },
                )
                result = data.get("issueCreate", {})
                if not result.get("success"):
                    return ErrorResponse(
                        message="Failed to create feature request issue.",
                        error=str(data),
                        session_id=session_id,
                    )
                issue = result["issue"]
                issue_id = issue["id"]
                issue_identifier = issue.get("identifier")
            except Exception as e:
                logger.exception("Failed to create feature request issue")
                return ErrorResponse(
                    message="Failed to create feature request.",
                    error=str(e),
                    session_id=session_id,
                )
            is_new_issue = True

        # Step 3: Create customer need on the issue
        try:
            data = await client.mutate(
                CUSTOMER_NEED_CREATE_MUTATION,
                {
                    "input": {
                        "customerId": customer_id,
                        "issueId": issue_id,
                        "body": description,
                        "priority": 0,
                    },
                },
            )
            need_result = data.get("customerNeedCreate", {})
            if not need_result.get("success"):
                orphaned = (
                    {"issue_id": issue_id, "issue_identifier": issue_identifier}
                    if is_new_issue
                    else None
                )
                return ErrorResponse(
                    message="Failed to attach customer need to the feature request.",
                    error=str(data),
                    details=orphaned,
                    session_id=session_id,
                )
            need = need_result["need"]
            issue_info = need["issue"]
        except Exception as e:
            logger.exception("Failed to create customer need")
            orphaned = (
                {"issue_id": issue_id, "issue_identifier": issue_identifier}
                if is_new_issue
                else None
            )
            return ErrorResponse(
                message="Failed to attach customer need to the feature request.",
                error=str(e),
                details=orphaned,
                session_id=session_id,
            )

        return FeatureRequestCreatedResponse(
            message=(
                f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'} "
                f"[{issue_info['identifier']}] {issue_info['title']}."
            ),
            issue_id=issue_info["id"],
            issue_identifier=issue_info["identifier"],
            issue_title=issue_info["title"],
            issue_url=issue_info.get("url", ""),
            is_new_issue=is_new_issue,
            customer_name=customer_name,
            session_id=session_id,
        )

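To make the search-then-create flow concrete, a minimal usage sketch (assuming the module path above, a ChatSession instance, and the Linear settings that _get_linear_config() checks; the query and strings are illustrative):

from backend.api.features.chat.tools.feature_requests import (
    CreateFeatureRequestTool,
    SearchFeatureRequestsTool,
)
from backend.api.features.chat.tools.models import FeatureRequestSearchResponse


async def file_request(session, user_id: str):
    # Search first, per the tool description, to avoid duplicate issues.
    found = await SearchFeatureRequestsTool()._execute(
        user_id=user_id, session=session, query="dark mode"
    )
    existing_id = (
        found.results[0].id
        if isinstance(found, FeatureRequestSearchResponse) and found.results
        else None
    )
    # Create a new issue, or attach a customer need to the best match.
    return await CreateFeatureRequestTool()._execute(
        user_id=user_id,
        session=session,
        title="Dark mode",
        description="Please add a dark theme to the builder UI.",
        existing_issue_id=existing_id,
    )
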
@@ -0,0 +1,615 @@
"""Tests for SearchFeatureRequestsTool and CreateFeatureRequestTool."""

from unittest.mock import AsyncMock, patch

import pytest

from backend.api.features.chat.tools.feature_requests import (
    CreateFeatureRequestTool,
    SearchFeatureRequestsTool,
)
from backend.api.features.chat.tools.models import (
    ErrorResponse,
    FeatureRequestCreatedResponse,
    FeatureRequestSearchResponse,
    NoResultsResponse,
)

from ._test_data import make_session

_TEST_USER_ID = "test-user-feature-requests"
_TEST_USER_EMAIL = "testuser@example.com"


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

_FAKE_PROJECT_ID = "test-project-id"
_FAKE_TEAM_ID = "test-team-id"


def _mock_linear_config(*, query_return=None, mutate_return=None):
    """Return a patched _get_linear_config that yields a mock LinearClient."""
    client = AsyncMock()
    if query_return is not None:
        client.query.return_value = query_return
    if mutate_return is not None:
        client.mutate.return_value = mutate_return
    return (
        patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            return_value=(client, _FAKE_PROJECT_ID, _FAKE_TEAM_ID),
        ),
        client,
    )


def _search_response(nodes: list[dict]) -> dict:
    return {"searchIssues": {"nodes": nodes}}


def _customer_upsert_response(
    customer_id: str = "cust-1", name: str = _TEST_USER_EMAIL, success: bool = True
) -> dict:
    return {
        "customerUpsert": {
            "success": success,
            "customer": {"id": customer_id, "name": name, "externalIds": [name]},
        }
    }


def _issue_create_response(
    issue_id: str = "issue-1",
    identifier: str = "FR-1",
    title: str = "New Feature",
    success: bool = True,
) -> dict:
    return {
        "issueCreate": {
            "success": success,
            "issue": {
                "id": issue_id,
                "identifier": identifier,
                "title": title,
                "url": f"https://linear.app/issue/{identifier}",
            },
        }
    }


def _need_create_response(
    need_id: str = "need-1",
    issue_id: str = "issue-1",
    identifier: str = "FR-1",
    title: str = "New Feature",
    success: bool = True,
) -> dict:
    return {
        "customerNeedCreate": {
            "success": success,
            "need": {
                "id": need_id,
                "body": "description",
                "customer": {"id": "cust-1", "name": _TEST_USER_EMAIL},
                "issue": {
                    "id": issue_id,
                    "identifier": identifier,
                    "title": title,
                    "url": f"https://linear.app/issue/{identifier}",
                },
            },
        }
    }


# ===========================================================================
# SearchFeatureRequestsTool
# ===========================================================================


class TestSearchFeatureRequestsTool:
    """Tests for SearchFeatureRequestsTool._execute."""

    @pytest.mark.asyncio(loop_scope="session")
    async def test_successful_search(self):
        session = make_session(user_id=_TEST_USER_ID)
        nodes = [
            {
                "id": "id-1",
                "identifier": "FR-1",
                "title": "Dark mode",
                "description": "Add dark mode support",
            },
            {
                "id": "id-2",
                "identifier": "FR-2",
                "title": "Dark theme",
                "description": None,
            },
        ]
        patcher, _ = _mock_linear_config(query_return=_search_response(nodes))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="dark mode"
            )

        assert isinstance(resp, FeatureRequestSearchResponse)
        assert resp.count == 2
        assert resp.results[0].id == "id-1"
        assert resp.results[1].identifier == "FR-2"
        assert resp.query == "dark mode"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_no_results(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, _ = _mock_linear_config(query_return=_search_response([]))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="nonexistent"
            )

        assert isinstance(resp, NoResultsResponse)
        assert "nonexistent" in resp.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_empty_query_returns_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = SearchFeatureRequestsTool()
        resp = await tool._execute(user_id=_TEST_USER_ID, session=session, query=" ")

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "query" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_query_returns_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = SearchFeatureRequestsTool()
        resp = await tool._execute(user_id=_TEST_USER_ID, session=session)

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_api_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.query.side_effect = RuntimeError("Linear API down")
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Linear API down" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_malformed_node_returns_error(self):
        """A node missing required keys should be caught by the try/except."""
        session = make_session(user_id=_TEST_USER_ID)
        # Node missing 'identifier' key
        bad_nodes = [{"id": "id-1", "title": "Missing identifier"}]
        patcher, _ = _mock_linear_config(query_return=_search_response(bad_nodes))
        with patcher:
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_linear_client_init_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        with patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            side_effect=RuntimeError("No API key"),
        ):
            tool = SearchFeatureRequestsTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "No API key" in resp.error


# ===========================================================================
# CreateFeatureRequestTool
# ===========================================================================


class TestCreateFeatureRequestTool:
    """Tests for CreateFeatureRequestTool._execute."""

    @pytest.fixture(autouse=True)
    def _patch_email_lookup(self):
        with patch(
            "backend.api.features.chat.tools.feature_requests.get_user_email_by_id",
            new_callable=AsyncMock,
            return_value=_TEST_USER_EMAIL,
        ):
            yield

    # ---- Happy paths -------------------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_create_new_issue(self):
        """Full happy path: upsert customer -> create issue -> attach need."""
        session = make_session(user_id=_TEST_USER_ID)

        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(),
            _need_create_response(),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="New Feature",
                description="Please add this",
            )

        assert isinstance(resp, FeatureRequestCreatedResponse)
        assert resp.is_new_issue is True
        assert resp.issue_identifier == "FR-1"
        assert resp.customer_name == _TEST_USER_EMAIL
        assert client.mutate.call_count == 3

    @pytest.mark.asyncio(loop_scope="session")
    async def test_add_need_to_existing_issue(self):
        """When existing_issue_id is provided, skip issue creation."""
        session = make_session(user_id=_TEST_USER_ID)

        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _need_create_response(issue_id="existing-1", identifier="FR-99"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Existing Feature",
                description="Me too",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, FeatureRequestCreatedResponse)
        assert resp.is_new_issue is False
        assert resp.issue_id == "existing-1"
        # Only 2 mutations: customer upsert + need create (no issue create)
        assert client.mutate.call_count == 2

    # ---- Validation errors -------------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_title(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=_TEST_USER_ID,
            session=session,
            title="",
            description="some desc",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "required" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_description(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=_TEST_USER_ID,
            session=session,
            title="Some title",
            description="",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "required" in resp.error.lower()

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_user_id(self):
        session = make_session(user_id=_TEST_USER_ID)
        tool = CreateFeatureRequestTool()
        resp = await tool._execute(
            user_id=None,
            session=session,
            title="Some title",
            description="Some desc",
        )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "user_id" in resp.error.lower()

    # ---- Linear client init failure ----------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_linear_client_init_failure(self):
        session = make_session(user_id=_TEST_USER_ID)
        with patch(
            "backend.api.features.chat.tools.feature_requests._get_linear_config",
            side_effect=RuntimeError("No API key"),
        ):
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "No API key" in resp.error

    # ---- Customer upsert failures ------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_upsert_api_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = RuntimeError("Customer API error")

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Customer API error" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_upsert_not_success(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.return_value = _customer_upsert_response(success=False)

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    @pytest.mark.asyncio(loop_scope="session")
    async def test_customer_malformed_response(self):
        """Customer dict missing 'id' key should be caught."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        # success=True but customer has no 'id'
        client.mutate.return_value = {
            "customerUpsert": {
                "success": True,
                "customer": {"name": _TEST_USER_ID},
            }
        }

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    # ---- Issue creation failures -------------------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_api_error(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            RuntimeError("Issue create failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Issue create failed" in resp.error

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_not_success(self):
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert "Failed to create feature request issue" in resp.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_issue_create_malformed_response(self):
        """issueCreate success=True but missing 'issue' key."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            {"issueCreate": {"success": True}},  # no 'issue' key
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)

    # ---- Customer need attachment failures ---------------------------------

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_api_error_new_issue(self):
        """Need creation fails after new issue was created -> orphaned issue info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(issue_id="orphan-1", identifier="FR-10"),
            RuntimeError("Need attach failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.error is not None
        assert "Need attach failed" in resp.error
        assert resp.details is not None
        assert resp.details["issue_id"] == "orphan-1"
        assert resp.details["issue_identifier"] == "FR-10"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_api_error_existing_issue(self):
        """Need creation fails on existing issue -> no orphaned info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            RuntimeError("Need attach failed"),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is None

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_not_success_includes_orphaned_info(self):
        """customerNeedCreate returns success=False -> includes orphaned issue."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(issue_id="orphan-2", identifier="FR-20"),
            _need_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is not None
        assert resp.details["issue_id"] == "orphan-2"
        assert resp.details["issue_identifier"] == "FR-20"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_not_success_existing_issue_no_details(self):
        """customerNeedCreate fails on existing issue -> no orphaned info."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _need_create_response(success=False),
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
                existing_issue_id="existing-1",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is None

    @pytest.mark.asyncio(loop_scope="session")
    async def test_need_create_malformed_response(self):
        """need_result missing 'need' key after success=True."""
        session = make_session(user_id=_TEST_USER_ID)
        patcher, client = _mock_linear_config()
        client.mutate.side_effect = [
            _customer_upsert_response(),
            _issue_create_response(),
            {"customerNeedCreate": {"success": True}},  # no 'need' key
        ]

        with patcher:
            tool = CreateFeatureRequestTool()
            resp = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                title="Title",
                description="Desc",
            )

        assert isinstance(resp, ErrorResponse)
        assert resp.details is not None
        assert resp.details["issue_id"] == "issue-1"

@@ -7,7 +7,6 @@ from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
from backend.api.features.chat.tools.models import (
    BlockInfoSummary,
    BlockInputFieldInfo,
    BlockListResponse,
    ErrorResponse,
    NoResultsResponse,
@@ -55,7 +54,8 @@ class FindBlockTool(BaseTool):
            "Blocks are reusable components that perform specific tasks like "
            "sending emails, making API calls, processing text, etc. "
            "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
            "The response includes each block's id, required_inputs, and input_schema."
            "The response includes each block's id, name, and description. "
            "Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
        )

    @property
@@ -124,7 +124,7 @@ class FindBlockTool(BaseTool):
                session_id=session_id,
            )

        # Enrich results with full block information
        # Enrich results with block information
        blocks: list[BlockInfoSummary] = []
        for result in results:
            block_id = result["content_id"]
@@ -141,65 +141,11 @@ class FindBlockTool(BaseTool):
            ):
                continue

            # Get input/output schemas
            input_schema = {}
            output_schema = {}
            try:
                input_schema = block.input_schema.jsonschema()
            except Exception as e:
                logger.debug(
                    "Failed to generate input schema for block %s: %s",
                    block_id,
                    e,
                )
            try:
                output_schema = block.output_schema.jsonschema()
            except Exception as e:
                logger.debug(
                    "Failed to generate output schema for block %s: %s",
                    block_id,
                    e,
                )

            # Get categories from block instance
            categories = []
            if hasattr(block, "categories") and block.categories:
                categories = [cat.value for cat in block.categories]

            # Extract required inputs for easier use
            required_inputs: list[BlockInputFieldInfo] = []
            if input_schema:
                properties = input_schema.get("properties", {})
                required_fields = set(input_schema.get("required", []))
                # Get credential field names to exclude from required inputs
                credentials_fields = set(
                    block.input_schema.get_credentials_fields().keys()
                )

                for field_name, field_schema in properties.items():
                    # Skip credential fields - they're handled separately
                    if field_name in credentials_fields:
                        continue

                    required_inputs.append(
                        BlockInputFieldInfo(
                            name=field_name,
                            type=field_schema.get("type", "string"),
                            description=field_schema.get("description", ""),
                            required=field_name in required_fields,
                            default=field_schema.get("default"),
                        )
                    )

            blocks.append(
                BlockInfoSummary(
                    id=block_id,
                    name=block.name,
                    description=block.description or "",
                    categories=categories,
                    input_schema=input_schema,
                    output_schema=output_schema,
                    required_inputs=required_inputs,
                )
            )

@@ -228,8 +174,7 @@ class FindBlockTool(BaseTool):
        return BlockListResponse(
            message=(
                f"Found {len(blocks)} block(s) matching '{query}'. "
                "To execute a block, use run_block with the block's 'id' field "
                "and provide 'input_data' matching the block's input_schema."
                "To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
            ),
            blocks=blocks,
            count=len(blocks),

|
||||
|
||||
|
||||
def make_mock_block(
|
||||
block_id: str, name: str, block_type: BlockType, disabled: bool = False
|
||||
block_id: str,
|
||||
name: str,
|
||||
block_type: BlockType,
|
||||
disabled: bool = False,
|
||||
input_schema: dict | None = None,
|
||||
output_schema: dict | None = None,
|
||||
credentials_fields: dict | None = None,
|
||||
):
|
||||
"""Create a mock block for testing."""
|
||||
mock = MagicMock()
|
||||
@@ -28,10 +34,13 @@ def make_mock_block(
|
||||
mock.block_type = block_type
|
||||
mock.disabled = disabled
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
|
||||
mock.input_schema.get_credentials_fields.return_value = {}
|
||||
mock.input_schema.jsonschema.return_value = input_schema or {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = {}
|
||||
mock.output_schema.jsonschema.return_value = output_schema or {}
|
||||
mock.categories = []
|
||||
return mock
|
||||
|
||||
@@ -137,3 +146,241 @@ class TestFindBlockFiltering:
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert len(response.blocks) == 1
|
||||
assert response.blocks[0].id == "normal-block-id"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_response_size_average_chars_per_block(self):
|
||||
"""Measure average chars per block in the serialized response."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
# Realistic block definitions modeled after real blocks
|
||||
block_defs = [
|
||||
{
|
||||
"id": "http-block-id",
|
||||
"name": "Send Web Request",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The URL to send the request to",
|
||||
},
|
||||
"method": {
|
||||
"type": "string",
|
||||
"description": "The HTTP method to use",
|
||||
},
|
||||
"headers": {
|
||||
"type": "object",
|
||||
"description": "Headers to include in the request",
|
||||
},
|
||||
"json_format": {
|
||||
"type": "boolean",
|
||||
"description": "If true, send the body as JSON",
|
||||
},
|
||||
"body": {
|
||||
"type": "object",
|
||||
"description": "Form/JSON body payload",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "HTTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["url", "method"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "object",
|
||||
"description": "The response from the server",
|
||||
},
|
||||
"client_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 4xx status codes",
|
||||
},
|
||||
"server_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 5xx status codes",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Errors for all other exceptions",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "email-block-id",
|
||||
"name": "Send Email",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"to_email": {
|
||||
"type": "string",
|
||||
"description": "Recipient email address",
|
||||
},
|
||||
"subject": {
|
||||
"type": "string",
|
||||
"description": "Subject of the email",
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "Body of the email",
|
||||
},
|
||||
"config": {
|
||||
"type": "object",
|
||||
"description": "SMTP Config",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "SMTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["to_email", "subject", "body", "credentials"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Status of the email sending operation",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if sending failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "claude-code-block-id",
|
||||
"name": "Claude Code",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"e2b_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for E2B platform",
|
||||
},
|
||||
"anthropic_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for Anthropic",
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "Task or instruction for Claude Code",
|
||||
},
|
||||
"timeout": {
|
||||
"type": "integer",
|
||||
"description": "Sandbox timeout in seconds",
|
||||
},
|
||||
"setup_commands": {
|
||||
"type": "array",
|
||||
"description": "Shell commands to run before execution",
|
||||
},
|
||||
"working_directory": {
|
||||
"type": "string",
|
||||
"description": "Working directory for Claude Code",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID to resume a conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "Sandbox ID to reconnect to",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Previous conversation history",
|
||||
},
|
||||
"dispose_sandbox": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to dispose sandbox after execution",
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"e2b_credentials",
|
||||
"anthropic_credentials",
|
||||
"prompt",
|
||||
],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "string",
|
||||
"description": "Output from Claude Code execution",
|
||||
},
|
||||
"files": {
|
||||
"type": "array",
|
||||
"description": "Files created/modified by Claude Code",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Full conversation history",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID for this conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "ID of the sandbox instance",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if execution failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {
|
||||
"e2b_credentials": True,
|
||||
"anthropic_credentials": True,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
search_results = [
|
||||
{"content_id": d["id"], "score": 0.9 - i * 0.1}
|
||||
for i, d in enumerate(block_defs)
|
||||
]
|
||||
mock_blocks = {
|
||||
d["id"]: make_mock_block(
|
||||
block_id=d["id"],
|
||||
name=d["name"],
|
||||
block_type=BlockType.STANDARD,
|
||||
input_schema=d["input_schema"],
|
||||
output_schema=d["output_schema"],
|
||||
credentials_fields=d["credentials_fields"],
|
||||
)
|
||||
for d in block_defs
|
||||
}
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(search_results, len(search_results)),
|
||||
), patch(
|
||||
"backend.api.features.chat.tools.find_block.get_block",
|
||||
side_effect=lambda bid: mock_blocks.get(bid),
|
||||
):
|
||||
tool = FindBlockTool()
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert response.count == len(block_defs)
|
||||
|
||||
total_chars = len(response.model_dump_json())
|
||||
avg_chars = total_chars // response.count
|
||||
|
||||
# Print for visibility in test output
|
||||
print(f"\nTotal response size: {total_chars} chars")
|
||||
print(f"Number of blocks: {response.count}")
|
||||
print(f"Average chars per block: {avg_chars}")
|
||||
|
||||
# The old response was ~90K for 10 blocks (~9K per block).
|
||||
# Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
|
||||
# Now with only id/name/description, we expect ~300 chars per block.
|
||||
assert avg_chars < 500, (
|
||||
f"Average chars per block ({avg_chars}) exceeds 500. "
|
||||
f"Total response: {total_chars} chars for {response.count} blocks."
|
||||
)
|
||||
|
||||
@@ -25,6 +25,7 @@ class ResponseType(str, Enum):
    AGENT_SAVED = "agent_saved"
    CLARIFICATION_NEEDED = "clarification_needed"
    BLOCK_LIST = "block_list"
    BLOCK_DETAILS = "block_details"
    BLOCK_OUTPUT = "block_output"
    DOC_SEARCH_RESULTS = "doc_search_results"
    DOC_PAGE = "doc_page"
@@ -40,6 +41,9 @@ class ResponseType(str, Enum):
    OPERATION_IN_PROGRESS = "operation_in_progress"
    # Input validation
    INPUT_VALIDATION_ERROR = "input_validation_error"
    # Feature request types
    FEATURE_REQUEST_SEARCH = "feature_request_search"
    FEATURE_REQUEST_CREATED = "feature_request_created"


# Base response model
@@ -334,13 +338,6 @@ class BlockInfoSummary(BaseModel):
    id: str
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
    )


class BlockListResponse(ToolResponseBase):
@@ -350,10 +347,25 @@ class BlockListResponse(ToolResponseBase):
    blocks: list[BlockInfoSummary]
    count: int
    query: str
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
    )


class BlockDetails(BaseModel):
    """Detailed block information."""

    id: str
    name: str
    description: str
    inputs: dict[str, Any] = {}
    outputs: dict[str, Any] = {}
    credentials: list[CredentialsMetaInput] = []


class BlockDetailsResponse(ToolResponseBase):
    """Response for block details (first run_block attempt)."""

    type: ResponseType = ResponseType.BLOCK_DETAILS
    block: BlockDetails
    user_authenticated: bool = False


class BlockOutputResponse(ToolResponseBase):
@@ -421,3 +433,34 @@ class AsyncProcessingResponse(ToolResponseBase):
    status: str = "accepted"  # Must be "accepted" for detection
    operation_id: str | None = None
    task_id: str | None = None


# Feature request models
class FeatureRequestInfo(BaseModel):
    """Information about a feature request issue."""

    id: str
    identifier: str
    title: str
    description: str | None = None


class FeatureRequestSearchResponse(ToolResponseBase):
    """Response for search_feature_requests tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH
    results: list[FeatureRequestInfo]
    count: int
    query: str


class FeatureRequestCreatedResponse(ToolResponseBase):
    """Response for create_feature_request tool."""

    type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED
    issue_id: str
    issue_identifier: str
    issue_title: str
    issue_url: str
    is_new_issue: bool  # False if added to existing
    customer_name: str

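A minimal construction sketch for the new response models, as a check on the field set above (values illustrative):

from backend.api.features.chat.tools.models import (
    FeatureRequestInfo,
    FeatureRequestSearchResponse,
    ResponseType,
)

resp = FeatureRequestSearchResponse(
    message="Found 1 feature request(s) matching 'dark mode'.",
    session_id="session-1",
    results=[FeatureRequestInfo(id="id-1", identifier="FR-1", title="Dark mode")],
    count=1,
    query="dark mode",
)
assert resp.type is ResponseType.FEATURE_REQUEST_SEARCH
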
@@ -23,8 +23,11 @@ from backend.util.exceptions import BlockError
from .base import BaseTool
from .helpers import get_inputs_from_schema
from .models import (
    BlockDetails,
    BlockDetailsResponse,
    BlockOutputResponse,
    ErrorResponse,
    InputValidationErrorResponse,
    SetupInfo,
    SetupRequirementsResponse,
    ToolResponseBase,
@@ -51,8 +54,8 @@ class RunBlockTool(BaseTool):
            "Execute a specific block with the provided input data. "
            "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
            "do NOT guess or make up block IDs. "
            "Use the 'id' from find_block results and provide input_data "
            "matching the block's required_inputs."
            "On first attempt (without input_data), returns detailed schema showing "
            "required inputs and outputs. Then call again with proper input_data to execute."
        )

    @property
@@ -67,11 +70,19 @@ class RunBlockTool(BaseTool):
                        "NEVER guess this - always get it from find_block first."
                    ),
                },
                "block_name": {
                    "type": "string",
                    "description": (
                        "The block's human-readable name from find_block results. "
                        "Used for display purposes in the UI."
                    ),
                },
                "input_data": {
                    "type": "object",
                    "description": (
                        "Input values for the block. Use the 'required_inputs' field "
                        "from find_block to see what fields are needed."
                        "Input values for the block. "
                        "First call with empty {} to see the block's schema, "
                        "then call again with proper values to execute."
                    ),
                },
            },
@@ -156,6 +167,34 @@ class RunBlockTool(BaseTool):
            await self._resolve_block_credentials(user_id, block, input_data)
        )

        # Get block schemas for details/validation
        try:
            input_schema: dict[str, Any] = block.input_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate input schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid input schema",
                error=str(e),
                session_id=session_id,
            )
        try:
            output_schema: dict[str, Any] = block.output_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate output schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid output schema",
                error=str(e),
                session_id=session_id,
            )

        if missing_credentials:
            # Return setup requirements response with missing credentials
            credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -188,6 +227,53 @@ class RunBlockTool(BaseTool):
            graph_version=None,
        )

        # Check if this is a first attempt (required inputs missing)
        # Return block details so user can see what inputs are needed
        credentials_fields = set(block.input_schema.get_credentials_fields().keys())
        required_keys = set(input_schema.get("required", []))
        required_non_credential_keys = required_keys - credentials_fields
        provided_input_keys = set(input_data.keys()) - credentials_fields

        # Check for unknown input fields
        valid_fields = (
            set(input_schema.get("properties", {}).keys()) - credentials_fields
        )
        unrecognized_fields = provided_input_keys - valid_fields
        if unrecognized_fields:
            return InputValidationErrorResponse(
                message=(
                    f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
                    f"Block was not executed. Please use the correct field names from the schema."
                ),
                session_id=session_id,
                unrecognized_fields=sorted(unrecognized_fields),
                inputs=input_schema,
            )

        # Show details when not all required non-credential inputs are provided
        if not (required_non_credential_keys <= provided_input_keys):
            # Get credentials info for the response
            credentials_meta = []
            for field_name, cred_meta in matched_credentials.items():
                credentials_meta.append(cred_meta)

            return BlockDetailsResponse(
                message=(
                    f"Block '{block.name}' details. "
                    "Provide input_data matching the inputs schema to execute the block."
                ),
                session_id=session_id,
                block=BlockDetails(
                    id=block_id,
                    name=block.name,
                    description=block.description or "",
                    inputs=input_schema,
                    outputs=output_schema,
                    credentials=credentials_meta,
                ),
                user_authenticated=True,
            )

        try:
            # Get or create user's workspace for CoPilot file operations
            workspace = await get_or_create_workspace(user_id)

@@ -1,10 +1,15 @@
"""Tests for block execution guards in RunBlockTool."""
"""Tests for block execution guards and input validation in RunBlockTool."""

from unittest.mock import MagicMock, patch
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.models import (
    BlockDetailsResponse,
    BlockOutputResponse,
    ErrorResponse,
    InputValidationErrorResponse,
)
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType

@@ -28,6 +33,39 @@ def make_mock_block(
    return mock


def make_mock_block_with_schema(
    block_id: str,
    name: str,
    input_properties: dict,
    required_fields: list[str],
    output_properties: dict | None = None,
):
    """Create a mock block with a defined input/output schema for validation tests."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.block_type = BlockType.STANDARD
    mock.disabled = False
    mock.description = f"Test block: {name}"

    input_schema = {
        "properties": input_properties,
        "required": required_fields,
    }
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = input_schema
    mock.input_schema.get_credentials_fields_info.return_value = {}
    mock.input_schema.get_credentials_fields.return_value = {}

    output_schema = {
        "properties": output_properties or {"result": {"type": "string"}},
    }
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = output_schema

    return mock


class TestRunBlockFiltering:
    """Tests for block execution guards in RunBlockTool."""

@@ -104,3 +142,221 @@ class TestRunBlockFiltering:
        # (may be other errors like missing credentials, but not the exclusion guard)
        if isinstance(response, ErrorResponse):
            assert "cannot be run directly in CoPilot" not in response.message


class TestRunBlockInputValidation:
    """Tests for input field validation in RunBlockTool.

    run_block rejects unknown input field names with InputValidationErrorResponse,
    preventing silent failures where incorrect keys would be ignored and the block
    would execute with default values instead of the caller's intended values.
    """

    @pytest.mark.asyncio(loop_scope="session")
    async def test_unknown_input_fields_are_rejected(self):
        """run_block rejects unknown input fields instead of silently ignoring them.

        Scenario: The AI Text Generator block has a field called 'model' (for LLM model
        selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
        instead. The block should reject the request and return the valid schema.
        """
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string", "description": "The prompt to send"},
                "model": {
                    "type": "string",
                    "description": "The LLM model to use",
                    "default": "gpt-4o-mini",
                },
                "sys_prompt": {
                    "type": "string",
                    "description": "System prompt",
                    "default": "",
                },
            },
            required_fields=["prompt"],
            output_properties={"response": {"type": "string"}},
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Write a haiku about coding",
                    "LLM_Model": "claude-opus-4-6",  # WRONG KEY - should be 'model'
                },
            )

        assert isinstance(response, InputValidationErrorResponse)
        assert "LLM_Model" in response.unrecognized_fields
        assert "Block was not executed" in response.message
        assert "inputs" in response.model_dump()  # valid schema included

    @pytest.mark.asyncio(loop_scope="session")
    async def test_multiple_wrong_keys_are_all_reported(self):
        """All unrecognized field names are reported in a single error response."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
                "sys_prompt": {"type": "string", "default": ""},
                "retry": {"type": "integer", "default": 3},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Hello",  # correct
                    "llm_model": "claude-opus-4-6",  # WRONG - should be 'model'
                    "system_prompt": "Be helpful",  # WRONG - should be 'sys_prompt'
                    "retries": 5,  # WRONG - should be 'retry'
                },
            )

        assert isinstance(response, InputValidationErrorResponse)
        assert set(response.unrecognized_fields) == {
            "llm_model",
            "system_prompt",
            "retries",
        }
        assert "Block was not executed" in response.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_unknown_fields_rejected_even_with_missing_required(self):
        """Unknown fields are caught before the missing-required-fields check."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # 'prompt' is missing AND 'LLM_Model' is an unknown field
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "LLM_Model": "claude-opus-4-6",  # wrong key, and 'prompt' is missing
                },
            )

        # Unknown fields are caught first
        assert isinstance(response, InputValidationErrorResponse)
        assert "LLM_Model" in response.unrecognized_fields

    @pytest.mark.asyncio(loop_scope="session")
    async def test_correct_inputs_still_execute(self):
        """Correct input field names pass validation and the block executes."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        async def mock_execute(input_data, **kwargs):
            yield "response", "Generated text"

        mock_block.execute = mock_execute

        with (
            patch(
                "backend.api.features.chat.tools.run_block.get_block",
                return_value=mock_block,
            ),
            patch(
                "backend.api.features.chat.tools.run_block.get_or_create_workspace",
                new_callable=AsyncMock,
                return_value=MagicMock(id="test-workspace-id"),
            ),
        ):
            tool = RunBlockTool()

            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Write a haiku",
                    "model": "gpt-4o-mini",  # correct field name
                },
            )

        assert isinstance(response, BlockOutputResponse)
        assert response.success is True

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_required_fields_returns_details(self):
        """Missing required fields returns BlockDetailsResponse with schema."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # Only provide valid optional field, missing required 'prompt'
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "model": "gpt-4o-mini",  # valid but optional
                },
            )

        assert isinstance(response, BlockDetailsResponse)

@@ -0,0 +1,153 @@
"""Tests for BlockDetailsResponse in RunBlockTool."""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import BlockDetailsResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType
from backend.data.model import CredentialsMetaInput
from backend.integrations.providers import ProviderName

from ._test_data import make_session

_TEST_USER_ID = "test-user-run-block-details"


def make_mock_block_with_inputs(
    block_id: str, name: str, description: str = "Test description"
):
    """Create a mock block with input/output schemas for testing."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.description = description
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    # Input schema with non-credential fields
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "url": {"type": "string", "description": "URL to fetch"},
            "method": {"type": "string", "description": "HTTP method"},
        },
        "required": ["url"],
    }
    mock.input_schema.get_credentials_fields.return_value = {}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    # Output schema
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {
            "response": {"type": "object", "description": "HTTP response"},
            "error": {"type": "string", "description": "Error message"},
        }
    }

    return mock


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_no_input_provided():
    """When run_block is called without input_data, it should return BlockDetailsResponse."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with inputs
    http_block = make_mock_block_with_inputs(
        "http-block-id", "HTTP Request", "Send HTTP requests"
    )

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=http_block,
    ):
        # Mock credentials check to return no missing credentials
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=({}, []),  # (matched_credentials, missing_credentials)
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="http-block-id",
                input_data={},  # Empty input data
            )

    # Should return BlockDetailsResponse showing the schema
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "http-block-id"
    assert response.block.name == "HTTP Request"
    assert response.block.description == "Send HTTP requests"
    assert "url" in response.block.inputs["properties"]
    assert "method" in response.block.inputs["properties"]
    assert "response" in response.block.outputs["properties"]
    assert response.user_authenticated is True


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_only_credentials_provided():
    """When only credentials are provided (no actual input), should return details."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with both credential and non-credential inputs
    mock = MagicMock()
    mock.id = "api-block-id"
    mock.name = "API Call"
    mock.description = "Make API calls"
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "credentials": {"type": "object", "description": "API credentials"},
            "endpoint": {"type": "string", "description": "API endpoint"},
        },
        "required": ["credentials", "endpoint"],
    }
    mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {"result": {"type": "object"}}
    }

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=mock,
    ):
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=(
                {
                    "credentials": CredentialsMetaInput(
                        id="cred-id",
                        provider=ProviderName("test_provider"),
                        type="api_key",
                        title="Test Credential",
                    )
                },
                [],
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="api-block-id",
                input_data={"credentials": {"some": "cred"}},  # Only credential
            )

    # Should return details because no non-credential inputs provided
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "api-block-id"
    assert response.block.name == "API Call"
@@ -662,6 +662,17 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    mem0_api_key: str = Field(default="", description="Mem0 API key")
    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")

    linear_api_key: str = Field(
        default="", description="Linear API key for system-level operations"
    )
    linear_feature_request_project_id: str = Field(
        default="",
        description="Linear project ID where feature requests are tracked",
    )
    linear_feature_request_team_id: str = Field(
        default="",
        description="Linear team ID used when creating feature request issues",
    )
    linear_client_id: str = Field(default="", description="Linear client ID")
    linear_client_secret: str = Field(default="", description="Linear client secret")
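How these secrets are consumed is outside this hunk; a minimal sketch, assuming the platform's usual settings accessor (the import path and the guard are assumptions, not taken from this changeset):

# Assumed consumption pattern for the new Linear settings.
from backend.util.settings import Settings  # assumed location of Settings/Secrets

secrets = Settings().secrets
if not secrets.linear_api_key:
    raise RuntimeError("LINEAR_API_KEY environment variable is not set")
project_id = secrets.linear_feature_request_project_id
team_id = secrets.linear_feature_request_team_id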
68
autogpt_platform/backend/poetry.lock
generated
@@ -441,14 +441,14 @@ develop = true
colorama = "^0.4.6"
cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.0"
fastapi = "^0.128.7"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.14.1"
launchdarkly-server-sdk = "^9.15.0"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = {version = "^2.11.0", extras = ["crypto"]}
redis = "^6.2.0"
supabase = "^2.27.2"
supabase = "^2.28.0"
uvicorn = "^0.40.0"

[package.source]
@@ -1382,14 +1382,14 @@ tzdata = "*"

[[package]]
name = "fastapi"
version = "0.128.6"
version = "0.128.7"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
    {file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
    {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
    {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
]

[package.dependencies]
@@ -3117,14 +3117,14 @@ urllib3 = ">=1.26.0,<3"

[[package]]
name = "launchdarkly-server-sdk"
version = "9.14.1"
version = "9.15.0"
description = "LaunchDarkly SDK for Python"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
]

[package.dependencies]
@@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest

[[package]]
name = "postgrest"
version = "2.27.3"
version = "2.28.0"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
    {file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
]

[package.dependencies]
@@ -6260,14 +6260,14 @@ all = ["numpy"]

[[package]]
name = "realtime"
version = "2.27.3"
version = "2.28.0"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
    {file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
]

[package.dependencies]
@@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart

[[package]]
name = "storage3"
version = "2.27.3"
version = "2.28.0"
description = "Supabase Storage client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
    {file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
]

[package.dependencies]
@@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}

[[package]]
name = "supabase"
version = "2.27.3"
version = "2.28.0"
description = "Supabase client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
    {file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
]

[package.dependencies]
httpx = ">=0.26,<0.29"
postgrest = "2.27.3"
realtime = "2.27.3"
storage3 = "2.27.3"
supabase-auth = "2.27.3"
supabase-functions = "2.27.3"
postgrest = "2.28.0"
realtime = "2.28.0"
storage3 = "2.28.0"
supabase-auth = "2.28.0"
supabase-functions = "2.28.0"
yarl = ">=1.22.0"

[[package]]
name = "supabase-auth"
version = "2.27.3"
version = "2.28.0"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
    {file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
]

[package.dependencies]
@@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}

[[package]]
name = "supabase-functions"
version = "2.27.3"
version = "2.28.0"
description = "Library for Supabase Functions"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
    {file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
]

[package.dependencies]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f"

@@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
sqlalchemy = "^2.0.40"
strenum = "^0.4.9"
stripe = "^11.5.0"
supabase = "2.27.3"
supabase = "2.28.0"
tenacity = "^9.1.4"
todoist-api-python = "^2.1.7"
tweepy = "^4.16.0"

@@ -37,7 +37,7 @@ services:
      context: ../
      dockerfile: autogpt_platform/backend/Dockerfile
      target: migrate
    command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
    command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"]
    develop:
      watch:
        - path: ./
@@ -56,7 +56,7 @@ services:
      test:
        [
          "CMD-SHELL",
          "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1",
          "prisma migrate status | grep -q 'No pending migrations' || exit 1",
        ]
      interval: 30s
      timeout: 10s

@@ -15,6 +15,10 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useRef, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
import {
  CreateFeatureRequestTool,
  SearchFeatureRequestsTool,
} from "../../tools/FeatureRequests/FeatureRequests";
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
@@ -254,6 +258,20 @@ export const ChatMessagesContainer = ({
            part={part as ToolUIPart}
          />
        );
      case "tool-search_feature_requests":
        return (
          <SearchFeatureRequestsTool
            key={`${message.id}-${i}`}
            part={part as ToolUIPart}
          />
        );
      case "tool-create_feature_request":
        return (
          <CreateFeatureRequestTool
            key={`${message.id}-${i}`}
            part={part as ToolUIPart}
          />
        );
      default:
        return null;
    }

@@ -14,6 +14,10 @@ import { Text } from "@/components/atoms/Text/Text";
import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider";
import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../tools/EditAgent/EditAgent";
import {
  CreateFeatureRequestTool,
  SearchFeatureRequestsTool,
} from "../tools/FeatureRequests/FeatureRequests";
import { FindAgentsTool } from "../tools/FindAgents/FindAgents";
import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../tools/RunAgent/RunAgent";
@@ -45,6 +49,8 @@ const SECTIONS = [
  "Tool: Create Agent",
  "Tool: Edit Agent",
  "Tool: View Agent Output",
  "Tool: Search Feature Requests",
  "Tool: Create Feature Request",
  "Full Conversation Example",
] as const;

@@ -1421,6 +1427,235 @@ export default function StyleguidePage() {
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* SEARCH FEATURE REQUESTS */}
      {/* ============================================================= */}

      <Section title="Tool: Search Feature Requests">
        <SubSection label="Input streaming">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "input-streaming",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>

        <SubSection label="Input available">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "input-available",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>

        <SubSection label="Output available (with results)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "dark mode" },
              output: {
                type: "feature_request_search",
                message:
                  'Found 2 feature request(s) matching "dark mode".',
                query: "dark mode",
                count: 2,
                results: [
                  {
                    id: "fr-001",
                    identifier: "INT-42",
                    title: "Add dark mode to the platform",
                    description:
                      "Users have requested a dark mode option for the builder and copilot interfaces to reduce eye strain during long sessions.",
                  },
                  {
                    id: "fr-002",
                    identifier: "INT-87",
                    title: "Dark theme for agent output viewer",
                    description:
                      "Specifically requesting dark theme support for the agent output/execution viewer panel.",
                  },
                ],
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (no results)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "teleportation" },
              output: {
                type: "no_results",
                message:
                  "No feature requests found matching 'teleportation'.",
                suggestions: [
                  "Try different keywords",
                  "Use broader search terms",
                  "You can create a new feature request if none exists",
                ],
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (error)">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-available",
              input: { query: "dark mode" },
              output: {
                type: "error",
                message: "Failed to search feature requests.",
                error: "LINEAR_API_KEY environment variable is not set",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output error">
          <SearchFeatureRequestsTool
            part={{
              type: "tool-search_feature_requests",
              toolCallId: uid(),
              state: "output-error",
              input: { query: "dark mode" },
            }}
          />
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* CREATE FEATURE REQUEST */}
      {/* ============================================================= */}

      <Section title="Tool: Create Feature Request">
        <SubSection label="Input streaming">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "input-streaming",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
            }}
          />
        </SubSection>

        <SubSection label="Input available">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "input-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (new issue created)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode for the platform.",
              },
              output: {
                type: "feature_request_created",
                message:
                  "Created new feature request [INT-105] Add dark mode.",
                issue_id: "issue-new-123",
                issue_identifier: "INT-105",
                issue_title: "Add dark mode",
                issue_url:
                  "https://linear.app/autogpt/issue/INT-105/add-dark-mode",
                is_new_issue: true,
                customer_name: "user-abc-123",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (added to existing issue)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Dark mode support",
                description:
                  "Please add dark mode, it would help with long sessions.",
                existing_issue_id: "fr-001",
              },
              output: {
                type: "feature_request_created",
                message:
                  "Added your request to existing feature request [INT-42] Add dark mode to the platform.",
                issue_id: "fr-001",
                issue_identifier: "INT-42",
                issue_title: "Add dark mode to the platform",
                issue_url:
                  "https://linear.app/autogpt/issue/INT-42/add-dark-mode-to-the-platform",
                is_new_issue: false,
                customer_name: "user-xyz-789",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output available (error)">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-available",
              input: {
                title: "Add dark mode",
                description: "I would love dark mode.",
              },
              output: {
                type: "error",
                message:
                  "Failed to attach customer need to the feature request.",
                error: "Linear API request failed (500): Internal error",
              },
            }}
          />
        </SubSection>

        <SubSection label="Output error">
          <CreateFeatureRequestTool
            part={{
              type: "tool-create_feature_request",
              toolCallId: uid(),
              state: "output-error",
              input: { title: "Add dark mode" },
            }}
          />
        </SubSection>
      </Section>

      {/* ============================================================= */}
      {/* FULL CONVERSATION EXAMPLE */}
      {/* ============================================================= */}

@@ -0,0 +1,240 @@
"use client";

import type { ToolUIPart } from "ai";
import { useMemo } from "react";

import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import {
  ContentBadge,
  ContentCard,
  ContentCardDescription,
  ContentCardHeader,
  ContentCardTitle,
  ContentGrid,
  ContentLink,
  ContentMessage,
  ContentSuggestionsList,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import {
  AccordionIcon,
  getAccordionTitle,
  getAnimationText,
  getFeatureRequestOutput,
  isCreatedOutput,
  isErrorOutput,
  isNoResultsOutput,
  isSearchResultsOutput,
  ToolIcon,
  type FeatureRequestToolType,
} from "./helpers";

export interface FeatureRequestToolPart {
  type: FeatureRequestToolType;
  toolCallId: string;
  state: ToolUIPart["state"];
  input?: unknown;
  output?: unknown;
}

interface Props {
  part: FeatureRequestToolPart;
}

function truncate(text: string, maxChars: number): string {
  const trimmed = text.trim();
  if (trimmed.length <= maxChars) return trimmed;
  return `${trimmed.slice(0, maxChars).trimEnd()}…`;
}

export function SearchFeatureRequestsTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const searchOutput =
    isOutputAvailable && output && isSearchResultsOutput(output)
      ? output
      : null;
  const noResultsOutput =
    isOutputAvailable && output && isNoResultsOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable &&
    ((!!searchOutput && searchOutput.count > 0) ||
      !!noResultsOutput ||
      !!errorOutput);

  const accordionDescription =
    hasExpandableContent && searchOutput
      ? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"`
      : hasExpandableContent && (noResultsOutput || errorOutput)
        ? ((noResultsOutput ?? errorOutput)?.message ?? null)
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {searchOutput && (
            <ContentGrid>
              {searchOutput.results.map((r) => (
                <ContentCard key={r.id}>
                  <ContentCardHeader>
                    <ContentCardTitle>
                      {r.identifier} — {r.title}
                    </ContentCardTitle>
                  </ContentCardHeader>
                  {r.description && (
                    <ContentCardDescription>
                      {truncate(r.description, 200)}
                    </ContentCardDescription>
                  )}
                </ContentCard>
              ))}
            </ContentGrid>
          )}

          {noResultsOutput && (
            <div>
              <ContentMessage>{noResultsOutput.message}</ContentMessage>
              {noResultsOutput.suggestions &&
                noResultsOutput.suggestions.length > 0 && (
                  <ContentSuggestionsList items={noResultsOutput.suggestions} />
                )}
            </div>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}

export function CreateFeatureRequestTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const createdOutput =
    isOutputAvailable && output && isCreatedOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable && (!!createdOutput || !!errorOutput);

  const accordionDescription =
    hasExpandableContent && createdOutput
      ? `${createdOutput.issue_identifier} — ${createdOutput.issue_title}`
      : hasExpandableContent && errorOutput
        ? errorOutput.message
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {createdOutput && (
            <ContentCard>
              <ContentCardHeader
                action={
                  createdOutput.issue_url ? (
                    <ContentLink href={createdOutput.issue_url}>
                      View
                    </ContentLink>
                  ) : undefined
                }
              >
                <ContentCardTitle>
                  {createdOutput.issue_identifier} — {createdOutput.issue_title}
                </ContentCardTitle>
              </ContentCardHeader>
              <div className="mt-2 flex items-center gap-2">
                <ContentBadge>
                  {createdOutput.is_new_issue ? "New" : "Existing"}
                </ContentBadge>
              </div>
              <ContentMessage>{createdOutput.message}</ContentMessage>
            </ContentCard>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}
@@ -0,0 +1,271 @@
import {
  CheckCircleIcon,
  LightbulbIcon,
  MagnifyingGlassIcon,
  PlusCircleIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";

/* ------------------------------------------------------------------ */
/* Types (local until API client is regenerated)                       */
/* ------------------------------------------------------------------ */

interface FeatureRequestInfo {
  id: string;
  identifier: string;
  title: string;
  description?: string | null;
}

export interface FeatureRequestSearchResponse {
  type: "feature_request_search";
  message: string;
  results: FeatureRequestInfo[];
  count: number;
  query: string;
}

export interface FeatureRequestCreatedResponse {
  type: "feature_request_created";
  message: string;
  issue_id: string;
  issue_identifier: string;
  issue_title: string;
  issue_url: string;
  is_new_issue: boolean;
  customer_name: string;
}

interface NoResultsResponse {
  type: "no_results";
  message: string;
  suggestions?: string[];
}

interface ErrorResponse {
  type: "error";
  message: string;
  error?: string;
}

export type FeatureRequestOutput =
  | FeatureRequestSearchResponse
  | FeatureRequestCreatedResponse
  | NoResultsResponse
  | ErrorResponse;

export type FeatureRequestToolType =
  | "tool-search_feature_requests"
  | "tool-create_feature_request"
  | string;

/* ------------------------------------------------------------------ */
/* Output parsing                                                      */
/* ------------------------------------------------------------------ */

function parseOutput(output: unknown): FeatureRequestOutput | null {
  if (!output) return null;
  if (typeof output === "string") {
    const trimmed = output.trim();
    if (!trimmed) return null;
    try {
      return parseOutput(JSON.parse(trimmed) as unknown);
    } catch {
      return null;
    }
  }
  if (typeof output === "object") {
    const type = (output as { type?: unknown }).type;
    if (
      type === "feature_request_search" ||
      type === "feature_request_created" ||
      type === "no_results" ||
      type === "error"
    ) {
      return output as FeatureRequestOutput;
    }
    // Fallback structural checks
    if ("results" in output && "query" in output)
      return output as FeatureRequestSearchResponse;
    if ("issue_identifier" in output)
      return output as FeatureRequestCreatedResponse;
    if ("suggestions" in output && !("error" in output))
      return output as NoResultsResponse;
    if ("error" in output || "details" in output)
      return output as ErrorResponse;
  }
  return null;
}

export function getFeatureRequestOutput(
  part: unknown,
): FeatureRequestOutput | null {
  if (!part || typeof part !== "object") return null;
  return parseOutput((part as { output?: unknown }).output);
}

/* ------------------------------------------------------------------ */
/* Type guards                                                         */
/* ------------------------------------------------------------------ */

export function isSearchResultsOutput(
  output: FeatureRequestOutput,
): output is FeatureRequestSearchResponse {
  return (
    output.type === "feature_request_search" ||
    ("results" in output && "query" in output)
  );
}

export function isCreatedOutput(
  output: FeatureRequestOutput,
): output is FeatureRequestCreatedResponse {
  return (
    output.type === "feature_request_created" || "issue_identifier" in output
  );
}

export function isNoResultsOutput(
  output: FeatureRequestOutput,
): output is NoResultsResponse {
  return (
    output.type === "no_results" ||
    ("suggestions" in output && !("error" in output))
  );
}

export function isErrorOutput(
  output: FeatureRequestOutput,
): output is ErrorResponse {
  return output.type === "error" || "error" in output;
}

/* ------------------------------------------------------------------ */
/* Accordion metadata                                                  */
/* ------------------------------------------------------------------ */

export function getAccordionTitle(
  toolType: FeatureRequestToolType,
  output: FeatureRequestOutput,
): string {
  if (toolType === "tool-search_feature_requests") {
    if (isSearchResultsOutput(output)) return "Feature requests";
    if (isNoResultsOutput(output)) return "No feature requests found";
    return "Feature request search error";
  }
  if (isCreatedOutput(output)) {
    return output.is_new_issue
      ? "Feature request created"
      : "Added to feature request";
  }
  if (isErrorOutput(output)) return "Feature request error";
  return "Feature request";
}

/* ------------------------------------------------------------------ */
/* Animation text                                                      */
/* ------------------------------------------------------------------ */

interface AnimationPart {
  type: FeatureRequestToolType;
  state: ToolUIPart["state"];
  input?: unknown;
  output?: unknown;
}

export function getAnimationText(part: AnimationPart): string {
  if (part.type === "tool-search_feature_requests") {
    const query = (part.input as { query?: string } | undefined)?.query?.trim();
    const queryText = query ? ` for "${query}"` : "";

    switch (part.state) {
      case "input-streaming":
      case "input-available":
        return `Searching feature requests${queryText}`;
      case "output-available": {
        const output = parseOutput(part.output);
        if (!output) return `Searching feature requests${queryText}`;
        if (isSearchResultsOutput(output)) {
          return `Found ${output.count} feature request${output.count === 1 ? "" : "s"}${queryText}`;
        }
        if (isNoResultsOutput(output))
          return `No feature requests found${queryText}`;
        return `Error searching feature requests${queryText}`;
      }
      case "output-error":
        return `Error searching feature requests${queryText}`;
      default:
        return "Searching feature requests";
    }
  }

  // create_feature_request
  const title = (part.input as { title?: string } | undefined)?.title?.trim();
  const titleText = title ? ` "${title}"` : "";

  switch (part.state) {
    case "input-streaming":
    case "input-available":
      return `Creating feature request${titleText}`;
    case "output-available": {
      const output = parseOutput(part.output);
      if (!output) return `Creating feature request${titleText}`;
      if (isCreatedOutput(output)) {
        return output.is_new_issue
          ? `Created ${output.issue_identifier}`
          : `Added to ${output.issue_identifier}`;
      }
      if (isErrorOutput(output)) return "Error creating feature request";
      return `Created feature request${titleText}`;
    }
    case "output-error":
      return "Error creating feature request";
    default:
      return "Creating feature request";
  }
}

/* ------------------------------------------------------------------ */
/* Icons                                                               */
/* ------------------------------------------------------------------ */

export function ToolIcon({
  toolType,
  isStreaming,
  isError,
}: {
  toolType: FeatureRequestToolType;
  isStreaming?: boolean;
  isError?: boolean;
}) {
  const IconComponent =
    toolType === "tool-create_feature_request"
      ? PlusCircleIcon
      : MagnifyingGlassIcon;

  return (
    <IconComponent
      size={14}
      weight="regular"
      className={
        isError
          ? "text-red-500"
          : isStreaming
            ? "text-neutral-500"
            : "text-neutral-400"
      }
    />
  );
}

export function AccordionIcon({
  toolType,
}: {
  toolType: FeatureRequestToolType;
}) {
  const IconComponent =
    toolType === "tool-create_feature_request"
      ? CheckCircleIcon
      : LightbulbIcon;
  return <IconComponent size={32} weight="light" />;
}
@@ -3,6 +3,7 @@
import type { ToolUIPart } from "ai";
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
import { ErrorCard } from "./components/ErrorCard/ErrorCard";
import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
@@ -11,6 +12,7 @@ import {
  getAnimationText,
  getRunBlockToolOutput,
  isRunBlockBlockOutput,
  isRunBlockDetailsOutput,
  isRunBlockErrorOutput,
  isRunBlockSetupRequirementsOutput,
  ToolIcon,
@@ -41,6 +43,7 @@ export function RunBlockTool({ part }: Props) {
    part.state === "output-available" &&
    !!output &&
    (isRunBlockBlockOutput(output) ||
      isRunBlockDetailsOutput(output) ||
      isRunBlockSetupRequirementsOutput(output) ||
      isRunBlockErrorOutput(output));

@@ -58,6 +61,10 @@ export function RunBlockTool({ part }: Props) {
        <ToolAccordion {...getAccordionMeta(output)}>
          {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}

          {isRunBlockDetailsOutput(output) && (
            <BlockDetailsCard output={output} />
          )}

          {isRunBlockSetupRequirementsOutput(output) && (
            <SetupRequirementsCard output={output} />
          )}

@@ -0,0 +1,188 @@
import type { Meta, StoryObj } from "@storybook/nextjs";
import { ResponseType } from "@/app/api/__generated__/models/responseType";
import type { BlockDetailsResponse } from "../../helpers";
import { BlockDetailsCard } from "./BlockDetailsCard";

const meta: Meta<typeof BlockDetailsCard> = {
  title: "Copilot/RunBlock/BlockDetailsCard",
  component: BlockDetailsCard,
  parameters: {
    layout: "centered",
  },
  tags: ["autodocs"],
  decorators: [
    (Story) => (
      <div style={{ maxWidth: 480 }}>
        <Story />
      </div>
    ),
  ],
};

export default meta;
type Story = StoryObj<typeof meta>;

const baseBlock: BlockDetailsResponse = {
  type: ResponseType.block_details,
  message:
    "Here are the details for the GetWeather block. Provide the required inputs to run it.",
  session_id: "session-123",
  user_authenticated: true,
  block: {
    id: "block-abc-123",
    name: "GetWeather",
    description: "Fetches current weather data for a given location.",
    inputs: {
      type: "object",
      properties: {
        location: {
          title: "Location",
          type: "string",
          description:
            "City name or coordinates (e.g. 'London' or '51.5,-0.1')",
        },
        units: {
          title: "Units",
          type: "string",
          description: "Temperature units: 'metric' or 'imperial'",
        },
      },
      required: ["location"],
    },
    outputs: {
      type: "object",
      properties: {
        temperature: {
          title: "Temperature",
          type: "number",
          description: "Current temperature in the requested units",
        },
        condition: {
          title: "Condition",
          type: "string",
          description: "Weather condition description (e.g. 'Sunny', 'Rain')",
        },
      },
    },
    credentials: [],
  },
};

export const Default: Story = {
  args: {
    output: baseBlock,
  },
};

export const InputsOnly: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "This block requires inputs. No outputs are defined.",
      block: {
        ...baseBlock.block,
        outputs: {},
      },
    },
  },
};

export const OutputsOnly: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "This block has no required inputs.",
      block: {
        ...baseBlock.block,
        inputs: {},
      },
    },
  },
};

export const ManyFields: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "Block with many input and output fields.",
      block: {
        ...baseBlock.block,
        name: "SendEmail",
        description: "Sends an email via SMTP.",
        inputs: {
          type: "object",
          properties: {
            to: {
              title: "To",
              type: "string",
              description: "Recipient email address",
            },
            subject: {
              title: "Subject",
              type: "string",
              description: "Email subject line",
            },
            body: {
              title: "Body",
              type: "string",
              description: "Email body content",
            },
            cc: {
              title: "CC",
              type: "string",
              description: "CC recipients (comma-separated)",
            },
            bcc: {
              title: "BCC",
              type: "string",
              description: "BCC recipients (comma-separated)",
            },
          },
          required: ["to", "subject", "body"],
        },
        outputs: {
          type: "object",
          properties: {
            message_id: {
              title: "Message ID",
              type: "string",
              description: "Unique ID of the sent email",
            },
            status: {
              title: "Status",
              type: "string",
              description: "Delivery status",
            },
          },
        },
      },
    },
  },
};

export const NoFieldDescriptions: Story = {
  args: {
    output: {
      ...baseBlock,
      message: "Fields without descriptions.",
      block: {
        ...baseBlock.block,
        name: "SimpleBlock",
        inputs: {
          type: "object",
          properties: {
            input_a: { title: "Input A", type: "string" },
            input_b: { title: "Input B", type: "number" },
          },
          required: ["input_a"],
        },
        outputs: {
          type: "object",
          properties: {
            result: { title: "Result", type: "string" },
          },
        },
      },
    },
  },
};
@@ -0,0 +1,103 @@
"use client";

import type { BlockDetailsResponse } from "../../helpers";
import {
  ContentBadge,
  ContentCard,
  ContentCardDescription,
  ContentCardTitle,
  ContentGrid,
  ContentMessage,
} from "../../../../components/ToolAccordion/AccordionContent";

interface Props {
  output: BlockDetailsResponse;
}

function SchemaFieldList({
  title,
  properties,
  required,
}: {
  title: string;
  properties: Record<string, unknown>;
  required?: string[];
}) {
  const entries = Object.entries(properties);
  if (entries.length === 0) return null;

  const requiredSet = new Set(required ?? []);

  return (
    <ContentCard>
      <ContentCardTitle className="text-xs">{title}</ContentCardTitle>
      <div className="mt-2 grid gap-2">
        {entries.map(([name, schema]) => {
          const field = schema as Record<string, unknown> | undefined;
          const fieldTitle =
            typeof field?.title === "string" ? field.title : name;
          const fieldType =
            typeof field?.type === "string" ? field.type : "unknown";
          const description =
            typeof field?.description === "string"
              ? field.description
              : undefined;

          return (
            <div key={name} className="rounded-xl border p-2">
              <div className="flex items-center justify-between gap-2">
                <ContentCardTitle className="text-xs">
                  {fieldTitle}
                </ContentCardTitle>
                <div className="flex gap-1">
                  <ContentBadge>{fieldType}</ContentBadge>
                  {requiredSet.has(name) && (
                    <ContentBadge>Required</ContentBadge>
                  )}
                </div>
              </div>
              {description && (
                <ContentCardDescription className="mt-1 text-xs">
                  {description}
                </ContentCardDescription>
              )}
            </div>
          );
        })}
      </div>
    </ContentCard>
  );
}

export function BlockDetailsCard({ output }: Props) {
  const inputs = output.block.inputs as {
    properties?: Record<string, unknown>;
    required?: string[];
  } | null;
  const outputs = output.block.outputs as {
    properties?: Record<string, unknown>;
    required?: string[];
  } | null;

  return (
    <ContentGrid>
      <ContentMessage>{output.message}</ContentMessage>

      {inputs?.properties && Object.keys(inputs.properties).length > 0 && (
        <SchemaFieldList
          title="Inputs"
          properties={inputs.properties}
          required={inputs.required}
        />
      )}

      {outputs?.properties && Object.keys(outputs.properties).length > 0 && (
        <SchemaFieldList
          title="Outputs"
          properties={outputs.properties}
          required={outputs.required}
        />
      )}
    </ContentGrid>
  );
}
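For orientation, a minimal usage sketch (illustrative, not part of the change; ResponseType comes from the generated models, imported the same way as in the story file above):

// Illustrative values only — the shape follows BlockDetailsResponse from helpers.
const example: BlockDetailsResponse = {
  type: ResponseType.block_details,
  message: "Block found.",
  user_authenticated: true,
  block: {
    id: "b1",
    name: "Echo",
    description: "Returns its input unchanged.",
    inputs: {
      type: "object",
      properties: { text: { title: "Text", type: "string" } },
      required: ["text"],
    },
    outputs: {},
    credentials: [],
  },
};

// Renders the message plus an "Inputs" card with one field badged Required;
// the "Outputs" card is skipped because its properties map is empty.
<BlockDetailsCard output={example} />;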
@@ -10,18 +10,37 @@ import {
import type { ToolUIPart } from "ai";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

/** Block details returned on first run_block attempt (before input_data provided). */
export interface BlockDetailsResponse {
  type: typeof ResponseType.block_details;
  message: string;
  session_id?: string | null;
  block: {
    id: string;
    name: string;
    description: string;
    inputs: Record<string, unknown>;
    outputs: Record<string, unknown>;
    credentials: unknown[];
  };
  user_authenticated: boolean;
}

export interface RunBlockInput {
  block_id?: string;
  block_name?: string;
  input_data?: Record<string, unknown>;
}

export type RunBlockToolOutput =
  | SetupRequirementsResponse
  | BlockDetailsResponse
  | BlockOutputResponse
  | ErrorResponse;

const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
  ResponseType.setup_requirements,
  ResponseType.block_details,
  ResponseType.block_output,
  ResponseType.error,
]);
@@ -35,6 +54,15 @@ export function isRunBlockSetupRequirementsOutput(
  );
}

export function isRunBlockDetailsOutput(
  output: RunBlockToolOutput,
): output is BlockDetailsResponse {
  return (
    output.type === ResponseType.block_details ||
    ("block" in output && typeof output.block === "object")
  );
}

export function isRunBlockBlockOutput(
  output: RunBlockToolOutput,
): output is BlockOutputResponse {
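A sketch of how these guards compose downstream (hypothetical consumer, not part of the change); each guard narrows RunBlockToolOutput so the branch body is fully typed:

function summarize(output: RunBlockToolOutput): string {
  if (isRunBlockDetailsOutput(output)) {
    // Narrowed to BlockDetailsResponse: block fields are accessible without casts.
    return `Details for ${output.block.name}`;
  }
  if (isRunBlockSetupRequirementsOutput(output)) {
    // Narrowed to SetupRequirementsResponse.
    return "Setup required before this block can run";
  }
  return "Other run_block result";
}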
@@ -64,6 +92,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
    return output as RunBlockToolOutput;
  }
  if ("block_id" in output) return output as BlockOutputResponse;
  if ("block" in output) return output as BlockDetailsResponse;
  if ("setup_info" in output) return output as SetupRequirementsResponse;
  if ("error" in output || "details" in output)
    return output as ErrorResponse;
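The fallback above classifies untyped payloads by their distinguishing keys; some illustrative inputs and the variants they map to (note that "block_id" is tested before "block", so execution results are never misread as details responses):

parseOutput({ block_id: "b1" });       // → BlockOutputResponse  ("block_id" key)
parseOutput({ block: { id: "b1" } });  // → BlockDetailsResponse ("block" key)
parseOutput({ setup_info: {} });       // → SetupRequirementsResponse
parseOutput({ error: "boom" });        // → ErrorResponse ("error" or "details")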
@@ -84,17 +113,25 @@ export function getAnimationText(part: {
  output?: unknown;
}): string {
  const input = part.input as RunBlockInput | undefined;
  const blockName = input?.block_name?.trim();
  const blockId = input?.block_id?.trim();
  const blockText = blockId ? ` "${blockId}"` : "";
  // Prefer block_name if available, otherwise fall back to block_id
  const blockText = blockName
    ? ` "${blockName}"`
    : blockId
      ? ` "${blockId}"`
      : "";

  switch (part.state) {
    case "input-streaming":
    case "input-available":
      return `Running the block${blockText}`;
      return `Running${blockText}`;
    case "output-available": {
      const output = parseOutput(part.output);
      if (!output) return `Running the block${blockText}`;
      if (!output) return `Running${blockText}`;
      if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
      if (isRunBlockDetailsOutput(output))
        return `Details for "${output.block.name}"`;
      if (isRunBlockSetupRequirementsOutput(output)) {
        return `Setup needed for "${output.setup_info.agent_name}"`;
      }
@@ -158,6 +195,21 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
    };
  }

  if (isRunBlockDetailsOutput(output)) {
    const inputKeys = Object.keys(
      (output.block.inputs as { properties?: Record<string, unknown> })
        ?.properties ?? {},
    );
    return {
      icon,
      title: output.block.name,
      description:
        inputKeys.length > 0
          ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
          : output.message,
    };
  }

  if (isRunBlockSetupRequirementsOutput(output)) {
    const missingCredsCount = Object.keys(
      (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
@@ -1053,6 +1053,7 @@
                "$ref": "#/components/schemas/ClarificationNeededResponse"
              },
              { "$ref": "#/components/schemas/BlockListResponse" },
              { "$ref": "#/components/schemas/BlockDetailsResponse" },
              { "$ref": "#/components/schemas/BlockOutputResponse" },
              { "$ref": "#/components/schemas/DocSearchResultsResponse" },
              { "$ref": "#/components/schemas/DocPageResponse" },
@@ -6958,6 +6959,58 @@
        "enum": ["run", "byte", "second"],
        "title": "BlockCostType"
      },
      "BlockDetails": {
        "properties": {
          "id": { "type": "string", "title": "Id" },
          "name": { "type": "string", "title": "Name" },
          "description": { "type": "string", "title": "Description" },
          "inputs": {
            "additionalProperties": true,
            "type": "object",
            "title": "Inputs",
            "default": {}
          },
          "outputs": {
            "additionalProperties": true,
            "type": "object",
            "title": "Outputs",
            "default": {}
          },
          "credentials": {
            "items": { "$ref": "#/components/schemas/CredentialsMetaInput" },
            "type": "array",
            "title": "Credentials",
            "default": []
          }
        },
        "type": "object",
        "required": ["id", "name", "description"],
        "title": "BlockDetails",
        "description": "Detailed block information."
      },
      "BlockDetailsResponse": {
        "properties": {
          "type": {
            "$ref": "#/components/schemas/ResponseType",
            "default": "block_details"
          },
          "message": { "type": "string", "title": "Message" },
          "session_id": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "Session Id"
          },
          "block": { "$ref": "#/components/schemas/BlockDetails" },
          "user_authenticated": {
            "type": "boolean",
            "title": "User Authenticated",
            "default": false
          }
        },
        "type": "object",
        "required": ["message", "block"],
        "title": "BlockDetailsResponse",
        "description": "Response for block details (first run_block attempt)."
      },
      "BlockInfo": {
        "properties": {
          "id": { "type": "string", "title": "Id" },
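For concreteness, a minimal document satisfying BlockDetailsResponse (illustrative values, written as a TypeScript literal; "type" and "user_authenticated" fall back to their declared defaults, and "block" needs only "id", "name", and "description"):

const minimalBlockDetailsResponse = {
  message: "Here are the details for the requested block.",
  block: {
    id: "block-abc-123",
    name: "GetWeather",
    description: "Fetches current weather data for a given location.",
  },
};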
@@ -7013,62 +7066,13 @@
        "properties": {
          "id": { "type": "string", "title": "Id" },
          "name": { "type": "string", "title": "Name" },
          "description": { "type": "string", "title": "Description" },
          "categories": {
            "items": { "type": "string" },
            "type": "array",
            "title": "Categories"
          },
          "input_schema": {
            "additionalProperties": true,
            "type": "object",
            "title": "Input Schema"
          },
          "output_schema": {
            "additionalProperties": true,
            "type": "object",
            "title": "Output Schema"
          },
          "required_inputs": {
            "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
            "type": "array",
            "title": "Required Inputs",
            "description": "List of required input fields for this block"
          }
          "description": { "type": "string", "title": "Description" }
        },
        "type": "object",
        "required": [
          "id",
          "name",
          "description",
          "categories",
          "input_schema",
          "output_schema"
        ],
        "required": ["id", "name", "description"],
        "title": "BlockInfoSummary",
        "description": "Summary of a block for search results."
      },
      "BlockInputFieldInfo": {
        "properties": {
          "name": { "type": "string", "title": "Name" },
          "type": { "type": "string", "title": "Type" },
          "description": {
            "type": "string",
            "title": "Description",
            "default": ""
          },
          "required": {
            "type": "boolean",
            "title": "Required",
            "default": false
          },
          "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" }
        },
        "type": "object",
        "required": ["name", "type"],
        "title": "BlockInputFieldInfo",
        "description": "Information about a block input field."
      },
      "BlockListResponse": {
        "properties": {
          "type": {
@@ -7086,12 +7090,7 @@
          "title": "Blocks"
        },
        "count": { "type": "integer", "title": "Count" },
        "query": { "type": "string", "title": "Query" },
        "usage_hint": {
          "type": "string",
          "title": "Usage Hint",
          "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
        }
        "query": { "type": "string", "title": "Query" }
      },
      "type": "object",
      "required": ["message", "blocks", "count", "query"],
@@ -10484,6 +10483,7 @@
          "agent_saved",
          "clarification_needed",
          "block_list",
          "block_details",
          "block_output",
          "doc_search_results",
          "doc_page",
@@ -10495,7 +10495,9 @@
          "operation_started",
          "operation_pending",
          "operation_in_progress",
          "input_validation_error"
          "input_validation_error",
          "feature_request_search",
          "feature_request_created"
        ],
        "title": "ResponseType",
        "description": "Types of tool responses."
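The appended members widen the ResponseType union, so any exhaustive dispatch over response types needs matching cases; a sketch (handler strings are hypothetical):

function routeResponse(type: string): string {
  switch (type) {
    case "block_details":
      return "render BlockDetailsCard"; // added in this change
    case "feature_request_search":
    case "feature_request_created":
      return "render feature-request results"; // new enum members
    default:
      return "existing handlers";
  }
}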