diff --git a/.dockerignore b/.dockerignore index 9b744e7f9b..427cab29f4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,42 +5,13 @@ !docs/ # Platform - Libs -!autogpt_platform/autogpt_libs/autogpt_libs/ -!autogpt_platform/autogpt_libs/pyproject.toml -!autogpt_platform/autogpt_libs/poetry.lock -!autogpt_platform/autogpt_libs/README.md +!autogpt_platform/autogpt_libs/ # Platform - Backend -!autogpt_platform/backend/backend/ -!autogpt_platform/backend/test/e2e_test_data.py -!autogpt_platform/backend/migrations/ -!autogpt_platform/backend/schema.prisma -!autogpt_platform/backend/pyproject.toml -!autogpt_platform/backend/poetry.lock -!autogpt_platform/backend/README.md -!autogpt_platform/backend/.env -!autogpt_platform/backend/gen_prisma_types_stub.py - -# Platform - Market -!autogpt_platform/market/market/ -!autogpt_platform/market/scripts.py -!autogpt_platform/market/schema.prisma -!autogpt_platform/market/pyproject.toml -!autogpt_platform/market/poetry.lock -!autogpt_platform/market/README.md +!autogpt_platform/backend/ # Platform - Frontend -!autogpt_platform/frontend/src/ -!autogpt_platform/frontend/public/ -!autogpt_platform/frontend/scripts/ -!autogpt_platform/frontend/package.json -!autogpt_platform/frontend/pnpm-lock.yaml -!autogpt_platform/frontend/tsconfig.json -!autogpt_platform/frontend/README.md -## config -!autogpt_platform/frontend/*.config.* -!autogpt_platform/frontend/.env.* -!autogpt_platform/frontend/.env +!autogpt_platform/frontend/ # Classic - AutoGPT !classic/original_autogpt/autogpt/ @@ -64,6 +35,38 @@ # Classic - Frontend !classic/frontend/build/web/ -# Explicitly re-ignore some folders -.* -**/__pycache__ +# Explicitly re-ignore unwanted files from whitelisted directories +# Note: These patterns MUST come after the whitelist rules to take effect + +# Hidden files and directories (but keep frontend .env files needed for build) +**/.* +!autogpt_platform/frontend/.env +!autogpt_platform/frontend/.env.default +!autogpt_platform/frontend/.env.production + +# Python artifacts +**/__pycache__/ +**/*.pyc +**/*.pyo +**/.venv/ +**/.ruff_cache/ +**/.pytest_cache/ +**/.coverage +**/htmlcov/ + +# Node artifacts +**/node_modules/ +**/.next/ +**/storybook-static/ +**/playwright-report/ +**/test-results/ + +# Build artifacts +**/dist/ +**/build/ +!autogpt_platform/frontend/src/**/build/ +**/target/ + +# Logs and temp files +**/*.log +**/*.tmp diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 6410daae9f..4bf8a2b80c 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -26,7 +26,6 @@ jobs: setup: runs-on: ubuntu-latest outputs: - cache-key: ${{ steps.cache-key.outputs.key }} components-changed: ${{ steps.filter.outputs.components }} steps: @@ -41,28 +40,17 @@ jobs: components: - 'autogpt_platform/frontend/src/components/**' - - name: Set up Node.js - uses: actions/setup-node@v6 - with: - node-version: "22.18.0" - - name: Enable corepack run: corepack enable - - name: Generate cache key - id: cache-key - run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT - - - name: Cache dependencies - uses: actions/cache@v5 + - name: Set up Node + uses: actions/setup-node@v6 with: - path: ~/.pnpm-store - key: ${{ steps.cache-key.outputs.key }} - restore-keys: | - ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} - ${{ runner.os }}-pnpm- + 
node-version: "22.18.0" + cache: "pnpm" + cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - - name: Install dependencies + - name: Install dependencies to populate cache run: pnpm install --frozen-lockfile lint: @@ -73,22 +61,15 @@ jobs: - name: Checkout repository uses: actions/checkout@v6 - - name: Set up Node.js - uses: actions/setup-node@v6 - with: - node-version: "22.18.0" - - name: Enable corepack run: corepack enable - - name: Restore dependencies cache - uses: actions/cache@v5 + - name: Set up Node + uses: actions/setup-node@v6 with: - path: ~/.pnpm-store - key: ${{ needs.setup.outputs.cache-key }} - restore-keys: | - ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} - ${{ runner.os }}-pnpm- + node-version: "22.18.0" + cache: "pnpm" + cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Install dependencies run: pnpm install --frozen-lockfile @@ -111,22 +92,15 @@ jobs: with: fetch-depth: 0 - - name: Set up Node.js - uses: actions/setup-node@v6 - with: - node-version: "22.18.0" - - name: Enable corepack run: corepack enable - - name: Restore dependencies cache - uses: actions/cache@v5 + - name: Set up Node + uses: actions/setup-node@v6 with: - path: ~/.pnpm-store - key: ${{ needs.setup.outputs.cache-key }} - restore-keys: | - ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} - ${{ runner.os }}-pnpm- + node-version: "22.18.0" + cache: "pnpm" + cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Install dependencies run: pnpm install --frozen-lockfile @@ -141,10 +115,8 @@ jobs: exitOnceUploaded: true e2e_test: + name: end-to-end tests runs-on: big-boi - needs: setup - strategy: - fail-fast: false steps: - name: Checkout repository @@ -152,19 +124,11 @@ jobs: with: submodules: recursive - - name: Set up Node.js - uses: actions/setup-node@v6 - with: - node-version: "22.18.0" - - - name: Enable corepack - run: corepack enable - - - name: Copy default supabase .env + - name: Set up Platform - Copy default supabase .env run: | cp ../.env.default ../.env - - name: Copy backend .env and set OpenAI API key + - name: Set up Platform - Copy backend .env and set OpenAI API key run: | cp ../backend/.env.default ../backend/.env echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env @@ -172,77 +136,125 @@ jobs: # Used by E2E test data script to generate embeddings for approved store agents OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - - name: Set up Docker Buildx + - name: Set up Platform - Set up Docker Buildx uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + driver-opts: network=host - - name: Cache Docker layers + - name: Set up Platform - Expose GHA cache to docker buildx CLI + uses: crazy-max/ghaction-github-runtime@v3 + + - name: Set up Platform - Build Docker images (with cache) + working-directory: autogpt_platform + run: | + pip install pyyaml + + # Resolve extends and generate a flat compose file that bake can understand + docker compose -f docker-compose.yml config > docker-compose.resolved.yml + + # Add cache configuration to the resolved compose file + python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \ + --source docker-compose.resolved.yml \ + --cache-from "type=gha" \ + --cache-to "type=gha,mode=max" \ + --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \ + --frontend-hash "${{ 
hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \ + --git-ref "${{ github.ref }}" + + # Build with bake using the resolved compose file (now includes cache config) + docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load + env: + NEXT_PUBLIC_PW_TEST: true + + - name: Set up tests - Cache E2E test data + id: e2e-data-cache uses: actions/cache@v5 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }} - restore-keys: | - ${{ runner.os }}-buildx-frontend-test- + path: /tmp/e2e_test_data.sql + key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }} - - name: Run docker compose + - name: Set up Platform - Start Supabase DB + Auth run: | - NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d + docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build + echo "Waiting for database to be ready..." + timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' + echo "Waiting for auth service to be ready..." + timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..." + + - name: Set up Platform - Run migrations + run: | + echo "Running migrations..." + docker compose -f ../docker-compose.resolved.yml run --rm migrate + echo "✅ Migrations completed" env: - DOCKER_BUILDKIT: 1 - BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache - BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max + NEXT_PUBLIC_PW_TEST: true - - name: Move cache + - name: Set up tests - Load cached E2E test data + if: steps.e2e-data-cache.outputs.cache-hit == 'true' run: | - rm -rf /tmp/.buildx-cache - if [ -d "/tmp/.buildx-cache-new" ]; then - mv /tmp/.buildx-cache-new /tmp/.buildx-cache - fi + echo "✅ Found cached E2E test data, restoring..." + { + echo "SET session_replication_role = 'replica';" + cat /tmp/e2e_test_data.sql + echo "SET session_replication_role = 'origin';" + } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b + # Refresh materialized views after restore + docker compose -f ../docker-compose.resolved.yml exec -T db \ + psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true - - name: Wait for services to be ready + echo "✅ E2E test data restored from cache" + + - name: Set up Platform - Start (all other services) run: | + docker compose -f ../docker-compose.resolved.yml up -d --no-build echo "Waiting for rest_server to be ready..." timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..." - echo "Waiting for database to be ready..." - timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..." 
+ env: + NEXT_PUBLIC_PW_TEST: true - - name: Create E2E test data + - name: Set up tests - Create E2E test data + if: steps.e2e-data-cache.outputs.cache-hit != 'true' run: | echo "Creating E2E test data..." - # First try to run the script from inside the container - if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then - echo "✅ Found e2e_test_data.py in container, running it..." - docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || { - echo "❌ E2E test data creation failed!" - docker compose -f ../docker-compose.yml logs --tail=50 rest_server - exit 1 - } - else - echo "⚠️ e2e_test_data.py not found in container, copying and running..." - # Copy the script into the container and run it - docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || { - echo "❌ Failed to copy script to container" - exit 1 - } - docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || { - echo "❌ E2E test data creation failed!" - docker compose -f ../docker-compose.yml logs --tail=50 rest_server - exit 1 - } - fi + docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py + docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || { + echo "❌ E2E test data creation failed!" + docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server + exit 1 + } - - name: Restore dependencies cache - uses: actions/cache@v5 + # Dump auth.users + platform schema for cache (two separate dumps) + echo "Dumping database for cache..." 
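+          # Brace-group both pg_dump calls so the two dumps stream into one cache file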
+ { + docker compose -f ../docker-compose.resolved.yml exec -T db \ + pg_dump -U postgres --data-only --column-inserts \ + --table='auth.users' postgres + docker compose -f ../docker-compose.resolved.yml exec -T db \ + pg_dump -U postgres --data-only --column-inserts \ + --schema=platform \ + --exclude-table='platform._prisma_migrations' \ + --exclude-table='platform.apscheduler_jobs' \ + --exclude-table='platform.apscheduler_jobs_batched_notifications' \ + postgres + } > /tmp/e2e_test_data.sql + + echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)" + + - name: Set up tests - Enable corepack + run: corepack enable + + - name: Set up tests - Set up Node + uses: actions/setup-node@v6 with: - path: ~/.pnpm-store - key: ${{ needs.setup.outputs.cache-key }} - restore-keys: | - ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} - ${{ runner.os }}-pnpm- + node-version: "22.18.0" + cache: "pnpm" + cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - - name: Install dependencies + - name: Set up tests - Install dependencies run: pnpm install --frozen-lockfile - - name: Install Browser 'chromium' + - name: Set up tests - Install browser 'chromium' run: pnpm playwright install --with-deps chromium - name: Run Playwright tests @@ -269,7 +281,7 @@ jobs: - name: Print Final Docker Compose logs if: always() - run: docker compose -f ../docker-compose.yml logs + run: docker compose -f ../docker-compose.resolved.yml logs integration_test: runs-on: ubuntu-latest @@ -281,22 +293,15 @@ jobs: with: submodules: recursive - - name: Set up Node.js - uses: actions/setup-node@v6 - with: - node-version: "22.18.0" - - name: Enable corepack run: corepack enable - - name: Restore dependencies cache - uses: actions/cache@v5 + - name: Set up Node + uses: actions/setup-node@v6 with: - path: ~/.pnpm-store - key: ${{ needs.setup.outputs.cache-key }} - restore-keys: | - ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }} - ${{ runner.os }}-pnpm- + node-version: "22.18.0" + cache: "pnpm" + cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml - name: Install dependencies run: pnpm install --frozen-lockfile diff --git a/.github/workflows/scripts/docker-ci-fix-compose-build-cache.py b/.github/workflows/scripts/docker-ci-fix-compose-build-cache.py new file mode 100644 index 0000000000..33693fc739 --- /dev/null +++ b/.github/workflows/scripts/docker-ci-fix-compose-build-cache.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 +""" +Add cache configuration to a resolved docker-compose file for all services +that have a build key, and ensure image names match what docker compose expects. 
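+
+Usage (mirroring the CI invocation above; hash values come from hashFiles()):
+
+    python docker-ci-fix-compose-build-cache.py \
+        --source docker-compose.resolved.yml \
+        --cache-from "type=gha" \
+        --cache-to "type=gha,mode=max" \
+        --backend-hash "<backend-hash>" \
+        --frontend-hash "<frontend-hash>" \
+        --git-ref refs/heads/dev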
+""" + +import argparse + +import yaml + + +DEFAULT_BRANCH = "dev" +CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"] + + +def main(): + parser = argparse.ArgumentParser( + description="Add cache config to a resolved compose file" + ) + parser.add_argument( + "--source", + required=True, + help="Source compose file to read (should be output of `docker compose config`)", + ) + parser.add_argument( + "--cache-from", + default="type=gha", + help="Cache source configuration", + ) + parser.add_argument( + "--cache-to", + default="type=gha,mode=max", + help="Cache destination configuration", + ) + for component in CACHE_BUILDS_FOR_COMPONENTS: + parser.add_argument( + f"--{component}-hash", + default="", + help=f"Hash for {component} cache scope (e.g., from hashFiles())", + ) + parser.add_argument( + "--git-ref", + default="", + help="Git ref for branch-based cache scope (e.g., refs/heads/master)", + ) + args = parser.parse_args() + + # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master) + git_ref_scope = "" + if args.git_ref: + git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-") + + with open(args.source, "r") as f: + compose = yaml.safe_load(f) + + # Get project name from compose file or default + project_name = compose.get("name", "autogpt_platform") + + def get_image_name(dockerfile: str, target: str) -> str: + """Generate image name based on Dockerfile folder and build target.""" + dockerfile_parts = dockerfile.replace("\\", "/").split("/") + if len(dockerfile_parts) >= 2: + folder_name = dockerfile_parts[-2] # e.g., "backend" or "frontend" + else: + folder_name = "app" + return f"{project_name}-{folder_name}:{target}" + + def get_build_key(dockerfile: str, target: str) -> str: + """Generate a unique key for a Dockerfile+target combination.""" + return f"{dockerfile}:{target}" + + def get_component(dockerfile: str) -> str | None: + """Get component name (frontend/backend) from dockerfile path.""" + for component in CACHE_BUILDS_FOR_COMPONENTS: + if component in dockerfile: + return component + return None + + # First pass: collect all services with build configs and identify duplicates + # Track which (dockerfile, target) combinations we've seen + build_key_to_first_service: dict[str, str] = {} + services_to_build: list[str] = [] + services_to_dedupe: list[str] = [] + + for service_name, service_config in compose.get("services", {}).items(): + if "build" not in service_config: + continue + + build_config = service_config["build"] + dockerfile = build_config.get("dockerfile", "Dockerfile") + target = build_config.get("target", "default") + build_key = get_build_key(dockerfile, target) + + if build_key not in build_key_to_first_service: + # First service with this build config - it will do the actual build + build_key_to_first_service[build_key] = service_name + services_to_build.append(service_name) + else: + # Duplicate - will just use the image from the first service + services_to_dedupe.append(service_name) + + # Second pass: configure builds and deduplicate + modified_services = [] + for service_name, service_config in compose.get("services", {}).items(): + if "build" not in service_config: + continue + + build_config = service_config["build"] + dockerfile = build_config.get("dockerfile", "Dockerfile") + target = build_config.get("target", "latest") + image_name = get_image_name(dockerfile, target) + + # Set image name for all services (needed for both builders and deduped) + service_config["image"] = image_name + + if service_name in 
services_to_dedupe:
+            # Remove build config - this service will use the pre-built image
+            del service_config["build"]
+            continue
+
+        # This service will do the actual build - add cache config
+        cache_from_list = []
+        cache_to_list = []
+
+        component = get_component(dockerfile)
+        if not component:
+            # Skip services that don't clearly match frontend/backend
+            continue
+
+        # Get the hash for this component
+        component_hash = getattr(args, f"{component}_hash")
+
+        # Scope format: platform-{component}-{target}-{hash|ref}
+        # Example: platform-backend-server-abc123
+
+        if "type=gha" in args.cache_from:
+            # 1. Primary: exact hash match (most specific)
+            if component_hash:
+                hash_scope = f"platform-{component}-{target}-{component_hash}"
+                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")
+
+            # 2. Fallback: branch-based cache
+            if git_ref_scope:
+                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
+                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")
+
+            # 3. Fallback: default (dev) branch cache, for PRs/feature branches
+            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
+                default_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
+                cache_from_list.append(f"{args.cache_from},scope={default_scope}")
+
+        if "type=gha" in args.cache_to:
+            # Write to both hash-based and branch-based scopes
+            if component_hash:
+                hash_scope = f"platform-{component}-{target}-{component_hash}"
+                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")
+
+            if git_ref_scope:
+                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
+                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")
+
+        # Ensure we have at least one cache source/target
+        if not cache_from_list:
+            cache_from_list.append(args.cache_from)
+        if not cache_to_list:
+            cache_to_list.append(args.cache_to)
+
+        build_config["cache_from"] = cache_from_list
+        build_config["cache_to"] = cache_to_list
+        modified_services.append(service_name)
+
+    # Write back to the same file
+    with open(args.source, "w") as f:
+        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)
+
+    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
+    for svc in modified_services:
+        svc_config = compose["services"][svc]
+        build_cfg = svc_config.get("build", {})
+        cache_from_list = build_cfg.get("cache_from", ["none"])
+        cache_to_list = build_cfg.get("cache_to", ["none"])
+        print(f"  - {svc}")
+        print(f"    image: {svc_config.get('image', 'N/A')}")
+        print(f"    cache_from: {cache_from_list}")
+        print(f"    cache_to: {cache_to_list}")
+    if services_to_dedupe:
+        print(
+            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
+        )
+        for svc in services_to_dedupe:
+            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md
index 62adbdaefa..021b7c27e4 100644
--- a/autogpt_platform/CLAUDE.md
+++ b/autogpt_platform/CLAUDE.md
@@ -45,6 +45,11 @@ AutoGPT Platform is a monorepo containing:
 - Backend/Frontend services use YAML anchors for consistent configuration
 - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
 
+### Branching Strategy
+
+- **`dev`** is the main development branch. All PRs should target `dev`.
+- **`master`** is the production branch, used only for production releases.
+
 ### Creating Pull Requests
 
 - Create the PR against the `dev` branch of the repository.
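For illustration, here is roughly what the compose-cache script above leaves behind for a backend service built with target `server` from a feature branch off `dev` (a sketch following the script's scope format, not verbatim output; `<hash>` and `<branch>` stand for the workflow's hashFiles() value and the normalized git ref):

    services:
      rest_server:
        image: autogpt_platform-backend:server
        build:
          dockerfile: backend/Dockerfile
          target: server
          cache_from:
            - type=gha,scope=platform-backend-server-<hash>
            - type=gha,scope=platform-backend-server-<branch>
            - type=gha,scope=platform-backend-server-dev
          cache_to:
            - type=gha,mode=max,scope=platform-backend-server-<hash>
            - type=gha,mode=max,scope=platform-backend-server-<branch>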
diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock index 0a421dda31..e1d599360e 100644 --- a/autogpt_platform/autogpt_libs/poetry.lock +++ b/autogpt_platform/autogpt_libs/poetry.lock @@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cryptography" -version = "46.0.4" +version = "46.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.8" groups = ["main"] files = [ - {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"}, - {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"}, - {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"}, - {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"}, - {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"}, - {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"}, - {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = 
"sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"}, - {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"}, - {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"}, - {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"}, - {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"}, - {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"}, - {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"}, - {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"}, - {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"}, - {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"}, - {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"}, - {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = 
"sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"}, - {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"}, - {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"}, + {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"}, + {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"}, + {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"}, + {file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"}, + {file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"}, + {file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"}, + {file = 
"cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"}, + {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"}, + {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"}, + {file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"}, + {file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"}, + {file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = 
"sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"}, + {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"}, + {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"}, + {file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"}, + {file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"}, + {file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"}, ] [package.dependencies] @@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"] pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] @@ -570,24 +570,25 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"] [[package]] name = "fastapi" -version = "0.128.0" +version = "0.128.7" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"}, - {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"}, + {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"}, + {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"}, ] [package.dependencies] annotated-doc = ">=0.0.2" pydantic = ">=2.7.0" -starlette = ">=0.40.0,<0.51.0" +starlette = ">=0.40.0,<1.0.0" typing-extensions = ">=4.8.0" +typing-inspection = ">=0.4.2" [package.extras] -all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson 
(>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"] standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] @@ -1062,14 +1063,14 @@ urllib3 = ">=1.26.0,<3" [[package]] name = "launchdarkly-server-sdk" -version = "9.14.1" +version = "9.15.0" description = "LaunchDarkly SDK for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"}, - {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"}, + {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"}, + {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"}, ] [package.dependencies] @@ -1478,14 +1479,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "postgrest" -version = "2.27.2" +version = "2.28.0" description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"}, - {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"}, + {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"}, + {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"}, ] [package.dependencies] @@ -2248,14 +2249,14 @@ cli = ["click (>=5.0)"] [[package]] name = "realtime" -version = "2.27.2" +version = "2.28.0" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"}, - {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"}, + {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"}, + {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"}, ] [package.dependencies] @@ -2436,14 +2437,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart [[package]] name = "storage3" -version = "2.27.2" +version = "2.28.0" description = "Supabase Storage client for Python." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"}, - {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"}, + {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"}, + {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"}, ] [package.dependencies] @@ -2487,35 +2488,35 @@ python-dateutil = ">=2.6.0" [[package]] name = "supabase" -version = "2.27.2" +version = "2.28.0" description = "Supabase client for Python." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"}, - {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"}, + {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"}, + {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"}, ] [package.dependencies] httpx = ">=0.26,<0.29" -postgrest = "2.27.2" -realtime = "2.27.2" -storage3 = "2.27.2" -supabase-auth = "2.27.2" -supabase-functions = "2.27.2" +postgrest = "2.28.0" +realtime = "2.28.0" +storage3 = "2.28.0" +supabase-auth = "2.28.0" +supabase-functions = "2.28.0" yarl = ">=1.22.0" [[package]] name = "supabase-auth" -version = "2.27.2" +version = "2.28.0" description = "Python Client Library for Supabase Auth" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"}, - {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"}, + {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"}, + {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"}, ] [package.dependencies] @@ -2525,14 +2526,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]} [[package]] name = "supabase-functions" -version = "2.27.2" +version = "2.28.0" description = "Library for Supabase Functions" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"}, - {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"}, + {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"}, + {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"}, ] [package.dependencies] @@ -2911,4 +2912,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<4.0" -content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d" +content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb" diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml index 8deb4d2169..2cfa742922 100644 --- a/autogpt_platform/autogpt_libs/pyproject.toml +++ b/autogpt_platform/autogpt_libs/pyproject.toml @@ -11,14 +11,14 @@ python = ">=3.10,<4.0" colorama = "^0.4.6" cryptography = "^46.0" expiringdict = "^1.2.2" -fastapi = "^0.128.0" +fastapi = "^0.128.7" google-cloud-logging = "^3.13.0" -launchdarkly-server-sdk = "^9.14.1" +launchdarkly-server-sdk = "^9.15.0" pydantic = "^2.12.5" pydantic-settings = "^2.12.0" pyjwt = { version = "^2.11.0", extras = ["crypto"] } redis = "^6.2.0" -supabase = "^2.27.2" +supabase = "^2.28.0" uvicorn = "^0.40.0" [tool.poetry.group.dev.dependencies] diff --git a/autogpt_platform/backend/Dockerfile b/autogpt_platform/backend/Dockerfile index 
9bd455e490..ace534b730 100644 --- a/autogpt_platform/backend/Dockerfile +++ b/autogpt_platform/backend/Dockerfile @@ -1,3 +1,5 @@ +# ============================ DEPENDENCY BUILDER ============================ # + FROM debian:13-slim AS builder # Set environment variables @@ -51,7 +53,9 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti COPY autogpt_platform/backend/gen_prisma_types_stub.py ./ RUN poetry run prisma generate && poetry run gen-prisma-stub -FROM debian:13-slim AS server_dependencies +# ============================== BACKEND SERVER ============================== # + +FROM debian:13-slim AS server WORKDIR /app @@ -63,15 +67,14 @@ ENV POETRY_HOME=/opt/poetry \ ENV PATH=/opt/poetry/bin:$PATH # Install Python, FFmpeg, and ImageMagick (required for video processing blocks) -RUN apt-get update && apt-get install -y \ +# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc. +RUN apt-get update && apt-get install -y --no-install-recommends \ python3.13 \ python3-pip \ ffmpeg \ imagemagick \ && rm -rf /var/lib/apt/lists/* -# Copy only necessary files from builder -COPY --from=builder /app /app COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3* COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry # Copy Node.js installation for Prisma @@ -81,30 +84,54 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm COPY --from=builder /usr/bin/npx /usr/bin/npx COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries -ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH" - -RUN mkdir -p /app/autogpt_platform/autogpt_libs -RUN mkdir -p /app/autogpt_platform/backend - -COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs - -COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/ - WORKDIR /app/autogpt_platform/backend -FROM server_dependencies AS migrate +# Copy only the .venv from builder (not the entire /app directory) +# The .venv includes the generated Prisma client +COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv +ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH" -# Migration stage only needs schema and migrations - much lighter than full backend -COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/ -COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py -COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations +# Copy dependency files + autogpt_libs (path dependency) +COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs +COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./ -FROM server_dependencies AS server - -COPY autogpt_platform/backend /app/autogpt_platform/backend +# Copy backend code + docs (for Copilot docs search) +COPY autogpt_platform/backend ./ COPY docs /app/docs RUN poetry install --no-ansi --only-root ENV PORT=8000 CMD ["poetry", "run", "rest"] + +# =============================== DB MIGRATOR =============================== # + +# Lightweight migrate stage - only needs Prisma CLI, not full Python environment +FROM debian:13-slim AS migrate + +WORKDIR /app/autogpt_platform/backend + +ENV DEBIAN_FRONTEND=noninteractive + +# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3.13 \ + 
python3-pip \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy Node.js from builder (needed for Prisma CLI)
+COPY --from=builder /usr/bin/node /usr/bin/node
+COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
+COPY --from=builder /usr/bin/npm /usr/bin/npm
+
+# Copy Prisma binaries
+COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
+
+# Install prisma-client-py directly (much smaller than copying full venv)
+RUN pip3 install "prisma>=0.15.0" --break-system-packages
+
+COPY autogpt_platform/backend/schema.prisma ./
+COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
+COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
+COPY autogpt_platform/backend/migrations ./migrations
diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py
index c6f37569b7..0d8b12b0b7 100644
--- a/autogpt_platform/backend/backend/api/features/chat/routes.py
+++ b/autogpt_platform/backend/backend/api/features/chat/routes.py
@@ -24,6 +24,7 @@ from .tools.models import (
     AgentPreviewResponse,
     AgentSavedResponse,
     AgentsFoundResponse,
+    BlockDetailsResponse,
     BlockListResponse,
     BlockOutputResponse,
     ClarificationNeededResponse,
@@ -971,6 +972,7 @@ ToolResponseUnion = (
     | AgentSavedResponse
     | ClarificationNeededResponse
     | BlockListResponse
+    | BlockDetailsResponse
     | BlockOutputResponse
     | DocSearchResultsResponse
     | DocPageResponse
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py
new file mode 100644
index 0000000000..cf0e76d3b3
--- /dev/null
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/dummy.py
@@ -0,0 +1,154 @@
+"""Dummy Agent Generator for testing.
+
+Returns mock responses matching the format expected from the external service.
+Enable via AGENTGENERATOR_USE_DUMMY=true in settings.
+
+WARNING: This is for testing only. Do not use in production.
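+
+When dummy mode is on, service.py short-circuits each external call
+(decompose_goal, generate_agent, generate_agent_patch, customize_template,
+get_blocks, health check) to the matching *_dummy function defined below.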
+""" + +import asyncio +import logging +import uuid +from typing import Any + +logger = logging.getLogger(__name__) + +# Dummy decomposition result (instructions type) +DUMMY_DECOMPOSITION_RESULT: dict[str, Any] = { + "type": "instructions", + "steps": [ + { + "description": "Get input from user", + "action": "input", + "block_name": "AgentInputBlock", + }, + { + "description": "Process the input", + "action": "process", + "block_name": "TextFormatterBlock", + }, + { + "description": "Return output to user", + "action": "output", + "block_name": "AgentOutputBlock", + }, + ], +} + +# Block IDs from backend/blocks/io.py +AGENT_INPUT_BLOCK_ID = "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b" +AGENT_OUTPUT_BLOCK_ID = "363ae599-353e-4804-937e-b2ee3cef3da4" + + +def _generate_dummy_agent_json() -> dict[str, Any]: + """Generate a minimal valid agent JSON for testing.""" + input_node_id = str(uuid.uuid4()) + output_node_id = str(uuid.uuid4()) + + return { + "id": str(uuid.uuid4()), + "version": 1, + "is_active": True, + "name": "Dummy Test Agent", + "description": "A dummy agent generated for testing purposes", + "nodes": [ + { + "id": input_node_id, + "block_id": AGENT_INPUT_BLOCK_ID, + "input_default": { + "name": "input", + "title": "Input", + "description": "Enter your input", + "placeholder_values": [], + }, + "metadata": {"position": {"x": 0, "y": 0}}, + }, + { + "id": output_node_id, + "block_id": AGENT_OUTPUT_BLOCK_ID, + "input_default": { + "name": "output", + "title": "Output", + "description": "Agent output", + "format": "{output}", + }, + "metadata": {"position": {"x": 400, "y": 0}}, + }, + ], + "links": [ + { + "id": str(uuid.uuid4()), + "source_id": input_node_id, + "sink_id": output_node_id, + "source_name": "result", + "sink_name": "value", + "is_static": False, + }, + ], + } + + +async def decompose_goal_dummy( + description: str, + context: str = "", + library_agents: list[dict[str, Any]] | None = None, +) -> dict[str, Any]: + """Return dummy decomposition result.""" + logger.info("Using dummy agent generator for decompose_goal") + return DUMMY_DECOMPOSITION_RESULT.copy() + + +async def generate_agent_dummy( + instructions: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy agent JSON after a simulated delay.""" + logger.info("Using dummy agent generator for generate_agent (30s delay)") + await asyncio.sleep(30) + return _generate_dummy_agent_json() + + +async def generate_agent_patch_dummy( + update_request: str, + current_agent: dict[str, Any], + library_agents: list[dict[str, Any]] | None = None, + operation_id: str | None = None, + task_id: str | None = None, +) -> dict[str, Any]: + """Return dummy patched agent (returns the current agent with updated description).""" + logger.info("Using dummy agent generator for generate_agent_patch") + patched = current_agent.copy() + patched["description"] = ( + f"{current_agent.get('description', '')} (updated: {update_request})" + ) + return patched + + +async def customize_template_dummy( + template_agent: dict[str, Any], + modification_request: str, + context: str = "", +) -> dict[str, Any]: + """Return dummy customized template (returns template with updated description).""" + logger.info("Using dummy agent generator for customize_template") + customized = template_agent.copy() + customized["description"] = ( + f"{template_agent.get('description', '')} (customized: {modification_request})" + ) + return customized + + 
+async def get_blocks_dummy() -> list[dict[str, Any]]: + """Return dummy blocks list.""" + logger.info("Using dummy agent generator for get_blocks") + return [ + {"id": AGENT_INPUT_BLOCK_ID, "name": "AgentInputBlock"}, + {"id": AGENT_OUTPUT_BLOCK_ID, "name": "AgentOutputBlock"}, + ] + + +async def health_check_dummy() -> bool: + """Always returns healthy for dummy service.""" + return True diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py index 62411b4e1b..2b40c6d6f3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_generator/service.py @@ -12,8 +12,19 @@ import httpx from backend.util.settings import Settings +from .dummy import ( + customize_template_dummy, + decompose_goal_dummy, + generate_agent_dummy, + generate_agent_patch_dummy, + get_blocks_dummy, + health_check_dummy, +) + logger = logging.getLogger(__name__) +_dummy_mode_warned = False + def _create_error_response( error_message: str, @@ -90,10 +101,26 @@ def _get_settings() -> Settings: return _settings -def is_external_service_configured() -> bool: - """Check if external Agent Generator service is configured.""" +def _is_dummy_mode() -> bool: + """Check if dummy mode is enabled for testing.""" + global _dummy_mode_warned settings = _get_settings() - return bool(settings.config.agentgenerator_host) + is_dummy = bool(settings.config.agentgenerator_use_dummy) + if is_dummy and not _dummy_mode_warned: + logger.warning( + "Agent Generator running in DUMMY MODE - returning mock responses. " + "Do not use in production!" + ) + _dummy_mode_warned = True + return is_dummy + + +def is_external_service_configured() -> bool: + """Check if external Agent Generator service is configured (or dummy mode).""" + settings = _get_settings() + return bool(settings.config.agentgenerator_host) or bool( + settings.config.agentgenerator_use_dummy + ) def _get_base_url() -> str: @@ -137,6 +164,9 @@ async def decompose_goal_external( - {"type": "error", "error": "...", "error_type": "..."} on error Or None on unexpected error """ + if _is_dummy_mode(): + return await decompose_goal_dummy(description, context, library_agents) + client = _get_client() if context: @@ -226,6 +256,11 @@ async def generate_agent_external( Returns: Agent JSON dict, {"status": "accepted"} for async, or error dict {"type": "error", ...} on error """ + if _is_dummy_mode(): + return await generate_agent_dummy( + instructions, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -297,6 +332,11 @@ async def generate_agent_patch_external( Returns: Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error """ + if _is_dummy_mode(): + return await generate_agent_patch_dummy( + update_request, current_agent, library_agents, operation_id, task_id + ) + client = _get_client() # Build request payload @@ -383,6 +423,11 @@ async def customize_template_external( Returns: Customized agent JSON, clarifying questions dict, or error dict on error """ + if _is_dummy_mode(): + return await customize_template_dummy( + template_agent, modification_request, context + ) + client = _get_client() request = modification_request @@ -445,6 +490,9 @@ async def get_blocks_external() -> list[dict[str, Any]] | None: Returns: List of block info dicts or None on error """ + if 
_is_dummy_mode(): + return await get_blocks_dummy() + client = _get_client() try: @@ -478,6 +526,9 @@ async def health_check() -> bool: if not is_external_service_configured(): return False + if _is_dummy_mode(): + return await health_check_dummy() + client = _get_client() try: diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py index 6a8cfa9bbc..55b1c0d510 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py @@ -7,7 +7,6 @@ from backend.api.features.chat.model import ChatSession from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase from backend.api.features.chat.tools.models import ( BlockInfoSummary, - BlockInputFieldInfo, BlockListResponse, ErrorResponse, NoResultsResponse, @@ -55,7 +54,8 @@ class FindBlockTool(BaseTool): "Blocks are reusable components that perform specific tasks like " "sending emails, making API calls, processing text, etc. " "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. " - "The response includes each block's id, required_inputs, and input_schema." + "The response includes each block's id, name, and description. " + "Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it." ) @property @@ -124,7 +124,7 @@ class FindBlockTool(BaseTool): session_id=session_id, ) - # Enrich results with full block information + # Enrich results with block information blocks: list[BlockInfoSummary] = [] for result in results: block_id = result["content_id"] @@ -141,65 +141,11 @@ class FindBlockTool(BaseTool): ): continue - # Get input/output schemas - input_schema = {} - output_schema = {} - try: - input_schema = block.input_schema.jsonschema() - except Exception as e: - logger.debug( - "Failed to generate input schema for block %s: %s", - block_id, - e, - ) - try: - output_schema = block.output_schema.jsonschema() - except Exception as e: - logger.debug( - "Failed to generate output schema for block %s: %s", - block_id, - e, - ) - - # Get categories from block instance - categories = [] - if hasattr(block, "categories") and block.categories: - categories = [cat.value for cat in block.categories] - - # Extract required inputs for easier use - required_inputs: list[BlockInputFieldInfo] = [] - if input_schema: - properties = input_schema.get("properties", {}) - required_fields = set(input_schema.get("required", [])) - # Get credential field names to exclude from required inputs - credentials_fields = set( - block.input_schema.get_credentials_fields().keys() - ) - - for field_name, field_schema in properties.items(): - # Skip credential fields - they're handled separately - if field_name in credentials_fields: - continue - - required_inputs.append( - BlockInputFieldInfo( - name=field_name, - type=field_schema.get("type", "string"), - description=field_schema.get("description", ""), - required=field_name in required_fields, - default=field_schema.get("default"), - ) - ) - blocks.append( BlockInfoSummary( id=block_id, name=block.name, description=block.description or "", - categories=categories, - input_schema=input_schema, - output_schema=output_schema, - required_inputs=required_inputs, ) ) @@ -228,8 +174,7 @@ class FindBlockTool(BaseTool): return BlockListResponse( message=( f"Found {len(blocks)} block(s) matching '{query}'. 
" - "To execute a block, use run_block with the block's 'id' field " - "and provide 'input_data' matching the block's input_schema." + "To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs." ), blocks=blocks, count=len(blocks), diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py index d567a89bbe..44606f81c3 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py @@ -18,7 +18,13 @@ _TEST_USER_ID = "test-user-find-block" def make_mock_block( - block_id: str, name: str, block_type: BlockType, disabled: bool = False + block_id: str, + name: str, + block_type: BlockType, + disabled: bool = False, + input_schema: dict | None = None, + output_schema: dict | None = None, + credentials_fields: dict | None = None, ): """Create a mock block for testing.""" mock = MagicMock() @@ -28,10 +34,13 @@ def make_mock_block( mock.block_type = block_type mock.disabled = disabled mock.input_schema = MagicMock() - mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []} - mock.input_schema.get_credentials_fields.return_value = {} + mock.input_schema.jsonschema.return_value = input_schema or { + "properties": {}, + "required": [], + } + mock.input_schema.get_credentials_fields.return_value = credentials_fields or {} mock.output_schema = MagicMock() - mock.output_schema.jsonschema.return_value = {} + mock.output_schema.jsonschema.return_value = output_schema or {} mock.categories = [] return mock @@ -137,3 +146,241 @@ class TestFindBlockFiltering: assert isinstance(response, BlockListResponse) assert len(response.blocks) == 1 assert response.blocks[0].id == "normal-block-id" + + @pytest.mark.asyncio(loop_scope="session") + async def test_response_size_average_chars_per_block(self): + """Measure average chars per block in the serialized response.""" + session = make_session(user_id=_TEST_USER_ID) + + # Realistic block definitions modeled after real blocks + block_defs = [ + { + "id": "http-block-id", + "name": "Send Web Request", + "input_schema": { + "properties": { + "url": { + "type": "string", + "description": "The URL to send the request to", + }, + "method": { + "type": "string", + "description": "The HTTP method to use", + }, + "headers": { + "type": "object", + "description": "Headers to include in the request", + }, + "json_format": { + "type": "boolean", + "description": "If true, send the body as JSON", + }, + "body": { + "type": "object", + "description": "Form/JSON body payload", + }, + "credentials": { + "type": "object", + "description": "HTTP credentials", + }, + }, + "required": ["url", "method"], + }, + "output_schema": { + "properties": { + "response": { + "type": "object", + "description": "The response from the server", + }, + "client_error": { + "type": "object", + "description": "Errors on 4xx status codes", + }, + "server_error": { + "type": "object", + "description": "Errors on 5xx status codes", + }, + "error": { + "type": "string", + "description": "Errors for all other exceptions", + }, + }, + }, + "credentials_fields": {"credentials": True}, + }, + { + "id": "email-block-id", + "name": "Send Email", + "input_schema": { + "properties": { + "to_email": { + "type": "string", + "description": "Recipient email address", + }, + "subject": { + "type": "string", + "description": "Subject of the 
email", + }, + "body": { + "type": "string", + "description": "Body of the email", + }, + "config": { + "type": "object", + "description": "SMTP Config", + }, + "credentials": { + "type": "object", + "description": "SMTP credentials", + }, + }, + "required": ["to_email", "subject", "body", "credentials"], + }, + "output_schema": { + "properties": { + "status": { + "type": "string", + "description": "Status of the email sending operation", + }, + "error": { + "type": "string", + "description": "Error message if sending failed", + }, + }, + }, + "credentials_fields": {"credentials": True}, + }, + { + "id": "claude-code-block-id", + "name": "Claude Code", + "input_schema": { + "properties": { + "e2b_credentials": { + "type": "object", + "description": "API key for E2B platform", + }, + "anthropic_credentials": { + "type": "object", + "description": "API key for Anthropic", + }, + "prompt": { + "type": "string", + "description": "Task or instruction for Claude Code", + }, + "timeout": { + "type": "integer", + "description": "Sandbox timeout in seconds", + }, + "setup_commands": { + "type": "array", + "description": "Shell commands to run before execution", + }, + "working_directory": { + "type": "string", + "description": "Working directory for Claude Code", + }, + "session_id": { + "type": "string", + "description": "Session ID to resume a conversation", + }, + "sandbox_id": { + "type": "string", + "description": "Sandbox ID to reconnect to", + }, + "conversation_history": { + "type": "string", + "description": "Previous conversation history", + }, + "dispose_sandbox": { + "type": "boolean", + "description": "Whether to dispose sandbox after execution", + }, + }, + "required": [ + "e2b_credentials", + "anthropic_credentials", + "prompt", + ], + }, + "output_schema": { + "properties": { + "response": { + "type": "string", + "description": "Output from Claude Code execution", + }, + "files": { + "type": "array", + "description": "Files created/modified by Claude Code", + }, + "conversation_history": { + "type": "string", + "description": "Full conversation history", + }, + "session_id": { + "type": "string", + "description": "Session ID for this conversation", + }, + "sandbox_id": { + "type": "string", + "description": "ID of the sandbox instance", + }, + "error": { + "type": "string", + "description": "Error message if execution failed", + }, + }, + }, + "credentials_fields": { + "e2b_credentials": True, + "anthropic_credentials": True, + }, + }, + ] + + search_results = [ + {"content_id": d["id"], "score": 0.9 - i * 0.1} + for i, d in enumerate(block_defs) + ] + mock_blocks = { + d["id"]: make_mock_block( + block_id=d["id"], + name=d["name"], + block_type=BlockType.STANDARD, + input_schema=d["input_schema"], + output_schema=d["output_schema"], + credentials_fields=d["credentials_fields"], + ) + for d in block_defs + } + + with patch( + "backend.api.features.chat.tools.find_block.unified_hybrid_search", + new_callable=AsyncMock, + return_value=(search_results, len(search_results)), + ), patch( + "backend.api.features.chat.tools.find_block.get_block", + side_effect=lambda bid: mock_blocks.get(bid), + ): + tool = FindBlockTool() + response = await tool._execute( + user_id=_TEST_USER_ID, session=session, query="test" + ) + + assert isinstance(response, BlockListResponse) + assert response.count == len(block_defs) + + total_chars = len(response.model_dump_json()) + avg_chars = total_chars // response.count + + # Print for visibility in test output + print(f"\nTotal response size: {total_chars} 
chars") + print(f"Number of blocks: {response.count}") + print(f"Average chars per block: {avg_chars}") + + # The old response was ~90K for 10 blocks (~9K per block). + # Previous optimization reduced it to ~1.5K per block (no raw JSON schemas). + # Now with only id/name/description, we expect ~300 chars per block. + assert avg_chars < 500, ( + f"Average chars per block ({avg_chars}) exceeds 500. " + f"Total response: {total_chars} chars for {response.count} blocks." + ) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index d420b289dc..f2d8f364e4 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -25,6 +25,7 @@ class ResponseType(str, Enum): AGENT_SAVED = "agent_saved" CLARIFICATION_NEEDED = "clarification_needed" BLOCK_LIST = "block_list" + BLOCK_DETAILS = "block_details" BLOCK_OUTPUT = "block_output" DOC_SEARCH_RESULTS = "doc_search_results" DOC_PAGE = "doc_page" @@ -337,13 +338,6 @@ class BlockInfoSummary(BaseModel): id: str name: str description: str - categories: list[str] - input_schema: dict[str, Any] - output_schema: dict[str, Any] - required_inputs: list[BlockInputFieldInfo] = Field( - default_factory=list, - description="List of required input fields for this block", - ) class BlockListResponse(ToolResponseBase): @@ -353,10 +347,25 @@ class BlockListResponse(ToolResponseBase): blocks: list[BlockInfoSummary] count: int query: str - usage_hint: str = Field( - default="To execute a block, call run_block with block_id set to the block's " - "'id' field and input_data containing the required fields from input_schema." - ) + + +class BlockDetails(BaseModel): + """Detailed block information.""" + + id: str + name: str + description: str + inputs: dict[str, Any] = {} + outputs: dict[str, Any] = {} + credentials: list[CredentialsMetaInput] = [] + + +class BlockDetailsResponse(ToolResponseBase): + """Response for block details (first run_block attempt).""" + + type: ResponseType = ResponseType.BLOCK_DETAILS + block: BlockDetails + user_authenticated: bool = False class BlockOutputResponse(ToolResponseBase): diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 8c29820f8e..a55478326a 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -23,8 +23,11 @@ from backend.util.exceptions import BlockError from .base import BaseTool from .helpers import get_inputs_from_schema from .models import ( + BlockDetails, + BlockDetailsResponse, BlockOutputResponse, ErrorResponse, + InputValidationErrorResponse, SetupInfo, SetupRequirementsResponse, ToolResponseBase, @@ -51,8 +54,8 @@ class RunBlockTool(BaseTool): "Execute a specific block with the provided input data. " "IMPORTANT: You MUST call find_block first to get the block's 'id' - " "do NOT guess or make up block IDs. " - "Use the 'id' from find_block results and provide input_data " - "matching the block's required_inputs." + "On first attempt (without input_data), returns detailed schema showing " + "required inputs and outputs. Then call again with proper input_data to execute." ) @property @@ -67,11 +70,19 @@ class RunBlockTool(BaseTool): "NEVER guess this - always get it from find_block first." 
), }, + "block_name": { + "type": "string", + "description": ( + "The block's human-readable name from find_block results. " + "Used for display purposes in the UI." + ), + }, "input_data": { "type": "object", "description": ( - "Input values for the block. Use the 'required_inputs' field " - "from find_block to see what fields are needed." + "Input values for the block. " + "First call with empty {} to see the block's schema, " + "then call again with proper values to execute." ), }, }, @@ -156,6 +167,34 @@ class RunBlockTool(BaseTool): await self._resolve_block_credentials(user_id, block, input_data) ) + # Get block schemas for details/validation + try: + input_schema: dict[str, Any] = block.input_schema.jsonschema() + except Exception as e: + logger.warning( + "Failed to generate input schema for block %s: %s", + block_id, + e, + ) + return ErrorResponse( + message=f"Block '{block.name}' has an invalid input schema", + error=str(e), + session_id=session_id, + ) + try: + output_schema: dict[str, Any] = block.output_schema.jsonschema() + except Exception as e: + logger.warning( + "Failed to generate output schema for block %s: %s", + block_id, + e, + ) + return ErrorResponse( + message=f"Block '{block.name}' has an invalid output schema", + error=str(e), + session_id=session_id, + ) + if missing_credentials: # Return setup requirements response with missing credentials credentials_fields_info = block.input_schema.get_credentials_fields_info() @@ -188,6 +227,53 @@ class RunBlockTool(BaseTool): graph_version=None, ) + # Check if this is a first attempt (required inputs missing) + # Return block details so user can see what inputs are needed + credentials_fields = set(block.input_schema.get_credentials_fields().keys()) + required_keys = set(input_schema.get("required", [])) + required_non_credential_keys = required_keys - credentials_fields + provided_input_keys = set(input_data.keys()) - credentials_fields + + # Check for unknown input fields + valid_fields = ( + set(input_schema.get("properties", {}).keys()) - credentials_fields + ) + unrecognized_fields = provided_input_keys - valid_fields + if unrecognized_fields: + return InputValidationErrorResponse( + message=( + f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. " + f"Block was not executed. Please use the correct field names from the schema." + ), + session_id=session_id, + unrecognized_fields=sorted(unrecognized_fields), + inputs=input_schema, + ) + + # Show details when not all required non-credential inputs are provided + if not (required_non_credential_keys <= provided_input_keys): + # Get credentials info for the response + credentials_meta = [] + for field_name, cred_meta in matched_credentials.items(): + credentials_meta.append(cred_meta) + + return BlockDetailsResponse( + message=( + f"Block '{block.name}' details. " + "Provide input_data matching the inputs schema to execute the block." 
+ ), + session_id=session_id, + block=BlockDetails( + id=block_id, + name=block.name, + description=block.description or "", + inputs=input_schema, + outputs=output_schema, + credentials=credentials_meta, + ), + user_authenticated=True, + ) + try: # Get or create user's workspace for CoPilot file operations workspace = await get_or_create_workspace(user_id) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py index aadc161155..55efc38479 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py @@ -1,10 +1,15 @@ -"""Tests for block execution guards in RunBlockTool.""" +"""Tests for block execution guards and input validation in RunBlockTool.""" -from unittest.mock import MagicMock, patch +from unittest.mock import AsyncMock, MagicMock, patch import pytest -from backend.api.features.chat.tools.models import ErrorResponse +from backend.api.features.chat.tools.models import ( + BlockDetailsResponse, + BlockOutputResponse, + ErrorResponse, + InputValidationErrorResponse, +) from backend.api.features.chat.tools.run_block import RunBlockTool from backend.blocks._base import BlockType @@ -28,6 +33,39 @@ def make_mock_block( return mock +def make_mock_block_with_schema( + block_id: str, + name: str, + input_properties: dict, + required_fields: list[str], + output_properties: dict | None = None, +): + """Create a mock block with a defined input/output schema for validation tests.""" + mock = MagicMock() + mock.id = block_id + mock.name = name + mock.block_type = BlockType.STANDARD + mock.disabled = False + mock.description = f"Test block: {name}" + + input_schema = { + "properties": input_properties, + "required": required_fields, + } + mock.input_schema = MagicMock() + mock.input_schema.jsonschema.return_value = input_schema + mock.input_schema.get_credentials_fields_info.return_value = {} + mock.input_schema.get_credentials_fields.return_value = {} + + output_schema = { + "properties": output_properties or {"result": {"type": "string"}}, + } + mock.output_schema = MagicMock() + mock.output_schema.jsonschema.return_value = output_schema + + return mock + + class TestRunBlockFiltering: """Tests for block execution guards in RunBlockTool.""" @@ -104,3 +142,221 @@ class TestRunBlockFiltering: # (may be other errors like missing credentials, but not the exclusion guard) if isinstance(response, ErrorResponse): assert "cannot be run directly in CoPilot" not in response.message + + +class TestRunBlockInputValidation: + """Tests for input field validation in RunBlockTool. + + run_block rejects unknown input field names with InputValidationErrorResponse, + preventing silent failures where incorrect keys would be ignored and the block + would execute with default values instead of the caller's intended values. + """ + + @pytest.mark.asyncio(loop_scope="session") + async def test_unknown_input_fields_are_rejected(self): + """run_block rejects unknown input fields instead of silently ignoring them. + + Scenario: The AI Text Generator block has a field called 'model' (for LLM model + selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model' + instead. The block should reject the request and return the valid schema. 
+ """ + session = make_session(user_id=_TEST_USER_ID) + + mock_block = make_mock_block_with_schema( + block_id="ai-text-gen-id", + name="AI Text Generator", + input_properties={ + "prompt": {"type": "string", "description": "The prompt to send"}, + "model": { + "type": "string", + "description": "The LLM model to use", + "default": "gpt-4o-mini", + }, + "sys_prompt": { + "type": "string", + "description": "System prompt", + "default": "", + }, + }, + required_fields=["prompt"], + output_properties={"response": {"type": "string"}}, + ) + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock_block, + ): + tool = RunBlockTool() + + # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key) + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="ai-text-gen-id", + input_data={ + "prompt": "Write a haiku about coding", + "LLM_Model": "claude-opus-4-6", # WRONG KEY - should be 'model' + }, + ) + + assert isinstance(response, InputValidationErrorResponse) + assert "LLM_Model" in response.unrecognized_fields + assert "Block was not executed" in response.message + assert "inputs" in response.model_dump() # valid schema included + + @pytest.mark.asyncio(loop_scope="session") + async def test_multiple_wrong_keys_are_all_reported(self): + """All unrecognized field names are reported in a single error response.""" + session = make_session(user_id=_TEST_USER_ID) + + mock_block = make_mock_block_with_schema( + block_id="ai-text-gen-id", + name="AI Text Generator", + input_properties={ + "prompt": {"type": "string"}, + "model": {"type": "string", "default": "gpt-4o-mini"}, + "sys_prompt": {"type": "string", "default": ""}, + "retry": {"type": "integer", "default": 3}, + }, + required_fields=["prompt"], + ) + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock_block, + ): + tool = RunBlockTool() + + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="ai-text-gen-id", + input_data={ + "prompt": "Hello", # correct + "llm_model": "claude-opus-4-6", # WRONG - should be 'model' + "system_prompt": "Be helpful", # WRONG - should be 'sys_prompt' + "retries": 5, # WRONG - should be 'retry' + }, + ) + + assert isinstance(response, InputValidationErrorResponse) + assert set(response.unrecognized_fields) == { + "llm_model", + "system_prompt", + "retries", + } + assert "Block was not executed" in response.message + + @pytest.mark.asyncio(loop_scope="session") + async def test_unknown_fields_rejected_even_with_missing_required(self): + """Unknown fields are caught before the missing-required-fields check.""" + session = make_session(user_id=_TEST_USER_ID) + + mock_block = make_mock_block_with_schema( + block_id="ai-text-gen-id", + name="AI Text Generator", + input_properties={ + "prompt": {"type": "string"}, + "model": {"type": "string", "default": "gpt-4o-mini"}, + }, + required_fields=["prompt"], + ) + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock_block, + ): + tool = RunBlockTool() + + # 'prompt' is missing AND 'LLM_Model' is an unknown field + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="ai-text-gen-id", + input_data={ + "LLM_Model": "claude-opus-4-6", # wrong key, and 'prompt' is missing + }, + ) + + # Unknown fields are caught first + assert isinstance(response, InputValidationErrorResponse) + assert "LLM_Model" in response.unrecognized_fields + + 
@pytest.mark.asyncio(loop_scope="session") + async def test_correct_inputs_still_execute(self): + """Correct input field names pass validation and the block executes.""" + session = make_session(user_id=_TEST_USER_ID) + + mock_block = make_mock_block_with_schema( + block_id="ai-text-gen-id", + name="AI Text Generator", + input_properties={ + "prompt": {"type": "string"}, + "model": {"type": "string", "default": "gpt-4o-mini"}, + }, + required_fields=["prompt"], + ) + + async def mock_execute(input_data, **kwargs): + yield "response", "Generated text" + + mock_block.execute = mock_execute + + with ( + patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock_block, + ), + patch( + "backend.api.features.chat.tools.run_block.get_or_create_workspace", + new_callable=AsyncMock, + return_value=MagicMock(id="test-workspace-id"), + ), + ): + tool = RunBlockTool() + + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="ai-text-gen-id", + input_data={ + "prompt": "Write a haiku", + "model": "gpt-4o-mini", # correct field name + }, + ) + + assert isinstance(response, BlockOutputResponse) + assert response.success is True + + @pytest.mark.asyncio(loop_scope="session") + async def test_missing_required_fields_returns_details(self): + """Missing required fields returns BlockDetailsResponse with schema.""" + session = make_session(user_id=_TEST_USER_ID) + + mock_block = make_mock_block_with_schema( + block_id="ai-text-gen-id", + name="AI Text Generator", + input_properties={ + "prompt": {"type": "string"}, + "model": {"type": "string", "default": "gpt-4o-mini"}, + }, + required_fields=["prompt"], + ) + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock_block, + ): + tool = RunBlockTool() + + # Only provide valid optional field, missing required 'prompt' + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="ai-text-gen-id", + input_data={ + "model": "gpt-4o-mini", # valid but optional + }, + ) + + assert isinstance(response, BlockDetailsResponse) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/test_run_block_details.py b/autogpt_platform/backend/backend/api/features/chat/tools/test_run_block_details.py new file mode 100644 index 0000000000..fbab0b723d --- /dev/null +++ b/autogpt_platform/backend/backend/api/features/chat/tools/test_run_block_details.py @@ -0,0 +1,153 @@ +"""Tests for BlockDetailsResponse in RunBlockTool.""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from backend.api.features.chat.tools.models import BlockDetailsResponse +from backend.api.features.chat.tools.run_block import RunBlockTool +from backend.blocks._base import BlockType +from backend.data.model import CredentialsMetaInput +from backend.integrations.providers import ProviderName + +from ._test_data import make_session + +_TEST_USER_ID = "test-user-run-block-details" + + +def make_mock_block_with_inputs( + block_id: str, name: str, description: str = "Test description" +): + """Create a mock block with input/output schemas for testing.""" + mock = MagicMock() + mock.id = block_id + mock.name = name + mock.description = description + mock.block_type = BlockType.STANDARD + mock.disabled = False + + # Input schema with non-credential fields + mock.input_schema = MagicMock() + mock.input_schema.jsonschema.return_value = { + "properties": { + "url": {"type": "string", "description": "URL to fetch"}, + "method": {"type": "string", "description": 
"HTTP method"}, + }, + "required": ["url"], + } + mock.input_schema.get_credentials_fields.return_value = {} + mock.input_schema.get_credentials_fields_info.return_value = {} + + # Output schema + mock.output_schema = MagicMock() + mock.output_schema.jsonschema.return_value = { + "properties": { + "response": {"type": "object", "description": "HTTP response"}, + "error": {"type": "string", "description": "Error message"}, + } + } + + return mock + + +@pytest.mark.asyncio(loop_scope="session") +async def test_run_block_returns_details_when_no_input_provided(): + """When run_block is called without input_data, it should return BlockDetailsResponse.""" + session = make_session(user_id=_TEST_USER_ID) + + # Create a block with inputs + http_block = make_mock_block_with_inputs( + "http-block-id", "HTTP Request", "Send HTTP requests" + ) + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=http_block, + ): + # Mock credentials check to return no missing credentials + with patch.object( + RunBlockTool, + "_resolve_block_credentials", + new_callable=AsyncMock, + return_value=({}, []), # (matched_credentials, missing_credentials) + ): + tool = RunBlockTool() + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="http-block-id", + input_data={}, # Empty input data + ) + + # Should return BlockDetailsResponse showing the schema + assert isinstance(response, BlockDetailsResponse) + assert response.block.id == "http-block-id" + assert response.block.name == "HTTP Request" + assert response.block.description == "Send HTTP requests" + assert "url" in response.block.inputs["properties"] + assert "method" in response.block.inputs["properties"] + assert "response" in response.block.outputs["properties"] + assert response.user_authenticated is True + + +@pytest.mark.asyncio(loop_scope="session") +async def test_run_block_returns_details_when_only_credentials_provided(): + """When only credentials are provided (no actual input), should return details.""" + session = make_session(user_id=_TEST_USER_ID) + + # Create a block with both credential and non-credential inputs + mock = MagicMock() + mock.id = "api-block-id" + mock.name = "API Call" + mock.description = "Make API calls" + mock.block_type = BlockType.STANDARD + mock.disabled = False + + mock.input_schema = MagicMock() + mock.input_schema.jsonschema.return_value = { + "properties": { + "credentials": {"type": "object", "description": "API credentials"}, + "endpoint": {"type": "string", "description": "API endpoint"}, + }, + "required": ["credentials", "endpoint"], + } + mock.input_schema.get_credentials_fields.return_value = {"credentials": True} + mock.input_schema.get_credentials_fields_info.return_value = {} + + mock.output_schema = MagicMock() + mock.output_schema.jsonschema.return_value = { + "properties": {"result": {"type": "object"}} + } + + with patch( + "backend.api.features.chat.tools.run_block.get_block", + return_value=mock, + ): + with patch.object( + RunBlockTool, + "_resolve_block_credentials", + new_callable=AsyncMock, + return_value=( + { + "credentials": CredentialsMetaInput( + id="cred-id", + provider=ProviderName("test_provider"), + type="api_key", + title="Test Credential", + ) + }, + [], + ), + ): + tool = RunBlockTool() + response = await tool._execute( + user_id=_TEST_USER_ID, + session=session, + block_id="api-block-id", + input_data={"credentials": {"some": "cred"}}, # Only credential + ) + + # Should return details because no non-credential inputs provided + 
assert isinstance(response, BlockDetailsResponse) + assert response.block.id == "api-block-id" + assert response.block.name == "API Call" diff --git a/autogpt_platform/backend/backend/blocks/claude_code.py b/autogpt_platform/backend/backend/blocks/claude_code.py index 1919406c6f..2e870f02b6 100644 --- a/autogpt_platform/backend/backend/blocks/claude_code.py +++ b/autogpt_platform/backend/backend/blocks/claude_code.py @@ -1,10 +1,10 @@ import json import shlex import uuid -from typing import Literal, Optional +from typing import TYPE_CHECKING, Literal, Optional from e2b import AsyncSandbox as BaseAsyncSandbox -from pydantic import BaseModel, SecretStr +from pydantic import SecretStr from backend.blocks._base import ( Block, @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + from backend.executor.utils import ExecutionContext class ClaudeCodeExecutionError(Exception): @@ -174,22 +181,15 @@ class ClaudeCodeBlock(Block): advanced=True, ) - class FileOutput(BaseModel): - """A file extracted from the sandbox.""" - - path: str - relative_path: str # Path relative to working directory (for GitHub, etc.) - name: str - content: str - class Output(BlockSchemaOutput): response: str = SchemaField( description="The output/response from Claude Code execution" ) - files: list["ClaudeCodeBlock.FileOutput"] = SchemaField( + files: list[SandboxFileOutput] = SchemaField( description=( "List of text files created/modified by Claude Code during this execution. " - "Each file has 'path', 'relative_path', 'name', and 'content' fields." + "Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. " + "workspace_ref contains a workspace:// URI if the file was stored to workspace." ) ) conversation_history: str = SchemaField( @@ -252,6 +252,7 @@ class ClaudeCodeBlock(Block): "relative_path": "index.html", "name": "index.html", "content": "Hello World", + "workspace_ref": None, } ], ), @@ -267,11 +268,12 @@ class ClaudeCodeBlock(Block): "execute_claude_code": lambda *args, **kwargs: ( "Created index.html with hello world content", # response [ - ClaudeCodeBlock.FileOutput( + SandboxFileOutput( path="/home/user/index.html", relative_path="index.html", name="index.html", content="Hello World", + workspace_ref=None, ) ], # files "User: Create a hello world HTML file\n" @@ -294,7 +296,8 @@ class ClaudeCodeBlock(Block): existing_sandbox_id: str, conversation_history: str, dispose_sandbox: bool, - ) -> tuple[str, list["ClaudeCodeBlock.FileOutput"], str, str, str]: + execution_context: "ExecutionContext", + ) -> tuple[str, list[SandboxFileOutput], str, str, str]: """ Execute Claude Code in an E2B sandbox. 
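# [Editor's sketch, not part of the patch] Consumers of the new "files" output
# can branch on workspace_ref to tell persisted files from inline-only ones.
# A minimal usage sketch against the SandboxFileOutput model added by this PR;
# the helper name is hypothetical:
from backend.util.sandbox_files import SandboxFileOutput


def summarize_sandbox_files(files: list[SandboxFileOutput]) -> list[str]:
    lines: list[str] = []
    for f in files:
        if f.workspace_ref:
            # Stored to the user's workspace as a workspace://{id}#mime URI.
            lines.append(f"{f.relative_path} -> {f.workspace_ref}")
        else:
            # Text content (or a data: URI fallback for binaries) kept inline.
            lines.append(f"{f.relative_path} ({len(f.content)} chars inline)")
    return lines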
@@ -449,14 +452,18 @@ class ClaudeCodeBlock(Block): else: new_conversation_history = turn_entry - # Extract files created/modified during this run - files = await self._extract_files( - sandbox, working_directory, start_timestamp + # Extract files created/modified during this run and store to workspace + sandbox_files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=True, ) return ( response, - files, + sandbox_files, # Already SandboxFileOutput objects new_conversation_history, current_session_id, sandbox_id, @@ -471,140 +478,6 @@ class ClaudeCodeBlock(Block): if dispose_sandbox and sandbox: await sandbox.kill() - async def _extract_files( - self, - sandbox: BaseAsyncSandbox, - working_directory: str, - since_timestamp: str | None = None, - ) -> list["ClaudeCodeBlock.FileOutput"]: - """ - Extract text files created/modified during this Claude Code execution. - - Args: - sandbox: The E2B sandbox instance - working_directory: Directory to search for files - since_timestamp: ISO timestamp - only return files modified after this time - - Returns: - List of FileOutput objects with path, relative_path, name, and content - """ - files: list[ClaudeCodeBlock.FileOutput] = [] - - # Text file extensions we can safely read as text - text_extensions = { - ".txt", - ".md", - ".html", - ".htm", - ".css", - ".js", - ".ts", - ".jsx", - ".tsx", - ".json", - ".xml", - ".yaml", - ".yml", - ".toml", - ".ini", - ".cfg", - ".conf", - ".py", - ".rb", - ".php", - ".java", - ".c", - ".cpp", - ".h", - ".hpp", - ".cs", - ".go", - ".rs", - ".swift", - ".kt", - ".scala", - ".sh", - ".bash", - ".zsh", - ".sql", - ".graphql", - ".env", - ".gitignore", - ".dockerfile", - "Dockerfile", - ".vue", - ".svelte", - ".astro", - ".mdx", - ".rst", - ".tex", - ".csv", - ".log", - } - - try: - # List files recursively using find command - # Exclude node_modules and .git directories, but allow hidden files - # like .env and .gitignore (they're filtered by text_extensions later) - # Filter by timestamp to only get files created/modified during this run - safe_working_dir = shlex.quote(working_directory) - timestamp_filter = "" - if since_timestamp: - timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " - find_result = await sandbox.commands.run( - f"find {safe_working_dir} -type f " - f"{timestamp_filter}" - f"-not -path '*/node_modules/*' " - f"-not -path '*/.git/*' " - f"2>/dev/null" - ) - - if find_result.stdout: - for file_path in find_result.stdout.strip().split("\n"): - if not file_path: - continue - - # Check if it's a text file we can read - is_text = any( - file_path.endswith(ext) for ext in text_extensions - ) or file_path.endswith("Dockerfile") - - if is_text: - try: - content = await sandbox.files.read(file_path) - # Handle bytes or string - if isinstance(content, bytes): - content = content.decode("utf-8", errors="replace") - - # Extract filename from path - file_name = file_path.split("/")[-1] - - # Calculate relative path by stripping working directory - relative_path = file_path - if file_path.startswith(working_directory): - relative_path = file_path[len(working_directory) :] - # Remove leading slash if present - if relative_path.startswith("/"): - relative_path = relative_path[1:] - - files.append( - ClaudeCodeBlock.FileOutput( - path=file_path, - relative_path=relative_path, - name=file_name, - content=content, - ) - ) - except Exception: - # Skip files that can't be read 
- pass - - except Exception: - # If file extraction fails, return empty results - pass - - return files - def _escape_prompt(self, prompt: str) -> str: """Escape the prompt for safe shell execution.""" # Use single quotes and escape any single quotes in the prompt @@ -617,6 +490,7 @@ class ClaudeCodeBlock(Block): *, e2b_credentials: APIKeyCredentials, anthropic_credentials: APIKeyCredentials, + execution_context: "ExecutionContext", **kwargs, ) -> BlockOutput: try: @@ -637,6 +511,7 @@ class ClaudeCodeBlock(Block): existing_sandbox_id=input_data.sandbox_id, conversation_history=input_data.conversation_history, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, ) yield "response", response diff --git a/autogpt_platform/backend/backend/blocks/code_executor.py b/autogpt_platform/backend/backend/blocks/code_executor.py index 766f44b7bb..26bf9acd4f 100644 --- a/autogpt_platform/backend/backend/blocks/code_executor.py +++ b/autogpt_platform/backend/backend/blocks/code_executor.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Any, Literal, Optional +from typing import TYPE_CHECKING, Any, Literal, Optional from e2b_code_interpreter import AsyncSandbox from e2b_code_interpreter import Result as E2BExecutionResult @@ -20,6 +20,13 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName +from backend.util.sandbox_files import ( + SandboxFileOutput, + extract_and_store_sandbox_files, +) + +if TYPE_CHECKING: + from backend.executor.utils import ExecutionContext TEST_CREDENTIALS = APIKeyCredentials( id="01234567-89ab-cdef-0123-456789abcdef", @@ -85,6 +92,9 @@ class CodeExecutionResult(MainCodeExecutionResult): class BaseE2BExecutorMixin: """Shared implementation methods for E2B executor blocks.""" + # Default working directory in E2B sandboxes + WORKING_DIR = "/home/user" + async def execute_code( self, api_key: str, @@ -95,14 +105,21 @@ class BaseE2BExecutorMixin: timeout: Optional[int] = None, sandbox_id: Optional[str] = None, dispose_sandbox: bool = False, + execution_context: Optional["ExecutionContext"] = None, + extract_files: bool = False, ): """ Unified code execution method that handles all three use cases: 1. Create new sandbox and execute (ExecuteCodeBlock) 2. Create new sandbox, execute, and return sandbox_id (InstantiateCodeSandboxBlock) 3. Connect to existing sandbox and execute (ExecuteCodeStepBlock) + + Args: + extract_files: If True and execution_context provided, extract files + created/modified during execution and store to workspace. 
""" # noqa sandbox = None + files: list[SandboxFileOutput] = [] try: if sandbox_id: # Connect to existing sandbox (ExecuteCodeStepBlock case) @@ -118,6 +135,12 @@ class BaseE2BExecutorMixin: for cmd in setup_commands: await sandbox.commands.run(cmd) + # Capture timestamp before execution to scope file extraction + start_timestamp = None + if extract_files: + ts_result = await sandbox.commands.run("date -u +%Y-%m-%dT%H:%M:%S") + start_timestamp = ts_result.stdout.strip() if ts_result.stdout else None + # Execute the code execution = await sandbox.run_code( code, @@ -133,7 +156,24 @@ class BaseE2BExecutorMixin: stdout_logs = "".join(execution.logs.stdout) stderr_logs = "".join(execution.logs.stderr) - return results, text_output, stdout_logs, stderr_logs, sandbox.sandbox_id + # Extract files created/modified during this execution + if extract_files and execution_context: + files = await extract_and_store_sandbox_files( + sandbox=sandbox, + working_directory=self.WORKING_DIR, + execution_context=execution_context, + since_timestamp=start_timestamp, + text_only=False, # Include binary files too + ) + + return ( + results, + text_output, + stdout_logs, + stderr_logs, + sandbox.sandbox_id, + files, + ) finally: # Dispose of sandbox if requested to reduce usage costs if dispose_sandbox and sandbox: @@ -238,6 +278,12 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): description="Standard output logs from execution" ) stderr_logs: str = SchemaField(description="Standard error logs from execution") + files: list[SandboxFileOutput] = SchemaField( + description=( + "Files created or modified during execution. " + "Each file has path, name, content, and workspace_ref (if stored)." + ), + ) def __init__(self): super().__init__( @@ -259,23 +305,30 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): ("results", []), ("response", "Hello World"), ("stdout_logs", "Hello World\n"), + ("files", []), ], test_mock={ - "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox: ( # noqa + "execute_code": lambda api_key, code, language, template_id, setup_commands, timeout, dispose_sandbox, execution_context, extract_files: ( # noqa [], # results "Hello World", # text_output "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # sandbox_id + [], # files ), }, ) async def run( - self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + execution_context: "ExecutionContext", + **kwargs, ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, files = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.code, language=input_data.language, @@ -283,6 +336,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): setup_commands=input_data.setup_commands, timeout=input_data.timeout, dispose_sandbox=input_data.dispose_sandbox, + execution_context=execution_context, + extract_files=True, ) # Determine result object shape & filter out empty formats @@ -296,6 +351,8 @@ class ExecuteCodeBlock(Block, BaseE2BExecutorMixin): yield "stdout_logs", stdout if stderr: yield "stderr_logs", stderr + # Always yield files (empty list if none) + yield "files", [f.model_dump() for f in files] except Exception as e: yield "error", str(e) @@ -393,6 +450,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs "sandbox_id", # 
sandbox_id + [], # files ), }, ) @@ -401,7 +459,7 @@ class InstantiateCodeSandboxBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - _, text_output, stdout, stderr, sandbox_id = await self.execute_code( + _, text_output, stdout, stderr, sandbox_id, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.setup_code, language=input_data.language, @@ -500,6 +558,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): "Hello World\n", # stdout_logs "", # stderr_logs sandbox_id, # sandbox_id + [], # files ), }, ) @@ -508,7 +567,7 @@ class ExecuteCodeStepBlock(Block, BaseE2BExecutorMixin): self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs ) -> BlockOutput: try: - results, text_output, stdout, stderr, _ = await self.execute_code( + results, text_output, stdout, stderr, _, _ = await self.execute_code( api_key=credentials.api_key.get_secret_value(), code=input_data.step_code, language=input_data.language, diff --git a/autogpt_platform/backend/backend/util/sandbox_files.py b/autogpt_platform/backend/backend/util/sandbox_files.py new file mode 100644 index 0000000000..9db53ded14 --- /dev/null +++ b/autogpt_platform/backend/backend/util/sandbox_files.py @@ -0,0 +1,288 @@ +""" +Shared utilities for extracting and storing files from E2B sandboxes. + +This module provides common file extraction and workspace storage functionality +for blocks that run code in E2B sandboxes (Claude Code, Code Executor, etc.). +""" + +import base64 +import logging +import mimetypes +import shlex +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from pydantic import BaseModel + +from backend.util.file import store_media_file +from backend.util.type import MediaFileType + +if TYPE_CHECKING: + from e2b import AsyncSandbox as BaseAsyncSandbox + + from backend.executor.utils import ExecutionContext + +logger = logging.getLogger(__name__) + +# Text file extensions that can be safely read and stored as text +TEXT_EXTENSIONS = { + ".txt", + ".md", + ".html", + ".htm", + ".css", + ".js", + ".ts", + ".jsx", + ".tsx", + ".json", + ".xml", + ".yaml", + ".yml", + ".toml", + ".ini", + ".cfg", + ".conf", + ".py", + ".rb", + ".php", + ".java", + ".c", + ".cpp", + ".h", + ".hpp", + ".cs", + ".go", + ".rs", + ".swift", + ".kt", + ".scala", + ".sh", + ".bash", + ".zsh", + ".sql", + ".graphql", + ".env", + ".gitignore", + ".dockerfile", + "Dockerfile", + ".vue", + ".svelte", + ".astro", + ".mdx", + ".rst", + ".tex", + ".csv", + ".log", +} + + +class SandboxFileOutput(BaseModel): + """A file extracted from a sandbox and optionally stored in workspace.""" + + path: str + """Full path in the sandbox.""" + + relative_path: str + """Path relative to the working directory.""" + + name: str + """Filename only.""" + + content: str + """File content as text (for backward compatibility).""" + + workspace_ref: str | None = None + """Workspace reference (workspace://{id}#mime) if stored, None otherwise.""" + + +@dataclass +class ExtractedFile: + """Internal representation of an extracted file before storage.""" + + path: str + relative_path: str + name: str + content: bytes + is_text: bool + + +async def extract_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[ExtractedFile]: + """ + Extract files from an E2B sandbox. 
+ + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files (default). If False, extract all files. + + Returns: + List of ExtractedFile objects with path, content, and metadata + """ + files: list[ExtractedFile] = [] + + try: + # Build find command + safe_working_dir = shlex.quote(working_directory) + timestamp_filter = "" + if since_timestamp: + timestamp_filter = f"-newermt {shlex.quote(since_timestamp)} " + + find_result = await sandbox.commands.run( + f"find {safe_working_dir} -type f " + f"{timestamp_filter}" + f"-not -path '*/node_modules/*' " + f"-not -path '*/.git/*' " + f"2>/dev/null" + ) + + if not find_result.stdout: + return files + + for file_path in find_result.stdout.strip().split("\n"): + if not file_path: + continue + + # Check if it's a text file + is_text = any(file_path.endswith(ext) for ext in TEXT_EXTENSIONS) + + # Skip non-text files if text_only mode + if text_only and not is_text: + continue + + try: + # Read file content as bytes + content = await sandbox.files.read(file_path, format="bytes") + if isinstance(content, str): + content = content.encode("utf-8") + elif isinstance(content, bytearray): + content = bytes(content) + + # Extract filename from path + file_name = file_path.split("/")[-1] + + # Calculate relative path + relative_path = file_path + if file_path.startswith(working_directory): + relative_path = file_path[len(working_directory) :] + if relative_path.startswith("/"): + relative_path = relative_path[1:] + + files.append( + ExtractedFile( + path=file_path, + relative_path=relative_path, + name=file_name, + content=content, + is_text=is_text, + ) + ) + except Exception as e: + logger.debug(f"Failed to read file {file_path}: {e}") + continue + + except Exception as e: + logger.warning(f"File extraction failed: {e}") + + return files + + +async def store_sandbox_files( + extracted_files: list[ExtractedFile], + execution_context: "ExecutionContext", +) -> list[SandboxFileOutput]: + """ + Store extracted sandbox files to workspace and return output objects. + + Args: + extracted_files: List of files extracted from sandbox + execution_context: Execution context for workspace storage + + Returns: + List of SandboxFileOutput objects with workspace refs + """ + outputs: list[SandboxFileOutput] = [] + + for file in extracted_files: + # Decode content for text files (for backward compat content field) + if file.is_text: + try: + content_str = file.content.decode("utf-8", errors="replace") + except Exception: + content_str = "" + else: + content_str = f"[Binary file: {len(file.content)} bytes]" + + # Build data URI (needed for storage and as binary fallback) + mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream" + data_uri = f"data:{mime_type};base64,{base64.b64encode(file.content).decode()}" + + # Try to store in workspace + workspace_ref: str | None = None + try: + result = await store_media_file( + file=MediaFileType(data_uri), + execution_context=execution_context, + return_format="for_block_output", + ) + if result.startswith("workspace://"): + workspace_ref = result + elif not file.is_text: + # Non-workspace context (graph execution): store_media_file + # returned a data URI — use it as content so binary data isn't lost. 
+ content_str = result + except Exception as e: + logger.warning(f"Failed to store file {file.name} to workspace: {e}") + # For binary files, fall back to data URI to prevent data loss + if not file.is_text: + content_str = data_uri + + outputs.append( + SandboxFileOutput( + path=file.path, + relative_path=file.relative_path, + name=file.name, + content=content_str, + workspace_ref=workspace_ref, + ) + ) + + return outputs + + +async def extract_and_store_sandbox_files( + sandbox: "BaseAsyncSandbox", + working_directory: str, + execution_context: "ExecutionContext", + since_timestamp: str | None = None, + text_only: bool = True, +) -> list[SandboxFileOutput]: + """ + Extract files from sandbox and store them in workspace. + + This is the main entry point combining extraction and storage. + + Args: + sandbox: The E2B sandbox instance + working_directory: Directory to search for files + execution_context: Execution context for workspace storage + since_timestamp: ISO timestamp - only return files modified after this time + text_only: If True, only extract text files + + Returns: + List of SandboxFileOutput objects with content and workspace refs + """ + extracted = await extract_sandbox_files( + sandbox=sandbox, + working_directory=working_directory, + since_timestamp=since_timestamp, + text_only=text_only, + ) + + return await store_sandbox_files(extracted, execution_context) diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index d539832fb0..f35aa8bb3b 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -368,6 +368,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default=600, description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)", ) + agentgenerator_use_dummy: bool = Field( + default=False, + description="Use dummy agent generator responses for testing (bypasses external service)", + ) enable_example_blocks: bool = Field( default=False, diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 53b5030da6..d71cca7865 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -441,14 +441,14 @@ develop = true colorama = "^0.4.6" cryptography = "^46.0" expiringdict = "^1.2.2" -fastapi = "^0.128.0" +fastapi = "^0.128.7" google-cloud-logging = "^3.13.0" -launchdarkly-server-sdk = "^9.14.1" +launchdarkly-server-sdk = "^9.15.0" pydantic = "^2.12.5" pydantic-settings = "^2.12.0" pyjwt = {version = "^2.11.0", extras = ["crypto"]} redis = "^6.2.0" -supabase = "^2.27.2" +supabase = "^2.28.0" uvicorn = "^0.40.0" [package.source] @@ -1382,14 +1382,14 @@ tzdata = "*" [[package]] name = "fastapi" -version = "0.128.6" +version = "0.128.7" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"}, - {file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"}, + {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"}, + {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"}, ] [package.dependencies] @@ -3117,14 
+3117,14 @@ urllib3 = ">=1.26.0,<3" [[package]] name = "launchdarkly-server-sdk" -version = "9.14.1" +version = "9.15.0" description = "LaunchDarkly SDK for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"}, - {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"}, + {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"}, + {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"}, ] [package.dependencies] @@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest [[package]] name = "postgrest" -version = "2.27.3" +version = "2.28.0" description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"}, - {file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"}, + {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"}, + {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"}, ] [package.dependencies] @@ -6260,14 +6260,14 @@ all = ["numpy"] [[package]] name = "realtime" -version = "2.27.3" +version = "2.28.0" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"}, - {file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"}, + {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"}, + {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"}, ] [package.dependencies] @@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart [[package]] name = "storage3" -version = "2.27.3" +version = "2.28.0" description = "Supabase Storage client for Python." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"}, - {file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"}, + {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"}, + {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"}, ] [package.dependencies] @@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""} [[package]] name = "supabase" -version = "2.27.3" +version = "2.28.0" description = "Supabase client for Python." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"}, - {file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"}, + {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"}, + {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"}, ] [package.dependencies] httpx = ">=0.26,<0.29" -postgrest = "2.27.3" -realtime = "2.27.3" -storage3 = "2.27.3" -supabase-auth = "2.27.3" -supabase-functions = "2.27.3" +postgrest = "2.28.0" +realtime = "2.28.0" +storage3 = "2.28.0" +supabase-auth = "2.28.0" +supabase-functions = "2.28.0" yarl = ">=1.22.0" [[package]] name = "supabase-auth" -version = "2.27.3" +version = "2.28.0" description = "Python Client Library for Supabase Auth" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"}, - {file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"}, + {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"}, + {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"}, ] [package.dependencies] @@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]} [[package]] name = "supabase-functions" -version = "2.27.3" +version = "2.28.0" description = "Library for Supabase Functions" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"}, - {file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"}, + {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"}, + {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"}, ] [package.dependencies] @@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af" +content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 317663ee98..32dfc547bc 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal sqlalchemy = "^2.0.40" strenum = "^0.4.9" stripe = "^11.5.0" -supabase = "2.27.3" +supabase = "2.28.0" tenacity = "^9.1.4" todoist-api-python = "^2.1.7" tweepy = "^4.16.0" diff --git a/autogpt_platform/backend/test/agent_generator/test_service.py b/autogpt_platform/backend/test/agent_generator/test_service.py index cc37c428c0..93c9b9dcc0 100644 --- a/autogpt_platform/backend/test/agent_generator/test_service.py +++ 
b/autogpt_platform/backend/test/agent_generator/test_service.py @@ -25,6 +25,7 @@ class TestServiceConfiguration: """Test that external service is not configured when host is empty.""" mock_settings = MagicMock() mock_settings.config.agentgenerator_host = "" + mock_settings.config.agentgenerator_use_dummy = False with patch.object(service, "_get_settings", return_value=mock_settings): assert service.is_external_service_configured() is False diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index de6ecfd612..bab92d4693 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -37,7 +37,7 @@ services: context: ../ dockerfile: autogpt_platform/backend/Dockerfile target: migrate - command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"] + command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"] develop: watch: - path: ./ @@ -56,7 +56,7 @@ services: test: [ "CMD-SHELL", - "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1", + "prisma migrate status | grep -q 'No pending migrations' || exit 1", ] interval: 30s timeout: 10s diff --git a/autogpt_platform/frontend/instrumentation-client.ts b/autogpt_platform/frontend/instrumentation-client.ts index 86fe015e62..f4af2e8956 100644 --- a/autogpt_platform/frontend/instrumentation-client.ts +++ b/autogpt_platform/frontend/instrumentation-client.ts @@ -22,6 +22,11 @@ Sentry.init({ enabled: shouldEnable, + // Suppress cross-origin stylesheet errors from Sentry Replay (rrweb) + // serializing DOM snapshots with cross-origin stylesheets + // (e.g., from browser extensions or CDN-loaded CSS) + ignoreErrors: [/Not allowed to access cross-origin stylesheet/], + // Add optional integrations for additional features integrations: [ Sentry.captureConsoleIntegration(), diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled deleted file mode 100644 index 13769eb726..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/Untitled +++ /dev/null @@ -1,10 +0,0 @@ -import { parseAsString, useQueryState } from "nuqs"; - -export function useCopilotSessionId() { - const [urlSessionId, setUrlSessionId] = useQueryState( - "sessionId", - parseAsString, - ); - - return { urlSessionId, setUrlSessionId }; -} \ No newline at end of file diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts new file mode 100644 index 0000000000..85ef6b2962 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/hooks/useLongRunningToolPolling.ts @@ -0,0 +1,126 @@ +import { getGetV2GetSessionQueryKey } from "@/app/api/__generated__/endpoints/chat/chat"; +import { useQueryClient } from "@tanstack/react-query"; +import type { UIDataTypes, UIMessage, UITools } from "ai"; +import { useCallback, useEffect, useRef } from "react"; +import { convertChatSessionMessagesToUiMessages } from "../helpers/convertChatSessionToUiMessages"; + +const OPERATING_TYPES = new Set([ + "operation_started", + "operation_pending", + "operation_in_progress", +]); + +const POLL_INTERVAL_MS = 1_500; + +/** + * Detects whether any message contains a tool part whose output indicates + * a long-running operation is still in 
progress.
+ */
+function hasOperatingTool(
+  messages: UIMessage<unknown, UIDataTypes, UITools>[],
+) {
+  for (const msg of messages) {
+    for (const part of msg.parts) {
+      if (!part.type.startsWith("tool-")) continue;
+      const toolPart = part as { output?: unknown };
+      if (!toolPart.output) continue;
+      const output =
+        typeof toolPart.output === "string"
+          ? safeParse(toolPart.output)
+          : toolPart.output;
+      if (
+        output &&
+        typeof output === "object" &&
+        "type" in output &&
+        OPERATING_TYPES.has((output as { type: string }).type)
+      ) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+function safeParse(value: string): unknown {
+  try {
+    return JSON.parse(value);
+  } catch {
+    return null;
+  }
+}
+
+/**
+ * Polls the session endpoint while any tool is in an "operating" state
+ * (operation_started / operation_pending / operation_in_progress).
+ *
+ * When the session data shows the tool output has changed (e.g. to
+ * agent_saved), it calls `setMessages` with the updated messages.
+ */
+export function useLongRunningToolPolling(
+  sessionId: string | null,
+  messages: UIMessage<unknown, UIDataTypes, UITools>[],
+  setMessages: (
+    updater: (
+      prev: UIMessage<unknown, UIDataTypes, UITools>[],
+    ) => UIMessage<unknown, UIDataTypes, UITools>[],
+  ) => void,
+) {
+  const queryClient = useQueryClient();
+  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
+
+  const stopPolling = useCallback(() => {
+    if (intervalRef.current) {
+      clearInterval(intervalRef.current);
+      intervalRef.current = null;
+    }
+  }, []);
+
+  const poll = useCallback(async () => {
+    if (!sessionId) return;
+
+    // Invalidate the query cache so the next fetch gets fresh data
+    await queryClient.invalidateQueries({
+      queryKey: getGetV2GetSessionQueryKey(sessionId),
+    });
+
+    // Fetch fresh session data
+    const data = queryClient.getQueryData<{
+      status: number;
+      data: { messages?: unknown[] };
+    }>(getGetV2GetSessionQueryKey(sessionId));
+
+    if (data?.status !== 200 || !data.data.messages) return;
+
+    const freshMessages = convertChatSessionMessagesToUiMessages(
+      sessionId,
+      data.data.messages,
+    );
+
+    if (!freshMessages || freshMessages.length === 0) return;
+
+    // Update when the long-running tool completed
+    if (!hasOperatingTool(freshMessages)) {
+      setMessages(() => freshMessages);
+      stopPolling();
+    }
+  }, [sessionId, queryClient, setMessages, stopPolling]);
+
+  useEffect(() => {
+    const shouldPoll = hasOperatingTool(messages);
+
+    // Always clear any previous interval first so we never leak timers
+    // when the effect re-runs due to dependency changes (e.g. messages
+    // updating as the LLM streams text after the tool call).
+    stopPolling();
+
+    if (shouldPoll && sessionId) {
+      intervalRef.current = setInterval(() => {
+        poll();
+      }, POLL_INTERVAL_MS);
+    }
+
+    return () => {
+      stopPolling();
+    };
+  }, [messages, sessionId, poll, stopPolling]);
+}
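A minimal consumer of this hook, for orientation while reviewing (the component and its wiring below are illustrative assumptions; only the `useLongRunningToolPolling(sessionId, messages, setMessages)` call itself appears in this change):

```tsx
import { useChat } from "@ai-sdk/react";
import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling";

// Illustrative sketch: any component that owns the chat message state can
// mount the hook. It is a no-op until some tool part reports an
// operation_* output, then polls every 1.5s and swaps in the fresh
// messages once the backend reports the operation finished.
function CopilotChatSketch({ sessionId }: { sessionId: string | null }) {
  const { messages, setMessages } = useChat();
  useLongRunningToolPolling(sessionId, messages, setMessages);
  return null; // the real page renders `messages` here
}
```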
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
index 88b1c491d7..26977a207a 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/CreateAgent.tsx
@@ -1,24 +1,30 @@
 "use client";
 
-import { WarningDiamondIcon } from "@phosphor-icons/react";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import {
+  BookOpenIcon,
+  CheckFatIcon,
+  PencilSimpleIcon,
+  WarningDiamondIcon,
+} from "@phosphor-icons/react";
 import type { ToolUIPart } from "ai";
+import NextLink from "next/link";
 import { useCopilotChatActions } from "../../components/CopilotChatActionsProvider/useCopilotChatActions";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
-import { ProgressBar } from "../../components/ProgressBar/ProgressBar";
 import {
   ContentCardDescription,
   ContentCodeBlock,
   ContentGrid,
   ContentHint,
-  ContentLink,
   ContentMessage,
 } from "../../components/ToolAccordion/AccordionContent";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
-import { useAsymptoticProgress } from "../../hooks/useAsymptoticProgress";
 import {
   ClarificationQuestionsCard,
   ClarifyingQuestion,
 } from "./components/ClarificationQuestionsCard";
+import { MiniGame } from "./components/MiniGame/MiniGame";
 import {
   AccordionIcon,
   formatMaybeJson,
@@ -52,7 +58,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
   const icon = <WarningDiamondIcon />;
 
   if (isAgentSavedOutput(output)) {
-    return { icon, title: output.agent_name };
+    return { icon, title: output.agent_name, expanded: true };
   }
   if (isAgentPreviewOutput(output)) {
     return {
@@ -78,6 +84,7 @@ function getAccordionMeta(output: CreateAgentToolOutput) {
     return {
       icon,
       title: "Creating agent, this may take a few minutes. Sit back and relax.",
+      expanded: true,
     };
   }
   return {
@@ -107,8 +114,6 @@ export function CreateAgentTool({ part }: Props) {
       isOperationPendingOutput(output) ||
       isOperationInProgressOutput(output));
 
-  const progress = useAsymptoticProgress(isOperating);
-
   const hasExpandableContent =
     part.state === "output-available" &&
     !!output &&
@@ -152,31 +157,53 @@ export function CreateAgentTool({ part }: Props) {
       {isOperating && (
         <>
-          <ProgressBar progress={progress} />
+          <MiniGame />
           <ContentHint>
-            This could take a few minutes, grab a coffee ☕
+            This could take a few minutes — play while you wait!
           </ContentHint>
         </>
       )}
 
       {isAgentSavedOutput(output) && (
-        <ContentGrid>
-          <ContentMessage>
-            {output.message}
-          </ContentMessage>
-          <ContentLink href={`/library/agents/${output.library_agent_id}`}>
-            Open in library
-          </ContentLink>
-          <ContentLink href={`/build?flowID=${output.agent_id}`}>
-            Open in builder
-          </ContentLink>
-          <ContentCodeBlock>
-            {truncateText(
-              formatMaybeJson({ agent_id: output.agent_id }),
-              800,
-            )}
-          </ContentCodeBlock>
-        </ContentGrid>
+        <ContentGrid>
+          <div className="flex items-center gap-2">
+            <CheckFatIcon />
+            <Text variant="body">{output.message}</Text>
+          </div>
+          <div className="flex gap-2">
+            <Button asChild>
+              <NextLink href={`/library/agents/${output.library_agent_id}`}>
+                <BookOpenIcon /> Open in library
+              </NextLink>
+            </Button>
+            <Button asChild>
+              <NextLink href={`/build?flowID=${output.agent_id}`}>
+                <PencilSimpleIcon /> Open in builder
+              </NextLink>
+            </Button>
+          </div>
+        </ContentGrid>
)} {isAgentPreviewOutput(output) && ( diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx new file mode 100644 index 0000000000..53cfcf2731 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/MiniGame.tsx @@ -0,0 +1,21 @@ +"use client"; + +import { useMiniGame } from "./useMiniGame"; + +export function MiniGame() { + const { canvasRef } = useMiniGame(); + + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts new file mode 100644 index 0000000000..e91f1766ca --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/CreateAgent/components/MiniGame/useMiniGame.ts @@ -0,0 +1,579 @@ +import { useEffect, useRef } from "react"; + +/* ------------------------------------------------------------------ */ +/* Constants */ +/* ------------------------------------------------------------------ */ + +const CANVAS_HEIGHT = 150; +const GRAVITY = 0.55; +const JUMP_FORCE = -9.5; +const BASE_SPEED = 3; +const SPEED_INCREMENT = 0.0008; +const SPAWN_MIN = 70; +const SPAWN_MAX = 130; +const CHAR_SIZE = 18; +const CHAR_X = 50; +const GROUND_PAD = 20; +const STORAGE_KEY = "copilot-minigame-highscore"; + +// Colors +const COLOR_BG = "#E8EAF6"; +const COLOR_CHAR = "#263238"; +const COLOR_BOSS = "#F50057"; + +// Boss +const BOSS_SIZE = 36; +const BOSS_ENTER_SPEED = 2; +const BOSS_LEAVE_SPEED = 3; +const BOSS_SHOOT_COOLDOWN = 90; +const BOSS_SHOTS_TO_EVADE = 5; +const BOSS_INTERVAL = 20; // every N score +const PROJ_SPEED = 4.5; +const PROJ_SIZE = 12; + +/* ------------------------------------------------------------------ */ +/* Types */ +/* ------------------------------------------------------------------ */ + +interface Obstacle { + x: number; + width: number; + height: number; + scored: boolean; +} + +interface Projectile { + x: number; + y: number; + speed: number; + evaded: boolean; + type: "low" | "high"; +} + +interface BossState { + phase: "inactive" | "entering" | "fighting" | "leaving"; + x: number; + targetX: number; + shotsEvaded: number; + cooldown: number; + projectiles: Projectile[]; + bob: number; +} + +interface GameState { + charY: number; + vy: number; + obstacles: Obstacle[]; + score: number; + highScore: number; + speed: number; + frame: number; + nextSpawn: number; + running: boolean; + over: boolean; + groundY: number; + boss: BossState; + bossThreshold: number; +} + +/* ------------------------------------------------------------------ */ +/* Helpers */ +/* ------------------------------------------------------------------ */ + +function randInt(min: number, max: number) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function readHighScore(): number { + try { + return parseInt(localStorage.getItem(STORAGE_KEY) || "0", 10) || 0; + } catch { + return 0; + } +} + +function writeHighScore(score: number) { + try { + localStorage.setItem(STORAGE_KEY, String(score)); + } catch { + /* noop */ + } +} + +function makeBoss(): BossState { + return { + phase: "inactive", + x: 0, + targetX: 0, + shotsEvaded: 0, + cooldown: 0, + projectiles: [], + bob: 0, + }; +} + +function makeState(groundY: number): GameState { + return { + charY: groundY - CHAR_SIZE, + vy: 0, + obstacles: [], + score: 0, + highScore: readHighScore(), + speed: BASE_SPEED, + frame: 0, + nextSpawn: randInt(SPAWN_MIN, SPAWN_MAX), + running: false, + over: false, + groundY, + boss: makeBoss(), + bossThreshold: BOSS_INTERVAL, + }; +} + +function gameOver(s: GameState) { + s.running = false; + s.over = true; + if (s.score > s.highScore) { + s.highScore = s.score; + writeHighScore(s.score); + } +} + +/* ------------------------------------------------------------------ */ +/* Projectile collision — shared between fighting & leaving phases */ +/* 
------------------------------------------------------------------ */ + +/** Returns true if the player died. */ +function tickProjectiles(s: GameState): boolean { + const boss = s.boss; + + for (const p of boss.projectiles) { + p.x -= p.speed; + + if (!p.evaded && p.x + PROJ_SIZE < CHAR_X) { + p.evaded = true; + boss.shotsEvaded++; + } + + // Collision + if ( + !p.evaded && + CHAR_X + CHAR_SIZE > p.x && + CHAR_X < p.x + PROJ_SIZE && + s.charY + CHAR_SIZE > p.y && + s.charY < p.y + PROJ_SIZE + ) { + gameOver(s); + return true; + } + } + + boss.projectiles = boss.projectiles.filter((p) => p.x + PROJ_SIZE > -20); + return false; +} + +/* ------------------------------------------------------------------ */ +/* Update */ +/* ------------------------------------------------------------------ */ + +function update(s: GameState, canvasWidth: number) { + if (!s.running) return; + + s.frame++; + + // Speed only ramps during regular play + if (s.boss.phase === "inactive") { + s.speed = BASE_SPEED + s.frame * SPEED_INCREMENT; + } + + // ---- Character physics (always active) ---- // + s.vy += GRAVITY; + s.charY += s.vy; + if (s.charY + CHAR_SIZE >= s.groundY) { + s.charY = s.groundY - CHAR_SIZE; + s.vy = 0; + } + + // ---- Trigger boss ---- // + if (s.boss.phase === "inactive" && s.score >= s.bossThreshold) { + s.boss.phase = "entering"; + s.boss.x = canvasWidth + 10; + s.boss.targetX = canvasWidth - BOSS_SIZE - 40; + s.boss.shotsEvaded = 0; + s.boss.cooldown = BOSS_SHOOT_COOLDOWN; + s.boss.projectiles = []; + s.obstacles = []; + } + + // ---- Boss: entering ---- // + if (s.boss.phase === "entering") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + s.boss.x -= BOSS_ENTER_SPEED; + if (s.boss.x <= s.boss.targetX) { + s.boss.x = s.boss.targetX; + s.boss.phase = "fighting"; + } + return; // no obstacles while entering + } + + // ---- Boss: fighting ---- // + if (s.boss.phase === "fighting") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + + // Shoot + s.boss.cooldown--; + if (s.boss.cooldown <= 0) { + const isLow = Math.random() < 0.5; + s.boss.projectiles.push({ + x: s.boss.x - PROJ_SIZE, + y: isLow ? s.groundY - 14 : s.groundY - 70, + speed: PROJ_SPEED, + evaded: false, + type: isLow ? "low" : "high", + }); + s.boss.cooldown = BOSS_SHOOT_COOLDOWN; + } + + if (tickProjectiles(s)) return; + + // Boss defeated? 
+ if (s.boss.shotsEvaded >= BOSS_SHOTS_TO_EVADE) { + s.boss.phase = "leaving"; + s.score += 5; // bonus + s.bossThreshold = s.score + BOSS_INTERVAL; + } + return; + } + + // ---- Boss: leaving ---- // + if (s.boss.phase === "leaving") { + s.boss.bob = Math.sin(s.frame * 0.05) * 3; + s.boss.x += BOSS_LEAVE_SPEED; + + // Still check in-flight projectiles + if (tickProjectiles(s)) return; + + if (s.boss.x > canvasWidth + 50) { + s.boss = makeBoss(); + s.nextSpawn = s.frame + randInt(SPAWN_MIN / 2, SPAWN_MAX / 2); + } + return; + } + + // ---- Regular obstacle play ---- // + if (s.frame >= s.nextSpawn) { + s.obstacles.push({ + x: canvasWidth + 10, + width: randInt(10, 16), + height: randInt(20, 48), + scored: false, + }); + s.nextSpawn = s.frame + randInt(SPAWN_MIN, SPAWN_MAX); + } + + for (const o of s.obstacles) { + o.x -= s.speed; + if (!o.scored && o.x + o.width < CHAR_X) { + o.scored = true; + s.score++; + } + } + + s.obstacles = s.obstacles.filter((o) => o.x + o.width > -20); + + for (const o of s.obstacles) { + const oY = s.groundY - o.height; + if ( + CHAR_X + CHAR_SIZE > o.x && + CHAR_X < o.x + o.width && + s.charY + CHAR_SIZE > oY + ) { + gameOver(s); + return; + } + } +} + +/* ------------------------------------------------------------------ */ +/* Drawing */ +/* ------------------------------------------------------------------ */ + +function drawBoss(ctx: CanvasRenderingContext2D, s: GameState, bg: string) { + const bx = s.boss.x; + const by = s.groundY - BOSS_SIZE + s.boss.bob; + + // Body + ctx.save(); + ctx.fillStyle = COLOR_BOSS; + ctx.globalAlpha = 0.9; + ctx.beginPath(); + ctx.roundRect(bx, by, BOSS_SIZE, BOSS_SIZE, 4); + ctx.fill(); + ctx.restore(); + + // Eyes + ctx.save(); + ctx.fillStyle = bg; + const eyeY = by + 13; + ctx.beginPath(); + ctx.arc(bx + 10, eyeY, 4, 0, Math.PI * 2); + ctx.fill(); + ctx.beginPath(); + ctx.arc(bx + 26, eyeY, 4, 0, Math.PI * 2); + ctx.fill(); + ctx.restore(); + + // Angry eyebrows + ctx.save(); + ctx.strokeStyle = bg; + ctx.lineWidth = 2; + ctx.beginPath(); + ctx.moveTo(bx + 5, eyeY - 7); + ctx.lineTo(bx + 14, eyeY - 4); + ctx.stroke(); + ctx.beginPath(); + ctx.moveTo(bx + 31, eyeY - 7); + ctx.lineTo(bx + 22, eyeY - 4); + ctx.stroke(); + ctx.restore(); + + // Zigzag mouth + ctx.save(); + ctx.strokeStyle = bg; + ctx.lineWidth = 1.5; + ctx.beginPath(); + ctx.moveTo(bx + 10, by + 27); + ctx.lineTo(bx + 14, by + 24); + ctx.lineTo(bx + 18, by + 27); + ctx.lineTo(bx + 22, by + 24); + ctx.lineTo(bx + 26, by + 27); + ctx.stroke(); + ctx.restore(); +} + +function drawProjectiles(ctx: CanvasRenderingContext2D, boss: BossState) { + ctx.save(); + ctx.fillStyle = COLOR_BOSS; + ctx.globalAlpha = 0.8; + for (const p of boss.projectiles) { + if (p.evaded) continue; + ctx.beginPath(); + ctx.arc( + p.x + PROJ_SIZE / 2, + p.y + PROJ_SIZE / 2, + PROJ_SIZE / 2, + 0, + Math.PI * 2, + ); + ctx.fill(); + } + ctx.restore(); +} + +function draw( + ctx: CanvasRenderingContext2D, + s: GameState, + w: number, + h: number, + fg: string, + started: boolean, +) { + ctx.fillStyle = COLOR_BG; + ctx.fillRect(0, 0, w, h); + + // Ground + ctx.save(); + ctx.strokeStyle = fg; + ctx.globalAlpha = 0.15; + ctx.setLineDash([4, 4]); + ctx.beginPath(); + ctx.moveTo(0, s.groundY); + ctx.lineTo(w, s.groundY); + ctx.stroke(); + ctx.restore(); + + // Character + ctx.save(); + ctx.fillStyle = COLOR_CHAR; + ctx.globalAlpha = 0.85; + ctx.beginPath(); + ctx.roundRect(CHAR_X, s.charY, CHAR_SIZE, CHAR_SIZE, 3); + ctx.fill(); + ctx.restore(); + + // Eyes + ctx.save(); + ctx.fillStyle = COLOR_BG; + 
ctx.beginPath();
+  ctx.arc(CHAR_X + 6, s.charY + 7, 2.5, 0, Math.PI * 2);
+  ctx.fill();
+  ctx.beginPath();
+  ctx.arc(CHAR_X + 12, s.charY + 7, 2.5, 0, Math.PI * 2);
+  ctx.fill();
+  ctx.restore();
+
+  // Obstacles
+  ctx.save();
+  ctx.fillStyle = fg;
+  ctx.globalAlpha = 0.55;
+  for (const o of s.obstacles) {
+    ctx.fillRect(o.x, s.groundY - o.height, o.width, o.height);
+  }
+  ctx.restore();
+
+  // Boss + projectiles
+  if (s.boss.phase !== "inactive") {
+    drawBoss(ctx, s, COLOR_BG);
+    drawProjectiles(ctx, s.boss);
+  }
+
+  // Score HUD
+  ctx.save();
+  ctx.fillStyle = fg;
+  ctx.globalAlpha = 0.5;
+  ctx.font = "bold 11px monospace";
+  ctx.textAlign = "right";
+  ctx.fillText(`Score: ${s.score}`, w - 12, 20);
+  ctx.fillText(`Best: ${s.highScore}`, w - 12, 34);
+  if (s.boss.phase === "fighting") {
+    ctx.fillText(
+      `Evade: ${s.boss.shotsEvaded}/${BOSS_SHOTS_TO_EVADE}`,
+      w - 12,
+      48,
+    );
+  }
+  ctx.restore();
+
+  // Prompts
+  if (!started && !s.running && !s.over) {
+    ctx.save();
+    ctx.fillStyle = fg;
+    ctx.globalAlpha = 0.5;
+    ctx.font = "12px sans-serif";
+    ctx.textAlign = "center";
+    ctx.fillText("Click or press Space to play while you wait", w / 2, h / 2);
+    ctx.restore();
+  }
+
+  if (s.over) {
+    ctx.save();
+    ctx.fillStyle = fg;
+    ctx.globalAlpha = 0.7;
+    ctx.font = "bold 13px sans-serif";
+    ctx.textAlign = "center";
+    ctx.fillText("Game Over", w / 2, h / 2 - 8);
+    ctx.font = "11px sans-serif";
+    ctx.fillText("Click or Space to restart", w / 2, h / 2 + 10);
+    ctx.restore();
+  }
+}
+
+/* ------------------------------------------------------------------ */
+/* Hook                                                                */
+/* ------------------------------------------------------------------ */
+
+export function useMiniGame() {
+  const canvasRef = useRef<HTMLCanvasElement | null>(null);
+  const stateRef = useRef<GameState | null>(null);
+  const rafRef = useRef(0);
+  const startedRef = useRef(false);
+
+  useEffect(() => {
+    const canvas = canvasRef.current;
+    if (!canvas) return;
+
+    const container = canvas.parentElement;
+    if (container) {
+      canvas.width = container.clientWidth;
+      canvas.height = CANVAS_HEIGHT;
+    }
+
+    const groundY = canvas.height - GROUND_PAD;
+    stateRef.current = makeState(groundY);
+
+    const style = getComputedStyle(canvas);
+    let fg = style.color || "#71717a";
+
+    // -------------------------------------------------------------- //
+    //  Jump                                                           //
+    // -------------------------------------------------------------- //
+    function jump() {
+      const s = stateRef.current;
+      if (!s) return;
+
+      if (s.over) {
+        const hs = s.highScore;
+        const gy = s.groundY;
+        stateRef.current = makeState(gy);
+        stateRef.current.highScore = hs;
+        stateRef.current.running = true;
+        startedRef.current = true;
+        return;
+      }
+
+      if (!s.running) {
+        s.running = true;
+        startedRef.current = true;
+        return;
+      }
+
+      // Only jump when on the ground
+      if (s.charY + CHAR_SIZE >= s.groundY) {
+        s.vy = JUMP_FORCE;
+      }
+    }
+
+    function onKey(e: KeyboardEvent) {
+      if (e.code === "Space" || e.key === " ") {
+        e.preventDefault();
+        jump();
+      }
+    }
+
+    function onClick() {
+      canvas?.focus();
+      jump();
+    }
+
+    // -------------------------------------------------------------- //
+    //  Loop                                                           //
+    // -------------------------------------------------------------- //
+    function loop() {
+      const s = stateRef.current;
+      if (!canvas || !s) return;
+      const ctx = canvas.getContext("2d");
+      if (!ctx) return;
+
+      update(s, canvas.width);
+      draw(ctx, s, canvas.width, canvas.height, fg, startedRef.current);
+      rafRef.current = requestAnimationFrame(loop);
+    }
+
+    rafRef.current = requestAnimationFrame(loop);
+
+    canvas.addEventListener("click", onClick);
+    canvas.addEventListener("keydown", onKey);
+
+    const observer = new ResizeObserver((entries) => {
+      for (const entry of entries) {
+        canvas.width = entry.contentRect.width;
+        canvas.height = CANVAS_HEIGHT;
+        if (stateRef.current) {
+          stateRef.current.groundY = canvas.height - GROUND_PAD;
+        }
+        const cs = getComputedStyle(canvas);
+        fg = cs.color || fg;
+      }
+    });
+    if (container) observer.observe(container);
+
+    return () => {
+      cancelAnimationFrame(rafRef.current);
+      canvas.removeEventListener("click", onClick);
+      canvas.removeEventListener("keydown", onKey);
+      observer.disconnect();
+    };
+  }, []);
+
+  return { canvasRef };
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
index e1cb030449..6e2cbe90d7 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/RunBlock.tsx
@@ -3,6 +3,7 @@ import type { ToolUIPart } from "ai";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
+import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
 import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
 import { ErrorCard } from "./components/ErrorCard/ErrorCard";
 import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
@@ -11,6 +12,7 @@ import {
   getAnimationText,
   getRunBlockToolOutput,
   isRunBlockBlockOutput,
+  isRunBlockDetailsOutput,
   isRunBlockErrorOutput,
   isRunBlockSetupRequirementsOutput,
   ToolIcon,
@@ -41,6 +43,7 @@ export function RunBlockTool({ part }: Props) {
     part.state === "output-available" &&
     !!output &&
     (isRunBlockBlockOutput(output) ||
+      isRunBlockDetailsOutput(output) ||
       isRunBlockSetupRequirementsOutput(output) ||
       isRunBlockErrorOutput(output));
@@ -58,6 +61,10 @@ export function RunBlockTool({ part }: Props) {
       {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
+      {isRunBlockDetailsOutput(output) && (
+        <BlockDetailsCard output={output} />
+      )}
+
       {isRunBlockSetupRequirementsOutput(output) && (
         <SetupRequirementsCard output={output} />
       )}
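With the new `block_details` branch wired in above, a first `run_block` call that omits `input_data` renders a schema card instead of an error. A representative payload (values are illustrative, mirroring the Storybook fixtures that follow):

```ts
// Hypothetical payload shaped like BlockDetailsResponse (declared in
// the RunBlock helpers.tsx further down in this diff).
const exampleOutput = {
  type: "block_details",
  message: "Provide the required inputs to run this block.",
  session_id: "session-123",
  user_authenticated: true,
  block: {
    id: "block-abc-123",
    name: "GetWeather",
    description: "Fetches current weather data for a given location.",
    inputs: {
      type: "object",
      properties: { location: { title: "Location", type: "string" } },
      required: ["location"],
    },
    outputs: {},
    credentials: [],
  },
};
// isRunBlockDetailsOutput(exampleOutput) is true, so RunBlockTool
// renders <BlockDetailsCard output={exampleOutput} />.
```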
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.stories.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.stories.tsx
new file mode 100644
index 0000000000..6e133ca93b
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.stories.tsx
@@ -0,0 +1,188 @@
+import type { Meta, StoryObj } from "@storybook/nextjs";
+import { ResponseType } from "@/app/api/__generated__/models/responseType";
+import type { BlockDetailsResponse } from "../../helpers";
+import { BlockDetailsCard } from "./BlockDetailsCard";
+
+const meta: Meta<typeof BlockDetailsCard> = {
+  title: "Copilot/RunBlock/BlockDetailsCard",
+  component: BlockDetailsCard,
+  parameters: {
+    layout: "centered",
+  },
+  tags: ["autodocs"],
+  decorators: [
+    (Story) => (
+      <div className="w-[480px]">
+        <Story />
+      </div>
+ ), + ], +}; + +export default meta; +type Story = StoryObj; + +const baseBlock: BlockDetailsResponse = { + type: ResponseType.block_details, + message: + "Here are the details for the GetWeather block. Provide the required inputs to run it.", + session_id: "session-123", + user_authenticated: true, + block: { + id: "block-abc-123", + name: "GetWeather", + description: "Fetches current weather data for a given location.", + inputs: { + type: "object", + properties: { + location: { + title: "Location", + type: "string", + description: + "City name or coordinates (e.g. 'London' or '51.5,-0.1')", + }, + units: { + title: "Units", + type: "string", + description: "Temperature units: 'metric' or 'imperial'", + }, + }, + required: ["location"], + }, + outputs: { + type: "object", + properties: { + temperature: { + title: "Temperature", + type: "number", + description: "Current temperature in the requested units", + }, + condition: { + title: "Condition", + type: "string", + description: "Weather condition description (e.g. 'Sunny', 'Rain')", + }, + }, + }, + credentials: [], + }, +}; + +export const Default: Story = { + args: { + output: baseBlock, + }, +}; + +export const InputsOnly: Story = { + args: { + output: { + ...baseBlock, + message: "This block requires inputs. No outputs are defined.", + block: { + ...baseBlock.block, + outputs: {}, + }, + }, + }, +}; + +export const OutputsOnly: Story = { + args: { + output: { + ...baseBlock, + message: "This block has no required inputs.", + block: { + ...baseBlock.block, + inputs: {}, + }, + }, + }, +}; + +export const ManyFields: Story = { + args: { + output: { + ...baseBlock, + message: "Block with many input and output fields.", + block: { + ...baseBlock.block, + name: "SendEmail", + description: "Sends an email via SMTP.", + inputs: { + type: "object", + properties: { + to: { + title: "To", + type: "string", + description: "Recipient email address", + }, + subject: { + title: "Subject", + type: "string", + description: "Email subject line", + }, + body: { + title: "Body", + type: "string", + description: "Email body content", + }, + cc: { + title: "CC", + type: "string", + description: "CC recipients (comma-separated)", + }, + bcc: { + title: "BCC", + type: "string", + description: "BCC recipients (comma-separated)", + }, + }, + required: ["to", "subject", "body"], + }, + outputs: { + type: "object", + properties: { + message_id: { + title: "Message ID", + type: "string", + description: "Unique ID of the sent email", + }, + status: { + title: "Status", + type: "string", + description: "Delivery status", + }, + }, + }, + }, + }, + }, +}; + +export const NoFieldDescriptions: Story = { + args: { + output: { + ...baseBlock, + message: "Fields without descriptions.", + block: { + ...baseBlock.block, + name: "SimpleBlock", + inputs: { + type: "object", + properties: { + input_a: { title: "Input A", type: "string" }, + input_b: { title: "Input B", type: "number" }, + }, + required: ["input_a"], + }, + outputs: { + type: "object", + properties: { + result: { title: "Result", type: "string" }, + }, + }, + }, + }, + }, +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.tsx new file mode 100644 index 0000000000..fdbf115222 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/components/BlockDetailsCard/BlockDetailsCard.tsx @@ 
-0,0 +1,103 @@
+"use client";
+
+import type { BlockDetailsResponse } from "../../helpers";
+import {
+  ContentBadge,
+  ContentCard,
+  ContentCardDescription,
+  ContentCardTitle,
+  ContentGrid,
+  ContentMessage,
+} from "../../../../components/ToolAccordion/AccordionContent";
+
+interface Props {
+  output: BlockDetailsResponse;
+}
+
+function SchemaFieldList({
+  title,
+  properties,
+  required,
+}: {
+  title: string;
+  properties: Record<string, unknown>;
+  required?: string[];
+}) {
+  const entries = Object.entries(properties);
+  if (entries.length === 0) return null;
+
+  const requiredSet = new Set(required ?? []);
+
+  return (
+    <ContentCard>
+      <ContentCardTitle>{title}</ContentCardTitle>
+      <ContentGrid>
+        {entries.map(([name, schema]) => {
+          const field = schema as Record<string, unknown> | undefined;
+          const fieldTitle =
+            typeof field?.title === "string" ? field.title : name;
+          const fieldType =
+            typeof field?.type === "string" ? field.type : "unknown";
+          const description =
+            typeof field?.description === "string"
+              ? field.description
+              : undefined;
+
+          return (
+            <div key={name}>
+              <div>
+                <span>
+                  {fieldTitle}
+                </span>
+                <ContentBadge>{fieldType}</ContentBadge>
+                {requiredSet.has(name) && (
+                  <ContentBadge>Required</ContentBadge>
+                )}
+              </div>
+              {description && (
+                <ContentCardDescription>
+                  {description}
+                </ContentCardDescription>
+              )}
+            </div>
+          );
+        })}
+      </ContentGrid>
+    </ContentCard>
+  );
+}
+
+export function BlockDetailsCard({ output }: Props) {
+  const inputs = output.block.inputs as {
+    properties?: Record<string, unknown>;
+    required?: string[];
+  } | null;
+  const outputs = output.block.outputs as {
+    properties?: Record<string, unknown>;
+    required?: string[];
+  } | null;
+
+  return (
+    <ContentGrid>
+      <ContentMessage>{output.message}</ContentMessage>
+
+      {inputs?.properties && Object.keys(inputs.properties).length > 0 && (
+        <SchemaFieldList title="Inputs" properties={inputs.properties} required={inputs.required} />
+      )}
+
+      {outputs?.properties && Object.keys(outputs.properties).length > 0 && (
+        <SchemaFieldList title="Outputs" properties={outputs.properties} />
+      )}
+    </ContentGrid>
+  );
+}
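`SchemaFieldList` treats the block's JSON schema as untyped data and degrades field by field rather than trusting its shape. The same fallback logic in isolation (a standalone sketch, not code from this diff):

```ts
// Never trust the schema shape: fall back to the property name and
// an "unknown" type when title/type are missing or not strings.
function describeField(name: string, schema: unknown) {
  const field = schema as Record<string, unknown> | undefined;
  return {
    title: typeof field?.title === "string" ? field.title : name,
    type: typeof field?.type === "string" ? field.type : "unknown",
  };
}

describeField("input_a", { title: "Input A", type: "string" });
// -> { title: "Input A", type: "string" }
describeField("input_b", 42);
// -> { title: "input_b", type: "unknown" }
```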
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
index b8625988cd..6e56154a5e 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
@@ -10,18 +10,37 @@ import {
 import type { ToolUIPart } from "ai";
 import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
 
+/** Block details returned on first run_block attempt (before input_data provided). */
+export interface BlockDetailsResponse {
+  type: typeof ResponseType.block_details;
+  message: string;
+  session_id?: string | null;
+  block: {
+    id: string;
+    name: string;
+    description: string;
+    inputs: Record<string, unknown>;
+    outputs: Record<string, unknown>;
+    credentials: unknown[];
+  };
+  user_authenticated: boolean;
+}
+
 export interface RunBlockInput {
   block_id?: string;
+  block_name?: string;
   input_data?: Record<string, unknown>;
 }
 
 export type RunBlockToolOutput =
   | SetupRequirementsResponse
+  | BlockDetailsResponse
   | BlockOutputResponse
   | ErrorResponse;
 
 const RUN_BLOCK_OUTPUT_TYPES = new Set([
   ResponseType.setup_requirements,
+  ResponseType.block_details,
   ResponseType.block_output,
   ResponseType.error,
 ]);
@@ -35,6 +54,15 @@ export function isRunBlockSetupRequirementsOutput(
   );
 }
 
+export function isRunBlockDetailsOutput(
+  output: RunBlockToolOutput,
+): output is BlockDetailsResponse {
+  return (
+    output.type === ResponseType.block_details ||
+    ("block" in output && typeof output.block === "object")
+  );
+}
+
 export function isRunBlockBlockOutput(
   output: RunBlockToolOutput,
 ): output is BlockOutputResponse {
@@ -64,6 +92,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
     return output as RunBlockToolOutput;
   }
   if ("block_id" in output) return output as BlockOutputResponse;
+  if ("block" in output) return output as BlockDetailsResponse;
   if ("setup_info" in output) return output as SetupRequirementsResponse;
   if ("error" in output || "details" in output) return output as ErrorResponse;
 
@@ -84,17 +113,25 @@ export function getAnimationText(part: {
   output?: unknown;
 }): string {
   const input = part.input as RunBlockInput | undefined;
+  const blockName = input?.block_name?.trim();
   const blockId = input?.block_id?.trim();
-  const blockText = blockId ? ` "${blockId}"` : "";
+  // Prefer block_name if available, otherwise fall back to block_id
+  const blockText = blockName
+    ? ` "${blockName}"`
+    : blockId
+      ? ` "${blockId}"`
+      : "";
 
   switch (part.state) {
     case "input-streaming":
     case "input-available":
-      return `Running the block${blockText}`;
+      return `Running${blockText}`;
     case "output-available": {
       const output = parseOutput(part.output);
-      if (!output) return `Running the block${blockText}`;
+      if (!output) return `Running${blockText}`;
       if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
+      if (isRunBlockDetailsOutput(output))
+        return `Details for "${output.block.name}"`;
       if (isRunBlockSetupRequirementsOutput(output)) {
         return `Setup needed for "${output.setup_info.agent_name}"`;
       }
@@ -158,6 +195,21 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
   };
 }
 
+  if (isRunBlockDetailsOutput(output)) {
+    const inputKeys = Object.keys(
+      (output.block.inputs as { properties?: Record<string, unknown> })
+        ?.properties ?? {},
+    );
+    return {
+      icon,
+      title: output.block.name,
+      description:
+        inputKeys.length > 0
+          ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
+          : output.message,
+    };
+  }
+
   if (isRunBlockSetupRequirementsOutput(output)) {
     const missingCredsCount = Object.keys(
       (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
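One detail worth calling out in `parseOutput`: when the payload has no `type` field it falls back to duck typing by key, and the new `"block" in output` test is ordered after the `"block_id"` test so a `BlockOutputResponse` is never misread as a details payload. The ordering in isolation (a hypothetical standalone function):

```ts
// Illustrative: test the more specific key (block_id) before the more
// general one (block); a block_output payload contains block_id.
function classify(output: Record<string, unknown>): string {
  if ("block_id" in output) return "block_output";
  if ("block" in output) return "block_details";
  if ("setup_info" in output) return "setup_requirements";
  return "error";
}

classify({ block_id: "b1", outputs: {} }); // "block_output"
classify({ block: { id: "b1" } }); // "block_details"
```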
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts
index 3dbba6e790..28e9ba7cfb 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts
@@ -1,10 +1,14 @@
 import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
+import { toast } from "@/components/molecules/Toast/use-toast";
 import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
 import { DefaultChatTransport } from "ai";
-import { useEffect, useMemo, useState } from "react";
+import { useEffect, useMemo, useRef, useState } from "react";
 import { useChatSession } from "./useChatSession";
+import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling";
+
+const STREAM_START_TIMEOUT_MS = 12_000;
 
 export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
@@ -52,6 +56,24 @@ export function useCopilotPage() {
     transport: transport ?? undefined,
   });
 
+  // Abort the stream if the backend doesn't start sending data within 12s.
+  const stopRef = useRef(stop);
+  stopRef.current = stop;
+  useEffect(() => {
+    if (status !== "submitted") return;
+
+    const timer = setTimeout(() => {
+      stopRef.current();
+      toast({
+        title: "Stream timed out",
+        description: "The server took too long to respond. Please try again.",
+        variant: "destructive",
+      });
+    }, STREAM_START_TIMEOUT_MS);
+
+    return () => clearTimeout(timer);
+  }, [status]);
+
   useEffect(() => {
     if (!hydratedMessages || hydratedMessages.length === 0) return;
     setMessages((prev) => {
@@ -60,6 +82,11 @@ export function useCopilotPage() {
     });
   }, [hydratedMessages, setMessages]);
 
+  // Poll session endpoint when a long-running tool (create_agent, edit_agent)
+  // is in progress. When the backend completes, the session data will contain
+  // the final tool output — this hook detects the change and updates messages.
+  useLongRunningToolPolling(sessionId, messages, setMessages);
+
   // Clear messages when session is null
   useEffect(() => {
     if (!sessionId) setMessages([]);
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx
index 1ad40fcef4..8815069011 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx
@@ -29,6 +29,7 @@ export function ScheduleListItem({
       description={formatDistanceToNow(schedule.next_run_time, {
         addSuffix: true,
       })}
+      descriptionTitle={new Date(schedule.next_run_time).toString()}
       onClick={onClick}
       selected={selected}
       icon={
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx
index 4f4e9962ce..a438568b74 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx
@@ -7,6 +7,7 @@ import React from "react";
 interface Props {
   title: string;
   description?: string;
+  descriptionTitle?: string;
   icon?: React.ReactNode;
   selected?: boolean;
   onClick?: () => void;
@@ -16,6 +17,7 @@ export function SidebarItemCard({
   title,
   description,
+  descriptionTitle,
   icon,
   selected,
   onClick,
@@ -38,7 +40,11 @@
         >
           {title}
-          <Text variant="small" className="text-zinc-500">
+          <Text
+            variant="small"
+            className="text-zinc-500"
+            title={descriptionTitle}
+          >
             {description}
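`descriptionTitle` is forwarded to the native `title` attribute, so the relative timestamp gets a browser tooltip showing the absolute time at no extra dependency cost. Usage in the style of the two call sites in this diff (the schedule data here is an assumed fixture):

```tsx
import { formatDistanceToNow } from "date-fns";
import { SidebarItemCard } from "./SidebarItemCard";

// Illustrative: nextRunTime is an assumed ISO timestamp string.
function ExampleItem({ nextRunTime }: { nextRunTime: string }) {
  return (
    <SidebarItemCard
      title="Weekly digest"
      description={formatDistanceToNow(nextRunTime, { addSuffix: true })}
      descriptionTitle={new Date(nextRunTime).toString()}
    />
  );
}
```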
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx index 8970e82b64..b9320822fc 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx @@ -81,6 +81,9 @@ export function TaskListItem({ ? formatDistanceToNow(run.started_at, { addSuffix: true }) : "—" } + descriptionTitle={ + run.started_at ? new Date(run.started_at).toString() : undefined + } onClick={onClick} selected={selected} actions={ diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index a0eb141aa9..1e8dca865c 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -1053,6 +1053,7 @@ "$ref": "#/components/schemas/ClarificationNeededResponse" }, { "$ref": "#/components/schemas/BlockListResponse" }, + { "$ref": "#/components/schemas/BlockDetailsResponse" }, { "$ref": "#/components/schemas/BlockOutputResponse" }, { "$ref": "#/components/schemas/DocSearchResultsResponse" }, { "$ref": "#/components/schemas/DocPageResponse" }, @@ -6958,6 +6959,58 @@ "enum": ["run", "byte", "second"], "title": "BlockCostType" }, + "BlockDetails": { + "properties": { + "id": { "type": "string", "title": "Id" }, + "name": { "type": "string", "title": "Name" }, + "description": { "type": "string", "title": "Description" }, + "inputs": { + "additionalProperties": true, + "type": "object", + "title": "Inputs", + "default": {} + }, + "outputs": { + "additionalProperties": true, + "type": "object", + "title": "Outputs", + "default": {} + }, + "credentials": { + "items": { "$ref": "#/components/schemas/CredentialsMetaInput" }, + "type": "array", + "title": "Credentials", + "default": [] + } + }, + "type": "object", + "required": ["id", "name", "description"], + "title": "BlockDetails", + "description": "Detailed block information." + }, + "BlockDetailsResponse": { + "properties": { + "type": { + "$ref": "#/components/schemas/ResponseType", + "default": "block_details" + }, + "message": { "type": "string", "title": "Message" }, + "session_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Session Id" + }, + "block": { "$ref": "#/components/schemas/BlockDetails" }, + "user_authenticated": { + "type": "boolean", + "title": "User Authenticated", + "default": false + } + }, + "type": "object", + "required": ["message", "block"], + "title": "BlockDetailsResponse", + "description": "Response for block details (first run_block attempt)." 
+ }, "BlockInfo": { "properties": { "id": { "type": "string", "title": "Id" }, @@ -7013,62 +7066,13 @@ "properties": { "id": { "type": "string", "title": "Id" }, "name": { "type": "string", "title": "Name" }, - "description": { "type": "string", "title": "Description" }, - "categories": { - "items": { "type": "string" }, - "type": "array", - "title": "Categories" - }, - "input_schema": { - "additionalProperties": true, - "type": "object", - "title": "Input Schema" - }, - "output_schema": { - "additionalProperties": true, - "type": "object", - "title": "Output Schema" - }, - "required_inputs": { - "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" }, - "type": "array", - "title": "Required Inputs", - "description": "List of required input fields for this block" - } + "description": { "type": "string", "title": "Description" } }, "type": "object", - "required": [ - "id", - "name", - "description", - "categories", - "input_schema", - "output_schema" - ], + "required": ["id", "name", "description"], "title": "BlockInfoSummary", "description": "Summary of a block for search results." }, - "BlockInputFieldInfo": { - "properties": { - "name": { "type": "string", "title": "Name" }, - "type": { "type": "string", "title": "Type" }, - "description": { - "type": "string", - "title": "Description", - "default": "" - }, - "required": { - "type": "boolean", - "title": "Required", - "default": false - }, - "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" } - }, - "type": "object", - "required": ["name", "type"], - "title": "BlockInputFieldInfo", - "description": "Information about a block input field." - }, "BlockListResponse": { "properties": { "type": { @@ -7086,12 +7090,7 @@ "title": "Blocks" }, "count": { "type": "integer", "title": "Count" }, - "query": { "type": "string", "title": "Query" }, - "usage_hint": { - "type": "string", - "title": "Usage Hint", - "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema." 
- } + "query": { "type": "string", "title": "Query" } }, "type": "object", "required": ["message", "blocks", "count", "query"], @@ -10484,6 +10483,7 @@ "agent_saved", "clarification_needed", "block_list", + "block_details", "block_output", "doc_search_results", "doc_page", diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css index dd1d17cde7..4a1691eec3 100644 --- a/autogpt_platform/frontend/src/app/globals.css +++ b/autogpt_platform/frontend/src/app/globals.css @@ -180,3 +180,14 @@ body[data-google-picker-open="true"] [data-dialog-content] { z-index: 1 !important; pointer-events: none !important; } + +/* CoPilot chat table styling — remove left/right borders, increase padding */ +[data-streamdown="table-wrapper"] table { + border-left: none; + border-right: none; +} + +[data-streamdown="table-wrapper"] th, +[data-streamdown="table-wrapper"] td { + padding: 0.875rem 1rem; /* py-3.5 px-4 */ +} diff --git a/autogpt_platform/frontend/src/components/contextual/OutputRenderers/renderers/MarkdownRenderer.tsx b/autogpt_platform/frontend/src/components/contextual/OutputRenderers/renderers/MarkdownRenderer.tsx index d94966c6c8..9815cea6ff 100644 --- a/autogpt_platform/frontend/src/components/contextual/OutputRenderers/renderers/MarkdownRenderer.tsx +++ b/autogpt_platform/frontend/src/components/contextual/OutputRenderers/renderers/MarkdownRenderer.tsx @@ -226,7 +226,7 @@ function renderMarkdown( table: ({ children, ...props }) => (
           <table
-            className="w-full border-collapse border border-zinc-200"
+            className="w-full border-collapse border-y border-zinc-200"
             {...props}
           >
             {children}
           </table>
@@ -235,7 +235,7 @@
         ),
         th: ({ children, ...props }) => (
           <th
-            className="border border-zinc-200 px-3 py-2 text-left font-medium"
+            className="border-y border-zinc-200 px-4 py-3.5 text-left font-medium"
             {...props}
           >
             {children}
@@ -243,7 +243,7 @@
         ),
         td: ({ children, ...props }) => (
           <td
-            className="border border-zinc-200 px-3 py-2"
+            className="border-y border-zinc-200 px-4 py-3.5"
             {...props}
           >
             {children}
diff --git a/docs/integrations/block-integrations/llm.md b/docs/integrations/block-integrations/llm.md
index 20a5147fcd..9c96ef56c0 100644
--- a/docs/integrations/block-integrations/llm.md
+++ b/docs/integrations/block-integrations/llm.md
@@ -563,7 +563,7 @@ The block supports conversation continuation through three mechanisms:
 |--------|-------------|------|
 | error | Error message if execution failed | str |
 | response | The output/response from Claude Code execution | str |
-| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', and 'content' fields. | List[FileOutput] |
+| files | List of text files created/modified by Claude Code during this execution. Each file has 'path', 'relative_path', 'name', 'content', and 'workspace_ref' fields. workspace_ref contains a workspace:// URI if the file was stored to workspace. | List[SandboxFileOutput] |
 | conversation_history | Full conversation history including this turn. Pass this to conversation_history input to continue on a fresh sandbox if the previous sandbox timed out. | str |
 | session_id | Session ID for this conversation. Pass this back along with sandbox_id to continue the conversation. | str |
 | sandbox_id | ID of the sandbox instance. Pass this back along with session_id to continue the conversation. This is None if dispose_sandbox was True (sandbox was disposed). | str |
diff --git a/docs/integrations/block-integrations/misc.md b/docs/integrations/block-integrations/misc.md
index 4c199bebb4..ad6300ae88 100644
--- a/docs/integrations/block-integrations/misc.md
+++ b/docs/integrations/block-integrations/misc.md
@@ -215,6 +215,7 @@ The sandbox includes pip and npm pre-installed. Set timeout to limit execution t
 | response | Text output (if any) of the main execution result | str |
 | stdout_logs | Standard output logs from execution | str |
 | stderr_logs | Standard error logs from execution | str |
+| files | Files created or modified during execution. Each file has path, name, content, and workspace_ref (if stored). | List[SandboxFileOutput] |
 
 ### Possible use case
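Both doc tables above describe the same `SandboxFileOutput` shape. A consumer-side model of it, inferred from the documented fields (a sketch, not a generated type):

```ts
// Inferred from the docs: workspace_ref is only present when the file
// was persisted to the workspace, and then holds a workspace:// URI.
interface SandboxFileOutput {
  path: string;
  relative_path: string;
  name: string;
  content: string;
  workspace_ref?: string; // e.g. a workspace:// URI when stored
}
```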