Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Compare commits: feat/opena ... refactor/r (10 Commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 5739de04f0 |  |
|  | 5035b69c79 |  |
|  | 86af8fc856 |  |
|  | dfa517300b |  |
|  | 28ad3d0b01 |  |
|  | 43b25b5e2f |  |
|  | ab0b537cc7 |  |
|  | 9a8c6ad609 |  |
|  | 361d6ff6fc |  |
|  | 0fe6cc8dc7 |  |
```diff
@@ -5,42 +5,13 @@
 !docs/
 
 # Platform - Libs
-!autogpt_platform/autogpt_libs/autogpt_libs/
-!autogpt_platform/autogpt_libs/pyproject.toml
-!autogpt_platform/autogpt_libs/poetry.lock
-!autogpt_platform/autogpt_libs/README.md
+!autogpt_platform/autogpt_libs/
 
 # Platform - Backend
-!autogpt_platform/backend/backend/
-!autogpt_platform/backend/test/e2e_test_data.py
-!autogpt_platform/backend/migrations/
-!autogpt_platform/backend/schema.prisma
-!autogpt_platform/backend/pyproject.toml
-!autogpt_platform/backend/poetry.lock
-!autogpt_platform/backend/README.md
-!autogpt_platform/backend/.env
-!autogpt_platform/backend/gen_prisma_types_stub.py
-
-# Platform - Market
-!autogpt_platform/market/market/
-!autogpt_platform/market/scripts.py
-!autogpt_platform/market/schema.prisma
-!autogpt_platform/market/pyproject.toml
-!autogpt_platform/market/poetry.lock
-!autogpt_platform/market/README.md
+!autogpt_platform/backend/
 
 # Platform - Frontend
-!autogpt_platform/frontend/src/
-!autogpt_platform/frontend/public/
-!autogpt_platform/frontend/scripts/
-!autogpt_platform/frontend/package.json
-!autogpt_platform/frontend/pnpm-lock.yaml
-!autogpt_platform/frontend/tsconfig.json
-!autogpt_platform/frontend/README.md
-## config
-!autogpt_platform/frontend/*.config.*
-!autogpt_platform/frontend/.env.*
-!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/
 
 # Classic - AutoGPT
 !classic/original_autogpt/autogpt/
@@ -64,6 +35,38 @@
 # Classic - Frontend
 !classic/frontend/build/web/
 
-# Explicitly re-ignore some folders
-.*
-**/__pycache__
+# Explicitly re-ignore unwanted files from whitelisted directories
+# Note: These patterns MUST come after the whitelist rules to take effect
+
+# Hidden files and directories (but keep frontend .env files needed for build)
+**/.*
+!autogpt_platform/frontend/.env
+!autogpt_platform/frontend/.env.default
+!autogpt_platform/frontend/.env.production
+
+# Python artifacts
+**/__pycache__/
+**/*.pyc
+**/*.pyo
+**/.venv/
+**/.ruff_cache/
+**/.pytest_cache/
+**/.coverage
+**/htmlcov/
+
+# Node artifacts
+**/node_modules/
+**/.next/
+**/storybook-static/
+**/playwright-report/
+**/test-results/
+
+# Build artifacts
+**/dist/
+**/build/
+!autogpt_platform/frontend/src/**/build/
+**/target/
+
+# Logs and temp files
+**/*.log
+**/*.tmp
```
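The rewrite above leans on the last-match-wins evaluation of ignore files: `**/.*` can only re-ignore hidden files because it comes after the whitelist entries, and the frontend `.env` exceptions only survive because they come after `**/.*` in turn. A minimal sketch of that ordering rule, using a deliberately simplified matcher (Python's `fnmatch`, not Docker's real globbing):

```python
# Simplified last-match-wins matcher (illustration only; Docker's actual
# .dockerignore globbing handles ** and path separators differently).
from fnmatch import fnmatch


def ignored(path: str, rules: list[tuple[str, bool]]) -> bool:
    """rules: (pattern, is_ignored) pairs; the LAST matching rule decides."""
    verdict = False  # default: not ignored
    for pattern, is_ignored in rules:
        if fnmatch(path, pattern):
            verdict = is_ignored
    return verdict


rules = [
    ("*", True),                                # ignore everything
    ("autogpt_platform/frontend/*", False),     # whitelist the frontend
    ("*/.*", True),                             # re-ignore hidden files...
    ("autogpt_platform/frontend/.env", False),  # ...but keep this .env
]
assert not ignored("autogpt_platform/frontend/package.json", rules)
assert ignored("autogpt_platform/frontend/.eslintrc", rules)
assert not ignored("autogpt_platform/frontend/.env", rules)
```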
.github/workflows/claude-ci-failure-auto-fix.yml (vendored, 42 lines changed)

```diff
@@ -40,6 +40,48 @@ jobs:
           git checkout -b "$BRANCH_NAME"
           echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
 
+      # Backend Python/Poetry setup (so Claude can run linting/tests)
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Set up Python dependency cache
+        uses: actions/cache@v5
+        with:
+          path: ~/.cache/pypoetry
+          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
+
+      - name: Install Poetry
+        run: |
+          cd autogpt_platform/backend
+          HEAD_POETRY_VERSION=$(python3 ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
+          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -
+          echo "$HOME/.local/bin" >> $GITHUB_PATH
+
+      - name: Install Python dependencies
+        working-directory: autogpt_platform/backend
+        run: poetry install
+
+      - name: Generate Prisma Client
+        working-directory: autogpt_platform/backend
+        run: poetry run prisma generate && poetry run gen-prisma-stub
+
+      # Frontend Node.js/pnpm setup (so Claude can run linting/tests)
+      - name: Enable corepack
+        run: corepack enable
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v6
+        with:
+          node-version: "22"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+
+      - name: Install JavaScript dependencies
+        working-directory: autogpt_platform/frontend
+        run: pnpm install --frozen-lockfile
+
       - name: Get CI failure details
         id: failure_details
         uses: actions/github-script@v8
```
.github/workflows/claude-dependabot.yml (vendored, 22 lines changed)

```diff
@@ -77,27 +77,15 @@ jobs:
         run: poetry run prisma generate && poetry run gen-prisma-stub
 
       # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend
```
.github/workflows/claude.yml (vendored, 22 lines changed)

```diff
@@ -93,27 +93,15 @@ jobs:
         run: poetry run prisma generate && poetry run gen-prisma-stub
 
      # Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
+      - name: Enable corepack
+        run: corepack enable
+
       - name: Set up Node.js
         uses: actions/setup-node@v6
         with:
           node-version: "22"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Set pnpm store directory
-        run: |
-          pnpm config set store-dir ~/.pnpm-store
-          echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
-
-      - name: Cache frontend dependencies
-        uses: actions/cache@v5
-        with:
-          path: ~/.pnpm-store
-          key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install JavaScript dependencies
         working-directory: autogpt_platform/frontend
```
.github/workflows/platform-frontend-ci.yml (vendored, 241 lines changed)

```diff
@@ -26,7 +26,6 @@ jobs:
   setup:
     runs-on: ubuntu-latest
     outputs:
-      cache-key: ${{ steps.cache-key.outputs.key }}
       components-changed: ${{ steps.filter.outputs.components }}
 
     steps:
@@ -41,28 +40,17 @@
             components:
               - 'autogpt_platform/frontend/src/components/**'
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Generate cache key
-        id: cache-key
-        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
-
-      - name: Cache dependencies
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ steps.cache-key.outputs.key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
-      - name: Install dependencies
+      - name: Install dependencies to populate cache
         run: pnpm install --frozen-lockfile
 
   lint:
@@ -73,22 +61,15 @@
       - name: Checkout repository
        uses: actions/checkout@v6
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -111,22 +92,15 @@
         with:
           fetch-depth: 0
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
@@ -141,10 +115,8 @@
           exitOnceUploaded: true
 
   e2e_test:
+    name: end-to-end tests
     runs-on: big-boi
-    needs: setup
-    strategy:
-      fail-fast: false
 
     steps:
       - name: Checkout repository
@@ -152,19 +124,11 @@
         with:
           submodules: recursive
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
-      - name: Enable corepack
-        run: corepack enable
-
-      - name: Copy default supabase .env
+      - name: Set up Platform - Copy default supabase .env
         run: |
           cp ../.env.default ../.env
 
-      - name: Copy backend .env and set OpenAI API key
+      - name: Set up Platform - Copy backend .env and set OpenAI API key
         run: |
           cp ../backend/.env.default ../backend/.env
           echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -172,77 +136,125 @@
           # Used by E2E test data script to generate embeddings for approved store agents
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
 
-      - name: Set up Docker Buildx
+      - name: Set up Platform - Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          driver: docker-container
+          driver-opts: network=host
 
-      - name: Cache Docker layers
+      - name: Set up Platform - Expose GHA cache to docker buildx CLI
+        uses: crazy-max/ghaction-github-runtime@v3
+
+      - name: Set up Platform - Build Docker images (with cache)
+        working-directory: autogpt_platform
+        run: |
+          pip install pyyaml
+
+          # Resolve extends and generate a flat compose file that bake can understand
+          docker compose -f docker-compose.yml config > docker-compose.resolved.yml
+
+          # Add cache configuration to the resolved compose file
+          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
+            --source docker-compose.resolved.yml \
+            --cache-from "type=gha" \
+            --cache-to "type=gha,mode=max" \
+            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
+            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
+            --git-ref "${{ github.ref }}"
+
+          # Build with bake using the resolved compose file (now includes cache config)
+          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
+        env:
+          NEXT_PUBLIC_PW_TEST: true
+
+      - name: Set up tests - Cache E2E test data
+        id: e2e-data-cache
         uses: actions/cache@v5
         with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-frontend-test-
+          path: /tmp/e2e_test_data.sql
+          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
 
-      - name: Run docker compose
+      - name: Set up Platform - Start Supabase DB + Auth
         run: |
-          NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
+          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
+          echo "Waiting for database to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
+          echo "Waiting for auth service to be ready..."
+          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
+
+      - name: Set up Platform - Run migrations
+        run: |
+          echo "Running migrations..."
+          docker compose -f ../docker-compose.resolved.yml run --rm migrate
+          echo "✅ Migrations completed"
         env:
-          DOCKER_BUILDKIT: 1
-          BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
-          BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
+          NEXT_PUBLIC_PW_TEST: true
 
-      - name: Move cache
+      - name: Set up tests - Load cached E2E test data
+        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
         run: |
-          rm -rf /tmp/.buildx-cache
-          if [ -d "/tmp/.buildx-cache-new" ]; then
-            mv /tmp/.buildx-cache-new /tmp/.buildx-cache
-          fi
+          echo "✅ Found cached E2E test data, restoring..."
+          {
+            echo "SET session_replication_role = 'replica';"
+            cat /tmp/e2e_test_data.sql
+            echo "SET session_replication_role = 'origin';"
+          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
+          # Refresh materialized views after restore
+          docker compose -f ../docker-compose.resolved.yml exec -T db \
+            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
 
-      - name: Wait for services to be ready
+          echo "✅ E2E test data restored from cache"
+
+      - name: Set up Platform - Start (all other services)
         run: |
+          docker compose -f ../docker-compose.resolved.yml up -d --no-build
           echo "Waiting for rest_server to be ready..."
           timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
-          echo "Waiting for database to be ready..."
-          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
+        env:
+          NEXT_PUBLIC_PW_TEST: true
 
-      - name: Create E2E test data
+      - name: Set up tests - Create E2E test data
+        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
         run: |
           echo "Creating E2E test data..."
-          # First try to run the script from inside the container
-          if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
-            echo "✅ Found e2e_test_data.py in container, running it..."
-            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
-              echo "❌ E2E test data creation failed!"
-              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
-              exit 1
-            }
-          else
-            echo "⚠️ e2e_test_data.py not found in container, copying and running..."
-            # Copy the script into the container and run it
-            docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
-              echo "❌ Failed to copy script to container"
-              exit 1
-            }
-            docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
-              echo "❌ E2E test data creation failed!"
-              docker compose -f ../docker-compose.yml logs --tail=50 rest_server
-              exit 1
-            }
-          fi
+          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
+          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
+            echo "❌ E2E test data creation failed!"
+            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
+            exit 1
+          }
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+          # Dump auth.users + platform schema for cache (two separate dumps)
+          echo "Dumping database for cache..."
+          {
+            docker compose -f ../docker-compose.resolved.yml exec -T db \
+              pg_dump -U postgres --data-only --column-inserts \
+              --table='auth.users' postgres
+            docker compose -f ../docker-compose.resolved.yml exec -T db \
+              pg_dump -U postgres --data-only --column-inserts \
+              --schema=platform \
+              --exclude-table='platform._prisma_migrations' \
+              --exclude-table='platform.apscheduler_jobs' \
+              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
+              postgres
+          } > /tmp/e2e_test_data.sql
+
+          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
+
+      - name: Set up tests - Enable corepack
+        run: corepack enable
+
+      - name: Set up tests - Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
-      - name: Install dependencies
+      - name: Set up tests - Install dependencies
         run: pnpm install --frozen-lockfile
 
-      - name: Install Browser 'chromium'
+      - name: Set up tests - Install browser 'chromium'
         run: pnpm playwright install --with-deps chromium
 
       - name: Run Playwright tests
@@ -269,7 +281,7 @@
 
       - name: Print Final Docker Compose logs
         if: always()
-        run: docker compose -f ../docker-compose.yml logs
+        run: docker compose -f ../docker-compose.resolved.yml logs
 
   integration_test:
     runs-on: ubuntu-latest
@@ -281,22 +293,15 @@
         with:
           submodules: recursive
 
-      - name: Set up Node.js
-        uses: actions/setup-node@v6
-        with:
-          node-version: "22.18.0"
-
       - name: Enable corepack
         run: corepack enable
 
-      - name: Restore dependencies cache
-        uses: actions/cache@v5
+      - name: Set up Node
+        uses: actions/setup-node@v6
         with:
-          path: ~/.pnpm-store
-          key: ${{ needs.setup.outputs.cache-key }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
-            ${{ runner.os }}-pnpm-
+          node-version: "22.18.0"
+          cache: "pnpm"
+          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
 
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
```
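One detail worth noting in the e2e_test job above: the cached SQL dump is replayed between two `session_replication_role` statements, which suspends trigger and foreign-key enforcement while rows are inserted out of dependency order, then restores it. A rough Python equivalent of that shell pipeline (hypothetical helper, not part of this changeset):

```python
# Hypothetical re-sketch of the "Load cached E2E test data" shell step above.
import subprocess


def restore_e2e_dump(dump_path: str = "/tmp/e2e_test_data.sql",
                     compose_file: str = "../docker-compose.resolved.yml") -> None:
    with open(dump_path) as f:
        dump_sql = f.read()
    # 'replica' disables triggers and FK enforcement during the bulk insert;
    # 'origin' restores normal behavior afterwards.
    sql = (
        "SET session_replication_role = 'replica';\n"
        + dump_sql
        + "\nSET session_replication_role = 'origin';\n"
    )
    subprocess.run(
        ["docker", "compose", "-f", compose_file, "exec", "-T", "db",
         "psql", "-U", "postgres", "-d", "postgres", "-b"],
        input=sql.encode(), check=True,
    )
```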
.github/workflows/scripts/docker-ci-fix-compose-build-cache.py (vendored, new file, 195 lines)

```python
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""

import argparse

import yaml


DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []

    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)

        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)

        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name

        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []

        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue

        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123

        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")

            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")

            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")

        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")

            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)

        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f"  - {svc}")
        print(f"    image: {svc_config.get('image', 'N/A')}")
        print(f"    cache_from: {cache_from_list}")
        print(f"    cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f"  - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()
```
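To see what this script produces, the three-tier cache lookup can be previewed for a single component. A minimal sketch with hypothetical inputs (the `abc123` hash and `my-feature` branch are invented; only the scope layout mirrors the script above):

```python
# Hypothetical values; the scope format is the one the script above emits.
component, target = "backend", "server"
component_hash = "abc123"      # would come from --backend-hash (a hashFiles() output)
git_ref_scope = "my-feature"   # --git-ref refs/heads/my-feature, normalized
DEFAULT_BRANCH = "dev"


def scope(suffix: str) -> str:
    return f"platform-{component}-{target}-{suffix}"


cache_from = [
    f"type=gha,scope={scope(component_hash)}",  # 1. exact content-hash match
    f"type=gha,scope={scope(git_ref_scope)}",   # 2. same-branch fallback
    f"type=gha,scope={scope(DEFAULT_BRANCH)}",  # 3. dev-branch fallback
]
cache_to = [
    f"type=gha,mode=max,scope={scope(component_hash)}",
    f"type=gha,mode=max,scope={scope(git_ref_scope)}",
]
print("\n".join(cache_from + cache_to))
```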
```diff
@@ -45,6 +45,11 @@ AutoGPT Platform is a monorepo containing:
 - Backend/Frontend services use YAML anchors for consistent configuration
 - Supabase services (`db/docker/docker-compose.yml`) follow the same pattern
 
+### Branching Strategy
+
+- **`dev`** is the main development branch. All PRs should target `dev`.
+- **`master`** is the production branch. Only used for production releases.
+
 ### Creating Pull Requests
 
 - Create the PR against the `dev` branch of the repository.
```
169
autogpt_platform/autogpt_libs/poetry.lock
generated
169
autogpt_platform/autogpt_libs/poetry.lock
generated
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cryptography"
|
name = "cryptography"
|
||||||
version = "46.0.4"
|
version = "46.0.5"
|
||||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
|
{file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
|
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
|
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
|
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
|
{file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
|
||||||
{file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
|
{file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
|
{file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
|
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
|
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
|
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
|
{file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
|
||||||
{file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
|
{file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
|
{file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
|
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
|
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
|
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
|
{file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
|
||||||
{file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
|
{file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
|
||||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
|
{file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
|
||||||
{file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
|
{file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
|
|||||||
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
||||||
sdist = ["build (>=1.0.0)"]
|
sdist = ["build (>=1.0.0)"]
|
||||||
ssh = ["bcrypt (>=3.1.5)"]
|
ssh = ["bcrypt (>=3.1.5)"]
|
||||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||||
test-randomorder = ["pytest-randomly"]
|
test-randomorder = ["pytest-randomly"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -570,24 +570,25 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "fastapi"
|
name = "fastapi"
|
||||||
version = "0.128.0"
|
version = "0.128.7"
|
||||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.9"
|
python-versions = ">=3.9"
|
||||||
groups = ["main"]
|
groups = ["main"]
|
||||||
files = [
|
files = [
|
||||||
{file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
|
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
|
||||||
{file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
|
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
annotated-doc = ">=0.0.2"
|
annotated-doc = ">=0.0.2"
|
||||||
pydantic = ">=2.7.0"
|
pydantic = ">=2.7.0"
|
||||||
starlette = ">=0.40.0,<0.51.0"
|
starlette = ">=0.40.0,<1.0.0"
|
||||||
typing-extensions = ">=4.8.0"
|
typing-extensions = ">=4.8.0"
|
||||||
|
typing-inspection = ">=0.4.2"
|
||||||
|
|
||||||
[package.extras]
|
[package.extras]
|
||||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
|
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||||
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||||
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||||
|
|
||||||
@@ -1062,14 +1063,14 @@ urllib3 = ">=1.26.0,<3"

 [[package]]
 name = "launchdarkly-server-sdk"
-version = "9.14.1"
+version = "9.15.0"
 description = "LaunchDarkly SDK for Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
-    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
+    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
+    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
 ]

 [package.dependencies]
@@ -1478,14 +1479,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]

 [[package]]
 name = "postgrest"
-version = "2.27.2"
+version = "2.28.0"
 description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
-    {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
+    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
+    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
 ]

 [package.dependencies]
@@ -2248,14 +2249,14 @@ cli = ["click (>=5.0)"]

 [[package]]
 name = "realtime"
-version = "2.27.2"
+version = "2.28.0"
 description = ""
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
-    {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
+    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
+    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
 ]

 [package.dependencies]
@@ -2436,14 +2437,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart

 [[package]]
 name = "storage3"
-version = "2.27.2"
+version = "2.28.0"
 description = "Supabase Storage client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
-    {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
+    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
+    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
 ]

 [package.dependencies]
@@ -2487,35 +2488,35 @@ python-dateutil = ">=2.6.0"

 [[package]]
 name = "supabase"
-version = "2.27.2"
+version = "2.28.0"
 description = "Supabase client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
-    {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
+    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
+    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
 ]

 [package.dependencies]
 httpx = ">=0.26,<0.29"
-postgrest = "2.27.2"
-realtime = "2.27.2"
-storage3 = "2.27.2"
-supabase-auth = "2.27.2"
-supabase-functions = "2.27.2"
+postgrest = "2.28.0"
+realtime = "2.28.0"
+storage3 = "2.28.0"
+supabase-auth = "2.28.0"
+supabase-functions = "2.28.0"
 yarl = ">=1.22.0"

 [[package]]
 name = "supabase-auth"
-version = "2.27.2"
+version = "2.28.0"
 description = "Python Client Library for Supabase Auth"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
-    {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
+    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
+    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
 ]

 [package.dependencies]
@@ -2525,14 +2526,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}

 [[package]]
 name = "supabase-functions"
-version = "2.27.2"
+version = "2.28.0"
 description = "Library for Supabase Functions"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
-    {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
+    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
+    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
 ]

 [package.dependencies]
@@ -2911,4 +2912,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<4.0"
-content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
+content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"

@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
 colorama = "^0.4.6"
 cryptography = "^46.0"
 expiringdict = "^1.2.2"
-fastapi = "^0.128.0"
+fastapi = "^0.128.7"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.14.1"
+launchdarkly-server-sdk = "^9.15.0"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = { version = "^2.11.0", extras = ["crypto"] }
 redis = "^6.2.0"
-supabase = "^2.27.2"
+supabase = "^2.28.0"
 uvicorn = "^0.40.0"

 [tool.poetry.group.dev.dependencies]
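A note on the caret constraints above: for a 0.x release, Poetry's caret pins the minor version, so "^0.128.7" is equivalent to ">=0.128.7,<0.129.0". A minimal Python sketch of that expansion using the packaging library (illustrative only, not part of this diff):

# Check which versions satisfy the PEP 440 range Poetry derives from "^0.128.7".
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=0.128.7,<0.129.0")  # expansion of "^0.128.7" for a 0.x version
for candidate in ["0.128.0", "0.128.7", "0.128.9", "0.129.0"]:
    print(candidate, Version(candidate) in spec)
# 0.128.0 False / 0.128.7 True / 0.128.9 True / 0.129.0 False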
@@ -104,6 +104,12 @@ TWITTER_CLIENT_SECRET=
 # Make a new workspace for your OAuth APP -- trust me
 # https://linear.app/settings/api/applications/new
 # Callback URL: http://localhost:3000/auth/integrations/oauth_callback
+LINEAR_API_KEY=
+# Linear project and team IDs for the feature request tracker.
+# Find these in your Linear workspace URL: linear.app/<workspace>/project/<project-id>
+# and in team settings. Used by the chat copilot to file and search feature requests.
+LINEAR_FEATURE_REQUEST_PROJECT_ID=
+LINEAR_FEATURE_REQUEST_TEAM_ID=
 LINEAR_CLIENT_ID=
 LINEAR_CLIENT_SECRET=
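These new variables are consumed through the backend settings object rather than raw os.environ (see _get_linear_config in the new tool module further down, which checks each one and raises if unset). A minimal sketch of an equivalent fail-fast lookup, assuming only the variable names above:

# Sketch: fail fast when the Linear feature-request settings are missing.
import os

REQUIRED_LINEAR_VARS = (
    "LINEAR_API_KEY",
    "LINEAR_FEATURE_REQUEST_PROJECT_ID",
    "LINEAR_FEATURE_REQUEST_TEAM_ID",
)

def load_linear_settings() -> dict[str, str]:
    missing = [k for k in REQUIRED_LINEAR_VARS if not os.environ.get(k)]
    if missing:
        raise RuntimeError(f"Missing Linear configuration: {', '.join(missing)}")
    return {k: os.environ[k] for k in REQUIRED_LINEAR_VARS}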
@@ -1,3 +1,5 @@
+# ============================ DEPENDENCY BUILDER ============================ #
+
 FROM debian:13-slim AS builder

 # Set environment variables

@@ -51,7 +53,9 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub

-FROM debian:13-slim AS server_dependencies
+# ============================== BACKEND SERVER ============================== #
+
+FROM debian:13-slim AS server

 WORKDIR /app

@@ -63,15 +67,14 @@ ENV POETRY_HOME=/opt/poetry \
 ENV PATH=/opt/poetry/bin:$PATH

 # Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
-RUN apt-get update && apt-get install -y \
+# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
+RUN apt-get update && apt-get install -y --no-install-recommends \
     python3.13 \
     python3-pip \
    ffmpeg \
     imagemagick \
     && rm -rf /var/lib/apt/lists/*

-# Copy only necessary files from builder
-COPY --from=builder /app /app
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
 # Copy Node.js installation for Prisma

@@ -81,30 +84,54 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
 COPY --from=builder /usr/bin/npx /usr/bin/npx
 COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

-ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
-
-RUN mkdir -p /app/autogpt_platform/autogpt_libs
-RUN mkdir -p /app/autogpt_platform/backend
-
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
-
 WORKDIR /app/autogpt_platform/backend

-FROM server_dependencies AS migrate
+# Copy only the .venv from builder (not the entire /app directory)
+# The .venv includes the generated Prisma client
+COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
+ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

-# Migration stage only needs schema and migrations - much lighter than full backend
-COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
-COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
-COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations
+# Copy dependency files + autogpt_libs (path dependency)
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./

-FROM server_dependencies AS server
-
-COPY autogpt_platform/backend /app/autogpt_platform/backend
+# Copy backend code + docs (for Copilot docs search)
+COPY autogpt_platform/backend ./
 COPY docs /app/docs
 RUN poetry install --no-ansi --only-root

 ENV PORT=8000

 CMD ["poetry", "run", "rest"]
+
+# =============================== DB MIGRATOR =============================== #
+
+# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
+FROM debian:13-slim AS migrate
+
+WORKDIR /app/autogpt_platform/backend
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3.13 \
+    python3-pip \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy Node.js from builder (needed for Prisma CLI)
+COPY --from=builder /usr/bin/node /usr/bin/node
+COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
+COPY --from=builder /usr/bin/npm /usr/bin/npm
+
+# Copy Prisma binaries
+COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
+
+# Install prisma-client-py directly (much smaller than copying full venv)
+RUN pip3 install prisma>=0.15.0 --break-system-packages
+
+COPY autogpt_platform/backend/schema.prisma ./
+COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
+COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
+COPY autogpt_platform/backend/migrations ./migrations
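With the stages renamed and split as above, the backend server image and the DB migrator can be built independently by target name. A sketch of selecting the targets at build time (the image tags here are placeholders, not names used by this repo):

# Sketch: build the "server" and "migrate" targets from the Dockerfile above.
import subprocess

for target, tag in (("server", "autogpt-backend"), ("migrate", "autogpt-migrate")):
    subprocess.run(
        ["docker", "build", "--target", target, "-t", tag, "."],
        check=True,  # raise if either build fails
    )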
@@ -24,6 +24,7 @@ from .tools.models import (
     AgentPreviewResponse,
     AgentSavedResponse,
     AgentsFoundResponse,
+    BlockDetailsResponse,
     BlockListResponse,
     BlockOutputResponse,
     ClarificationNeededResponse,

@@ -971,6 +972,7 @@ ToolResponseUnion = (
     | AgentSavedResponse
     | ClarificationNeededResponse
     | BlockListResponse
+    | BlockDetailsResponse
     | BlockOutputResponse
     | DocSearchResultsResponse
     | DocPageResponse
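Adding BlockDetailsResponse to ToolResponseUnion means any consumer that branches on the union's members picks the new type up without further changes. A self-contained sketch of the pattern with stand-in models (these are not the real response classes):

# Stand-ins illustrating the union-of-response-models pattern.
from pydantic import BaseModel

class BlockListResponse(BaseModel):
    blocks: list[str]

class BlockDetailsResponse(BaseModel):
    block_id: str
    description: str

ToolResponse = BlockListResponse | BlockDetailsResponse

def render(resp: ToolResponse) -> str:
    # isinstance dispatch keeps working as members are added to the union
    if isinstance(resp, BlockDetailsResponse):
        return f"{resp.block_id}: {resp.description}"
    return ", ".join(resp.blocks)

print(render(BlockDetailsResponse(block_id="http_request", description="Makes an HTTP call")))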
@@ -1245,6 +1245,7 @@ async def _stream_chat_chunks(
             return
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 # Calculate delay with exponential backoff

@@ -1260,12 +1261,27 @@ async def _stream_chat_chunks(
                 continue  # Retry the stream
             else:
                 # Non-retryable error or max retries exceeded
-                logger.error(
-                    f"Error in stream (not retrying): {e!s}",
-                    exc_info=True,
+                _log_api_error(
+                    error=e,
+                    context="stream (not retrying)",
+                    session_id=session.session_id if session else None,
+                    message_count=len(messages) if messages else None,
+                    model=model,
+                    retry_count=retry_count,
                 )
                 error_code = None
                 error_text = str(e)
+
+                error_details = _extract_api_error_details(e)
+                if error_details.get("response_body"):
+                    body = error_details["response_body"]
+                    if isinstance(body, dict):
+                        err = body.get("error")
+                        if isinstance(err, dict) and err.get("message"):
+                            error_text = err["message"]
+                        elif body.get("message"):
+                            error_text = body["message"]
+
                 if _is_region_blocked_error(e):
                     error_code = "MODEL_NOT_AVAILABLE_REGION"
                     error_text = (

@@ -1282,9 +1298,13 @@ async def _stream_chat_chunks(

     # If we exit the retry loop without returning, it means we exhausted retries
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
-            exc_info=True,
+        _log_api_error(
+            error=last_error,
+            context=f"stream (max retries {MAX_RETRIES} exceeded)",
+            session_id=session.session_id if session else None,
+            message_count=len(messages) if messages else None,
+            model=model,
+            retry_count=MAX_RETRIES,
         )
         yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
         yield StreamFinish()

@@ -1857,6 +1877,7 @@ async def _generate_llm_continuation(
             break  # Success, exit retry loop
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 delay = min(

@@ -1870,17 +1891,25 @@ async def _generate_llm_continuation(
                 await asyncio.sleep(delay)
                 continue
             else:
-                # Non-retryable error - log and exit gracefully
-                logger.error(
-                    f"Non-retryable error in LLM continuation: {e!s}",
-                    exc_info=True,
+                # Non-retryable error - log details and exit gracefully
+                _log_api_error(
+                    error=e,
+                    context="LLM continuation (not retrying)",
+                    session_id=session_id,
+                    message_count=len(messages) if messages else None,
+                    model=config.model,
+                    retry_count=retry_count,
                 )
                 return

     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded for LLM continuation. "
-            f"Last error: {last_error!s}"
+        _log_api_error(
+            error=last_error,
+            context=f"LLM continuation (max retries {MAX_RETRIES} exceeded)",
+            session_id=session_id,
+            message_count=len(messages) if messages else None,
+            model=config.model,
+            retry_count=MAX_RETRIES,
         )
         return

@@ -1920,6 +1949,91 @@ async def _generate_llm_continuation(
         logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)


+def _log_api_error(
+    error: Exception,
+    context: str,
+    session_id: str | None = None,
+    message_count: int | None = None,
+    model: str | None = None,
+    retry_count: int = 0,
+) -> None:
+    """Log detailed API error information for debugging."""
+    details = _extract_api_error_details(error)
+    details["context"] = context
+    details["session_id"] = session_id
+    details["message_count"] = message_count
+    details["model"] = model
+    details["retry_count"] = retry_count
+
+    if isinstance(error, RateLimitError):
+        logger.warning(f"Rate limit error in {context}: {details}", exc_info=error)
+    elif isinstance(error, APIConnectionError):
+        logger.warning(f"API connection error in {context}: {details}", exc_info=error)
+    elif isinstance(error, APIStatusError) and error.status_code >= 500:
+        logger.error(f"API server error (5xx) in {context}: {details}", exc_info=error)
+    else:
+        logger.error(f"API error in {context}: {details}", exc_info=error)
+
+
+def _extract_api_error_details(error: Exception) -> dict[str, Any]:
+    """Extract detailed information from OpenAI/OpenRouter API errors."""
+    error_msg = str(error)
+    details: dict[str, Any] = {
+        "error_type": type(error).__name__,
+        "error_message": error_msg[:500] + "..." if len(error_msg) > 500 else error_msg,
+    }
+
+    if hasattr(error, "code"):
+        details["code"] = getattr(error, "code", None)
+    if hasattr(error, "param"):
+        details["param"] = getattr(error, "param", None)
+
+    if isinstance(error, APIStatusError):
+        details["status_code"] = error.status_code
+        details["request_id"] = getattr(error, "request_id", None)
+
+        if hasattr(error, "body") and error.body:
+            details["response_body"] = _sanitize_error_body(error.body)
+
+        if hasattr(error, "response") and error.response:
+            headers = error.response.headers
+            details["openrouter_provider"] = headers.get("x-openrouter-provider")
+            details["openrouter_model"] = headers.get("x-openrouter-model")
+            details["retry_after"] = headers.get("retry-after")
+            details["rate_limit_remaining"] = headers.get("x-ratelimit-remaining")
+
+    return details
+
+
+def _sanitize_error_body(
+    body: Any, max_length: int = 2000
+) -> dict[str, Any] | str | None:
+    """Extract only safe fields from error response body to avoid logging sensitive data."""
+    if not isinstance(body, dict):
+        # Non-dict bodies (e.g., HTML error pages) - return truncated string
+        if body is not None:
+            body_str = str(body)
+            if len(body_str) > max_length:
+                return body_str[:max_length] + "...[truncated]"
+            return body_str
+        return None
+
+    safe_fields = ("message", "type", "code", "param", "error")
+    sanitized: dict[str, Any] = {}
+
+    for field in safe_fields:
+        if field in body:
+            value = body[field]
+            if field == "error" and isinstance(value, dict):
+                sanitized[field] = _sanitize_error_body(value, max_length)
+            elif isinstance(value, str) and len(value) > max_length:
+                sanitized[field] = value[:max_length] + "...[truncated]"
+            else:
+                sanitized[field] = value
+
+    return sanitized if sanitized else None
+
+
 async def _generate_llm_continuation_with_streaming(
     session_id: str,
     user_id: str | None,
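The sanitizer above copies only an allowlist of fields and truncates long strings, so API keys or request payloads in an error body never reach the logs. A standalone restatement of that behavior against a fabricated payload (illustrative; the real function is _sanitize_error_body above):

from typing import Any

SAFE_FIELDS = ("message", "type", "code", "param", "error")

def sanitize(body: dict[str, Any], max_length: int = 2000) -> dict[str, Any]:
    out: dict[str, Any] = {}
    for field in SAFE_FIELDS:
        if field in body:
            value = body[field]
            if field == "error" and isinstance(value, dict):
                out[field] = sanitize(value, max_length)  # recurse into nested error
            elif isinstance(value, str) and len(value) > max_length:
                out[field] = value[:max_length] + "...[truncated]"
            else:
                out[field] = value
    return out

sample = {
    "error": {"message": "rate limited", "code": "429"},
    "api_key": "sk-should-never-be-logged",  # dropped: not on the allowlist
}
print(sanitize(sample))  # {'error': {'message': 'rate limited', 'code': '429'}}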
@@ -12,6 +12,7 @@ from .base import BaseTool
 from .create_agent import CreateAgentTool
 from .customize_agent import CustomizeAgentTool
 from .edit_agent import EditAgentTool
+from .feature_requests import CreateFeatureRequestTool, SearchFeatureRequestsTool
 from .find_agent import FindAgentTool
 from .find_block import FindBlockTool
 from .find_library_agent import FindLibraryAgentTool

@@ -45,6 +46,9 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
     "view_agent_output": AgentOutputTool(),
     "search_docs": SearchDocsTool(),
     "get_doc_page": GetDocPageTool(),
+    # Feature request tools
+    "search_feature_requests": SearchFeatureRequestsTool(),
+    "create_feature_request": CreateFeatureRequestTool(),
     # Workspace tools for CoPilot file operations
     "list_workspace_files": ListWorkspaceFilesTool(),
     "read_workspace_file": ReadWorkspaceFileTool(),
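The registry is a plain name-to-instance dict, so dispatching a tool call is a single lookup. A minimal self-contained sketch of that dispatch (EchoTool is a stand-in for BaseTool subclasses such as SearchFeatureRequestsTool):

import asyncio

class EchoTool:
    async def _execute(self, **kwargs):
        return kwargs  # a real tool returns a ToolResponseBase

REGISTRY = {"search_feature_requests": EchoTool()}

async def dispatch(tool_name: str, **args):
    tool = REGISTRY.get(tool_name)
    if tool is None:
        raise ValueError(f"Unknown tool: {tool_name}")
    return await tool._execute(**args)

print(asyncio.run(dispatch("search_feature_requests", query="dark mode")))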
@@ -0,0 +1,448 @@
+"""Feature request tools - search and create feature requests via Linear."""
+
+import logging
+from typing import Any
+
+from pydantic import SecretStr
+
+from backend.api.features.chat.model import ChatSession
+from backend.api.features.chat.tools.base import BaseTool
+from backend.api.features.chat.tools.models import (
+    ErrorResponse,
+    FeatureRequestCreatedResponse,
+    FeatureRequestInfo,
+    FeatureRequestSearchResponse,
+    NoResultsResponse,
+    ToolResponseBase,
+)
+from backend.blocks.linear._api import LinearClient
+from backend.data.model import APIKeyCredentials
+from backend.data.user import get_user_email_by_id
+from backend.util.settings import Settings
+
+logger = logging.getLogger(__name__)
+
+MAX_SEARCH_RESULTS = 10
+
+# GraphQL queries/mutations
+SEARCH_ISSUES_QUERY = """
+query SearchFeatureRequests($term: String!, $filter: IssueFilter, $first: Int) {
+  searchIssues(term: $term, filter: $filter, first: $first) {
+    nodes {
+      id
+      identifier
+      title
+      description
+    }
+  }
+}
+"""
+
+CUSTOMER_UPSERT_MUTATION = """
+mutation CustomerUpsert($input: CustomerUpsertInput!) {
+  customerUpsert(input: $input) {
+    success
+    customer {
+      id
+      name
+      externalIds
+    }
+  }
+}
+"""
+
+ISSUE_CREATE_MUTATION = """
+mutation IssueCreate($input: IssueCreateInput!) {
+  issueCreate(input: $input) {
+    success
+    issue {
+      id
+      identifier
+      title
+      url
+    }
+  }
+}
+"""
+
+CUSTOMER_NEED_CREATE_MUTATION = """
+mutation CustomerNeedCreate($input: CustomerNeedCreateInput!) {
+  customerNeedCreate(input: $input) {
+    success
+    need {
+      id
+      body
+      customer {
+        id
+        name
+      }
+      issue {
+        id
+        identifier
+        title
+        url
+      }
+    }
+  }
+}
+"""
+
+
+_settings: Settings | None = None
+
+
+def _get_settings() -> Settings:
+    global _settings
+    if _settings is None:
+        _settings = Settings()
+    return _settings
+
+
+def _get_linear_config() -> tuple[LinearClient, str, str]:
+    """Return a configured Linear client, project ID, and team ID.
+
+    Raises RuntimeError if any required setting is missing.
+    """
+    secrets = _get_settings().secrets
+    if not secrets.linear_api_key:
+        raise RuntimeError("LINEAR_API_KEY is not configured")
+    if not secrets.linear_feature_request_project_id:
+        raise RuntimeError("LINEAR_FEATURE_REQUEST_PROJECT_ID is not configured")
+    if not secrets.linear_feature_request_team_id:
+        raise RuntimeError("LINEAR_FEATURE_REQUEST_TEAM_ID is not configured")
+
+    credentials = APIKeyCredentials(
+        id="system-linear",
+        provider="linear",
+        api_key=SecretStr(secrets.linear_api_key),
+        title="System Linear API Key",
+    )
+    client = LinearClient(credentials=credentials)
+    return (
+        client,
+        secrets.linear_feature_request_project_id,
+        secrets.linear_feature_request_team_id,
+    )
+
+
+class SearchFeatureRequestsTool(BaseTool):
+    """Tool for searching existing feature requests in Linear."""
+
+    @property
+    def name(self) -> str:
+        return "search_feature_requests"
+
+    @property
+    def description(self) -> str:
+        return (
+            "Search existing feature requests to check if a similar request "
+            "already exists before creating a new one. Returns matching feature "
+            "requests with their ID, title, and description."
+        )
+
+    @property
+    def parameters(self) -> dict[str, Any]:
+        return {
+            "type": "object",
+            "properties": {
+                "query": {
+                    "type": "string",
+                    "description": "Search term to find matching feature requests.",
+                },
+            },
+            "required": ["query"],
+        }
+
+    @property
+    def requires_auth(self) -> bool:
+        return True
+
+    async def _execute(
+        self,
+        user_id: str | None,
+        session: ChatSession,
+        **kwargs,
+    ) -> ToolResponseBase:
+        query = kwargs.get("query", "").strip()
+        session_id = session.session_id if session else None
+
+        if not query:
+            return ErrorResponse(
+                message="Please provide a search query.",
+                error="Missing query parameter",
+                session_id=session_id,
+            )
+
+        try:
+            client, project_id, _team_id = _get_linear_config()
+            data = await client.query(
+                SEARCH_ISSUES_QUERY,
+                {
+                    "term": query,
+                    "filter": {
+                        "project": {"id": {"eq": project_id}},
+                    },
+                    "first": MAX_SEARCH_RESULTS,
+                },
+            )
+
+            nodes = data.get("searchIssues", {}).get("nodes", [])
+
+            if not nodes:
+                return NoResultsResponse(
+                    message=f"No feature requests found matching '{query}'.",
+                    suggestions=[
+                        "Try different keywords",
+                        "Use broader search terms",
+                        "You can create a new feature request if none exists",
+                    ],
+                    session_id=session_id,
+                )
+
+            results = [
+                FeatureRequestInfo(
+                    id=node["id"],
+                    identifier=node["identifier"],
+                    title=node["title"],
+                    description=node.get("description"),
+                )
+                for node in nodes
+            ]
+
+            return FeatureRequestSearchResponse(
+                message=f"Found {len(results)} feature request(s) matching '{query}'.",
+                results=results,
+                count=len(results),
+                query=query,
+                session_id=session_id,
+            )
+        except Exception as e:
+            logger.exception("Failed to search feature requests")
+            return ErrorResponse(
+                message="Failed to search feature requests.",
+                error=str(e),
+                session_id=session_id,
+            )
+
+
+class CreateFeatureRequestTool(BaseTool):
+    """Tool for creating feature requests (or adding needs to existing ones)."""
+
+    @property
+    def name(self) -> str:
+        return "create_feature_request"
+
+    @property
+    def description(self) -> str:
+        return (
+            "Create a new feature request or add a customer need to an existing one. "
+            "Always search first with search_feature_requests to avoid duplicates. "
+            "If a matching request exists, pass its ID as existing_issue_id to add "
+            "the user's need to it instead of creating a duplicate."
+        )
+
+    @property
+    def parameters(self) -> dict[str, Any]:
+        return {
+            "type": "object",
+            "properties": {
+                "title": {
+                    "type": "string",
+                    "description": "Title for the feature request.",
+                },
+                "description": {
+                    "type": "string",
+                    "description": "Detailed description of what the user wants and why.",
+                },
+                "existing_issue_id": {
+                    "type": "string",
+                    "description": (
+                        "If adding a need to an existing feature request, "
+                        "provide its Linear issue ID (from search results). "
+                        "Omit to create a new feature request."
+                    ),
+                },
+            },
+            "required": ["title", "description"],
+        }
+
+    @property
+    def requires_auth(self) -> bool:
+        return True
+
+    async def _find_or_create_customer(
+        self, client: LinearClient, user_id: str, name: str
+    ) -> dict:
+        """Find existing customer by user_id or create a new one via upsert.
+
+        Args:
+            client: Linear API client.
+            user_id: Stable external ID used to deduplicate customers.
+            name: Human-readable display name (e.g. the user's email).
+        """
+        data = await client.mutate(
+            CUSTOMER_UPSERT_MUTATION,
+            {
+                "input": {
+                    "name": name,
+                    "externalId": user_id,
+                },
+            },
+        )
+        result = data.get("customerUpsert", {})
+        if not result.get("success"):
+            raise RuntimeError(f"Failed to upsert customer: {data}")
+        return result["customer"]
+
+    async def _execute(
+        self,
+        user_id: str | None,
+        session: ChatSession,
+        **kwargs,
+    ) -> ToolResponseBase:
+        title = kwargs.get("title", "").strip()
+        description = kwargs.get("description", "").strip()
+        existing_issue_id = kwargs.get("existing_issue_id")
+        session_id = session.session_id if session else None
+
+        if not title or not description:
+            return ErrorResponse(
+                message="Both title and description are required.",
+                error="Missing required parameters",
+                session_id=session_id,
+            )
+
+        if not user_id:
+            return ErrorResponse(
+                message="Authentication required to create feature requests.",
+                error="Missing user_id",
+                session_id=session_id,
+            )
+
+        try:
+            client, project_id, team_id = _get_linear_config()
+        except Exception as e:
+            logger.exception("Failed to initialize Linear client")
+            return ErrorResponse(
+                message="Failed to create feature request.",
+                error=str(e),
+                session_id=session_id,
+            )
+
+        # Resolve a human-readable name (email) for the Linear customer record.
+        # Fall back to user_id if the lookup fails or returns None.
+        try:
+            customer_display_name = await get_user_email_by_id(user_id) or user_id
+        except Exception:
+            customer_display_name = user_id
+
+        # Step 1: Find or create customer for this user
+        try:
+            customer = await self._find_or_create_customer(
+                client, user_id, customer_display_name
+            )
+            customer_id = customer["id"]
+            customer_name = customer["name"]
+        except Exception as e:
+            logger.exception("Failed to upsert customer in Linear")
+            return ErrorResponse(
+                message="Failed to create feature request.",
+                error=str(e),
+                session_id=session_id,
+            )
+
+        # Step 2: Create or reuse issue
+        issue_id: str | None = None
+        issue_identifier: str | None = None
+        if existing_issue_id:
+            # Add need to existing issue - we still need the issue details for response
+            is_new_issue = False
+            issue_id = existing_issue_id
+        else:
+            # Create new issue in the feature requests project
+            try:
+                data = await client.mutate(
+                    ISSUE_CREATE_MUTATION,
+                    {
+                        "input": {
+                            "title": title,
+                            "description": description,
+                            "teamId": team_id,
+                            "projectId": project_id,
+                        },
+                    },
+                )
+                result = data.get("issueCreate", {})
+                if not result.get("success"):
+                    return ErrorResponse(
+                        message="Failed to create feature request issue.",
+                        error=str(data),
+                        session_id=session_id,
+                    )
+                issue = result["issue"]
+                issue_id = issue["id"]
+                issue_identifier = issue.get("identifier")
+            except Exception as e:
+                logger.exception("Failed to create feature request issue")
+                return ErrorResponse(
+                    message="Failed to create feature request.",
+                    error=str(e),
+                    session_id=session_id,
+                )
+            is_new_issue = True
+
+        # Step 3: Create customer need on the issue
+        try:
+            data = await client.mutate(
+                CUSTOMER_NEED_CREATE_MUTATION,
+                {
+                    "input": {
+                        "customerId": customer_id,
+                        "issueId": issue_id,
+                        "body": description,
+                        "priority": 0,
+                    },
+                },
+            )
+            need_result = data.get("customerNeedCreate", {})
+            if not need_result.get("success"):
+                orphaned = (
+                    {"issue_id": issue_id, "issue_identifier": issue_identifier}
+                    if is_new_issue
+                    else None
+                )
+                return ErrorResponse(
+                    message="Failed to attach customer need to the feature request.",
+                    error=str(data),
+                    details=orphaned,
+                    session_id=session_id,
+                )
+            need = need_result["need"]
+            issue_info = need["issue"]
+        except Exception as e:
+            logger.exception("Failed to create customer need")
+            orphaned = (
+                {"issue_id": issue_id, "issue_identifier": issue_identifier}
+                if is_new_issue
+                else None
+            )
+            return ErrorResponse(
+                message="Failed to attach customer need to the feature request.",
+                error=str(e),
+                details=orphaned,
+                session_id=session_id,
+            )
+
+        return FeatureRequestCreatedResponse(
+            message=(
+                f"{'Created new feature request' if is_new_issue else 'Added your request to existing feature request'}: "
+                f"{issue_info['title']}."
+            ),
+            issue_id=issue_info["id"],
+            issue_identifier=issue_info["identifier"],
+            issue_title=issue_info["title"],
+            issue_url=issue_info.get("url", ""),
+            is_new_issue=is_new_issue,
+            customer_name=customer_name,
+            session_id=session_id,
+        )
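The module's intended protocol is search-then-create: call search_feature_requests first, and pass a matching issue's ID as existing_issue_id so the user's need is attached to it instead of a duplicate issue being filed. A sketch of that flow with the tools injected, so it runs against the mocks used in the tests below just as well as the real Linear-backed implementations:

# Sketch of the search-then-create protocol; the naive first-hit matching is an
# assumption here - in practice the copilot's LLM judges whether a hit matches.
async def file_feature_request(search_tool, create_tool, session, user_id, title, description):
    found = await search_tool._execute(user_id=user_id, session=session, query=title)
    results = getattr(found, "results", None)
    existing_id = results[0].id if results else None
    return await create_tool._execute(
        user_id=user_id,
        session=session,
        title=title,
        description=description,
        existing_issue_id=existing_id,
    )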
@@ -0,0 +1,615 @@
+"""Tests for SearchFeatureRequestsTool and CreateFeatureRequestTool."""
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+
+from backend.api.features.chat.tools.feature_requests import (
+    CreateFeatureRequestTool,
+    SearchFeatureRequestsTool,
+)
+from backend.api.features.chat.tools.models import (
+    ErrorResponse,
+    FeatureRequestCreatedResponse,
+    FeatureRequestSearchResponse,
+    NoResultsResponse,
+)
+
+from ._test_data import make_session
+
+_TEST_USER_ID = "test-user-feature-requests"
+_TEST_USER_EMAIL = "testuser@example.com"
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+_FAKE_PROJECT_ID = "test-project-id"
+_FAKE_TEAM_ID = "test-team-id"
+
+
+def _mock_linear_config(*, query_return=None, mutate_return=None):
+    """Return a patched _get_linear_config that yields a mock LinearClient."""
+    client = AsyncMock()
+    if query_return is not None:
+        client.query.return_value = query_return
+    if mutate_return is not None:
+        client.mutate.return_value = mutate_return
+    return (
+        patch(
+            "backend.api.features.chat.tools.feature_requests._get_linear_config",
+            return_value=(client, _FAKE_PROJECT_ID, _FAKE_TEAM_ID),
+        ),
+        client,
+    )
+
+
+def _search_response(nodes: list[dict]) -> dict:
+    return {"searchIssues": {"nodes": nodes}}
+
+
+def _customer_upsert_response(
+    customer_id: str = "cust-1", name: str = _TEST_USER_EMAIL, success: bool = True
+) -> dict:
+    return {
+        "customerUpsert": {
+            "success": success,
+            "customer": {"id": customer_id, "name": name, "externalIds": [name]},
+        }
+    }
+
+
+def _issue_create_response(
+    issue_id: str = "issue-1",
+    identifier: str = "FR-1",
+    title: str = "New Feature",
+    success: bool = True,
+) -> dict:
+    return {
+        "issueCreate": {
+            "success": success,
+            "issue": {
+                "id": issue_id,
+                "identifier": identifier,
+                "title": title,
+                "url": f"https://linear.app/issue/{identifier}",
+            },
+        }
+    }
+
+
+def _need_create_response(
+    need_id: str = "need-1",
+    issue_id: str = "issue-1",
+    identifier: str = "FR-1",
+    title: str = "New Feature",
+    success: bool = True,
+) -> dict:
+    return {
+        "customerNeedCreate": {
+            "success": success,
+            "need": {
+                "id": need_id,
+                "body": "description",
+                "customer": {"id": "cust-1", "name": _TEST_USER_EMAIL},
+                "issue": {
+                    "id": issue_id,
+                    "identifier": identifier,
+                    "title": title,
+                    "url": f"https://linear.app/issue/{identifier}",
+                },
+            },
+        }
+    }
+
+
+# ===========================================================================
+# SearchFeatureRequestsTool
+# ===========================================================================
+
+
+class TestSearchFeatureRequestsTool:
+    """Tests for SearchFeatureRequestsTool._execute."""
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_successful_search(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        nodes = [
+            {
+                "id": "id-1",
+                "identifier": "FR-1",
+                "title": "Dark mode",
+                "description": "Add dark mode support",
+            },
+            {
+                "id": "id-2",
+                "identifier": "FR-2",
+                "title": "Dark theme",
+                "description": None,
+            },
+        ]
+        patcher, _ = _mock_linear_config(query_return=_search_response(nodes))
+        with patcher:
+            tool = SearchFeatureRequestsTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="dark mode"
+            )
+
+        assert isinstance(resp, FeatureRequestSearchResponse)
+        assert resp.count == 2
+        assert resp.results[0].id == "id-1"
+        assert resp.results[1].identifier == "FR-2"
+        assert resp.query == "dark mode"
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_no_results(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, _ = _mock_linear_config(query_return=_search_response([]))
+        with patcher:
+            tool = SearchFeatureRequestsTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="nonexistent"
+            )
+
+        assert isinstance(resp, NoResultsResponse)
+        assert "nonexistent" in resp.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_empty_query_returns_error(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        tool = SearchFeatureRequestsTool()
+        resp = await tool._execute(user_id=_TEST_USER_ID, session=session, query=" ")
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "query" in resp.error.lower()
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_query_returns_error(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        tool = SearchFeatureRequestsTool()
+        resp = await tool._execute(user_id=_TEST_USER_ID, session=session)
+
+        assert isinstance(resp, ErrorResponse)
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_api_failure(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.query.side_effect = RuntimeError("Linear API down")
+        with patcher:
+            tool = SearchFeatureRequestsTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="test"
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "Linear API down" in resp.error
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_malformed_node_returns_error(self):
+        """A node missing required keys should be caught by the try/except."""
+        session = make_session(user_id=_TEST_USER_ID)
+        # Node missing 'identifier' key
+        bad_nodes = [{"id": "id-1", "title": "Missing identifier"}]
+        patcher, _ = _mock_linear_config(query_return=_search_response(bad_nodes))
+        with patcher:
+            tool = SearchFeatureRequestsTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="test"
+            )
+
+        assert isinstance(resp, ErrorResponse)
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_linear_client_init_failure(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        with patch(
+            "backend.api.features.chat.tools.feature_requests._get_linear_config",
+            side_effect=RuntimeError("No API key"),
+        ):
+            tool = SearchFeatureRequestsTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="test"
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "No API key" in resp.error
+
+
+# ===========================================================================
+# CreateFeatureRequestTool
+# ===========================================================================
+
+
+class TestCreateFeatureRequestTool:
+    """Tests for CreateFeatureRequestTool._execute."""
+
+    @pytest.fixture(autouse=True)
+    def _patch_email_lookup(self):
+        with patch(
+            "backend.api.features.chat.tools.feature_requests.get_user_email_by_id",
+            new_callable=AsyncMock,
+            return_value=_TEST_USER_EMAIL,
+        ):
+            yield
+
+    # ---- Happy paths -------------------------------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_create_new_issue(self):
+        """Full happy path: upsert customer -> create issue -> attach need."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
+            _issue_create_response(),
+            _need_create_response(),
+        ]
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="New Feature",
+                description="Please add this",
+            )
+
+        assert isinstance(resp, FeatureRequestCreatedResponse)
+        assert resp.is_new_issue is True
+        assert resp.issue_identifier == "FR-1"
+        assert resp.customer_name == _TEST_USER_EMAIL
+        assert client.mutate.call_count == 3
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_add_need_to_existing_issue(self):
+        """When existing_issue_id is provided, skip issue creation."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
+            _need_create_response(issue_id="existing-1", identifier="FR-99"),
+        ]
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Existing Feature",
+                description="Me too",
+                existing_issue_id="existing-1",
+            )
+
+        assert isinstance(resp, FeatureRequestCreatedResponse)
+        assert resp.is_new_issue is False
+        assert resp.issue_id == "existing-1"
+        # Only 2 mutations: customer upsert + need create (no issue create)
+        assert client.mutate.call_count == 2
+
+    # ---- Validation errors -------------------------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_title(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        tool = CreateFeatureRequestTool()
+        resp = await tool._execute(
+            user_id=_TEST_USER_ID,
+            session=session,
+            title="",
+            description="some desc",
+        )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "required" in resp.error.lower()
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_description(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        tool = CreateFeatureRequestTool()
+        resp = await tool._execute(
+            user_id=_TEST_USER_ID,
+            session=session,
+            title="Some title",
+            description="",
+        )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "required" in resp.error.lower()
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_user_id(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        tool = CreateFeatureRequestTool()
+        resp = await tool._execute(
+            user_id=None,
+            session=session,
+            title="Some title",
+            description="Some desc",
+        )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "user_id" in resp.error.lower()
+
+    # ---- Linear client init failure ----------------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_linear_client_init_failure(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        with patch(
+            "backend.api.features.chat.tools.feature_requests._get_linear_config",
+            side_effect=RuntimeError("No API key"),
+        ):
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "No API key" in resp.error
+
+    # ---- Customer upsert failures ------------------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_customer_upsert_api_error(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = RuntimeError("Customer API error")
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "Customer API error" in resp.error
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_customer_upsert_not_success(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.return_value = _customer_upsert_response(success=False)
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_customer_malformed_response(self):
+        """Customer dict missing 'id' key should be caught."""
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        # success=True but customer has no 'id'
+        client.mutate.return_value = {
+            "customerUpsert": {
+                "success": True,
+                "customer": {"name": _TEST_USER_ID},
+            }
+        }
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+
+    # ---- Issue creation failures -------------------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_issue_create_api_error(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
+            RuntimeError("Issue create failed"),
+        ]
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert resp.error is not None
+        assert "Issue create failed" in resp.error
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_issue_create_not_success(self):
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
+            _issue_create_response(success=False),
+        ]
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+        assert "Failed to create feature request issue" in resp.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_issue_create_malformed_response(self):
+        """issueCreate success=True but missing 'issue' key."""
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
+            {"issueCreate": {"success": True}},  # no 'issue' key
+        ]
+
+        with patcher:
+            tool = CreateFeatureRequestTool()
+            resp = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                title="Title",
+                description="Desc",
+            )
+
+        assert isinstance(resp, ErrorResponse)
+
+    # ---- Customer need attachment failures ---------------------------------
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_need_create_api_error_new_issue(self):
+        """Need creation fails after new issue was created -> orphaned issue info."""
+        session = make_session(user_id=_TEST_USER_ID)
+        patcher, client = _mock_linear_config()
+        client.mutate.side_effect = [
+            _customer_upsert_response(),
|
||||||
|
_issue_create_response(issue_id="orphan-1", identifier="FR-10"),
|
||||||
|
RuntimeError("Need attach failed"),
|
||||||
|
]
|
||||||
|
|
||||||
|
with patcher:
|
||||||
|
tool = CreateFeatureRequestTool()
|
||||||
|
resp = await tool._execute(
|
||||||
|
user_id=_TEST_USER_ID,
|
||||||
|
session=session,
|
||||||
|
title="Title",
|
||||||
|
description="Desc",
|
||||||
|
)
|
||||||
|
|
||||||
|
assert isinstance(resp, ErrorResponse)
|
||||||
|
assert resp.error is not None
|
||||||
|
assert "Need attach failed" in resp.error
|
||||||
|
assert resp.details is not None
|
||||||
|
assert resp.details["issue_id"] == "orphan-1"
|
||||||
|
assert resp.details["issue_identifier"] == "FR-10"
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(loop_scope="session")
|
||||||
|
async def test_need_create_api_error_existing_issue(self):
|
||||||
|
"""Need creation fails on existing issue -> no orphaned info."""
|
||||||
|
session = make_session(user_id=_TEST_USER_ID)
|
||||||
|
patcher, client = _mock_linear_config()
|
||||||
|
client.mutate.side_effect = [
|
||||||
|
_customer_upsert_response(),
|
||||||
|
RuntimeError("Need attach failed"),
|
||||||
|
]
|
||||||
|
|
||||||
|
with patcher:
|
||||||
|
tool = CreateFeatureRequestTool()
|
||||||
|
resp = await tool._execute(
|
||||||
|
user_id=_TEST_USER_ID,
|
||||||
|
session=session,
|
||||||
|
title="Title",
|
||||||
|
description="Desc",
|
||||||
|
existing_issue_id="existing-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
assert isinstance(resp, ErrorResponse)
|
||||||
|
assert resp.details is None
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(loop_scope="session")
|
||||||
|
async def test_need_create_not_success_includes_orphaned_info(self):
|
||||||
|
"""customerNeedCreate returns success=False -> includes orphaned issue."""
|
||||||
|
session = make_session(user_id=_TEST_USER_ID)
|
||||||
|
patcher, client = _mock_linear_config()
|
||||||
|
client.mutate.side_effect = [
|
||||||
|
_customer_upsert_response(),
|
||||||
|
_issue_create_response(issue_id="orphan-2", identifier="FR-20"),
|
||||||
|
_need_create_response(success=False),
|
||||||
|
]
|
||||||
|
|
||||||
|
with patcher:
|
||||||
|
tool = CreateFeatureRequestTool()
|
||||||
|
resp = await tool._execute(
|
||||||
|
user_id=_TEST_USER_ID,
|
||||||
|
session=session,
|
||||||
|
title="Title",
|
||||||
|
description="Desc",
|
||||||
|
)
|
||||||
|
|
||||||
|
assert isinstance(resp, ErrorResponse)
|
||||||
|
assert resp.details is not None
|
||||||
|
assert resp.details["issue_id"] == "orphan-2"
|
||||||
|
assert resp.details["issue_identifier"] == "FR-20"
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(loop_scope="session")
|
||||||
|
async def test_need_create_not_success_existing_issue_no_details(self):
|
||||||
|
"""customerNeedCreate fails on existing issue -> no orphaned info."""
|
||||||
|
session = make_session(user_id=_TEST_USER_ID)
|
||||||
|
patcher, client = _mock_linear_config()
|
||||||
|
client.mutate.side_effect = [
|
||||||
|
_customer_upsert_response(),
|
||||||
|
_need_create_response(success=False),
|
||||||
|
]
|
||||||
|
|
||||||
|
with patcher:
|
||||||
|
tool = CreateFeatureRequestTool()
|
||||||
|
resp = await tool._execute(
|
||||||
|
user_id=_TEST_USER_ID,
|
||||||
|
session=session,
|
||||||
|
title="Title",
|
||||||
|
description="Desc",
|
||||||
|
existing_issue_id="existing-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
assert isinstance(resp, ErrorResponse)
|
||||||
|
assert resp.details is None
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(loop_scope="session")
|
||||||
|
async def test_need_create_malformed_response(self):
|
||||||
|
"""need_result missing 'need' key after success=True."""
|
||||||
|
session = make_session(user_id=_TEST_USER_ID)
|
||||||
|
patcher, client = _mock_linear_config()
|
||||||
|
client.mutate.side_effect = [
|
||||||
|
_customer_upsert_response(),
|
||||||
|
_issue_create_response(),
|
||||||
|
{"customerNeedCreate": {"success": True}}, # no 'need' key
|
||||||
|
]
|
||||||
|
|
||||||
|
with patcher:
|
||||||
|
tool = CreateFeatureRequestTool()
|
||||||
|
resp = await tool._execute(
|
||||||
|
user_id=_TEST_USER_ID,
|
||||||
|
session=session,
|
||||||
|
title="Title",
|
||||||
|
description="Desc",
|
||||||
|
)
|
||||||
|
|
||||||
|
assert isinstance(resp, ErrorResponse)
|
||||||
|
assert resp.details is not None
|
||||||
|
assert resp.details["issue_id"] == "issue-1"
|
||||||
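
Note on the helpers used above: _mock_linear_config, _customer_upsert_response, _issue_create_response, and _need_create_response are defined earlier in this test file and are not shown in these hunks. The payload shapes they fabricate can be inferred from the malformed-response tests; a rough sketch consistent with those assertions (field values other than "issue-1" are hypothetical, not the actual helper bodies):

    # Shapes implied by the assertions above; the real helpers live earlier in the file.
    customer_upsert = {"customerUpsert": {"success": True, "customer": {"id": "cust-1"}}}
    issue_create = {
        "issueCreate": {"success": True, "issue": {"id": "issue-1", "identifier": "FR-1"}}
    }
    need_create = {"customerNeedCreate": {"success": True, "need": {"id": "need-1"}}}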
@@ -7,7 +7,6 @@ from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
 from backend.api.features.chat.tools.models import (
     BlockInfoSummary,
-    BlockInputFieldInfo,
     BlockListResponse,
     ErrorResponse,
     NoResultsResponse,
@@ -55,7 +54,8 @@ class FindBlockTool(BaseTool):
             "Blocks are reusable components that perform specific tasks like "
             "sending emails, making API calls, processing text, etc. "
             "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
-            "The response includes each block's id, required_inputs, and input_schema."
+            "The response includes each block's id, name, and description. "
+            "Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
         )

     @property
@@ -124,7 +124,7 @@ class FindBlockTool(BaseTool):
             session_id=session_id,
         )

-        # Enrich results with full block information
+        # Enrich results with block information
         blocks: list[BlockInfoSummary] = []
         for result in results:
             block_id = result["content_id"]
@@ -141,65 +141,11 @@ class FindBlockTool(BaseTool):
             ):
                 continue

-            # Get input/output schemas
-            input_schema = {}
-            output_schema = {}
-            try:
-                input_schema = block.input_schema.jsonschema()
-            except Exception as e:
-                logger.debug(
-                    "Failed to generate input schema for block %s: %s",
-                    block_id,
-                    e,
-                )
-            try:
-                output_schema = block.output_schema.jsonschema()
-            except Exception as e:
-                logger.debug(
-                    "Failed to generate output schema for block %s: %s",
-                    block_id,
-                    e,
-                )
-
-            # Get categories from block instance
-            categories = []
-            if hasattr(block, "categories") and block.categories:
-                categories = [cat.value for cat in block.categories]
-
-            # Extract required inputs for easier use
-            required_inputs: list[BlockInputFieldInfo] = []
-            if input_schema:
-                properties = input_schema.get("properties", {})
-                required_fields = set(input_schema.get("required", []))
-                # Get credential field names to exclude from required inputs
-                credentials_fields = set(
-                    block.input_schema.get_credentials_fields().keys()
-                )
-
-                for field_name, field_schema in properties.items():
-                    # Skip credential fields - they're handled separately
-                    if field_name in credentials_fields:
-                        continue
-
-                    required_inputs.append(
-                        BlockInputFieldInfo(
-                            name=field_name,
-                            type=field_schema.get("type", "string"),
-                            description=field_schema.get("description", ""),
-                            required=field_name in required_fields,
-                            default=field_schema.get("default"),
-                        )
-                    )
-
             blocks.append(
                 BlockInfoSummary(
                     id=block_id,
                     name=block.name,
                     description=block.description or "",
-                    categories=categories,
-                    input_schema=input_schema,
-                    output_schema=output_schema,
-                    required_inputs=required_inputs,
                 )
             )

@@ -228,8 +174,7 @@ class FindBlockTool(BaseTool):
         return BlockListResponse(
             message=(
                 f"Found {len(blocks)} block(s) matching '{query}'. "
-                "To execute a block, use run_block with the block's 'id' field "
-                "and provide 'input_data' matching the block's input_schema."
+                "To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
             ),
             blocks=blocks,
            count=len(blocks),
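
With the hunks above applied, find_block no longer serializes schemas at all; a minimal sketch of the slimmed-down payload the model now receives (values illustrative, and other ToolResponseBase fields omitted):

    # Illustrative BlockListResponse payload after this change (values made up):
    {
        "type": "block_list",
        "message": "Found 1 block(s) matching 'email'. To see a block's inputs/outputs "
        "and execute it, use run_block with the block's 'id' - providing no inputs.",
        "blocks": [
            {"id": "email-block-id", "name": "Send Email", "description": "Send an email"}
        ],
        "count": 1,
        "query": "email",
    }

The per-block cost drops to three short strings, which is what the size test in the next file pins under 500 characters.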
@@ -18,7 +18,13 @@ _TEST_USER_ID = "test-user-find-block"


 def make_mock_block(
-    block_id: str, name: str, block_type: BlockType, disabled: bool = False
+    block_id: str,
+    name: str,
+    block_type: BlockType,
+    disabled: bool = False,
+    input_schema: dict | None = None,
+    output_schema: dict | None = None,
+    credentials_fields: dict | None = None,
 ):
     """Create a mock block for testing."""
     mock = MagicMock()
@@ -28,10 +34,13 @@ def make_mock_block(
     mock.block_type = block_type
     mock.disabled = disabled
     mock.input_schema = MagicMock()
-    mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
-    mock.input_schema.get_credentials_fields.return_value = {}
+    mock.input_schema.jsonschema.return_value = input_schema or {
+        "properties": {},
+        "required": [],
+    }
+    mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
     mock.output_schema = MagicMock()
-    mock.output_schema.jsonschema.return_value = {}
+    mock.output_schema.jsonschema.return_value = output_schema or {}
     mock.categories = []
     return mock

@@ -137,3 +146,241 @@ class TestFindBlockFiltering:
         assert isinstance(response, BlockListResponse)
         assert len(response.blocks) == 1
         assert response.blocks[0].id == "normal-block-id"
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_response_size_average_chars_per_block(self):
+        """Measure average chars per block in the serialized response."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        # Realistic block definitions modeled after real blocks
+        block_defs = [
+            {
+                "id": "http-block-id",
+                "name": "Send Web Request",
+                "input_schema": {
+                    "properties": {
+                        "url": {
+                            "type": "string",
+                            "description": "The URL to send the request to",
+                        },
+                        "method": {
+                            "type": "string",
+                            "description": "The HTTP method to use",
+                        },
+                        "headers": {
+                            "type": "object",
+                            "description": "Headers to include in the request",
+                        },
+                        "json_format": {
+                            "type": "boolean",
+                            "description": "If true, send the body as JSON",
+                        },
+                        "body": {
+                            "type": "object",
+                            "description": "Form/JSON body payload",
+                        },
+                        "credentials": {
+                            "type": "object",
+                            "description": "HTTP credentials",
+                        },
+                    },
+                    "required": ["url", "method"],
+                },
+                "output_schema": {
+                    "properties": {
+                        "response": {
+                            "type": "object",
+                            "description": "The response from the server",
+                        },
+                        "client_error": {
+                            "type": "object",
+                            "description": "Errors on 4xx status codes",
+                        },
+                        "server_error": {
+                            "type": "object",
+                            "description": "Errors on 5xx status codes",
+                        },
+                        "error": {
+                            "type": "string",
+                            "description": "Errors for all other exceptions",
+                        },
+                    },
+                },
+                "credentials_fields": {"credentials": True},
+            },
+            {
+                "id": "email-block-id",
+                "name": "Send Email",
+                "input_schema": {
+                    "properties": {
+                        "to_email": {
+                            "type": "string",
+                            "description": "Recipient email address",
+                        },
+                        "subject": {
+                            "type": "string",
+                            "description": "Subject of the email",
+                        },
+                        "body": {
+                            "type": "string",
+                            "description": "Body of the email",
+                        },
+                        "config": {
+                            "type": "object",
+                            "description": "SMTP Config",
+                        },
+                        "credentials": {
+                            "type": "object",
+                            "description": "SMTP credentials",
+                        },
+                    },
+                    "required": ["to_email", "subject", "body", "credentials"],
+                },
+                "output_schema": {
+                    "properties": {
+                        "status": {
+                            "type": "string",
+                            "description": "Status of the email sending operation",
+                        },
+                        "error": {
+                            "type": "string",
+                            "description": "Error message if sending failed",
+                        },
+                    },
+                },
+                "credentials_fields": {"credentials": True},
+            },
+            {
+                "id": "claude-code-block-id",
+                "name": "Claude Code",
+                "input_schema": {
+                    "properties": {
+                        "e2b_credentials": {
+                            "type": "object",
+                            "description": "API key for E2B platform",
+                        },
+                        "anthropic_credentials": {
+                            "type": "object",
+                            "description": "API key for Anthropic",
+                        },
+                        "prompt": {
+                            "type": "string",
+                            "description": "Task or instruction for Claude Code",
+                        },
+                        "timeout": {
+                            "type": "integer",
+                            "description": "Sandbox timeout in seconds",
+                        },
+                        "setup_commands": {
+                            "type": "array",
+                            "description": "Shell commands to run before execution",
+                        },
+                        "working_directory": {
+                            "type": "string",
+                            "description": "Working directory for Claude Code",
+                        },
+                        "session_id": {
+                            "type": "string",
+                            "description": "Session ID to resume a conversation",
+                        },
+                        "sandbox_id": {
+                            "type": "string",
+                            "description": "Sandbox ID to reconnect to",
+                        },
+                        "conversation_history": {
+                            "type": "string",
+                            "description": "Previous conversation history",
+                        },
+                        "dispose_sandbox": {
+                            "type": "boolean",
+                            "description": "Whether to dispose sandbox after execution",
+                        },
+                    },
+                    "required": [
+                        "e2b_credentials",
+                        "anthropic_credentials",
+                        "prompt",
+                    ],
+                },
+                "output_schema": {
+                    "properties": {
+                        "response": {
+                            "type": "string",
+                            "description": "Output from Claude Code execution",
+                        },
+                        "files": {
+                            "type": "array",
+                            "description": "Files created/modified by Claude Code",
+                        },
+                        "conversation_history": {
+                            "type": "string",
+                            "description": "Full conversation history",
+                        },
+                        "session_id": {
+                            "type": "string",
+                            "description": "Session ID for this conversation",
+                        },
+                        "sandbox_id": {
+                            "type": "string",
+                            "description": "ID of the sandbox instance",
+                        },
+                        "error": {
+                            "type": "string",
+                            "description": "Error message if execution failed",
+                        },
+                    },
+                },
+                "credentials_fields": {
+                    "e2b_credentials": True,
+                    "anthropic_credentials": True,
+                },
+            },
+        ]
+
+        search_results = [
+            {"content_id": d["id"], "score": 0.9 - i * 0.1}
+            for i, d in enumerate(block_defs)
+        ]
+        mock_blocks = {
+            d["id"]: make_mock_block(
+                block_id=d["id"],
+                name=d["name"],
+                block_type=BlockType.STANDARD,
+                input_schema=d["input_schema"],
+                output_schema=d["output_schema"],
+                credentials_fields=d["credentials_fields"],
+            )
+            for d in block_defs
+        }
+
+        with patch(
+            "backend.api.features.chat.tools.find_block.unified_hybrid_search",
+            new_callable=AsyncMock,
+            return_value=(search_results, len(search_results)),
+        ), patch(
+            "backend.api.features.chat.tools.find_block.get_block",
+            side_effect=lambda bid: mock_blocks.get(bid),
+        ):
+            tool = FindBlockTool()
+            response = await tool._execute(
+                user_id=_TEST_USER_ID, session=session, query="test"
+            )
+
+        assert isinstance(response, BlockListResponse)
+        assert response.count == len(block_defs)
+
+        total_chars = len(response.model_dump_json())
+        avg_chars = total_chars // response.count
+
+        # Print for visibility in test output
+        print(f"\nTotal response size: {total_chars} chars")
+        print(f"Number of blocks: {response.count}")
+        print(f"Average chars per block: {avg_chars}")
+
+        # The old response was ~90K for 10 blocks (~9K per block).
+        # Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
+        # Now with only id/name/description, we expect ~300 chars per block.
+        assert avg_chars < 500, (
+            f"Average chars per block ({avg_chars}) exceeds 500. "
+            f"Total response: {total_chars} chars for {response.count} blocks."
+        )
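
The 500-character ceiling asserted above follows from the rough budget in the test's own comments; as a back-of-envelope check (figures quoted from those comments, not re-measured here):

    # Size budget from the comments in the test above (approximate figures):
    old_total_chars, n_blocks = 90_000, 10
    per_block_old = old_total_chars // n_blocks  # ~9_000 chars with raw JSON schemas
    per_block_prev = 1_500  # after the earlier optimization (no raw schemas)
    per_block_now = 300     # id/name/description only
    assert per_block_now < 500 < per_block_prev < per_block_old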
@@ -25,6 +25,7 @@ class ResponseType(str, Enum):
     AGENT_SAVED = "agent_saved"
     CLARIFICATION_NEEDED = "clarification_needed"
     BLOCK_LIST = "block_list"
+    BLOCK_DETAILS = "block_details"
     BLOCK_OUTPUT = "block_output"
     DOC_SEARCH_RESULTS = "doc_search_results"
     DOC_PAGE = "doc_page"
@@ -40,6 +41,9 @@ class ResponseType(str, Enum):
     OPERATION_IN_PROGRESS = "operation_in_progress"
     # Input validation
     INPUT_VALIDATION_ERROR = "input_validation_error"
+    # Feature request types
+    FEATURE_REQUEST_SEARCH = "feature_request_search"
+    FEATURE_REQUEST_CREATED = "feature_request_created"


 # Base response model
@@ -334,13 +338,6 @@ class BlockInfoSummary(BaseModel):
     id: str
     name: str
     description: str
-    categories: list[str]
-    input_schema: dict[str, Any]
-    output_schema: dict[str, Any]
-    required_inputs: list[BlockInputFieldInfo] = Field(
-        default_factory=list,
-        description="List of required input fields for this block",
-    )


 class BlockListResponse(ToolResponseBase):
@@ -350,10 +347,25 @@ class BlockListResponse(ToolResponseBase):
     blocks: list[BlockInfoSummary]
     count: int
     query: str
-    usage_hint: str = Field(
-        default="To execute a block, call run_block with block_id set to the block's "
-        "'id' field and input_data containing the required fields from input_schema."
-    )
+
+
+class BlockDetails(BaseModel):
+    """Detailed block information."""
+
+    id: str
+    name: str
+    description: str
+    inputs: dict[str, Any] = {}
+    outputs: dict[str, Any] = {}
+    credentials: list[CredentialsMetaInput] = []
+
+
+class BlockDetailsResponse(ToolResponseBase):
+    """Response for block details (first run_block attempt)."""
+
+    type: ResponseType = ResponseType.BLOCK_DETAILS
+    block: BlockDetails
+    user_authenticated: bool = False


 class BlockOutputResponse(ToolResponseBase):
@@ -421,3 +433,34 @@ class AsyncProcessingResponse(ToolResponseBase):
     status: str = "accepted"  # Must be "accepted" for detection
     operation_id: str | None = None
     task_id: str | None = None
+
+
+# Feature request models
+class FeatureRequestInfo(BaseModel):
+    """Information about a feature request issue."""
+
+    id: str
+    identifier: str
+    title: str
+    description: str | None = None
+
+
+class FeatureRequestSearchResponse(ToolResponseBase):
+    """Response for search_feature_requests tool."""
+
+    type: ResponseType = ResponseType.FEATURE_REQUEST_SEARCH
+    results: list[FeatureRequestInfo]
+    count: int
+    query: str
+
+
+class FeatureRequestCreatedResponse(ToolResponseBase):
+    """Response for create_feature_request tool."""
+
+    type: ResponseType = ResponseType.FEATURE_REQUEST_CREATED
+    issue_id: str
+    issue_identifier: str
+    issue_title: str
+    issue_url: str
+    is_new_issue: bool  # False if added to existing
+    customer_name: str
@@ -23,8 +23,11 @@ from backend.util.exceptions import BlockError
 from .base import BaseTool
 from .helpers import get_inputs_from_schema
 from .models import (
+    BlockDetails,
+    BlockDetailsResponse,
     BlockOutputResponse,
     ErrorResponse,
+    InputValidationErrorResponse,
     SetupInfo,
     SetupRequirementsResponse,
     ToolResponseBase,
@@ -51,8 +54,8 @@ class RunBlockTool(BaseTool):
             "Execute a specific block with the provided input data. "
             "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
             "do NOT guess or make up block IDs. "
-            "Use the 'id' from find_block results and provide input_data "
-            "matching the block's required_inputs."
+            "On first attempt (without input_data), returns detailed schema showing "
+            "required inputs and outputs. Then call again with proper input_data to execute."
         )

     @property
@@ -67,11 +70,19 @@ class RunBlockTool(BaseTool):
                         "NEVER guess this - always get it from find_block first."
                     ),
                 },
+                "block_name": {
+                    "type": "string",
+                    "description": (
+                        "The block's human-readable name from find_block results. "
+                        "Used for display purposes in the UI."
+                    ),
+                },
                 "input_data": {
                     "type": "object",
                     "description": (
-                        "Input values for the block. Use the 'required_inputs' field "
-                        "from find_block to see what fields are needed."
+                        "Input values for the block. "
+                        "First call with empty {} to see the block's schema, "
+                        "then call again with proper values to execute."
                     ),
                 },
             },
@@ -156,6 +167,34 @@ class RunBlockTool(BaseTool):
             await self._resolve_block_credentials(user_id, block, input_data)
         )

+        # Get block schemas for details/validation
+        try:
+            input_schema: dict[str, Any] = block.input_schema.jsonschema()
+        except Exception as e:
+            logger.warning(
+                "Failed to generate input schema for block %s: %s",
+                block_id,
+                e,
+            )
+            return ErrorResponse(
+                message=f"Block '{block.name}' has an invalid input schema",
+                error=str(e),
+                session_id=session_id,
+            )
+        try:
+            output_schema: dict[str, Any] = block.output_schema.jsonschema()
+        except Exception as e:
+            logger.warning(
+                "Failed to generate output schema for block %s: %s",
+                block_id,
+                e,
+            )
+            return ErrorResponse(
+                message=f"Block '{block.name}' has an invalid output schema",
+                error=str(e),
+                session_id=session_id,
+            )
+
 if missing_credentials:
             # Return setup requirements response with missing credentials
             credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -188,6 +227,53 @@ class RunBlockTool(BaseTool):
             graph_version=None,
         )

+        # Check if this is a first attempt (required inputs missing)
+        # Return block details so user can see what inputs are needed
+        credentials_fields = set(block.input_schema.get_credentials_fields().keys())
+        required_keys = set(input_schema.get("required", []))
+        required_non_credential_keys = required_keys - credentials_fields
+        provided_input_keys = set(input_data.keys()) - credentials_fields
+
+        # Check for unknown input fields
+        valid_fields = (
+            set(input_schema.get("properties", {}).keys()) - credentials_fields
+        )
+        unrecognized_fields = provided_input_keys - valid_fields
+        if unrecognized_fields:
+            return InputValidationErrorResponse(
+                message=(
+                    f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
+                    f"Block was not executed. Please use the correct field names from the schema."
+                ),
+                session_id=session_id,
+                unrecognized_fields=sorted(unrecognized_fields),
+                inputs=input_schema,
+            )
+
+        # Show details when not all required non-credential inputs are provided
+        if not (required_non_credential_keys <= provided_input_keys):
+            # Get credentials info for the response
+            credentials_meta = []
+            for field_name, cred_meta in matched_credentials.items():
+                credentials_meta.append(cred_meta)
+
+            return BlockDetailsResponse(
+                message=(
+                    f"Block '{block.name}' details. "
+                    "Provide input_data matching the inputs schema to execute the block."
+                ),
+                session_id=session_id,
+                block=BlockDetails(
+                    id=block_id,
+                    name=block.name,
+                    description=block.description or "",
+                    inputs=input_schema,
+                    outputs=output_schema,
+                    credentials=credentials_meta,
+                ),
+                user_authenticated=True,
+            )
+
         try:
             # Get or create user's workspace for CoPilot file operations
             workspace = await get_or_create_workspace(user_id)
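
The unknown-field guard added above is plain set arithmetic over the block's JSON schema; a minimal standalone sketch of the same logic (simplified: no credential fields, hypothetical field values):

    # Minimal sketch of the unrecognized-field check in the hunk above.
    input_schema = {"properties": {"prompt": {}, "model": {}}, "required": ["prompt"]}
    credentials_fields: set[str] = set()  # assume no credential inputs here
    input_data = {"prompt": "Hi", "LLM_Model": "gpt-4o"}  # wrong key for 'model'

    valid_fields = set(input_schema.get("properties", {})) - credentials_fields
    provided = set(input_data) - credentials_fields
    unrecognized = provided - valid_fields
    assert unrecognized == {"LLM_Model"}  # rejected before the block ever runs

Running this check before the required-fields check means a wrong key is always reported explicitly rather than silently falling back to the field's default value.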
@@ -1,10 +1,15 @@
-"""Tests for block execution guards in RunBlockTool."""
+"""Tests for block execution guards and input validation in RunBlockTool."""

-from unittest.mock import MagicMock, patch
+from unittest.mock import AsyncMock, MagicMock, patch

 import pytest

-from backend.api.features.chat.tools.models import ErrorResponse
+from backend.api.features.chat.tools.models import (
+    BlockDetailsResponse,
+    BlockOutputResponse,
+    ErrorResponse,
+    InputValidationErrorResponse,
+)
 from backend.api.features.chat.tools.run_block import RunBlockTool
 from backend.blocks._base import BlockType

@@ -28,6 +33,39 @@ def make_mock_block(
     return mock


+def make_mock_block_with_schema(
+    block_id: str,
+    name: str,
+    input_properties: dict,
+    required_fields: list[str],
+    output_properties: dict | None = None,
+):
+    """Create a mock block with a defined input/output schema for validation tests."""
+    mock = MagicMock()
+    mock.id = block_id
+    mock.name = name
+    mock.block_type = BlockType.STANDARD
+    mock.disabled = False
+    mock.description = f"Test block: {name}"
+
+    input_schema = {
+        "properties": input_properties,
+        "required": required_fields,
+    }
+    mock.input_schema = MagicMock()
+    mock.input_schema.jsonschema.return_value = input_schema
+    mock.input_schema.get_credentials_fields_info.return_value = {}
+    mock.input_schema.get_credentials_fields.return_value = {}
+
+    output_schema = {
+        "properties": output_properties or {"result": {"type": "string"}},
+    }
+    mock.output_schema = MagicMock()
+    mock.output_schema.jsonschema.return_value = output_schema
+
+    return mock
+
+
 class TestRunBlockFiltering:
     """Tests for block execution guards in RunBlockTool."""

@@ -104,3 +142,221 @@ class TestRunBlockFiltering:
         # (may be other errors like missing credentials, but not the exclusion guard)
         if isinstance(response, ErrorResponse):
             assert "cannot be run directly in CoPilot" not in response.message
+
+
+class TestRunBlockInputValidation:
+    """Tests for input field validation in RunBlockTool.
+
+    run_block rejects unknown input field names with InputValidationErrorResponse,
+    preventing silent failures where incorrect keys would be ignored and the block
+    would execute with default values instead of the caller's intended values.
+    """
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_unknown_input_fields_are_rejected(self):
+        """run_block rejects unknown input fields instead of silently ignoring them.
+
+        Scenario: The AI Text Generator block has a field called 'model' (for LLM model
+        selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
+        instead. The block should reject the request and return the valid schema.
+        """
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_block = make_mock_block_with_schema(
+            block_id="ai-text-gen-id",
+            name="AI Text Generator",
+            input_properties={
+                "prompt": {"type": "string", "description": "The prompt to send"},
+                "model": {
+                    "type": "string",
+                    "description": "The LLM model to use",
+                    "default": "gpt-4o-mini",
+                },
+                "sys_prompt": {
+                    "type": "string",
+                    "description": "System prompt",
+                    "default": "",
+                },
+            },
+            required_fields=["prompt"],
+            output_properties={"response": {"type": "string"}},
+        )
+
+        with patch(
+            "backend.api.features.chat.tools.run_block.get_block",
+            return_value=mock_block,
+        ):
+            tool = RunBlockTool()
+
+            # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="ai-text-gen-id",
+                input_data={
+                    "prompt": "Write a haiku about coding",
+                    "LLM_Model": "claude-opus-4-6",  # WRONG KEY - should be 'model'
+                },
+            )
+
+        assert isinstance(response, InputValidationErrorResponse)
+        assert "LLM_Model" in response.unrecognized_fields
+        assert "Block was not executed" in response.message
+        assert "inputs" in response.model_dump()  # valid schema included
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_multiple_wrong_keys_are_all_reported(self):
+        """All unrecognized field names are reported in a single error response."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_block = make_mock_block_with_schema(
+            block_id="ai-text-gen-id",
+            name="AI Text Generator",
+            input_properties={
+                "prompt": {"type": "string"},
+                "model": {"type": "string", "default": "gpt-4o-mini"},
+                "sys_prompt": {"type": "string", "default": ""},
+                "retry": {"type": "integer", "default": 3},
+            },
+            required_fields=["prompt"],
+        )
+
+        with patch(
+            "backend.api.features.chat.tools.run_block.get_block",
+            return_value=mock_block,
+        ):
+            tool = RunBlockTool()
+
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="ai-text-gen-id",
+                input_data={
+                    "prompt": "Hello",  # correct
+                    "llm_model": "claude-opus-4-6",  # WRONG - should be 'model'
+                    "system_prompt": "Be helpful",  # WRONG - should be 'sys_prompt'
+                    "retries": 5,  # WRONG - should be 'retry'
+                },
+            )
+
+        assert isinstance(response, InputValidationErrorResponse)
+        assert set(response.unrecognized_fields) == {
+            "llm_model",
+            "system_prompt",
+            "retries",
+        }
+        assert "Block was not executed" in response.message
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_unknown_fields_rejected_even_with_missing_required(self):
+        """Unknown fields are caught before the missing-required-fields check."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_block = make_mock_block_with_schema(
+            block_id="ai-text-gen-id",
+            name="AI Text Generator",
+            input_properties={
+                "prompt": {"type": "string"},
+                "model": {"type": "string", "default": "gpt-4o-mini"},
+            },
+            required_fields=["prompt"],
+        )
+
+        with patch(
+            "backend.api.features.chat.tools.run_block.get_block",
+            return_value=mock_block,
+        ):
+            tool = RunBlockTool()
+
+            # 'prompt' is missing AND 'LLM_Model' is an unknown field
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="ai-text-gen-id",
+                input_data={
+                    "LLM_Model": "claude-opus-4-6",  # wrong key, and 'prompt' is missing
+                },
+            )
+
+        # Unknown fields are caught first
+        assert isinstance(response, InputValidationErrorResponse)
+        assert "LLM_Model" in response.unrecognized_fields
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_correct_inputs_still_execute(self):
+        """Correct input field names pass validation and the block executes."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_block = make_mock_block_with_schema(
+            block_id="ai-text-gen-id",
+            name="AI Text Generator",
+            input_properties={
+                "prompt": {"type": "string"},
+                "model": {"type": "string", "default": "gpt-4o-mini"},
+            },
+            required_fields=["prompt"],
+        )
+
+        async def mock_execute(input_data, **kwargs):
+            yield "response", "Generated text"
+
+        mock_block.execute = mock_execute
+
+        with (
+            patch(
+                "backend.api.features.chat.tools.run_block.get_block",
+                return_value=mock_block,
+            ),
+            patch(
+                "backend.api.features.chat.tools.run_block.get_or_create_workspace",
+                new_callable=AsyncMock,
+                return_value=MagicMock(id="test-workspace-id"),
+            ),
+        ):
+            tool = RunBlockTool()
+
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="ai-text-gen-id",
+                input_data={
+                    "prompt": "Write a haiku",
+                    "model": "gpt-4o-mini",  # correct field name
+                },
+            )
+
+        assert isinstance(response, BlockOutputResponse)
+        assert response.success is True
+
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_missing_required_fields_returns_details(self):
+        """Missing required fields returns BlockDetailsResponse with schema."""
+        session = make_session(user_id=_TEST_USER_ID)
+
+        mock_block = make_mock_block_with_schema(
+            block_id="ai-text-gen-id",
+            name="AI Text Generator",
+            input_properties={
+                "prompt": {"type": "string"},
+                "model": {"type": "string", "default": "gpt-4o-mini"},
+            },
+            required_fields=["prompt"],
+        )
+
+        with patch(
+            "backend.api.features.chat.tools.run_block.get_block",
+            return_value=mock_block,
+        ):
+            tool = RunBlockTool()
+
+            # Only provide valid optional field, missing required 'prompt'
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="ai-text-gen-id",
+                input_data={
+                    "model": "gpt-4o-mini",  # valid but optional
+                },
+            )
+
+        assert isinstance(response, BlockDetailsResponse)
@@ -0,0 +1,153 @@
+"""Tests for BlockDetailsResponse in RunBlockTool."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from backend.api.features.chat.tools.models import BlockDetailsResponse
+from backend.api.features.chat.tools.run_block import RunBlockTool
+from backend.blocks._base import BlockType
+from backend.data.model import CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+from ._test_data import make_session
+
+_TEST_USER_ID = "test-user-run-block-details"
+
+
+def make_mock_block_with_inputs(
+    block_id: str, name: str, description: str = "Test description"
+):
+    """Create a mock block with input/output schemas for testing."""
+    mock = MagicMock()
+    mock.id = block_id
+    mock.name = name
+    mock.description = description
+    mock.block_type = BlockType.STANDARD
+    mock.disabled = False
+
+    # Input schema with non-credential fields
+    mock.input_schema = MagicMock()
+    mock.input_schema.jsonschema.return_value = {
+        "properties": {
+            "url": {"type": "string", "description": "URL to fetch"},
+            "method": {"type": "string", "description": "HTTP method"},
+        },
+        "required": ["url"],
+    }
+    mock.input_schema.get_credentials_fields.return_value = {}
+    mock.input_schema.get_credentials_fields_info.return_value = {}
+
+    # Output schema
+    mock.output_schema = MagicMock()
+    mock.output_schema.jsonschema.return_value = {
+        "properties": {
+            "response": {"type": "object", "description": "HTTP response"},
+            "error": {"type": "string", "description": "Error message"},
+        }
+    }
+
+    return mock
+
+
+@pytest.mark.asyncio(loop_scope="session")
+async def test_run_block_returns_details_when_no_input_provided():
+    """When run_block is called without input_data, it should return BlockDetailsResponse."""
+    session = make_session(user_id=_TEST_USER_ID)
+
+    # Create a block with inputs
+    http_block = make_mock_block_with_inputs(
+        "http-block-id", "HTTP Request", "Send HTTP requests"
+    )
+
+    with patch(
+        "backend.api.features.chat.tools.run_block.get_block",
+        return_value=http_block,
+    ):
+        # Mock credentials check to return no missing credentials
+        with patch.object(
+            RunBlockTool,
+            "_resolve_block_credentials",
+            new_callable=AsyncMock,
+            return_value=({}, []),  # (matched_credentials, missing_credentials)
+        ):
+            tool = RunBlockTool()
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="http-block-id",
+                input_data={},  # Empty input data
+            )
+
+    # Should return BlockDetailsResponse showing the schema
+    assert isinstance(response, BlockDetailsResponse)
+    assert response.block.id == "http-block-id"
+    assert response.block.name == "HTTP Request"
+    assert response.block.description == "Send HTTP requests"
+    assert "url" in response.block.inputs["properties"]
+    assert "method" in response.block.inputs["properties"]
+    assert "response" in response.block.outputs["properties"]
+    assert response.user_authenticated is True
+
+
+@pytest.mark.asyncio(loop_scope="session")
+async def test_run_block_returns_details_when_only_credentials_provided():
+    """When only credentials are provided (no actual input), should return details."""
+    session = make_session(user_id=_TEST_USER_ID)
+
+    # Create a block with both credential and non-credential inputs
+    mock = MagicMock()
+    mock.id = "api-block-id"
+    mock.name = "API Call"
+    mock.description = "Make API calls"
+    mock.block_type = BlockType.STANDARD
+    mock.disabled = False
+
+    mock.input_schema = MagicMock()
+    mock.input_schema.jsonschema.return_value = {
+        "properties": {
+            "credentials": {"type": "object", "description": "API credentials"},
+            "endpoint": {"type": "string", "description": "API endpoint"},
+        },
+        "required": ["credentials", "endpoint"],
+    }
+    mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
+    mock.input_schema.get_credentials_fields_info.return_value = {}
+
+    mock.output_schema = MagicMock()
+    mock.output_schema.jsonschema.return_value = {
+        "properties": {"result": {"type": "object"}}
+    }
+
+    with patch(
+        "backend.api.features.chat.tools.run_block.get_block",
+        return_value=mock,
+    ):
+        with patch.object(
+            RunBlockTool,
+            "_resolve_block_credentials",
+            new_callable=AsyncMock,
+            return_value=(
+                {
+                    "credentials": CredentialsMetaInput(
+                        id="cred-id",
+                        provider=ProviderName("test_provider"),
+                        type="api_key",
+                        title="Test Credential",
+                    )
+                },
+                [],
+            ),
+        ):
+            tool = RunBlockTool()
+            response = await tool._execute(
+                user_id=_TEST_USER_ID,
+                session=session,
+                block_id="api-block-id",
+                input_data={"credentials": {"some": "cred"}},  # Only credential
+            )
+
+    # Should return details because no non-credential inputs provided
+    assert isinstance(response, BlockDetailsResponse)
+    assert response.block.id == "api-block-id"
+    assert response.block.name == "API Call"
@@ -32,14 +32,6 @@ from backend.data.model import (
 from backend.integrations.providers import ProviderName
 from backend.util import json
 from backend.util.logging import TruncatedLogger
-from backend.util.openai_responses import (
-    convert_tools_to_responses_format,
-    extract_responses_content,
-    extract_responses_reasoning,
-    extract_responses_tool_calls,
-    extract_usage,
-    requires_responses_api,
-)
 from backend.util.prompt import compress_context, estimate_token_count
 from backend.util.text import TextFormatter

@@ -667,72 +659,38 @@ async def llm_call(
     max_tokens = max(min(available_tokens, model_max_output, user_max), 1)

     if provider == "openai":
+        tools_param = tools if tools else openai.NOT_GIVEN
         oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
+        response_format = None

-        # Check if this model requires the Responses API (reasoning models: o1, o3, etc.)
-        if requires_responses_api(llm_model.value):
-            # Use responses.create for reasoning models
-            tools_converted = (
-                convert_tools_to_responses_format(tools) if tools else None
-            )
-
-            response = await oai_client.responses.create(
-                model=llm_model.value,
-                input=prompt,  # type: ignore
-                tools=tools_converted,  # type: ignore
-                max_output_tokens=max_tokens,
-                store=False,  # Don't persist conversations
-            )
-
-            tool_calls = extract_responses_tool_calls(response)
-            reasoning = extract_responses_reasoning(response)
-            content = extract_responses_content(response)
-            prompt_tokens, completion_tokens = extract_usage(response, True)
-
-            return LLMResponse(
-                raw_response=response,
-                prompt=prompt,
-                response=content,
-                tool_calls=tool_calls,
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                reasoning=reasoning,
-            )
-        else:
-            # Use chat.completions.create for standard models
-            tools_param = tools if tools else openai.NOT_GIVEN
-            response_format = None
-
-            parallel_tool_calls = get_parallel_tool_calls_param(
-                llm_model, parallel_tool_calls
-            )
-
-            if force_json_output:
-                response_format = {"type": "json_object"}
-
-            response = await oai_client.chat.completions.create(
-                model=llm_model.value,
-                messages=prompt,  # type: ignore
-                response_format=response_format,  # type: ignore
-                max_completion_tokens=max_tokens,
-                tools=tools_param,  # type: ignore
-                parallel_tool_calls=parallel_tool_calls,
-            )
-
-            tool_calls = extract_openai_tool_calls(response)
-            reasoning = extract_openai_reasoning(response)
-
-            return LLMResponse(
-                raw_response=response.choices[0].message,
-                prompt=prompt,
-                response=response.choices[0].message.content or "",
-                tool_calls=tool_calls,
-                prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
-                completion_tokens=(
-                    response.usage.completion_tokens if response.usage else 0
-                ),
-                reasoning=reasoning,
-            )
+        parallel_tool_calls = get_parallel_tool_calls_param(
+            llm_model, parallel_tool_calls
+        )
+
+        if force_json_output:
+            response_format = {"type": "json_object"}
+
+        response = await oai_client.chat.completions.create(
+            model=llm_model.value,
+            messages=prompt,  # type: ignore
+            response_format=response_format,  # type: ignore
+            max_completion_tokens=max_tokens,
+            tools=tools_param,  # type: ignore
+            parallel_tool_calls=parallel_tool_calls,
+        )
+
+        tool_calls = extract_openai_tool_calls(response)
+        reasoning = extract_openai_reasoning(response)
+
+        return LLMResponse(
+            raw_response=response.choices[0].message,
+            prompt=prompt,
+            response=response.choices[0].message.content or "",
+            tool_calls=tool_calls,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
+            completion_tokens=response.usage.completion_tokens if response.usage else 0,
+            reasoning=reasoning,
+        )
     elif provider == "anthropic":

         an_tools = convert_openai_tool_fmt_to_anthropic(tools)
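
This revert removes the o1/o3 special case entirely: the OpenAI branch now always calls chat.completions.create, and the routing helper in the deleted module below is no longer consulted. A sketch of the behavior that disappears (model ids are a subset of those in the deleted file):

    # Routing removed by this commit; REASONING_MODELS comes from the deleted module below.
    REASONING_MODELS = frozenset({"o1", "o1-mini", "o1-preview", "o3", "o3-mini"})

    def requires_responses_api(model: str) -> bool:
        return model in REASONING_MODELS

    assert requires_responses_api("o3-mini") and not requires_responses_api("gpt-4o")
    # After the revert, both model ids take the identical chat.completions.create path.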
@@ -1,185 +0,0 @@
"""Helpers for OpenAI Responses API migration.

This module provides utilities for conditionally using OpenAI's Responses API
instead of Chat Completions for reasoning models (o1, o3, etc.) that require it.
"""

from typing import Any

# Exact model identifiers that require the Responses API.
# Use exact matching to avoid false positives on future models.
# NOTE: Update this set when OpenAI releases new reasoning models.
REASONING_MODELS = frozenset(
    {
        # O1 family
        "o1",
        "o1-mini",
        "o1-preview",
        "o1-2024-12-17",
        # O3 family
        "o3",
        "o3-mini",
        "o3-2025-04-16",
        "o3-mini-2025-01-31",
    }
)


def requires_responses_api(model: str) -> bool:
    """Check if model requires the Responses API (exact match).

    Args:
        model: The model identifier string (e.g., "o3-mini", "gpt-4o")

    Returns:
        True if the model requires responses.create, False otherwise
    """
    return model in REASONING_MODELS


def convert_tools_to_responses_format(tools: list[dict] | None) -> list[dict]:
    """Convert Chat Completions tool format to Responses API format.

    The Responses API uses internally-tagged polymorphism (flatter structure)
    and functions are strict by default.

    Chat Completions format:
        {"type": "function", "function": {"name": "...", "parameters": {...}}}

    Responses API format:
        {"type": "function", "name": "...", "parameters": {...}}

    Args:
        tools: List of tools in Chat Completions format

    Returns:
        List of tools in Responses API format
    """
    if not tools:
        return []

    converted = []
    for tool in tools:
        if tool.get("type") == "function":
            func = tool.get("function", {})
            converted.append(
                {
                    "type": "function",
                    "name": func.get("name"),
                    "description": func.get("description"),
                    "parameters": func.get("parameters"),
                    # Note: strict=True is default in Responses API
                }
            )
        else:
            # Pass through non-function tools as-is
            converted.append(tool)
    return converted


def extract_responses_tool_calls(response: Any) -> list[dict] | None:
    """Extract tool calls from Responses API response.

    The Responses API returns tool calls as separate items in the output array
    with type="function_call".

    Args:
        response: The Responses API response object

    Returns:
        List of tool calls in a normalized format, or None if no tool calls
    """
    tool_calls = []
    for item in response.output:
        if getattr(item, "type", None) == "function_call":
            tool_calls.append(
                {
                    "id": item.call_id,
                    "type": "function",
                    "function": {
                        "name": item.name,
                        "arguments": item.arguments,
                    },
                }
            )
    return tool_calls if tool_calls else None


def extract_usage(response: Any, is_responses_api: bool) -> tuple[int, int]:
    """Extract token usage from either API response.

    The Responses API uses different field names for token counts:
    - Chat Completions: prompt_tokens, completion_tokens
    - Responses API: input_tokens, output_tokens

    Args:
        response: The API response object
        is_responses_api: True if response is from Responses API

    Returns:
        Tuple of (prompt_tokens, completion_tokens)
    """
    if not response.usage:
        return 0, 0

    if is_responses_api:
        # Responses API uses different field names
        return (
            getattr(response.usage, "input_tokens", 0),
            getattr(response.usage, "output_tokens", 0),
        )
    else:
        # Chat Completions API
        return (
            getattr(response.usage, "prompt_tokens", 0),
            getattr(response.usage, "completion_tokens", 0),
        )


def extract_responses_content(response: Any) -> str:
    """Extract text content from Responses API response.

    Args:
        response: The Responses API response object

    Returns:
        The text content from the response, or empty string if none
    """
    # The SDK provides a helper property
    if hasattr(response, "output_text"):
        return response.output_text or ""

    # Fallback: manually extract from output items
    for item in response.output:
        if getattr(item, "type", None) == "message":
            for content in getattr(item, "content", []):
                if getattr(content, "type", None) == "output_text":
                    return getattr(content, "text", "")
    return ""


def extract_responses_reasoning(response: Any) -> str | None:
    """Extract reasoning content from Responses API response.

    Reasoning models return their reasoning process in the response,
    which can be useful for debugging or display.

    Args:
        response: The Responses API response object

    Returns:
        The reasoning text, or None if not present
    """
    for item in response.output:
        if getattr(item, "type", None) == "reasoning":
            # Reasoning items may have summary or content
            summary = getattr(item, "summary", [])
            if summary:
                # Join summary items if present
                texts = []
                for s in summary:
                    if hasattr(s, "text"):
                        texts.append(s.text)
                if texts:
                    return "\n".join(texts)
    return None
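Taken together, these helpers (imported in the tests below as backend.util.openai_responses) suggest how the now-removed dual-path dispatch worked. The following is a minimal sketch of that wiring, not code from this repo: the oai_client variable (an AsyncOpenAI instance), the model/messages/tools parameters, and the simplification of the Chat Completions branch are all assumptions for illustration.

# Hypothetical wiring of the removed helpers; oai_client, model, messages,
# and tools are assumed names, and error handling is omitted.
async def call_openai(oai_client, model: str, messages: list[dict], tools: list[dict]):
    if requires_responses_api(model):
        # Reasoning models (o1/o3) go through the Responses API.
        response = await oai_client.responses.create(
            model=model,
            input=messages,  # the Responses API takes `input`, not `messages`
            tools=convert_tools_to_responses_format(tools),
        )
        content = extract_responses_content(response)
        tool_calls = extract_responses_tool_calls(response)
        reasoning = extract_responses_reasoning(response)
        prompt_tokens, completion_tokens = extract_usage(response, is_responses_api=True)
    else:
        # Everything else stays on Chat Completions.
        response = await oai_client.chat.completions.create(
            model=model, messages=messages, tools=tools or None
        )
        content = response.choices[0].message.content or ""
        tool_calls = reasoning = None  # simplified for the sketch
        prompt_tokens, completion_tokens = extract_usage(response, is_responses_api=False)
    return content, tool_calls, reasoning, prompt_tokens, completion_tokens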
@@ -1,155 +0,0 @@
"""Tests for OpenAI Responses API helpers."""

import pytest

from backend.util.openai_responses import (
    REASONING_MODELS,
    convert_tools_to_responses_format,
    requires_responses_api,
)


class TestRequiresResponsesApi:
    """Tests for the requires_responses_api function."""

    def test_o1_models_require_responses_api(self):
        """O1 family models should require the Responses API."""
        assert requires_responses_api("o1") is True
        assert requires_responses_api("o1-mini") is True
        assert requires_responses_api("o1-preview") is True
        assert requires_responses_api("o1-2024-12-17") is True

    def test_o3_models_require_responses_api(self):
        """O3 family models should require the Responses API."""
        assert requires_responses_api("o3") is True
        assert requires_responses_api("o3-mini") is True
        assert requires_responses_api("o3-2025-04-16") is True
        assert requires_responses_api("o3-mini-2025-01-31") is True

    def test_gpt_models_do_not_require_responses_api(self):
        """GPT models should NOT require the Responses API."""
        assert requires_responses_api("gpt-4o") is False
        assert requires_responses_api("gpt-4o-mini") is False
        assert requires_responses_api("gpt-4-turbo") is False
        assert requires_responses_api("gpt-3.5-turbo") is False
        assert requires_responses_api("gpt-5") is False
        assert requires_responses_api("gpt-5-mini") is False

    def test_other_models_do_not_require_responses_api(self):
        """Other provider models should NOT require the Responses API."""
        assert requires_responses_api("claude-3-opus") is False
        assert requires_responses_api("llama-3.3-70b") is False
        assert requires_responses_api("gemini-pro") is False

    def test_empty_string_does_not_require_responses_api(self):
        """Empty string should not require the Responses API."""
        assert requires_responses_api("") is False

    def test_exact_matching_no_false_positives(self):
        """Should not match models that just start with 'o1' or 'o3'."""
        # These are hypothetical models that start with o1/o3 but aren't
        # actually reasoning models
        assert requires_responses_api("o1-turbo-hypothetical") is False
        assert requires_responses_api("o3-fast-hypothetical") is False
        assert requires_responses_api("o100") is False


class TestConvertToolsToResponsesFormat:
    """Tests for the convert_tools_to_responses_format function."""

    def test_empty_tools_returns_empty_list(self):
        """Empty or None tools should return empty list."""
        assert convert_tools_to_responses_format(None) == []
        assert convert_tools_to_responses_format([]) == []

    def test_converts_function_tool_format(self):
        """Should convert Chat Completions function format to Responses format."""
        chat_completions_tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the weather in a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"},
                        },
                        "required": ["location"],
                    },
                },
            }
        ]

        result = convert_tools_to_responses_format(chat_completions_tools)

        assert len(result) == 1
        assert result[0]["type"] == "function"
        assert result[0]["name"] == "get_weather"
        assert result[0]["description"] == "Get the weather in a location"
        assert result[0]["parameters"] == {
            "type": "object",
            "properties": {
                "location": {"type": "string"},
            },
            "required": ["location"],
        }
        # Should not have nested "function" key
        assert "function" not in result[0]

    def test_handles_multiple_tools(self):
        """Should handle multiple tools."""
        chat_completions_tools = [
            {
                "type": "function",
                "function": {
                    "name": "tool_1",
                    "description": "First tool",
                    "parameters": {"type": "object", "properties": {}},
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "tool_2",
                    "description": "Second tool",
                    "parameters": {"type": "object", "properties": {}},
                },
            },
        ]

        result = convert_tools_to_responses_format(chat_completions_tools)

        assert len(result) == 2
        assert result[0]["name"] == "tool_1"
        assert result[1]["name"] == "tool_2"

    def test_passes_through_non_function_tools(self):
        """Non-function tools should be passed through as-is."""
        tools = [{"type": "web_search", "config": {"enabled": True}}]

        result = convert_tools_to_responses_format(tools)

        assert result == tools


class TestReasoningModelsSet:
    """Tests for the REASONING_MODELS constant."""

    def test_reasoning_models_is_frozenset(self):
        """REASONING_MODELS should be a frozenset (immutable)."""
        assert isinstance(REASONING_MODELS, frozenset)

    def test_contains_expected_models(self):
        """Should contain all expected reasoning models."""
        expected = {
            "o1",
            "o1-mini",
            "o1-preview",
            "o1-2024-12-17",
            "o3",
            "o3-mini",
            "o3-2025-04-16",
            "o3-mini-2025-01-31",
        }
        assert expected.issubset(REASONING_MODELS)
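The exact-match guard these tests pin down is easy to see in isolation; a naive prefix check would misclassify the hypothetical model names the tests use. A quick illustrative comparison (the set below is a trimmed copy for demonstration only):

# Why the set uses exact matching: prefix checks produce false positives.
REASONING = frozenset({"o1", "o1-mini", "o1-preview", "o1-2024-12-17"})

model = "o1-turbo-hypothetical"  # hypothetical name taken from the tests
print(model.startswith("o1"))  # True  -- would wrongly route to the Responses API
print(model in REASONING)      # False -- exact match routes correctly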
@@ -662,6 +662,17 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     mem0_api_key: str = Field(default="", description="Mem0 API key")
     elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")
 
+    linear_api_key: str = Field(
+        default="", description="Linear API key for system-level operations"
+    )
+    linear_feature_request_project_id: str = Field(
+        default="",
+        description="Linear project ID where feature requests are tracked",
+    )
+    linear_feature_request_team_id: str = Field(
+        default="",
+        description="Linear team ID used when creating feature request issues",
+    )
     linear_client_id: str = Field(default="", description="Linear client ID")
     linear_client_secret: str = Field(default="", description="Linear client secret")
 
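These three settings pair with the feature-request copilot tools added further down in this comparison. Below is a minimal sketch of how a backend tool might consume them; the endpoint, raw-API-key Authorization header, and issueCreate mutation shape are assumptions based on Linear's public GraphQL API, not code from this commit.

# Sketch only: Linear API details here are assumptions, not this repo's code.
import httpx

LINEAR_GRAPHQL_URL = "https://api.linear.app/graphql"

async def create_feature_request(
    api_key: str, team_id: str, project_id: str, title: str, description: str
) -> dict:
    mutation = """
        mutation IssueCreate($input: IssueCreateInput!) {
          issueCreate(input: $input) { success issue { id identifier url } }
        }
    """
    payload = {
        "query": mutation,
        "variables": {
            "input": {
                "teamId": team_id,        # linear_feature_request_team_id
                "projectId": project_id,  # linear_feature_request_project_id
                "title": title,
                "description": description,
            }
        },
    }
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            LINEAR_GRAPHQL_URL,
            headers={"Authorization": api_key},  # linear_api_key
            json=payload,
        )
        resp.raise_for_status()
        return resp.json()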
autogpt_platform/backend/poetry.lock (generated; 68 lines changed)

@@ -441,14 +441,14 @@ develop = true
 colorama = "^0.4.6"
 cryptography = "^46.0"
 expiringdict = "^1.2.2"
-fastapi = "^0.128.0"
+fastapi = "^0.128.7"
 google-cloud-logging = "^3.13.0"
-launchdarkly-server-sdk = "^9.14.1"
+launchdarkly-server-sdk = "^9.15.0"
 pydantic = "^2.12.5"
 pydantic-settings = "^2.12.0"
 pyjwt = {version = "^2.11.0", extras = ["crypto"]}
 redis = "^6.2.0"
-supabase = "^2.27.2"
+supabase = "^2.28.0"
 uvicorn = "^0.40.0"
 
 [package.source]
@@ -1382,14 +1382,14 @@ tzdata = "*"
 
 [[package]]
 name = "fastapi"
-version = "0.128.6"
+version = "0.128.7"
 description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
-    {file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
+    {file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
+    {file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
 ]
 
 [package.dependencies]
@@ -3117,14 +3117,14 @@ urllib3 = ">=1.26.0,<3"
 
 [[package]]
 name = "launchdarkly-server-sdk"
-version = "9.14.1"
+version = "9.15.0"
 description = "LaunchDarkly SDK for Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
-    {file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
+    {file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
+    {file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
 ]
 
 [package.dependencies]
@@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest
 
 [[package]]
 name = "postgrest"
-version = "2.27.3"
+version = "2.28.0"
 description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
-    {file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
+    {file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
+    {file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
 ]
 
 [package.dependencies]
@@ -6260,14 +6260,14 @@ all = ["numpy"]
 
 [[package]]
 name = "realtime"
-version = "2.27.3"
+version = "2.28.0"
 description = ""
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
-    {file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
+    {file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
+    {file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
 ]
 
 [package.dependencies]
@@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
 
 [[package]]
 name = "storage3"
-version = "2.27.3"
+version = "2.28.0"
 description = "Supabase Storage client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
-    {file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
+    {file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
+    {file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
 ]
 
 [package.dependencies]
@@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}
 
 [[package]]
 name = "supabase"
-version = "2.27.3"
+version = "2.28.0"
 description = "Supabase client for Python."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
-    {file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
+    {file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
+    {file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
 ]
 
 [package.dependencies]
 httpx = ">=0.26,<0.29"
-postgrest = "2.27.3"
-realtime = "2.27.3"
-storage3 = "2.27.3"
-supabase-auth = "2.27.3"
-supabase-functions = "2.27.3"
+postgrest = "2.28.0"
+realtime = "2.28.0"
+storage3 = "2.28.0"
+supabase-auth = "2.28.0"
+supabase-functions = "2.28.0"
 yarl = ">=1.22.0"
 
 [[package]]
 name = "supabase-auth"
-version = "2.27.3"
+version = "2.28.0"
 description = "Python Client Library for Supabase Auth"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
-    {file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
+    {file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
+    {file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
 ]
 
 [package.dependencies]
@@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
 
 [[package]]
 name = "supabase-functions"
-version = "2.27.3"
+version = "2.28.0"
 description = "Library for Supabase Functions"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
-    {file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
+    {file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
+    {file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
 ]
 
 [package.dependencies]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
+content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f"
@@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
 sqlalchemy = "^2.0.40"
 strenum = "^0.4.9"
 stripe = "^11.5.0"
-supabase = "2.27.3"
+supabase = "2.28.0"
 tenacity = "^9.1.4"
 todoist-api-python = "^2.1.7"
 tweepy = "^4.16.0"
@@ -37,7 +37,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: migrate
-    command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
+    command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"]
     develop:
       watch:
         - path: ./
@@ -56,7 +56,7 @@ services:
       test:
         [
           "CMD-SHELL",
-          "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1",
+          "prisma migrate status | grep -q 'No pending migrations' || exit 1",
        ]
      interval: 30s
      timeout: 10s
@@ -4,7 +4,7 @@ import {
 } from "@/app/api/__generated__/endpoints/graphs/graphs";
 import { useToast } from "@/components/molecules/Toast/use-toast";
 import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
-import { GraphExecutionMeta } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/use-agent-runs";
+import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
 import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
 import { useShallow } from "zustand/react/shallow";
 import { useEffect, useState } from "react";
@@ -1,6 +1,6 @@
 import { useCallback } from "react";
 
-import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
+import { AgentRunDraftView } from "@/app/(platform)/build/components/legacy-builder/agent-run-draft-view";
 import { Dialog } from "@/components/molecules/Dialog/Dialog";
 import type {
   CredentialsMetaInput,
@@ -18,7 +18,7 @@ import {
 import { useToast } from "@/components/molecules/Toast/use-toast";
 import { useQueryClient } from "@tanstack/react-query";
 import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
-import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
 import { humanizeCronExpression } from "@/lib/cron-expression-utils";
 import { CalendarClockIcon } from "lucide-react";
 
@@ -20,7 +20,7 @@ import {
 import { useBackendAPI } from "@/lib/autogpt-server-api/context";
 
 import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
-import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { ScheduleTaskDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
 import ActionButtonGroup from "@/components/__legacy__/action-button-group";
 import type { ButtonAction } from "@/components/__legacy__/types";
 import {
@@ -53,7 +53,10 @@ import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
 import { CalendarClockIcon, Trash2Icon } from "lucide-react";
 
 import { analytics } from "@/services/analytics";
-import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
+import {
+  AgentStatus,
+  AgentStatusChip,
+} from "@/app/(platform)/build/components/legacy-builder/agent-status-chip";
 
 export function AgentRunDraftView({
   graph,
@@ -15,6 +15,10 @@ import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
 import { useEffect, useRef, useState } from "react";
 import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
 import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
+import {
+  CreateFeatureRequestTool,
+  SearchFeatureRequestsTool,
+} from "../../tools/FeatureRequests/FeatureRequests";
 import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
 import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
 import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
@@ -254,6 +258,20 @@ export const ChatMessagesContainer = ({
                 part={part as ToolUIPart}
               />
             );
+          case "tool-search_feature_requests":
+            return (
+              <SearchFeatureRequestsTool
+                key={`${message.id}-${i}`}
+                part={part as ToolUIPart}
+              />
+            );
+          case "tool-create_feature_request":
+            return (
+              <CreateFeatureRequestTool
+                key={`${message.id}-${i}`}
+                part={part as ToolUIPart}
+              />
+            );
           default:
             return null;
         }
@@ -14,6 +14,10 @@ import { Text } from "@/components/atoms/Text/Text";
 import { CopilotChatActionsProvider } from "../components/CopilotChatActionsProvider/CopilotChatActionsProvider";
 import { CreateAgentTool } from "../tools/CreateAgent/CreateAgent";
 import { EditAgentTool } from "../tools/EditAgent/EditAgent";
+import {
+  CreateFeatureRequestTool,
+  SearchFeatureRequestsTool,
+} from "../tools/FeatureRequests/FeatureRequests";
 import { FindAgentsTool } from "../tools/FindAgents/FindAgents";
 import { FindBlocksTool } from "../tools/FindBlocks/FindBlocks";
 import { RunAgentTool } from "../tools/RunAgent/RunAgent";
@@ -45,6 +49,8 @@ const SECTIONS = [
   "Tool: Create Agent",
   "Tool: Edit Agent",
   "Tool: View Agent Output",
+  "Tool: Search Feature Requests",
+  "Tool: Create Feature Request",
   "Full Conversation Example",
 ] as const;
 
@@ -1421,6 +1427,235 @@ export default function StyleguidePage() {
         </SubSection>
       </Section>
 
+      {/* ============================================================= */}
+      {/* SEARCH FEATURE REQUESTS */}
+      {/* ============================================================= */}
+
+      <Section title="Tool: Search Feature Requests">
+        <SubSection label="Input streaming">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "input-streaming",
+              input: { query: "dark mode" },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Input available">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "input-available",
+              input: { query: "dark mode" },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (with results)">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "output-available",
+              input: { query: "dark mode" },
+              output: {
+                type: "feature_request_search",
+                message:
+                  'Found 2 feature request(s) matching "dark mode".',
+                query: "dark mode",
+                count: 2,
+                results: [
+                  {
+                    id: "fr-001",
+                    identifier: "INT-42",
+                    title: "Add dark mode to the platform",
+                    description:
+                      "Users have requested a dark mode option for the builder and copilot interfaces to reduce eye strain during long sessions.",
+                  },
+                  {
+                    id: "fr-002",
+                    identifier: "INT-87",
+                    title: "Dark theme for agent output viewer",
+                    description:
+                      "Specifically requesting dark theme support for the agent output/execution viewer panel.",
+                  },
+                ],
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (no results)">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "output-available",
+              input: { query: "teleportation" },
+              output: {
+                type: "no_results",
+                message:
+                  "No feature requests found matching 'teleportation'.",
+                suggestions: [
+                  "Try different keywords",
+                  "Use broader search terms",
+                  "You can create a new feature request if none exists",
+                ],
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (error)">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "output-available",
+              input: { query: "dark mode" },
+              output: {
+                type: "error",
+                message: "Failed to search feature requests.",
+                error: "LINEAR_API_KEY environment variable is not set",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output error">
+          <SearchFeatureRequestsTool
+            part={{
+              type: "tool-search_feature_requests",
+              toolCallId: uid(),
+              state: "output-error",
+              input: { query: "dark mode" },
+            }}
+          />
+        </SubSection>
+      </Section>
+
+      {/* ============================================================= */}
+      {/* CREATE FEATURE REQUEST */}
+      {/* ============================================================= */}
+
+      <Section title="Tool: Create Feature Request">
+        <SubSection label="Input streaming">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "input-streaming",
+              input: {
+                title: "Add dark mode",
+                description: "I would love dark mode for the platform.",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Input available">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "input-available",
+              input: {
+                title: "Add dark mode",
+                description: "I would love dark mode for the platform.",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (new issue created)">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "output-available",
+              input: {
+                title: "Add dark mode",
+                description: "I would love dark mode for the platform.",
+              },
+              output: {
+                type: "feature_request_created",
+                message:
+                  "Created new feature request [INT-105] Add dark mode.",
+                issue_id: "issue-new-123",
+                issue_identifier: "INT-105",
+                issue_title: "Add dark mode",
+                issue_url:
+                  "https://linear.app/autogpt/issue/INT-105/add-dark-mode",
+                is_new_issue: true,
+                customer_name: "user-abc-123",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (added to existing issue)">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "output-available",
+              input: {
+                title: "Dark mode support",
+                description:
+                  "Please add dark mode, it would help with long sessions.",
+                existing_issue_id: "fr-001",
+              },
+              output: {
+                type: "feature_request_created",
+                message:
+                  "Added your request to existing feature request [INT-42] Add dark mode to the platform.",
+                issue_id: "fr-001",
+                issue_identifier: "INT-42",
+                issue_title: "Add dark mode to the platform",
+                issue_url:
+                  "https://linear.app/autogpt/issue/INT-42/add-dark-mode-to-the-platform",
+                is_new_issue: false,
+                customer_name: "user-xyz-789",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output available (error)">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "output-available",
+              input: {
+                title: "Add dark mode",
+                description: "I would love dark mode.",
+              },
+              output: {
+                type: "error",
+                message:
+                  "Failed to attach customer need to the feature request.",
+                error: "Linear API request failed (500): Internal error",
+              },
+            }}
+          />
+        </SubSection>
+
+        <SubSection label="Output error">
+          <CreateFeatureRequestTool
+            part={{
+              type: "tool-create_feature_request",
+              toolCallId: uid(),
+              state: "output-error",
+              input: { title: "Add dark mode" },
+            }}
+          />
+        </SubSection>
+      </Section>
+
       {/* ============================================================= */}
       {/* FULL CONVERSATION EXAMPLE */}
       {/* ============================================================= */}
@@ -0,0 +1,227 @@
"use client";

import type { ToolUIPart } from "ai";
import { useMemo } from "react";

import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
import {
  ContentBadge,
  ContentCard,
  ContentCardDescription,
  ContentCardHeader,
  ContentCardTitle,
  ContentGrid,
  ContentMessage,
  ContentSuggestionsList,
} from "../../components/ToolAccordion/AccordionContent";
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
import {
  AccordionIcon,
  getAccordionTitle,
  getAnimationText,
  getFeatureRequestOutput,
  isCreatedOutput,
  isErrorOutput,
  isNoResultsOutput,
  isSearchResultsOutput,
  ToolIcon,
  type FeatureRequestToolType,
} from "./helpers";

export interface FeatureRequestToolPart {
  type: FeatureRequestToolType;
  toolCallId: string;
  state: ToolUIPart["state"];
  input?: unknown;
  output?: unknown;
}

interface Props {
  part: FeatureRequestToolPart;
}

function truncate(text: string, maxChars: number): string {
  const trimmed = text.trim();
  if (trimmed.length <= maxChars) return trimmed;
  return `${trimmed.slice(0, maxChars).trimEnd()}…`;
}

export function SearchFeatureRequestsTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const searchOutput =
    isOutputAvailable && output && isSearchResultsOutput(output)
      ? output
      : null;
  const noResultsOutput =
    isOutputAvailable && output && isNoResultsOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable &&
    ((!!searchOutput && searchOutput.count > 0) ||
      !!noResultsOutput ||
      !!errorOutput);

  const accordionDescription =
    hasExpandableContent && searchOutput
      ? `Found ${searchOutput.count} result${searchOutput.count === 1 ? "" : "s"} for "${searchOutput.query}"`
      : hasExpandableContent && (noResultsOutput || errorOutput)
        ? ((noResultsOutput ?? errorOutput)?.message ?? null)
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {searchOutput && (
            <ContentGrid>
              {searchOutput.results.map((r) => (
                <ContentCard key={r.id}>
                  <ContentCardHeader>
                    <ContentCardTitle>{r.title}</ContentCardTitle>
                  </ContentCardHeader>
                  {r.description && (
                    <ContentCardDescription>
                      {truncate(r.description, 200)}
                    </ContentCardDescription>
                  )}
                </ContentCard>
              ))}
            </ContentGrid>
          )}

          {noResultsOutput && (
            <div>
              <ContentMessage>{noResultsOutput.message}</ContentMessage>
              {noResultsOutput.suggestions &&
                noResultsOutput.suggestions.length > 0 && (
                  <ContentSuggestionsList items={noResultsOutput.suggestions} />
                )}
            </div>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}

export function CreateFeatureRequestTool({ part }: Props) {
  const output = getFeatureRequestOutput(part);
  const text = getAnimationText(part);
  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";
  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const normalized = useMemo(() => {
    if (!output) return null;
    return { title: getAccordionTitle(part.type, output) };
  }, [output, part.type]);

  const isOutputAvailable = part.state === "output-available" && !!output;

  const createdOutput =
    isOutputAvailable && output && isCreatedOutput(output) ? output : null;
  const errorOutput =
    isOutputAvailable && output && isErrorOutput(output) ? output : null;

  const hasExpandableContent =
    isOutputAvailable && (!!createdOutput || !!errorOutput);

  const accordionDescription =
    hasExpandableContent && createdOutput
      ? createdOutput.issue_title
      : hasExpandableContent && errorOutput
        ? errorOutput.message
        : null;

  return (
    <div className="py-2">
      <div className="flex items-center gap-2 text-sm text-muted-foreground">
        <ToolIcon
          toolType={part.type}
          isStreaming={isStreaming}
          isError={isError}
        />
        <MorphingTextAnimation
          text={text}
          className={isError ? "text-red-500" : undefined}
        />
      </div>

      {hasExpandableContent && normalized && (
        <ToolAccordion
          icon={<AccordionIcon toolType={part.type} />}
          title={normalized.title}
          description={accordionDescription}
        >
          {createdOutput && (
            <ContentCard>
              <ContentCardHeader>
                <ContentCardTitle>{createdOutput.issue_title}</ContentCardTitle>
              </ContentCardHeader>
              <div className="mt-2 flex items-center gap-2">
                <ContentBadge>
                  {createdOutput.is_new_issue ? "New" : "Existing"}
                </ContentBadge>
              </div>
              <ContentMessage>{createdOutput.message}</ContentMessage>
            </ContentCard>
          )}

          {errorOutput && (
            <div>
              <ContentMessage>{errorOutput.message}</ContentMessage>
              {errorOutput.error && (
                <ContentCardDescription>
                  {errorOutput.error}
                </ContentCardDescription>
              )}
            </div>
          )}
        </ToolAccordion>
      )}
    </div>
  );
}
@@ -0,0 +1,271 @@
|
|||||||
|
import {
|
||||||
|
CheckCircleIcon,
|
||||||
|
LightbulbIcon,
|
||||||
|
MagnifyingGlassIcon,
|
||||||
|
PlusCircleIcon,
|
||||||
|
} from "@phosphor-icons/react";
|
||||||
|
import type { ToolUIPart } from "ai";
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Types (local until API client is regenerated) */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
interface FeatureRequestInfo {
|
||||||
|
id: string;
|
||||||
|
identifier: string;
|
||||||
|
title: string;
|
||||||
|
description?: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface FeatureRequestSearchResponse {
|
||||||
|
type: "feature_request_search";
|
||||||
|
message: string;
|
||||||
|
results: FeatureRequestInfo[];
|
||||||
|
count: number;
|
||||||
|
query: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface FeatureRequestCreatedResponse {
|
||||||
|
type: "feature_request_created";
|
||||||
|
message: string;
|
||||||
|
issue_id: string;
|
||||||
|
issue_identifier: string;
|
||||||
|
issue_title: string;
|
||||||
|
issue_url: string;
|
||||||
|
is_new_issue: boolean;
|
||||||
|
customer_name: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface NoResultsResponse {
|
||||||
|
type: "no_results";
|
||||||
|
message: string;
|
||||||
|
suggestions?: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ErrorResponse {
|
||||||
|
type: "error";
|
||||||
|
message: string;
|
||||||
|
error?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export type FeatureRequestOutput =
|
||||||
|
| FeatureRequestSearchResponse
|
||||||
|
| FeatureRequestCreatedResponse
|
||||||
|
| NoResultsResponse
|
||||||
|
| ErrorResponse;
|
||||||
|
|
||||||
|
export type FeatureRequestToolType =
|
||||||
|
| "tool-search_feature_requests"
|
||||||
|
| "tool-create_feature_request"
|
||||||
|
| string;
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Output parsing */
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
|
||||||
|
function parseOutput(output: unknown): FeatureRequestOutput | null {
|
||||||
|
if (!output) return null;
|
||||||
|
if (typeof output === "string") {
|
||||||
|
const trimmed = output.trim();
|
||||||
|
if (!trimmed) return null;
|
||||||
|
try {
|
||||||
|
return parseOutput(JSON.parse(trimmed) as unknown);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (typeof output === "object") {
|
||||||
|
const type = (output as { type?: unknown }).type;
|
||||||
|
if (
|
||||||
|
type === "feature_request_search" ||
|
||||||
|
type === "feature_request_created" ||
|
||||||
|
type === "no_results" ||
|
||||||
|
type === "error"
|
||||||
|
) {
|
||||||
|
return output as FeatureRequestOutput;
|
||||||
|
}
|
||||||
|
// Fallback structural checks
|
||||||
|
if ("results" in output && "query" in output)
|
||||||
|
return output as FeatureRequestSearchResponse;
|
||||||
|
if ("issue_identifier" in output)
|
||||||
|
return output as FeatureRequestCreatedResponse;
|
||||||
|
if ("suggestions" in output && !("error" in output))
|
||||||
|
return output as NoResultsResponse;
|
||||||
|
if ("error" in output || "details" in output)
|
||||||
|
return output as ErrorResponse;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getFeatureRequestOutput(
|
||||||
|
part: unknown,
|
||||||
|
): FeatureRequestOutput | null {
|
||||||
|
if (!part || typeof part !== "object") return null;
|
||||||
|
return parseOutput((part as { output?: unknown }).output);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------ */
|
||||||
|
/* Type guards */
|
||||||
|
+/* ------------------------------------------------------------------ */
+
+export function isSearchResultsOutput(
+  output: FeatureRequestOutput,
+): output is FeatureRequestSearchResponse {
+  return (
+    output.type === "feature_request_search" ||
+    ("results" in output && "query" in output)
+  );
+}
+
+export function isCreatedOutput(
+  output: FeatureRequestOutput,
+): output is FeatureRequestCreatedResponse {
+  return (
+    output.type === "feature_request_created" || "issue_identifier" in output
+  );
+}
+
+export function isNoResultsOutput(
+  output: FeatureRequestOutput,
+): output is NoResultsResponse {
+  return (
+    output.type === "no_results" ||
+    ("suggestions" in output && !("error" in output))
+  );
+}
+
+export function isErrorOutput(
+  output: FeatureRequestOutput,
+): output is ErrorResponse {
+  return output.type === "error" || "error" in output;
+}
+
+/* ------------------------------------------------------------------ */
+/* Accordion metadata */
+/* ------------------------------------------------------------------ */
+
+export function getAccordionTitle(
+  toolType: FeatureRequestToolType,
+  output: FeatureRequestOutput,
+): string {
+  if (toolType === "tool-search_feature_requests") {
+    if (isSearchResultsOutput(output)) return "Feature requests";
+    if (isNoResultsOutput(output)) return "No feature requests found";
+    return "Feature request search error";
+  }
+  if (isCreatedOutput(output)) {
+    return output.is_new_issue
+      ? "Feature request created"
+      : "Added to feature request";
+  }
+  if (isErrorOutput(output)) return "Feature request error";
+  return "Feature request";
+}
+
+/* ------------------------------------------------------------------ */
+/* Animation text */
+/* ------------------------------------------------------------------ */
+
+interface AnimationPart {
+  type: FeatureRequestToolType;
+  state: ToolUIPart["state"];
+  input?: unknown;
+  output?: unknown;
+}
+
+export function getAnimationText(part: AnimationPart): string {
+  if (part.type === "tool-search_feature_requests") {
+    const query = (part.input as { query?: string } | undefined)?.query?.trim();
+    const queryText = query ? ` for "${query}"` : "";
+
+    switch (part.state) {
+      case "input-streaming":
+      case "input-available":
+        return `Searching feature requests${queryText}`;
+      case "output-available": {
+        const output = parseOutput(part.output);
+        if (!output) return `Searching feature requests${queryText}`;
+        if (isSearchResultsOutput(output)) {
+          return `Found ${output.count} feature request${output.count === 1 ? "" : "s"}${queryText}`;
+        }
+        if (isNoResultsOutput(output))
+          return `No feature requests found${queryText}`;
+        return `Error searching feature requests${queryText}`;
+      }
+      case "output-error":
+        return `Error searching feature requests${queryText}`;
+      default:
+        return "Searching feature requests";
+    }
+  }
+
+  // create_feature_request
+  const title = (part.input as { title?: string } | undefined)?.title?.trim();
+  const titleText = title ? ` "${title}"` : "";
+
+  switch (part.state) {
+    case "input-streaming":
+    case "input-available":
+      return `Creating feature request${titleText}`;
+    case "output-available": {
+      const output = parseOutput(part.output);
+      if (!output) return `Creating feature request${titleText}`;
+      if (isCreatedOutput(output)) {
+        return output.is_new_issue
+          ? "Feature request created"
+          : "Added to existing feature request";
+      }
+      if (isErrorOutput(output)) return "Error creating feature request";
+      return `Created feature request${titleText}`;
+    }
+    case "output-error":
+      return "Error creating feature request";
+    default:
+      return "Creating feature request";
+  }
+}
+
+/* ------------------------------------------------------------------ */
+/* Icons */
+/* ------------------------------------------------------------------ */
+
+export function ToolIcon({
+  toolType,
+  isStreaming,
+  isError,
+}: {
+  toolType: FeatureRequestToolType;
+  isStreaming?: boolean;
+  isError?: boolean;
+}) {
+  const IconComponent =
+    toolType === "tool-create_feature_request"
+      ? PlusCircleIcon
+      : MagnifyingGlassIcon;
+
+  return (
+    <IconComponent
+      size={14}
+      weight="regular"
+      className={
+        isError
+          ? "text-red-500"
+          : isStreaming
+            ? "text-neutral-500"
+            : "text-neutral-400"
+      }
+    />
+  );
+}
+
+export function AccordionIcon({
+  toolType,
+}: {
+  toolType: FeatureRequestToolType;
+}) {
+  const IconComponent =
+    toolType === "tool-create_feature_request"
+      ? CheckCircleIcon
+      : LightbulbIcon;
+  return <IconComponent size={32} weight="light" />;
+}
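A quick illustration of how the duck-typed guards above behave when the `type` discriminator is absent; this is a minimal sketch, and the payload value below is hypothetical, not part of the diff:

// Sketch only: a payload without a `type` field still narrows via key checks.
const payload = {
  query: "dark mode",
  results: [],
  count: 0,
} as unknown as FeatureRequestOutput;

if (isSearchResultsOutput(payload)) {
  // Narrowed to FeatureRequestSearchResponse: `results` and `query` are present.
  console.log(payload.query, payload.count);
}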
@@ -3,6 +3,7 @@
 import type { ToolUIPart } from "ai";
 import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
 import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
+import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
 import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
 import { ErrorCard } from "./components/ErrorCard/ErrorCard";
 import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
@@ -11,6 +12,7 @@ import {
   getAnimationText,
   getRunBlockToolOutput,
   isRunBlockBlockOutput,
+  isRunBlockDetailsOutput,
   isRunBlockErrorOutput,
   isRunBlockSetupRequirementsOutput,
   ToolIcon,
@@ -41,6 +43,7 @@ export function RunBlockTool({ part }: Props) {
     part.state === "output-available" &&
     !!output &&
     (isRunBlockBlockOutput(output) ||
+      isRunBlockDetailsOutput(output) ||
       isRunBlockSetupRequirementsOutput(output) ||
       isRunBlockErrorOutput(output));

@@ -58,6 +61,10 @@ export function RunBlockTool({ part }: Props) {
     <ToolAccordion {...getAccordionMeta(output)}>
       {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
+
+      {isRunBlockDetailsOutput(output) && (
+        <BlockDetailsCard output={output} />
+      )}

       {isRunBlockSetupRequirementsOutput(output) && (
         <SetupRequirementsCard output={output} />
       )}
@@ -0,0 +1,188 @@
+import type { Meta, StoryObj } from "@storybook/nextjs";
+import { ResponseType } from "@/app/api/__generated__/models/responseType";
+import type { BlockDetailsResponse } from "../../helpers";
+import { BlockDetailsCard } from "./BlockDetailsCard";
+
+const meta: Meta<typeof BlockDetailsCard> = {
+  title: "Copilot/RunBlock/BlockDetailsCard",
+  component: BlockDetailsCard,
+  parameters: {
+    layout: "centered",
+  },
+  tags: ["autodocs"],
+  decorators: [
+    (Story) => (
+      <div style={{ maxWidth: 480 }}>
+        <Story />
+      </div>
+    ),
+  ],
+};
+
+export default meta;
+type Story = StoryObj<typeof meta>;
+
+const baseBlock: BlockDetailsResponse = {
+  type: ResponseType.block_details,
+  message:
+    "Here are the details for the GetWeather block. Provide the required inputs to run it.",
+  session_id: "session-123",
+  user_authenticated: true,
+  block: {
+    id: "block-abc-123",
+    name: "GetWeather",
+    description: "Fetches current weather data for a given location.",
+    inputs: {
+      type: "object",
+      properties: {
+        location: {
+          title: "Location",
+          type: "string",
+          description:
+            "City name or coordinates (e.g. 'London' or '51.5,-0.1')",
+        },
+        units: {
+          title: "Units",
+          type: "string",
+          description: "Temperature units: 'metric' or 'imperial'",
+        },
+      },
+      required: ["location"],
+    },
+    outputs: {
+      type: "object",
+      properties: {
+        temperature: {
+          title: "Temperature",
+          type: "number",
+          description: "Current temperature in the requested units",
+        },
+        condition: {
+          title: "Condition",
+          type: "string",
+          description: "Weather condition description (e.g. 'Sunny', 'Rain')",
+        },
+      },
+    },
+    credentials: [],
+  },
+};
+
+export const Default: Story = {
+  args: {
+    output: baseBlock,
+  },
+};
+
+export const InputsOnly: Story = {
+  args: {
+    output: {
+      ...baseBlock,
+      message: "This block requires inputs. No outputs are defined.",
+      block: {
+        ...baseBlock.block,
+        outputs: {},
+      },
+    },
+  },
+};
+
+export const OutputsOnly: Story = {
+  args: {
+    output: {
+      ...baseBlock,
+      message: "This block has no required inputs.",
+      block: {
+        ...baseBlock.block,
+        inputs: {},
+      },
+    },
+  },
+};
+
+export const ManyFields: Story = {
+  args: {
+    output: {
+      ...baseBlock,
+      message: "Block with many input and output fields.",
+      block: {
+        ...baseBlock.block,
+        name: "SendEmail",
+        description: "Sends an email via SMTP.",
+        inputs: {
+          type: "object",
+          properties: {
+            to: {
+              title: "To",
+              type: "string",
+              description: "Recipient email address",
+            },
+            subject: {
+              title: "Subject",
+              type: "string",
+              description: "Email subject line",
+            },
+            body: {
+              title: "Body",
+              type: "string",
+              description: "Email body content",
+            },
+            cc: {
+              title: "CC",
+              type: "string",
+              description: "CC recipients (comma-separated)",
+            },
+            bcc: {
+              title: "BCC",
+              type: "string",
+              description: "BCC recipients (comma-separated)",
+            },
+          },
+          required: ["to", "subject", "body"],
+        },
+        outputs: {
+          type: "object",
+          properties: {
+            message_id: {
+              title: "Message ID",
+              type: "string",
+              description: "Unique ID of the sent email",
+            },
+            status: {
+              title: "Status",
+              type: "string",
+              description: "Delivery status",
+            },
+          },
+        },
+      },
+    },
+  },
+};
+
+export const NoFieldDescriptions: Story = {
+  args: {
+    output: {
+      ...baseBlock,
+      message: "Fields without descriptions.",
+      block: {
+        ...baseBlock.block,
+        name: "SimpleBlock",
+        inputs: {
+          type: "object",
+          properties: {
+            input_a: { title: "Input A", type: "string" },
+            input_b: { title: "Input B", type: "number" },
+          },
+          required: ["input_a"],
+        },
+        outputs: {
+          type: "object",
+          properties: {
+            result: { title: "Result", type: "string" },
+          },
+        },
+      },
+    },
+  },
+};
@@ -0,0 +1,103 @@
+"use client";
+
+import type { BlockDetailsResponse } from "../../helpers";
+import {
+  ContentBadge,
+  ContentCard,
+  ContentCardDescription,
+  ContentCardTitle,
+  ContentGrid,
+  ContentMessage,
+} from "../../../../components/ToolAccordion/AccordionContent";
+
+interface Props {
+  output: BlockDetailsResponse;
+}
+
+function SchemaFieldList({
+  title,
+  properties,
+  required,
+}: {
+  title: string;
+  properties: Record<string, unknown>;
+  required?: string[];
+}) {
+  const entries = Object.entries(properties);
+  if (entries.length === 0) return null;
+
+  const requiredSet = new Set(required ?? []);
+
+  return (
+    <ContentCard>
+      <ContentCardTitle className="text-xs">{title}</ContentCardTitle>
+      <div className="mt-2 grid gap-2">
+        {entries.map(([name, schema]) => {
+          const field = schema as Record<string, unknown> | undefined;
+          const fieldTitle =
+            typeof field?.title === "string" ? field.title : name;
+          const fieldType =
+            typeof field?.type === "string" ? field.type : "unknown";
+          const description =
+            typeof field?.description === "string"
+              ? field.description
+              : undefined;
+
+          return (
+            <div key={name} className="rounded-xl border p-2">
+              <div className="flex items-center justify-between gap-2">
+                <ContentCardTitle className="text-xs">
+                  {fieldTitle}
+                </ContentCardTitle>
+                <div className="flex gap-1">
+                  <ContentBadge>{fieldType}</ContentBadge>
+                  {requiredSet.has(name) && (
+                    <ContentBadge>Required</ContentBadge>
+                  )}
+                </div>
+              </div>
+              {description && (
+                <ContentCardDescription className="mt-1 text-xs">
+                  {description}
+                </ContentCardDescription>
+              )}
+            </div>
+          );
+        })}
+      </div>
+    </ContentCard>
+  );
+}
+
+export function BlockDetailsCard({ output }: Props) {
+  const inputs = output.block.inputs as {
+    properties?: Record<string, unknown>;
+    required?: string[];
+  } | null;
+  const outputs = output.block.outputs as {
+    properties?: Record<string, unknown>;
+    required?: string[];
+  } | null;
+
+  return (
+    <ContentGrid>
+      <ContentMessage>{output.message}</ContentMessage>
+
+      {inputs?.properties && Object.keys(inputs.properties).length > 0 && (
+        <SchemaFieldList
+          title="Inputs"
+          properties={inputs.properties}
+          required={inputs.required}
+        />
+      )}
+
+      {outputs?.properties && Object.keys(outputs.properties).length > 0 && (
+        <SchemaFieldList
+          title="Outputs"
+          properties={outputs.properties}
+          required={outputs.required}
+        />
+      )}
+    </ContentGrid>
+  );
+}
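As a usage note: `SchemaFieldList` returns `null` for an empty `properties` object and `BlockDetailsCard` additionally guards on `Object.keys(...).length`, so a block that declares no fields renders only its message. A minimal sketch, assuming the `BlockDetailsResponse` shape introduced in the helpers diff below; the payload values are hypothetical:

// Sketch only: with empty input/output schemas, just <ContentMessage> renders.
const bare: BlockDetailsResponse = {
  type: ResponseType.block_details,
  message: "This block declares no input or output fields.",
  user_authenticated: true,
  block: {
    id: "block-000",
    name: "BareBlock",
    description: "Hypothetical block with no schema.",
    inputs: {},
    outputs: {},
    credentials: [],
  },
};
// <BlockDetailsCard output={bare} /> shows only the message, no field cards.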
@@ -10,18 +10,37 @@ import {
 import type { ToolUIPart } from "ai";
 import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

+/** Block details returned on first run_block attempt (before input_data provided). */
+export interface BlockDetailsResponse {
+  type: typeof ResponseType.block_details;
+  message: string;
+  session_id?: string | null;
+  block: {
+    id: string;
+    name: string;
+    description: string;
+    inputs: Record<string, unknown>;
+    outputs: Record<string, unknown>;
+    credentials: unknown[];
+  };
+  user_authenticated: boolean;
+}
+
 export interface RunBlockInput {
   block_id?: string;
+  block_name?: string;
   input_data?: Record<string, unknown>;
 }

 export type RunBlockToolOutput =
   | SetupRequirementsResponse
+  | BlockDetailsResponse
   | BlockOutputResponse
   | ErrorResponse;

 const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
   ResponseType.setup_requirements,
+  ResponseType.block_details,
   ResponseType.block_output,
   ResponseType.error,
 ]);
@@ -35,6 +54,15 @@ export function isRunBlockSetupRequirementsOutput(
   );
 }

+export function isRunBlockDetailsOutput(
+  output: RunBlockToolOutput,
+): output is BlockDetailsResponse {
+  return (
+    output.type === ResponseType.block_details ||
+    ("block" in output && typeof output.block === "object")
+  );
+}
+
 export function isRunBlockBlockOutput(
   output: RunBlockToolOutput,
 ): output is BlockOutputResponse {
@@ -64,6 +92,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
     return output as RunBlockToolOutput;
   }
   if ("block_id" in output) return output as BlockOutputResponse;
+  if ("block" in output) return output as BlockDetailsResponse;
   if ("setup_info" in output) return output as SetupRequirementsResponse;
   if ("error" in output || "details" in output)
     return output as ErrorResponse;
@@ -84,17 +113,25 @@ export function getAnimationText(part: {
   output?: unknown;
 }): string {
   const input = part.input as RunBlockInput | undefined;
+  const blockName = input?.block_name?.trim();
   const blockId = input?.block_id?.trim();
-  const blockText = blockId ? ` "${blockId}"` : "";
+  // Prefer block_name if available, otherwise fall back to block_id
+  const blockText = blockName
+    ? ` "${blockName}"`
+    : blockId
+      ? ` "${blockId}"`
+      : "";

 switch (part.state) {
     case "input-streaming":
     case "input-available":
-      return `Running the block${blockText}`;
+      return `Running${blockText}`;
     case "output-available": {
       const output = parseOutput(part.output);
-      if (!output) return `Running the block${blockText}`;
+      if (!output) return `Running${blockText}`;
       if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
+      if (isRunBlockDetailsOutput(output))
+        return `Details for "${output.block.name}"`;
       if (isRunBlockSetupRequirementsOutput(output)) {
         return `Setup needed for "${output.setup_info.agent_name}"`;
       }
@@ -158,6 +195,21 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
     };
   }

+  if (isRunBlockDetailsOutput(output)) {
+    const inputKeys = Object.keys(
+      (output.block.inputs as { properties?: Record<string, unknown> })
+        ?.properties ?? {},
+    );
+    return {
+      icon,
+      title: output.block.name,
+      description:
+        inputKeys.length > 0
+          ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
+          : output.message,
+    };
+  }
+
 if (isRunBlockSetupRequirementsOutput(output)) {
     const missingCredsCount = Object.keys(
       (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
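One ordering detail worth noting in `parseOutput`: the `"block_id"` check runs before the newly added `"block"` check, so only payloads that carry `block` without `block_id` fall through to `BlockDetailsResponse`. A hedged sketch, with the payload shape assumed from the interface above:

// Sketch only: duck-typed fallthrough order in parseOutput.
const detailsLike = {
  message: "Details for GetWeather",
  block: {
    id: "b1",
    name: "GetWeather",
    description: "",
    inputs: {},
    outputs: {},
    credentials: [],
  },
  user_authenticated: true,
};
// No recognised `type`, no `block_id`, no `setup_info`, and no `error`/`details`
// keys, so the `"block" in output` branch classifies this as BlockDetailsResponse.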
@@ -1,631 +0,0 @@
-"use client";
-import { useParams, useRouter } from "next/navigation";
-import { useQueryState } from "nuqs";
-import React, {
-  useCallback,
-  useEffect,
-  useMemo,
-  useRef,
-  useState,
-} from "react";
-
-import {
-  Graph,
-  GraphExecution,
-  GraphExecutionID,
-  GraphExecutionMeta,
-  GraphID,
-  LibraryAgent,
-  LibraryAgentID,
-  LibraryAgentPreset,
-  LibraryAgentPresetID,
-  Schedule,
-  ScheduleID,
-} from "@/lib/autogpt-server-api";
-import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-import { exportAsJSONFile } from "@/lib/utils";
-
-import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
-import type { ButtonAction } from "@/components/__legacy__/types";
-import { Button } from "@/components/__legacy__/ui/button";
-import {
-  Dialog,
-  DialogContent,
-  DialogDescription,
-  DialogFooter,
-  DialogHeader,
-  DialogTitle,
-} from "@/components/__legacy__/ui/dialog";
-import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
-import {
-  useToast,
-  useToastOnFail,
-} from "@/components/molecules/Toast/use-toast";
-import { AgentRunDetailsView } from "./components/agent-run-details-view";
-import { AgentRunDraftView } from "./components/agent-run-draft-view";
-import { CreatePresetDialog } from "./components/create-preset-dialog";
-import { useAgentRunsInfinite } from "./use-agent-runs";
-import { AgentRunsSelectorList } from "./components/agent-runs-selector-list";
-import { AgentScheduleDetailsView } from "./components/agent-schedule-details-view";
-
-export function OldAgentLibraryView() {
-  const { id: agentID }: { id: LibraryAgentID } = useParams();
-  const [executionId, setExecutionId] = useQueryState("executionId");
-  const toastOnFail = useToastOnFail();
-  const { toast } = useToast();
-  const router = useRouter();
-  const api = useBackendAPI();
-
-  // ============================ STATE =============================
-
-  const [graph, setGraph] = useState<Graph | null>(null); // Graph version corresponding to LibraryAgent
-  const [agent, setAgent] = useState<LibraryAgent | null>(null);
-  const agentRunsQuery = useAgentRunsInfinite(graph?.id); // only runs once graph.id is known
-  const agentRuns = agentRunsQuery.agentRuns;
-  const [agentPresets, setAgentPresets] = useState<LibraryAgentPreset[]>([]);
-  const [schedules, setSchedules] = useState<Schedule[]>([]);
-  const [selectedView, selectView] = useState<
-    | { type: "run"; id?: GraphExecutionID }
-    | { type: "preset"; id: LibraryAgentPresetID }
-    | { type: "schedule"; id: ScheduleID }
-  >({ type: "run" });
-  const [selectedRun, setSelectedRun] = useState<
-    GraphExecution | GraphExecutionMeta | null
-  >(null);
-  const selectedSchedule =
-    selectedView.type == "schedule"
-      ? schedules.find((s) => s.id == selectedView.id)
-      : null;
-  const [isFirstLoad, setIsFirstLoad] = useState<boolean>(true);
-  const [agentDeleteDialogOpen, setAgentDeleteDialogOpen] =
-    useState<boolean>(false);
-  const [confirmingDeleteAgentRun, setConfirmingDeleteAgentRun] =
-    useState<GraphExecutionMeta | null>(null);
-  const [confirmingDeleteAgentPreset, setConfirmingDeleteAgentPreset] =
-    useState<LibraryAgentPresetID | null>(null);
-  const [copyAgentDialogOpen, setCopyAgentDialogOpen] = useState(false);
-  const [creatingPresetFromExecutionID, setCreatingPresetFromExecutionID] =
-    useState<GraphExecutionID | null>(null);
-
-  // Set page title with agent name
-  useEffect(() => {
-    if (agent) {
-      document.title = `${agent.name} - Library - AutoGPT Platform`;
-    }
-  }, [agent]);
-
-  const openRunDraftView = useCallback(() => {
-    selectView({ type: "run" });
-  }, []);
-
-  const selectRun = useCallback((id: GraphExecutionID) => {
-    selectView({ type: "run", id });
-  }, []);
-
-  const selectPreset = useCallback((id: LibraryAgentPresetID) => {
-    selectView({ type: "preset", id });
-  }, []);
-
-  const selectSchedule = useCallback((id: ScheduleID) => {
-    selectView({ type: "schedule", id });
-  }, []);
-
-  const graphVersions = useRef<Record<number, Graph>>({});
-  const loadingGraphVersions = useRef<Record<number, Promise<Graph>>>({});
-  const getGraphVersion = useCallback(
-    async (graphID: GraphID, version: number) => {
-      if (version in graphVersions.current)
-        return graphVersions.current[version];
-      if (version in loadingGraphVersions.current)
-        return loadingGraphVersions.current[version];
-
-      const pendingGraph = api.getGraph(graphID, version).then((graph) => {
-        graphVersions.current[version] = graph;
-        return graph;
-      });
-      // Cache promise as well to avoid duplicate requests
-      loadingGraphVersions.current[version] = pendingGraph;
-      return pendingGraph;
-    },
-    [api, graphVersions, loadingGraphVersions],
-  );
-
-  const lastRefresh = useRef<number>(0);
-  const refreshPageData = useCallback(() => {
-    if (Date.now() - lastRefresh.current < 2e3) return; // 2 second debounce
-    lastRefresh.current = Date.now();
-
-    api.getLibraryAgent(agentID).then((agent) => {
-      setAgent(agent);
-
-      getGraphVersion(agent.graph_id, agent.graph_version).then(
-        (_graph) =>
-          (graph && graph.version == _graph.version) || setGraph(_graph),
-      );
-      Promise.all([
-        agentRunsQuery.refetchRuns(),
-        api.listLibraryAgentPresets({
-          graph_id: agent.graph_id,
-          page_size: 100,
-        }),
-      ]).then(([runsQueryResult, presets]) => {
-        setAgentPresets(presets.presets);
-
-        const newestAgentRunsResponse = runsQueryResult.data?.pages[0];
-        if (!newestAgentRunsResponse || newestAgentRunsResponse.status != 200)
-          return;
-        const newestAgentRuns = newestAgentRunsResponse.data.executions;
-        // Preload the corresponding graph versions for the latest 10 runs
-        new Set(
-          newestAgentRuns.slice(0, 10).map((run) => run.graph_version),
-        ).forEach((version) => getGraphVersion(agent.graph_id, version));
-      });
-    });
-  }, [api, agentID, getGraphVersion, graph]);
-
-  // On first load: select the latest run
-  useEffect(() => {
-    // Only for first load or first execution
-    if (selectedView.id || !isFirstLoad) return;
-    if (agentRuns.length == 0 && agentPresets.length == 0) return;
-
-    setIsFirstLoad(false);
-    if (agentRuns.length > 0) {
-      // select latest run
-      const latestRun = agentRuns.reduce((latest, current) => {
-        if (!latest.started_at && !current.started_at) return latest;
-        if (!latest.started_at) return current;
-        if (!current.started_at) return latest;
-        return latest.started_at > current.started_at ? latest : current;
-      }, agentRuns[0]);
-      selectRun(latestRun.id as GraphExecutionID);
-    } else {
-      // select top preset
-      const latestPreset = agentPresets.toSorted(
-        (a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
-      )[0];
-      selectPreset(latestPreset.id);
-    }
-  }, [
-    isFirstLoad,
-    selectedView.id,
-    agentRuns,
-    agentPresets,
-    selectRun,
-    selectPreset,
-  ]);
-
-  useEffect(() => {
-    if (executionId) {
-      selectRun(executionId as GraphExecutionID);
-      setExecutionId(null);
-    }
-  }, [executionId, selectRun, setExecutionId]);
-
-  // Initial load
-  useEffect(() => {
-    refreshPageData();
-
-    // Show a toast when the WebSocket connection disconnects
-    let connectionToast: ReturnType<typeof toast> | null = null;
-    const cancelDisconnectHandler = api.onWebSocketDisconnect(() => {
-      connectionToast ??= toast({
-        title: "Connection to server was lost",
-        variant: "destructive",
-        description: (
-          <div className="flex items-center">
-            Trying to reconnect...
-            <LoadingSpinner className="ml-1.5 size-3.5" />
-          </div>
-        ),
-        duration: Infinity,
-        dismissable: true,
-      });
-    });
-    const cancelConnectHandler = api.onWebSocketConnect(() => {
-      if (connectionToast)
-        connectionToast.update({
-          id: connectionToast.id,
-          title: "✅ Connection re-established",
-          variant: "default",
-          description: (
-            <div className="flex items-center">
-              Refreshing data...
-              <LoadingSpinner className="ml-1.5 size-3.5" />
-            </div>
-          ),
-          duration: 2000,
-          dismissable: true,
-        });
-      connectionToast = null;
-    });
-    return () => {
-      cancelDisconnectHandler();
-      cancelConnectHandler();
-    };
-  }, []);
-
-  // Subscribe to WebSocket updates for agent runs
-  useEffect(() => {
-    if (!agent?.graph_id) return;
-
-    return api.onWebSocketConnect(() => {
-      refreshPageData(); // Sync up on (re)connect
-
-      // Subscribe to all executions for this agent
-      api.subscribeToGraphExecutions(agent.graph_id);
-    });
-  }, [api, agent?.graph_id, refreshPageData]);
-
-  // Handle execution updates
-  useEffect(() => {
-    const detachExecUpdateHandler = api.onWebSocketMessage(
-      "graph_execution_event",
-      (data) => {
-        if (data.graph_id != agent?.graph_id) return;
-
-        agentRunsQuery.upsertAgentRun(data);
-        if (data.id === selectedView.id) {
-          // Update currently viewed run
-          setSelectedRun(data);
-        }
-      },
-    );
-
-    return () => {
-      detachExecUpdateHandler();
-    };
-  }, [api, agent?.graph_id, selectedView.id]);
-
-  // Pre-load selectedRun based on selectedView
-  useEffect(() => {
-    if (selectedView.type != "run" || !selectedView.id) return;
-
-    const newSelectedRun = agentRuns.find((run) => run.id == selectedView.id);
-    if (selectedView.id !== selectedRun?.id) {
-      // Pull partial data from "cache" while waiting for the rest to load
-      setSelectedRun((newSelectedRun as GraphExecutionMeta) ?? null);
-    }
-  }, [api, selectedView, agentRuns, selectedRun?.id]);
-
-  // Load selectedRun based on selectedView; refresh on agent refresh
-  useEffect(() => {
-    if (selectedView.type != "run" || !selectedView.id || !agent) return;
-
-    api
-      .getGraphExecutionInfo(agent.graph_id, selectedView.id)
-      .then(async (run) => {
-        // Ensure corresponding graph version is available before rendering I/O
-        await getGraphVersion(run.graph_id, run.graph_version);
-        setSelectedRun(run);
-      });
-  }, [api, selectedView, agent, getGraphVersion]);
-
-  const fetchSchedules = useCallback(async () => {
-    if (!agent) return;
-
-    setSchedules(await api.listGraphExecutionSchedules(agent.graph_id));
-  }, [api, agent?.graph_id]);
-
-  useEffect(() => {
-    fetchSchedules();
-  }, [fetchSchedules]);
-
-  // =========================== ACTIONS ============================
-
-  const deleteRun = useCallback(
-    async (run: GraphExecutionMeta) => {
-      if (run.status == "RUNNING" || run.status == "QUEUED") {
-        await api.stopGraphExecution(run.graph_id, run.id);
-      }
-      await api.deleteGraphExecution(run.id);
-
-      setConfirmingDeleteAgentRun(null);
-      if (selectedView.type == "run" && selectedView.id == run.id) {
-        openRunDraftView();
-      }
-      agentRunsQuery.removeAgentRun(run.id);
-    },
-    [api, selectedView, openRunDraftView],
-  );
-
-  const deletePreset = useCallback(
-    async (presetID: LibraryAgentPresetID) => {
-      await api.deleteLibraryAgentPreset(presetID);
-
-      setConfirmingDeleteAgentPreset(null);
-      if (selectedView.type == "preset" && selectedView.id == presetID) {
-        openRunDraftView();
-      }
-      setAgentPresets((presets) => presets.filter((p) => p.id !== presetID));
-    },
-    [api, selectedView, openRunDraftView],
-  );
-
-  const deleteSchedule = useCallback(
-    async (scheduleID: ScheduleID) => {
-      const removedSchedule =
-        await api.deleteGraphExecutionSchedule(scheduleID);
-
-      setSchedules((schedules) => {
-        const newSchedules = schedules.filter(
-          (s) => s.id !== removedSchedule.id,
-        );
-        if (
-          selectedView.type == "schedule" &&
-          selectedView.id == removedSchedule.id
-        ) {
-          if (newSchedules.length > 0) {
-            // Select next schedule if available
-            selectSchedule(newSchedules[0].id);
-          } else {
-            // Reset to draft view if current schedule was deleted
-            openRunDraftView();
-          }
-        }
-        return newSchedules;
-      });
-      openRunDraftView();
-    },
-    [schedules, api],
-  );
-
-  const handleCreatePresetFromRun = useCallback(
-    async (name: string, description: string) => {
-      if (!creatingPresetFromExecutionID) return;
-
-      await api
-        .createLibraryAgentPreset({
-          name,
-          description,
-          graph_execution_id: creatingPresetFromExecutionID,
-        })
-        .then((preset) => {
-          setAgentPresets((prev) => [...prev, preset]);
-          selectPreset(preset.id);
-          setCreatingPresetFromExecutionID(null);
-        })
-        .catch(toastOnFail("create a preset"));
-    },
-    [api, creatingPresetFromExecutionID, selectPreset, toast],
-  );
-
-  const downloadGraph = useCallback(
-    async () =>
-      agent &&
-      // Export sanitized graph from backend
-      api
-        .getGraph(agent.graph_id, agent.graph_version, true)
-        .then((graph) =>
-          exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`),
-        ),
-    [api, agent],
-  );
-
-  const copyAgent = useCallback(async () => {
-    setCopyAgentDialogOpen(false);
-    api
-      .forkLibraryAgent(agentID)
-      .then((newAgent) => {
-        router.push(`/library/agents/${newAgent.id}`);
-      })
-      .catch((error) => {
-        console.error("Error copying agent:", error);
-        toast({
-          title: "Error copying agent",
-          description: `An error occurred while copying the agent: ${error.message}`,
-          variant: "destructive",
-        });
-      });
-  }, [agentID, api, router, toast]);
-
-  const agentActions: ButtonAction[] = useMemo(
-    () => [
-      {
-        label: "Customize agent",
-        href: `/build?flowID=${agent?.graph_id}&flowVersion=${agent?.graph_version}`,
-        disabled: !agent?.can_access_graph,
-      },
-      { label: "Export agent to file", callback: downloadGraph },
-      ...(!agent?.can_access_graph
-        ? [
-            {
-              label: "Edit a copy",
-              callback: () => setCopyAgentDialogOpen(true),
-            },
-          ]
-        : []),
-      {
-        label: "Delete agent",
-        callback: () => setAgentDeleteDialogOpen(true),
-      },
-    ],
-    [agent, downloadGraph],
-  );
-
-  const runGraph =
-    graphVersions.current[selectedRun?.graph_version ?? 0] ?? graph;
-
-  const onCreateSchedule = useCallback(
-    (schedule: Schedule) => {
-      setSchedules((prev) => [...prev, schedule]);
-      selectSchedule(schedule.id);
-    },
-    [selectView],
-  );
-
-  const onCreatePreset = useCallback(
-    (preset: LibraryAgentPreset) => {
-      setAgentPresets((prev) => [...prev, preset]);
-      selectPreset(preset.id);
-    },
-    [selectPreset],
-  );
-
-  const onUpdatePreset = useCallback(
-    (updated: LibraryAgentPreset) => {
-      setAgentPresets((prev) =>
-        prev.map((p) => (p.id === updated.id ? updated : p)),
-      );
-      selectPreset(updated.id);
-    },
-    [selectPreset],
-  );
-
-  if (!agent || !graph) {
-    return <LoadingBox className="h-[90vh]" />;
-  }
-
-  return (
-    <div className="container justify-stretch p-0 pt-16 lg:flex">
-      {/* Sidebar w/ list of runs */}
-      {/* TODO: render this below header in sm and md layouts */}
-      <AgentRunsSelectorList
-        className="agpt-div w-full border-b pb-2 lg:w-auto lg:border-b-0 lg:border-r lg:pb-0"
-        agent={agent}
-        agentRunsQuery={agentRunsQuery}
-        agentPresets={agentPresets}
-        schedules={schedules}
-        selectedView={selectedView}
-        onSelectRun={selectRun}
-        onSelectPreset={selectPreset}
-        onSelectSchedule={selectSchedule}
-        onSelectDraftNewRun={openRunDraftView}
-        doDeleteRun={setConfirmingDeleteAgentRun}
-        doDeletePreset={setConfirmingDeleteAgentPreset}
-        doDeleteSchedule={deleteSchedule}
-        doCreatePresetFromRun={setCreatingPresetFromExecutionID}
-      />
-
-      <div className="flex-1">
-        {/* Header */}
-        <div className="agpt-div w-full border-b">
-          <h1
-            data-testid="agent-title"
-            className="font-poppins text-3xl font-medium"
-          >
-            {
-              agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */
-            }
-          </h1>
-        </div>
-
-        {/* Run / Schedule views */}
-        {(selectedView.type == "run" && selectedView.id ? (
-          selectedRun && runGraph ? (
-            <AgentRunDetailsView
-              agent={agent}
-              graph={runGraph}
-              run={selectedRun}
-              agentActions={agentActions}
-              onRun={selectRun}
-              doDeleteRun={() => setConfirmingDeleteAgentRun(selectedRun)}
-              doCreatePresetFromRun={() =>
-                setCreatingPresetFromExecutionID(selectedRun.id)
-              }
-            />
-          ) : null
-        ) : selectedView.type == "run" ? (
-          /* Draft new runs / Create new presets */
-          <AgentRunDraftView
-            graph={graph}
-            onRun={selectRun}
-            onCreateSchedule={onCreateSchedule}
-            onCreatePreset={onCreatePreset}
-            agentActions={agentActions}
-            recommendedScheduleCron={agent?.recommended_schedule_cron || null}
-          />
-        ) : selectedView.type == "preset" ? (
-          /* Edit & update presets */
-          <AgentRunDraftView
-            graph={graph}
-            agentPreset={
-              agentPresets.find((preset) => preset.id == selectedView.id)!
-            }
-            onRun={selectRun}
-            recommendedScheduleCron={agent?.recommended_schedule_cron || null}
-            onCreateSchedule={onCreateSchedule}
-            onUpdatePreset={onUpdatePreset}
-            doDeletePreset={setConfirmingDeleteAgentPreset}
-            agentActions={agentActions}
-          />
-        ) : selectedView.type == "schedule" ? (
-          selectedSchedule &&
-          graph && (
-            <AgentScheduleDetailsView
-              graph={graph}
-              schedule={selectedSchedule}
-              // agent={agent}
-              agentActions={agentActions}
-              onForcedRun={selectRun}
-              doDeleteSchedule={deleteSchedule}
-            />
-          )
-        ) : null) || <LoadingBox className="h-[70vh]" />}
-
-        <DeleteConfirmDialog
-          entityType="agent"
-          open={agentDeleteDialogOpen}
-          onOpenChange={setAgentDeleteDialogOpen}
-          onDoDelete={() =>
-            agent &&
-            api.deleteLibraryAgent(agent.id).then(() => router.push("/library"))
-          }
-        />
-
-        <DeleteConfirmDialog
-          entityType="agent run"
-          open={!!confirmingDeleteAgentRun}
-          onOpenChange={(open) => !open && setConfirmingDeleteAgentRun(null)}
-          onDoDelete={() =>
-            confirmingDeleteAgentRun && deleteRun(confirmingDeleteAgentRun)
-          }
-        />
-        <DeleteConfirmDialog
-          entityType={agent.has_external_trigger ? "trigger" : "agent preset"}
-          open={!!confirmingDeleteAgentPreset}
-          onOpenChange={(open) => !open && setConfirmingDeleteAgentPreset(null)}
-          onDoDelete={() =>
-            confirmingDeleteAgentPreset &&
-            deletePreset(confirmingDeleteAgentPreset)
-          }
-        />
-        {/* Copy agent confirmation dialog */}
-        <Dialog
-          onOpenChange={setCopyAgentDialogOpen}
-          open={copyAgentDialogOpen}
-        >
-          <DialogContent>
-            <DialogHeader>
-              <DialogTitle>You're making an editable copy</DialogTitle>
-              <DialogDescription className="pt-2">
-                The original Marketplace agent stays the same and cannot be
-                edited. We'll save a new version of this agent to your
-                Library. From there, you can customize it however you'd
-                like by clicking "Customize agent" — this will open
-                the builder where you can see and modify the inner workings.
-              </DialogDescription>
-            </DialogHeader>
-            <DialogFooter className="justify-end">
-              <Button
-                type="button"
-                variant="outline"
-                onClick={() => setCopyAgentDialogOpen(false)}
-              >
-                Cancel
-              </Button>
-              <Button type="button" onClick={copyAgent}>
-                Continue
-              </Button>
-            </DialogFooter>
-          </DialogContent>
-        </Dialog>
-        <CreatePresetDialog
-          open={!!creatingPresetFromExecutionID}
-          onOpenChange={() => setCreatingPresetFromExecutionID(null)}
-          onConfirm={handleCreatePresetFromRun}
-        />
-      </div>
-    </div>
-  );
-}
@@ -1,445 +0,0 @@
|
|||||||
"use client";
|
|
||||||
import { format, formatDistanceToNow, formatDistanceStrict } from "date-fns";
|
|
||||||
import React, { useCallback, useMemo, useEffect } from "react";
|
|
||||||
|
|
||||||
import {
|
|
||||||
Graph,
|
|
||||||
GraphExecution,
|
|
||||||
GraphExecutionID,
|
|
||||||
GraphExecutionMeta,
|
|
||||||
LibraryAgent,
|
|
||||||
} from "@/lib/autogpt-server-api";
|
|
||||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
|
||||||
|
|
||||||
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
|
|
||||||
import type { ButtonAction } from "@/components/__legacy__/types";
|
|
||||||
import {
|
|
||||||
Card,
|
|
||||||
CardContent,
|
|
||||||
CardHeader,
|
|
||||||
CardTitle,
|
|
||||||
} from "@/components/__legacy__/ui/card";
|
|
||||||
import {
|
|
||||||
IconRefresh,
|
|
||||||
IconSquare,
|
|
||||||
IconCircleAlert,
|
|
||||||
} from "@/components/__legacy__/ui/icons";
|
|
||||||
import { Input } from "@/components/__legacy__/ui/input";
|
|
||||||
import LoadingBox from "@/components/__legacy__/ui/loading";
|
|
||||||
import {
|
|
||||||
Tooltip,
|
|
||||||
TooltipContent,
|
|
||||||
TooltipProvider,
|
|
||||||
TooltipTrigger,
|
|
||||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
|
||||||
import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
|
|
||||||
|
|
||||||
import { AgentRunStatus, agentRunStatusMap } from "./agent-run-status-chip";
|
|
||||||
import useCredits from "@/hooks/useCredits";
|
|
||||||
import { AgentRunOutputView } from "./agent-run-output-view";
|
|
||||||
import { analytics } from "@/services/analytics";
|
|
||||||
import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList";
|
|
||||||
import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews";
|
|
||||||
|
|
||||||
export function AgentRunDetailsView({
|
|
||||||
agent,
|
|
||||||
graph,
|
|
||||||
run,
|
|
||||||
agentActions,
|
|
||||||
onRun,
|
|
||||||
doDeleteRun,
|
|
||||||
doCreatePresetFromRun,
|
|
||||||
}: {
|
|
||||||
agent: LibraryAgent;
|
|
||||||
graph: Graph;
|
|
||||||
run: GraphExecution | GraphExecutionMeta;
|
|
||||||
agentActions: ButtonAction[];
|
|
||||||
onRun: (runID: GraphExecutionID) => void;
|
|
||||||
doDeleteRun: () => void;
|
|
||||||
doCreatePresetFromRun: () => void;
|
|
||||||
}): React.ReactNode {
|
|
||||||
const api = useBackendAPI();
|
|
||||||
const { formatCredits } = useCredits();
|
|
||||||
|
|
||||||
const runStatus: AgentRunStatus = useMemo(
|
|
||||||
() => agentRunStatusMap[run.status],
|
|
||||||
[run],
|
|
||||||
);
|
|
||||||
|
|
||||||
const {
|
|
||||||
pendingReviews,
|
|
||||||
isLoading: reviewsLoading,
|
|
||||||
refetch: refetchReviews,
|
|
||||||
} = usePendingReviewsForExecution(run.id);
|
|
||||||
|
|
||||||
const toastOnFail = useToastOnFail();
|
|
||||||
|
|
||||||
// Refetch pending reviews when execution status changes to REVIEW
|
|
||||||
useEffect(() => {
|
|
||||||
if (runStatus === "review" && run.id) {
|
|
||||||
refetchReviews();
|
|
||||||
}
|
|
||||||
}, [runStatus, run.id, refetchReviews]);
|
|
||||||
|
|
||||||
const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
|
|
||||||
if (!run) return [];
|
|
||||||
return [
|
|
||||||
{
|
|
||||||
label: "Status",
|
|
||||||
value: runStatus.charAt(0).toUpperCase() + runStatus.slice(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
label: "Started",
|
|
||||||
value: run.started_at
|
|
||||||
? `${formatDistanceToNow(run.started_at, { addSuffix: true })}, ${format(run.started_at, "HH:mm")}`
|
|
||||||
: "—",
|
|
||||||
},
|
|
||||||
...(run.stats
|
|
||||||
? [
|
|
||||||
{
|
|
||||||
label: "Duration",
|
|
||||||
value: formatDistanceStrict(0, run.stats.duration * 1000),
|
|
||||||
},
|
|
||||||
{ label: "Steps", value: run.stats.node_exec_count },
|
|
||||||
{ label: "Cost", value: formatCredits(run.stats.cost) },
|
|
||||||
]
|
|
||||||
: []),
|
|
||||||
];
|
|
||||||
}, [run, runStatus, formatCredits]);
|
|
||||||
|
|
||||||
const agentRunInputs:
|
|
||||||
| Record<
|
|
||||||
string,
|
|
||||||
{
|
|
||||||
title?: string;
|
|
||||||
/* type: BlockIOSubType; */
|
|
||||||
value: string | number | undefined;
|
|
||||||
}
|
|
||||||
>
|
|
||||||
| undefined = useMemo(() => {
|
|
||||||
if (!run.inputs) return undefined;
|
|
||||||
// TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
|
|
||||||
|
|
||||||
// Add type info from agent input schema
|
|
||||||
return Object.fromEntries(
|
|
||||||
Object.entries(run.inputs).map(([k, v]) => [
|
|
||||||
k,
|
|
||||||
{
|
|
||||||
title: graph.input_schema.properties[k]?.title,
|
|
||||||
// type: graph.input_schema.properties[k].type, // TODO: implement typed graph inputs
|
|
||||||
value: typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
|
|
||||||
},
|
|
||||||
]),
|
|
||||||
);
|
|
||||||
}, [graph, run]);
|
|
||||||
|
|
||||||
const runAgain = useCallback(() => {
|
|
||||||
if (
|
|
||||||
!run.inputs ||
|
|
||||||
!(graph.credentials_input_schema?.required ?? []).every(
|
|
||||||
(k) => k in (run.credential_inputs ?? {}),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (run.preset_id) {
|
|
||||||
return api
|
|
||||||
.executeLibraryAgentPreset(
|
|
||||||
run.preset_id,
|
|
||||||
run.inputs!,
|
|
||||||
run.credential_inputs!,
|
|
||||||
)
|
|
||||||
.then(({ id }) => {
|
|
||||||
analytics.sendDatafastEvent("run_agent", {
|
|
||||||
name: graph.name,
|
|
||||||
id: graph.id,
|
|
||||||
});
|
|
||||||
onRun(id);
|
|
||||||
})
|
|
||||||
.catch(toastOnFail("execute agent preset"));
|
|
||||||
}
|
|
||||||
|
|
||||||
return api
|
|
||||||
.executeGraph(
|
|
||||||
graph.id,
|
|
||||||
graph.version,
|
|
||||||
run.inputs!,
|
|
||||||
run.credential_inputs!,
|
|
||||||
"library",
|
|
||||||
)
|
|
||||||
.then(({ id }) => {
|
|
||||||
analytics.sendDatafastEvent("run_agent", {
|
|
||||||
name: graph.name,
|
|
||||||
id: graph.id,
|
|
||||||
});
|
|
||||||
onRun(id);
|
|
||||||
})
|
|
||||||
.catch(toastOnFail("execute agent"));
|
|
||||||
}, [api, graph, run, onRun, toastOnFail]);
|
|
||||||
|
|
||||||
const stopRun = useCallback(
|
|
||||||
() => api.stopGraphExecution(graph.id, run.id),
|
|
||||||
[api, graph.id, run.id],
|
|
||||||
);
|
|
||||||
|
|
||||||
const agentRunOutputs:
|
|
||||||
| Record<
|
|
||||||
string,
|
|
||||||
{
|
|
||||||
title?: string;
|
|
||||||
/* type: BlockIOSubType; */
|
|
||||||
values: Array<React.ReactNode>;
|
|
||||||
}
|
|
||||||
>
|
|
||||||
| null
|
|
||||||
| undefined = useMemo(() => {
|
|
||||||
if (!("outputs" in run)) return undefined;
|
|
||||||
if (!["running", "success", "failed", "stopped"].includes(runStatus))
|
|
||||||
return null;
|
|
||||||
|
|
||||||
// Add type info from agent input schema
|
|
||||||
return Object.fromEntries(
|
|
||||||
Object.entries(run.outputs).map(([k, vv]) => [
|
|
||||||
k,
|
|
||||||
{
|
|
||||||
title: graph.output_schema.properties[k].title,
|
|
||||||
/* type: agent.output_schema.properties[k].type */
|
|
||||||
values: vv.map((v) =>
|
|
||||||
typeof v == "object" ? JSON.stringify(v, undefined, 2) : v,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
]),
|
|
||||||
);
|
|
||||||
}, [graph, run, runStatus]);
|
|
||||||
|
|
||||||
const runActions: ButtonAction[] = useMemo(
|
|
||||||
() => [
|
|
||||||
...(["running", "queued"].includes(runStatus)
|
|
||||||
? ([
|
|
||||||
{
|
|
||||||
label: (
|
|
||||||
<>
|
|
||||||
<IconSquare className="mr-2 size-4" />
|
|
||||||
Stop run
|
|
||||||
</>
|
|
||||||
),
|
|
||||||
variant: "secondary",
|
|
||||||
callback: stopRun,
|
|
||||||
},
|
|
||||||
] satisfies ButtonAction[])
|
|
||||||
: []),
|
|
||||||
...(["success", "failed", "stopped"].includes(runStatus) &&
|
|
||||||
!graph.has_external_trigger &&
|
|
||||||
(graph.credentials_input_schema?.required ?? []).every(
|
|
||||||
(k) => k in (run.credential_inputs ?? {}),
|
|
||||||
)
|
|
||||||
? [
|
|
||||||
{
|
|
||||||
label: (
|
|
||||||
<>
|
|
||||||
<IconRefresh className="mr-2 size-4" />
|
|
||||||
Run again
|
|
||||||
</>
|
|
||||||
),
|
|
||||||
callback: runAgain,
|
|
||||||
dataTestId: "run-again-button",
|
|
||||||
},
|
|
||||||
]
|
|
||||||
: []),
|
|
||||||
...(agent.can_access_graph
|
|
||||||
? [
|
|
||||||
{
|
|
||||||
label: "Open run in builder",
|
|
||||||
href: `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}`,
|
|
||||||
},
|
|
||||||
]
|
|
||||||
: []),
|
|
||||||
{ label: "Create preset from run", callback: doCreatePresetFromRun },
|
|
||||||
{ label: "Delete run", variant: "secondary", callback: doDeleteRun },
|
|
||||||
],
|
|
||||||
[
|
|
||||||
runStatus,
|
|
||||||
runAgain,
|
|
||||||
stopRun,
|
|
||||||
doDeleteRun,
|
|
||||||
doCreatePresetFromRun,
|
|
||||||
graph.has_external_trigger,
|
|
||||||
graph.credentials_input_schema?.required,
|
|
||||||
agent.can_access_graph,
|
|
||||||
run.graph_id,
|
|
||||||
run.graph_version,
|
|
||||||
run.id,
|
|
||||||
],
|
|
||||||
);
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className="agpt-div flex gap-6">
|
|
||||||
<div className="flex flex-1 flex-col gap-4">
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">Info</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
|
|
||||||
<CardContent>
|
|
||||||
<div className="flex justify-stretch gap-4">
|
|
||||||
{infoStats.map(({ label, value }) => (
|
|
||||||
<div key={label} className="flex-1">
|
|
||||||
<p className="text-sm font-medium text-black">{label}</p>
|
|
||||||
<p className="text-sm text-neutral-600">{value}</p>
|
|
||||||
</div>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
{run.status === "FAILED" && (
|
|
||||||
<div className="mt-4 rounded-md border border-red-200 bg-red-50 p-3 dark:border-red-800 dark:bg-red-900/20">
|
|
||||||
<p className="text-sm text-red-800 dark:text-red-200">
|
|
||||||
<strong>Error:</strong>{" "}
|
|
||||||
{run.stats?.error ||
|
|
||||||
"The execution failed due to an internal error. You can re-run the agent to retry."}
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
|
|
||||||
{/* Smart Agent Execution Summary */}
|
|
||||||
{run.stats?.activity_status && (
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="flex items-center gap-2 font-poppins text-lg">
|
|
||||||
Task Summary
|
|
||||||
<TooltipProvider>
|
|
||||||
<Tooltip>
|
|
||||||
<TooltipTrigger asChild>
|
|
||||||
<IconCircleAlert className="size-4 cursor-help text-neutral-500 hover:text-neutral-700" />
|
|
||||||
</TooltipTrigger>
|
|
||||||
<TooltipContent>
|
|
||||||
<p className="max-w-xs">
|
|
||||||
This AI-generated summary describes how the agent
|
|
||||||
handled your task. It’s an experimental feature and may
|
|
||||||
occasionally be inaccurate.
|
|
||||||
</p>
|
|
||||||
</TooltipContent>
|
|
||||||
</Tooltip>
|
|
||||||
</TooltipProvider>
|
|
||||||
</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
<CardContent className="space-y-4">
|
|
||||||
<p className="text-sm leading-relaxed text-neutral-700">
|
|
||||||
{run.stats.activity_status}
|
|
||||||
</p>
|
|
||||||
|
|
||||||
{/* Correctness Score */}
|
|
||||||
{typeof run.stats.correctness_score === "number" && (
|
|
||||||
<div className="flex items-center gap-3 rounded-lg bg-neutral-50 p-3">
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="text-sm font-medium text-neutral-600">
|
|
||||||
Success Estimate:
|
|
||||||
</span>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<div className="relative h-2 w-16 overflow-hidden rounded-full bg-neutral-200">
|
|
||||||
<div
|
|
||||||
className={`h-full transition-all ${
|
|
||||||
run.stats.correctness_score >= 0.8
|
|
||||||
? "bg-green-500"
|
|
||||||
: run.stats.correctness_score >= 0.6
|
|
||||||
? "bg-yellow-500"
|
|
||||||
: run.stats.correctness_score >= 0.4
|
|
||||||
? "bg-orange-500"
|
|
||||||
: "bg-red-500"
|
|
||||||
}`}
|
|
||||||
style={{
|
|
||||||
width: `${Math.round(run.stats.correctness_score * 100)}%`,
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
<span className="text-sm font-medium">
|
|
||||||
{Math.round(run.stats.correctness_score * 100)}%
|
|
||||||
</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<TooltipProvider>
|
|
||||||
<Tooltip>
|
|
||||||
<TooltipTrigger asChild>
|
|
||||||
<IconCircleAlert className="size-4 cursor-help text-neutral-400 hover:text-neutral-600" />
|
|
||||||
</TooltipTrigger>
|
|
||||||
<TooltipContent>
|
|
||||||
<p className="max-w-xs">
|
|
||||||
AI-generated estimate of how well this execution
|
|
||||||
achieved its intended purpose. This score indicates
|
|
||||||
{run.stats.correctness_score >= 0.8
|
|
||||||
? " the agent was highly successful."
|
|
||||||
: run.stats.correctness_score >= 0.6
|
|
||||||
? " the agent was mostly successful with minor issues."
|
|
||||||
: run.stats.correctness_score >= 0.4
|
|
||||||
? " the agent was partially successful with some gaps."
|
|
||||||
: " the agent had limited success with significant issues."}
|
|
||||||
</p>
|
|
||||||
</TooltipContent>
|
|
||||||
</Tooltip>
|
|
||||||
</TooltipProvider>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{agentRunOutputs !== null && (
|
|
||||||
<AgentRunOutputView agentRunOutputs={agentRunOutputs} />
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Pending Reviews Section */}
|
|
||||||
{runStatus === "review" && (
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">
|
|
||||||
Pending Reviews ({pendingReviews.length})
|
|
||||||
</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
<CardContent>
|
|
||||||
{reviewsLoading ? (
|
|
||||||
<LoadingBox spinnerSize={12} className="h-24" />
|
|
||||||
) : pendingReviews.length > 0 ? (
|
|
||||||
<PendingReviewsList
|
|
||||||
reviews={pendingReviews}
|
|
||||||
onReviewComplete={refetchReviews}
|
|
||||||
emptyMessage="No pending reviews for this execution"
|
|
||||||
/>
|
|
||||||
) : (
|
|
||||||
<div className="py-4 text-neutral-600">
|
|
||||||
No pending reviews for this execution
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
)}
|
|
||||||
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">Input</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
<CardContent className="flex flex-col gap-4">
|
|
||||||
{agentRunInputs !== undefined ? (
|
|
||||||
Object.entries(agentRunInputs).map(([key, { title, value }]) => (
|
|
||||||
<div key={key} className="flex flex-col gap-1.5">
|
|
||||||
<label className="text-sm font-medium">{title || key}</label>
|
|
||||||
<Input value={value} className="rounded-full" disabled />
|
|
||||||
</div>
|
|
||||||
))
|
|
||||||
) : (
|
|
||||||
<LoadingBox spinnerSize={12} className="h-24" />
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Run / Agent Actions */}
|
|
||||||
<aside className="w-48 xl:w-56">
|
|
||||||
<div className="flex flex-col gap-8">
|
|
||||||
<ActionButtonGroup title="Run actions" actions={runActions} />
|
|
||||||
|
|
||||||
<ActionButtonGroup title="Agent actions" actions={agentActions} />
|
|
||||||
</div>
|
|
||||||
</aside>
|
|
||||||
</div>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,178 +0,0 @@
"use client";
|
|
||||||
|
|
||||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
|
||||||
import React, { useMemo } from "react";
|
|
||||||
|
|
||||||
import {
|
|
||||||
Card,
|
|
||||||
CardContent,
|
|
||||||
CardHeader,
|
|
||||||
CardTitle,
|
|
||||||
} from "@/components/__legacy__/ui/card";
|
|
||||||
|
|
||||||
import LoadingBox from "@/components/__legacy__/ui/loading";
|
|
||||||
import type { OutputMetadata } from "../../../../../../../../components/contextual/OutputRenderers";
|
|
||||||
import {
|
|
||||||
globalRegistry,
|
|
||||||
OutputActions,
|
|
||||||
OutputItem,
|
|
||||||
} from "../../../../../../../../components/contextual/OutputRenderers";
|
|
||||||
|
|
||||||
export function AgentRunOutputView({
|
|
||||||
agentRunOutputs,
|
|
||||||
}: {
|
|
||||||
agentRunOutputs:
|
|
||||||
| Record<
|
|
||||||
string,
|
|
||||||
{
|
|
||||||
title?: string;
|
|
||||||
/* type: BlockIOSubType; */
|
|
||||||
values: Array<React.ReactNode>;
|
|
||||||
}
|
|
||||||
>
|
|
||||||
| undefined;
|
|
||||||
}) {
|
|
||||||
const enableEnhancedOutputHandling = useGetFlag(
|
|
||||||
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Prepare items for the renderer system
|
|
||||||
const outputItems = useMemo(() => {
|
|
||||||
if (!agentRunOutputs) return [];
|
|
||||||
|
|
||||||
const items: Array<{
|
|
||||||
key: string;
|
|
||||||
label: string;
|
|
||||||
value: unknown;
|
|
||||||
metadata?: OutputMetadata;
|
|
||||||
renderer: any;
|
|
||||||
}> = [];
|
|
||||||
|
|
||||||
Object.entries(agentRunOutputs).forEach(([key, { title, values }]) => {
|
|
||||||
values.forEach((value, index) => {
|
|
||||||
// Enhanced metadata extraction
|
|
||||||
const metadata: OutputMetadata = {};
|
|
||||||
|
|
||||||
// Type guard to safely access properties
|
|
||||||
if (
|
|
||||||
typeof value === "object" &&
|
|
||||||
value !== null &&
|
|
||||||
!React.isValidElement(value)
|
|
||||||
) {
|
|
||||||
const objValue = value as any;
|
|
||||||
if (objValue.type) metadata.type = objValue.type;
|
|
||||||
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
|
|
||||||
if (objValue.filename) metadata.filename = objValue.filename;
|
|
||||||
}
|
|
||||||
|
|
||||||
const renderer = globalRegistry.getRenderer(value, metadata);
|
|
||||||
if (renderer) {
|
|
||||||
items.push({
|
|
||||||
key: `${key}-${index}`,
|
|
||||||
label: index === 0 ? title || key : "",
|
|
||||||
value,
|
|
||||||
metadata,
|
|
||||||
renderer,
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
const textRenderer = globalRegistry
|
|
||||||
.getAllRenderers()
|
|
||||||
.find((r) => r.name === "TextRenderer");
|
|
||||||
if (textRenderer) {
|
|
||||||
items.push({
|
|
||||||
key: `${key}-${index}`,
|
|
||||||
label: index === 0 ? title || key : "",
|
|
||||||
value: JSON.stringify(value, null, 2),
|
|
||||||
metadata,
|
|
||||||
renderer: textRenderer,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
return items;
|
|
||||||
}, [agentRunOutputs]);
|
|
||||||
|
|
||||||
return (
|
|
||||||
<>
|
|
||||||
{enableEnhancedOutputHandling ? (
|
|
||||||
<Card className="agpt-box" style={{ maxWidth: "950px" }}>
|
|
||||||
<CardHeader>
|
|
||||||
<div className="flex items-center justify-between">
|
|
||||||
<CardTitle className="font-poppins text-lg">Output</CardTitle>
|
|
||||||
{outputItems.length > 0 && (
|
|
||||||
<OutputActions
|
|
||||||
items={outputItems.map((item) => ({
|
|
||||||
value: item.value,
|
|
||||||
metadata: item.metadata,
|
|
||||||
renderer: item.renderer,
|
|
||||||
}))}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</CardHeader>
|
|
||||||
|
|
||||||
<CardContent
|
|
||||||
className="flex flex-col gap-4"
|
|
||||||
style={{ maxWidth: "660px" }}
|
|
||||||
>
|
|
||||||
{agentRunOutputs !== undefined ? (
|
|
||||||
outputItems.length > 0 ? (
|
|
||||||
outputItems.map((item) => (
|
|
||||||
<OutputItem
|
|
||||||
key={item.key}
|
|
||||||
value={item.value}
|
|
||||||
metadata={item.metadata}
|
|
||||||
renderer={item.renderer}
|
|
||||||
label={item.label}
|
|
||||||
/>
|
|
||||||
))
|
|
||||||
) : (
|
|
||||||
<p className="text-sm text-muted-foreground">
|
|
||||||
No outputs to display
|
|
||||||
</p>
|
|
||||||
)
|
|
||||||
) : (
|
|
||||||
<LoadingBox spinnerSize={12} className="h-24" />
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
) : (
|
|
||||||
<Card className="agpt-box" style={{ maxWidth: "950px" }}>
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">Output</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
|
|
||||||
<CardContent
|
|
||||||
className="flex flex-col gap-4"
|
|
||||||
style={{ maxWidth: "660px" }}
|
|
||||||
>
|
|
||||||
{agentRunOutputs !== undefined ? (
|
|
||||||
Object.entries(agentRunOutputs).map(
|
|
||||||
([key, { title, values }]) => (
|
|
||||||
<div key={key} className="flex flex-col gap-1.5">
|
|
||||||
<label className="text-sm font-medium">
|
|
||||||
{title || key}
|
|
||||||
</label>
|
|
||||||
{values.map((value, i) => (
|
|
||||||
<p
|
|
||||||
className="resize-none overflow-x-auto whitespace-pre-wrap break-words border-none text-sm text-neutral-700 disabled:cursor-not-allowed"
|
|
||||||
key={i}
|
|
||||||
>
|
|
||||||
{value}
|
|
||||||
</p>
|
|
||||||
))}
|
|
||||||
{/* TODO: pretty type-dependent rendering */}
|
|
||||||
</div>
|
|
||||||
),
|
|
||||||
)
|
|
||||||
) : (
|
|
||||||
<LoadingBox spinnerSize={12} className="h-24" />
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
)}
|
|
||||||
</>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,68 +0,0 @@
import React from "react";

import { Badge } from "@/components/__legacy__/ui/badge";

import { GraphExecutionMeta } from "@/lib/autogpt-server-api/types";

export type AgentRunStatus =
  | "success"
  | "failed"
  | "queued"
  | "running"
  | "stopped"
  | "scheduled"
  | "draft"
  | "review";

export const agentRunStatusMap: Record<
  GraphExecutionMeta["status"],
  AgentRunStatus
> = {
  INCOMPLETE: "draft",
  COMPLETED: "success",
  FAILED: "failed",
  QUEUED: "queued",
  RUNNING: "running",
  TERMINATED: "stopped",
  REVIEW: "review",
};

const statusData: Record<
  AgentRunStatus,
  { label: string; variant: keyof typeof statusStyles }
> = {
  success: { label: "Success", variant: "success" },
  running: { label: "Running", variant: "info" },
  failed: { label: "Failed", variant: "destructive" },
  queued: { label: "Queued", variant: "warning" },
  draft: { label: "Draft", variant: "secondary" },
  stopped: { label: "Stopped", variant: "secondary" },
  scheduled: { label: "Scheduled", variant: "secondary" },
  review: { label: "In Review", variant: "warning" },
};

const statusStyles = {
  success:
    "bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800",
  destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800",
  warning:
    "bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800",
  info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800",
  secondary:
    "bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800",
};

export function AgentRunStatusChip({
  status,
}: {
  status: AgentRunStatus;
}): React.ReactElement {
  return (
    <Badge
      variant="secondary"
      className={`text-xs font-medium ${statusStyles[statusData[status]?.variant]} rounded-[45px] px-[9px] py-[3px]`}
    >
      {statusData[status]?.label}
    </Badge>
  );
}
@@ -1,130 +0,0 @@
import React from "react";
import { formatDistanceToNow, isPast } from "date-fns";

import { cn } from "@/lib/utils";

import { Link2Icon, Link2OffIcon, MoreVertical } from "lucide-react";
import { Card, CardContent } from "@/components/__legacy__/ui/card";
import { Button } from "@/components/__legacy__/ui/button";
import {
  DropdownMenu,
  DropdownMenuContent,
  DropdownMenuItem,
  DropdownMenuTrigger,
} from "@/components/__legacy__/ui/dropdown-menu";

import { AgentStatus, AgentStatusChip } from "./agent-status-chip";
import { AgentRunStatus, AgentRunStatusChip } from "./agent-run-status-chip";
import { PushPinSimpleIcon } from "@phosphor-icons/react";

export type AgentRunSummaryProps = (
  | {
      type: "run";
      status: AgentRunStatus;
    }
  | {
      type: "preset";
      status?: undefined;
    }
  | {
      type: "preset.triggered";
      status: AgentStatus;
    }
  | {
      type: "schedule";
      status: "scheduled";
    }
) & {
  title: string;
  timestamp?: number | Date;
  selected?: boolean;
  onClick?: () => void;
  // onRename: () => void;
  onDelete: () => void;
  onPinAsPreset?: () => void;
  className?: string;
};

export function AgentRunSummaryCard({
  type,
  status,
  title,
  timestamp,
  selected = false,
  onClick,
  // onRename,
  onDelete,
  onPinAsPreset,
  className,
}: AgentRunSummaryProps): React.ReactElement {
  return (
    <Card
      className={cn(
        "agpt-rounded-card cursor-pointer border-zinc-300",
        selected ? "agpt-card-selected" : "",
        className,
      )}
      onClick={onClick}
    >
      <CardContent className="relative p-2.5 lg:p-4">
        {(type == "run" || type == "schedule") && (
          <AgentRunStatusChip status={status} />
        )}
        {type == "preset" && (
          <div className="flex items-center text-sm font-medium text-neutral-700">
            <PushPinSimpleIcon className="mr-1 size-4 text-foreground" /> Preset
          </div>
        )}
        {type == "preset.triggered" && (
          <div className="flex items-center justify-between">
            <AgentStatusChip status={status} />

            <div className="flex items-center text-sm font-medium text-neutral-700">
              {status == "inactive" ? (
                <Link2OffIcon className="mr-1 size-4 text-foreground" />
              ) : (
                <Link2Icon className="mr-1 size-4 text-foreground" />
              )}{" "}
              Trigger
            </div>
          </div>
        )}

        <div className="mt-5 flex items-center justify-between">
          <h3 className="truncate pr-2 text-base font-medium text-neutral-900">
            {title}
          </h3>

          <DropdownMenu>
            <DropdownMenuTrigger asChild>
              <Button variant="ghost" className="h-5 w-5 p-0">
                <MoreVertical className="h-5 w-5" />
              </Button>
            </DropdownMenuTrigger>
            <DropdownMenuContent>
              {onPinAsPreset && (
                <DropdownMenuItem onClick={onPinAsPreset}>
                  Pin as a preset
                </DropdownMenuItem>
              )}

              {/* <DropdownMenuItem onClick={onRename}>Rename</DropdownMenuItem> */}

              <DropdownMenuItem onClick={onDelete}>Delete</DropdownMenuItem>
            </DropdownMenuContent>
          </DropdownMenu>
        </div>

        {timestamp && (
          <p
            className="mt-1 text-sm font-normal text-neutral-500"
            title={new Date(timestamp).toString()}
          >
            {isPast(timestamp) ? "Ran" : "Runs in"}{" "}
            {formatDistanceToNow(timestamp, { addSuffix: true })}
          </p>
        )}
      </CardContent>
    </Card>
  );
}
@@ -1,237 +0,0 @@
"use client";
|
|
||||||
import { Plus } from "lucide-react";
|
|
||||||
import React, { useEffect, useState } from "react";
|
|
||||||
|
|
||||||
import {
|
|
||||||
GraphExecutionID,
|
|
||||||
GraphExecutionMeta,
|
|
||||||
LibraryAgent,
|
|
||||||
LibraryAgentPreset,
|
|
||||||
LibraryAgentPresetID,
|
|
||||||
Schedule,
|
|
||||||
ScheduleID,
|
|
||||||
} from "@/lib/autogpt-server-api";
|
|
||||||
import { cn } from "@/lib/utils";
|
|
||||||
|
|
||||||
import { Badge } from "@/components/__legacy__/ui/badge";
|
|
||||||
import { Button } from "@/components/atoms/Button/Button";
|
|
||||||
import LoadingBox, { LoadingSpinner } from "@/components/__legacy__/ui/loading";
|
|
||||||
import { Separator } from "@/components/__legacy__/ui/separator";
|
|
||||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
|
||||||
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
|
|
||||||
import { AgentRunsQuery } from "../use-agent-runs";
|
|
||||||
import { agentRunStatusMap } from "./agent-run-status-chip";
|
|
||||||
import { AgentRunSummaryCard } from "./agent-run-summary-card";
|
|
||||||
|
|
||||||
interface AgentRunsSelectorListProps {
|
|
||||||
agent: LibraryAgent;
|
|
||||||
agentRunsQuery: AgentRunsQuery;
|
|
||||||
agentPresets: LibraryAgentPreset[];
|
|
||||||
schedules: Schedule[];
|
|
||||||
selectedView: { type: "run" | "preset" | "schedule"; id?: string };
|
|
||||||
allowDraftNewRun?: boolean;
|
|
||||||
onSelectRun: (id: GraphExecutionID) => void;
|
|
||||||
onSelectPreset: (preset: LibraryAgentPresetID) => void;
|
|
||||||
onSelectSchedule: (id: ScheduleID) => void;
|
|
||||||
onSelectDraftNewRun: () => void;
|
|
||||||
doDeleteRun: (id: GraphExecutionMeta) => void;
|
|
||||||
doDeletePreset: (id: LibraryAgentPresetID) => void;
|
|
||||||
doDeleteSchedule: (id: ScheduleID) => void;
|
|
||||||
doCreatePresetFromRun?: (id: GraphExecutionID) => void;
|
|
||||||
className?: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
export function AgentRunsSelectorList({
|
|
||||||
agent,
|
|
||||||
agentRunsQuery: {
|
|
||||||
agentRuns,
|
|
||||||
agentRunCount,
|
|
||||||
agentRunsLoading,
|
|
||||||
hasMoreRuns,
|
|
||||||
fetchMoreRuns,
|
|
||||||
isFetchingMoreRuns,
|
|
||||||
},
|
|
||||||
agentPresets,
|
|
||||||
schedules,
|
|
||||||
selectedView,
|
|
||||||
allowDraftNewRun = true,
|
|
||||||
onSelectRun,
|
|
||||||
onSelectPreset,
|
|
||||||
onSelectSchedule,
|
|
||||||
onSelectDraftNewRun,
|
|
||||||
doDeleteRun,
|
|
||||||
doDeletePreset,
|
|
||||||
doDeleteSchedule,
|
|
||||||
doCreatePresetFromRun,
|
|
||||||
className,
|
|
||||||
}: AgentRunsSelectorListProps): React.ReactElement {
|
|
||||||
const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">(
|
|
||||||
"runs",
|
|
||||||
);
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
if (selectedView.type === "schedule") {
|
|
||||||
setActiveListTab("scheduled");
|
|
||||||
} else {
|
|
||||||
setActiveListTab("runs");
|
|
||||||
}
|
|
||||||
}, [selectedView]);
|
|
||||||
|
|
||||||
const listItemClasses = "h-28 w-72 lg:w-full lg:h-32";
|
|
||||||
|
|
||||||
return (
|
|
||||||
<aside className={cn("flex flex-col gap-4", className)}>
|
|
||||||
{allowDraftNewRun ? (
|
|
||||||
<Button
|
|
||||||
className={"mb-4 hidden lg:flex"}
|
|
||||||
onClick={onSelectDraftNewRun}
|
|
||||||
leftIcon={<Plus className="h-6 w-6" />}
|
|
||||||
>
|
|
||||||
New {agent.has_external_trigger ? "trigger" : "run"}
|
|
||||||
</Button>
|
|
||||||
) : null}
|
|
||||||
|
|
||||||
<div className="flex gap-2">
|
|
||||||
<Badge
|
|
||||||
variant={activeListTab === "runs" ? "secondary" : "outline"}
|
|
||||||
className="cursor-pointer gap-2 rounded-full text-base"
|
|
||||||
onClick={() => setActiveListTab("runs")}
|
|
||||||
>
|
|
||||||
<span>Runs</span>
|
|
||||||
<span className="text-neutral-600">
|
|
||||||
{agentRunCount ?? <LoadingSpinner className="size-4" />}
|
|
||||||
</span>
|
|
||||||
</Badge>
|
|
||||||
|
|
||||||
<Badge
|
|
||||||
variant={activeListTab === "scheduled" ? "secondary" : "outline"}
|
|
||||||
className="cursor-pointer gap-2 rounded-full text-base"
|
|
||||||
onClick={() => setActiveListTab("scheduled")}
|
|
||||||
>
|
|
||||||
<span>Scheduled</span>
|
|
||||||
<span className="text-neutral-600">{schedules.length}</span>
|
|
||||||
</Badge>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Runs / Schedules list */}
|
|
||||||
{agentRunsLoading && activeListTab === "runs" ? (
|
|
||||||
<LoadingBox className="h-28 w-full lg:h-[calc(100vh-300px)] lg:w-72 xl:w-80" />
|
|
||||||
) : (
|
|
||||||
<ScrollArea
|
|
||||||
className="w-full lg:h-[calc(100vh-300px)] lg:w-72 xl:w-80"
|
|
||||||
orientation={window.innerWidth >= 1024 ? "vertical" : "horizontal"}
|
|
||||||
>
|
|
||||||
<InfiniteScroll
|
|
||||||
direction={window.innerWidth >= 1024 ? "vertical" : "horizontal"}
|
|
||||||
hasNextPage={hasMoreRuns}
|
|
||||||
fetchNextPage={fetchMoreRuns}
|
|
||||||
isFetchingNextPage={isFetchingMoreRuns}
|
|
||||||
>
|
|
||||||
<div className="flex items-center gap-2 lg:flex-col">
|
|
||||||
{/* New Run button - only in small layouts */}
|
|
||||||
{allowDraftNewRun && (
|
|
||||||
<Button
|
|
||||||
size="large"
|
|
||||||
className={
|
|
||||||
"flex h-12 w-40 items-center gap-2 py-6 lg:hidden " +
|
|
||||||
(selectedView.type == "run" && !selectedView.id
|
|
||||||
? "agpt-card-selected text-accent"
|
|
||||||
: "")
|
|
||||||
}
|
|
||||||
onClick={onSelectDraftNewRun}
|
|
||||||
leftIcon={<Plus className="h-6 w-6" />}
|
|
||||||
>
|
|
||||||
New {agent.has_external_trigger ? "trigger" : "run"}
|
|
||||||
</Button>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{activeListTab === "runs" ? (
|
|
||||||
<>
|
|
||||||
{agentPresets
|
|
||||||
.filter((preset) => preset.webhook) // Triggers
|
|
||||||
.toSorted(
|
|
||||||
(a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
|
|
||||||
)
|
|
||||||
.map((preset) => (
|
|
||||||
<AgentRunSummaryCard
|
|
||||||
className={cn(listItemClasses, "lg:h-auto")}
|
|
||||||
key={preset.id}
|
|
||||||
type="preset.triggered"
|
|
||||||
status={preset.is_active ? "active" : "inactive"}
|
|
||||||
title={preset.name}
|
|
||||||
// timestamp={preset.last_run_time} // TODO: implement this
|
|
||||||
selected={selectedView.id === preset.id}
|
|
||||||
onClick={() => onSelectPreset(preset.id)}
|
|
||||||
onDelete={() => doDeletePreset(preset.id)}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
{agentPresets
|
|
||||||
.filter((preset) => !preset.webhook) // Presets
|
|
||||||
.toSorted(
|
|
||||||
(a, b) => b.updated_at.getTime() - a.updated_at.getTime(),
|
|
||||||
)
|
|
||||||
.map((preset) => (
|
|
||||||
<AgentRunSummaryCard
|
|
||||||
className={cn(listItemClasses, "lg:h-auto")}
|
|
||||||
key={preset.id}
|
|
||||||
type="preset"
|
|
||||||
title={preset.name}
|
|
||||||
// timestamp={preset.last_run_time} // TODO: implement this
|
|
||||||
selected={selectedView.id === preset.id}
|
|
||||||
onClick={() => onSelectPreset(preset.id)}
|
|
||||||
onDelete={() => doDeletePreset(preset.id)}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
{agentPresets.length > 0 && <Separator className="my-1" />}
|
|
||||||
{agentRuns
|
|
||||||
.toSorted((a, b) => {
|
|
||||||
const aTime = a.started_at?.getTime() ?? 0;
|
|
||||||
const bTime = b.started_at?.getTime() ?? 0;
|
|
||||||
return bTime - aTime;
|
|
||||||
})
|
|
||||||
.map((run) => (
|
|
||||||
<AgentRunSummaryCard
|
|
||||||
className={listItemClasses}
|
|
||||||
key={run.id}
|
|
||||||
type="run"
|
|
||||||
status={agentRunStatusMap[run.status]}
|
|
||||||
title={
|
|
||||||
(run.preset_id
|
|
||||||
? agentPresets.find((p) => p.id == run.preset_id)
|
|
||||||
?.name
|
|
||||||
: null) ?? agent.name
|
|
||||||
}
|
|
||||||
timestamp={run.started_at ?? undefined}
|
|
||||||
selected={selectedView.id === run.id}
|
|
||||||
onClick={() => onSelectRun(run.id)}
|
|
||||||
onDelete={() => doDeleteRun(run as GraphExecutionMeta)}
|
|
||||||
onPinAsPreset={
|
|
||||||
doCreatePresetFromRun
|
|
||||||
? () => doCreatePresetFromRun(run.id)
|
|
||||||
: undefined
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
</>
|
|
||||||
) : (
|
|
||||||
schedules.map((schedule) => (
|
|
||||||
<AgentRunSummaryCard
|
|
||||||
className={listItemClasses}
|
|
||||||
key={schedule.id}
|
|
||||||
type="schedule"
|
|
||||||
status="scheduled" // TODO: implement active/inactive status for schedules
|
|
||||||
title={schedule.name}
|
|
||||||
timestamp={schedule.next_run_time}
|
|
||||||
selected={selectedView.id === schedule.id}
|
|
||||||
onClick={() => onSelectSchedule(schedule.id)}
|
|
||||||
onDelete={() => doDeleteSchedule(schedule.id)}
|
|
||||||
/>
|
|
||||||
))
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</InfiniteScroll>
|
|
||||||
</ScrollArea>
|
|
||||||
)}
|
|
||||||
</aside>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,180 +0,0 @@
"use client";
|
|
||||||
import React, { useCallback, useMemo } from "react";
|
|
||||||
|
|
||||||
import {
|
|
||||||
Graph,
|
|
||||||
GraphExecutionID,
|
|
||||||
Schedule,
|
|
||||||
ScheduleID,
|
|
||||||
} from "@/lib/autogpt-server-api";
|
|
||||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
|
||||||
|
|
||||||
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
|
|
||||||
import type { ButtonAction } from "@/components/__legacy__/types";
|
|
||||||
import {
|
|
||||||
Card,
|
|
||||||
CardContent,
|
|
||||||
CardHeader,
|
|
||||||
CardTitle,
|
|
||||||
} from "@/components/__legacy__/ui/card";
|
|
||||||
import { IconCross } from "@/components/__legacy__/ui/icons";
|
|
||||||
import { Input } from "@/components/__legacy__/ui/input";
|
|
||||||
import LoadingBox from "@/components/__legacy__/ui/loading";
|
|
||||||
import { useToastOnFail } from "@/components/molecules/Toast/use-toast";
|
|
||||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
|
||||||
import { formatScheduleTime } from "@/lib/timezone-utils";
|
|
||||||
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
|
|
||||||
import { PlayIcon } from "lucide-react";
|
|
||||||
|
|
||||||
import { AgentRunStatus } from "./agent-run-status-chip";
|
|
||||||
|
|
||||||
export function AgentScheduleDetailsView({
|
|
||||||
graph,
|
|
||||||
schedule,
|
|
||||||
agentActions,
|
|
||||||
onForcedRun,
|
|
||||||
doDeleteSchedule,
|
|
||||||
}: {
|
|
||||||
graph: Graph;
|
|
||||||
schedule: Schedule;
|
|
||||||
agentActions: ButtonAction[];
|
|
||||||
onForcedRun: (runID: GraphExecutionID) => void;
|
|
||||||
doDeleteSchedule: (scheduleID: ScheduleID) => void;
|
|
||||||
}): React.ReactNode {
|
|
||||||
const api = useBackendAPI();
|
|
||||||
|
|
||||||
const selectedRunStatus: AgentRunStatus = "scheduled";
|
|
||||||
|
|
||||||
const toastOnFail = useToastOnFail();
|
|
||||||
|
|
||||||
// Get user's timezone for displaying schedule times
|
|
||||||
const userTimezone = useUserTimezone();
|
|
||||||
|
|
||||||
const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => {
|
|
||||||
return [
|
|
||||||
{
|
|
||||||
label: "Status",
|
|
||||||
value:
|
|
||||||
selectedRunStatus.charAt(0).toUpperCase() +
|
|
||||||
selectedRunStatus.slice(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
label: "Schedule",
|
|
||||||
value: humanizeCronExpression(schedule.cron),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
label: "Next run",
|
|
||||||
value: formatScheduleTime(schedule.next_run_time, userTimezone),
|
|
||||||
},
|
|
||||||
];
|
|
||||||
}, [schedule, selectedRunStatus, userTimezone]);
|
|
||||||
|
|
||||||
const agentRunInputs: Record<
|
|
||||||
string,
|
|
||||||
{ title?: string; /* type: BlockIOSubType; */ value: any }
|
|
||||||
> = useMemo(() => {
|
|
||||||
// TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168
|
|
||||||
|
|
||||||
// Add type info from agent input schema
|
|
||||||
return Object.fromEntries(
|
|
||||||
Object.entries(schedule.input_data).map(([k, v]) => [
|
|
||||||
k,
|
|
||||||
{
|
|
||||||
title: graph.input_schema.properties[k].title,
|
|
||||||
/* TODO: type: agent.input_schema.properties[k].type */
|
|
||||||
value: v,
|
|
||||||
},
|
|
||||||
]),
|
|
||||||
);
|
|
||||||
}, [graph, schedule]);
|
|
||||||
|
|
||||||
const runNow = useCallback(
|
|
||||||
() =>
|
|
||||||
api
|
|
||||||
.executeGraph(
|
|
||||||
graph.id,
|
|
||||||
graph.version,
|
|
||||||
schedule.input_data,
|
|
||||||
schedule.input_credentials,
|
|
||||||
"library",
|
|
||||||
)
|
|
||||||
.then((run) => onForcedRun(run.id))
|
|
||||||
.catch(toastOnFail("execute agent")),
|
|
||||||
[api, graph, schedule, onForcedRun, toastOnFail],
|
|
||||||
);
|
|
||||||
|
|
||||||
const runActions: ButtonAction[] = useMemo(
|
|
||||||
() => [
|
|
||||||
{
|
|
||||||
label: (
|
|
||||||
<>
|
|
||||||
<PlayIcon className="mr-2 size-4" />
|
|
||||||
Run now
|
|
||||||
</>
|
|
||||||
),
|
|
||||||
callback: runNow,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
label: (
|
|
||||||
<>
|
|
||||||
<IconCross className="mr-2 size-4 px-0.5" />
|
|
||||||
Delete schedule
|
|
||||||
</>
|
|
||||||
),
|
|
||||||
callback: () => doDeleteSchedule(schedule.id),
|
|
||||||
variant: "destructive",
|
|
||||||
},
|
|
||||||
],
|
|
||||||
[runNow],
|
|
||||||
);
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className="agpt-div flex gap-6">
|
|
||||||
<div className="flex flex-1 flex-col gap-4">
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">Info</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
|
|
||||||
<CardContent>
|
|
||||||
<div className="flex justify-stretch gap-4">
|
|
||||||
{infoStats.map(({ label, value }) => (
|
|
||||||
<div key={label} className="flex-1">
|
|
||||||
<p className="text-sm font-medium text-black">{label}</p>
|
|
||||||
<p className="text-sm text-neutral-600">{value}</p>
|
|
||||||
</div>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
|
|
||||||
<Card className="agpt-box">
|
|
||||||
<CardHeader>
|
|
||||||
<CardTitle className="font-poppins text-lg">Input</CardTitle>
|
|
||||||
</CardHeader>
|
|
||||||
<CardContent className="flex flex-col gap-4">
|
|
||||||
{agentRunInputs !== undefined ? (
|
|
||||||
Object.entries(agentRunInputs).map(([key, { title, value }]) => (
|
|
||||||
<div key={key} className="flex flex-col gap-1.5">
|
|
||||||
<label className="text-sm font-medium">{title || key}</label>
|
|
||||||
<Input value={value} className="rounded-full" disabled />
|
|
||||||
</div>
|
|
||||||
))
|
|
||||||
) : (
|
|
||||||
<LoadingBox spinnerSize={12} className="h-24" />
|
|
||||||
)}
|
|
||||||
</CardContent>
|
|
||||||
</Card>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Run / Agent Actions */}
|
|
||||||
<aside className="w-48 xl:w-56">
|
|
||||||
<div className="flex flex-col gap-8">
|
|
||||||
<ActionButtonGroup title="Run actions" actions={runActions} />
|
|
||||||
|
|
||||||
<ActionButtonGroup title="Agent actions" actions={agentActions} />
|
|
||||||
</div>
|
|
||||||
</aside>
|
|
||||||
</div>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,100 +0,0 @@
"use client";
|
|
||||||
|
|
||||||
import React, { useState } from "react";
|
|
||||||
import { Button } from "@/components/__legacy__/ui/button";
|
|
||||||
import {
|
|
||||||
Dialog,
|
|
||||||
DialogContent,
|
|
||||||
DialogDescription,
|
|
||||||
DialogFooter,
|
|
||||||
DialogHeader,
|
|
||||||
DialogTitle,
|
|
||||||
} from "@/components/__legacy__/ui/dialog";
|
|
||||||
import { Input } from "@/components/__legacy__/ui/input";
|
|
||||||
import { Textarea } from "@/components/__legacy__/ui/textarea";
|
|
||||||
|
|
||||||
interface CreatePresetDialogProps {
|
|
||||||
open: boolean;
|
|
||||||
onOpenChange: (open: boolean) => void;
|
|
||||||
onConfirm: (name: string, description: string) => Promise<void> | void;
|
|
||||||
}
|
|
||||||
|
|
||||||
export function CreatePresetDialog({
|
|
||||||
open,
|
|
||||||
onOpenChange,
|
|
||||||
onConfirm,
|
|
||||||
}: CreatePresetDialogProps) {
|
|
||||||
const [name, setName] = useState("");
|
|
||||||
const [description, setDescription] = useState("");
|
|
||||||
|
|
||||||
const handleSubmit = async () => {
|
|
||||||
if (name.trim()) {
|
|
||||||
await onConfirm(name.trim(), description.trim());
|
|
||||||
setName("");
|
|
||||||
setDescription("");
|
|
||||||
onOpenChange(false);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const handleCancel = () => {
|
|
||||||
setName("");
|
|
||||||
setDescription("");
|
|
||||||
onOpenChange(false);
|
|
||||||
};
|
|
||||||
|
|
||||||
const handleKeyDown = (e: React.KeyboardEvent) => {
|
|
||||||
if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) {
|
|
||||||
e.preventDefault();
|
|
||||||
handleSubmit();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
return (
|
|
||||||
<Dialog open={open} onOpenChange={onOpenChange}>
|
|
||||||
<DialogContent className="sm:max-w-[425px]">
|
|
||||||
<DialogHeader>
|
|
||||||
<DialogTitle>Create Preset</DialogTitle>
|
|
||||||
<DialogDescription>
|
|
||||||
Give your preset a name and description to help identify it later.
|
|
||||||
</DialogDescription>
|
|
||||||
</DialogHeader>
|
|
||||||
<div className="grid gap-4 py-4">
|
|
||||||
<div className="grid gap-2">
|
|
||||||
<label htmlFor="preset-name" className="text-sm font-medium">
|
|
||||||
Name *
|
|
||||||
</label>
|
|
||||||
<Input
|
|
||||||
id="preset-name"
|
|
||||||
placeholder="Enter preset name"
|
|
||||||
value={name}
|
|
||||||
onChange={(e) => setName(e.target.value)}
|
|
||||||
onKeyDown={handleKeyDown}
|
|
||||||
autoFocus
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
<div className="grid gap-2">
|
|
||||||
<label htmlFor="preset-description" className="text-sm font-medium">
|
|
||||||
Description
|
|
||||||
</label>
|
|
||||||
<Textarea
|
|
||||||
id="preset-description"
|
|
||||||
placeholder="Optional description"
|
|
||||||
value={description}
|
|
||||||
onChange={(e) => setDescription(e.target.value)}
|
|
||||||
onKeyDown={handleKeyDown}
|
|
||||||
rows={3}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<DialogFooter>
|
|
||||||
<Button variant="outline" onClick={handleCancel}>
|
|
||||||
Cancel
|
|
||||||
</Button>
|
|
||||||
<Button onClick={handleSubmit} disabled={!name.trim()}>
|
|
||||||
Create Preset
|
|
||||||
</Button>
|
|
||||||
</DialogFooter>
|
|
||||||
</DialogContent>
|
|
||||||
</Dialog>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,210 +0,0 @@
import {
  GraphExecutionMeta as LegacyGraphExecutionMeta,
  GraphID,
  GraphExecutionID,
} from "@/lib/autogpt-server-api";
import { getQueryClient } from "@/lib/react-query/queryClient";
import {
  getPaginatedTotalCount,
  getPaginationNextPageNumber,
  unpaginate,
} from "@/app/api/helpers";
import {
  getV1ListGraphExecutionsResponse,
  getV1ListGraphExecutionsResponse200,
  useGetV1ListGraphExecutionsInfinite,
} from "@/app/api/__generated__/endpoints/graphs/graphs";
import { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
import { GraphExecutionMeta as RawGraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";

export type GraphExecutionMeta = Omit<
  RawGraphExecutionMeta,
  "id" | "user_id" | "graph_id" | "preset_id" | "stats"
> &
  Pick<
    LegacyGraphExecutionMeta,
    "id" | "user_id" | "graph_id" | "preset_id" | "stats"
  >;

/** Hook to fetch runs for a specific graph, with support for infinite scroll.
 *
 * @param graphID - The ID of the graph to fetch agent runs for. This parameter is
 *   optional in the sense that the hook doesn't run unless it is passed.
 *   This way, it can be used in components where the graph ID is not
 *   immediately available.
 */
export const useAgentRunsInfinite = (graphID?: GraphID) => {
  const queryClient = getQueryClient();
  const {
    data: queryResults,
    refetch: refetchRuns,
    isPending: agentRunsLoading,
    isRefetching: agentRunsReloading,
    hasNextPage: hasMoreRuns,
    fetchNextPage: fetchMoreRuns,
    isFetchingNextPage: isFetchingMoreRuns,
    queryKey,
  } = useGetV1ListGraphExecutionsInfinite(
    graphID!,
    { page: 1, page_size: 20 },
    {
      query: {
        getNextPageParam: getPaginationNextPageNumber,

        // Prevent query from running if graphID is not available (yet)
        ...(!graphID
          ? {
              enabled: false,
              queryFn: () =>
                // Fake empty response if graphID is not available (yet)
                Promise.resolve({
                  status: 200,
                  data: {
                    executions: [],
                    pagination: {
                      current_page: 1,
                      page_size: 20,
                      total_items: 0,
                      total_pages: 0,
                    },
                  },
                  headers: new Headers(),
                } satisfies getV1ListGraphExecutionsResponse),
            }
          : {}),
      },
    },
    queryClient,
  );

  const agentRuns = queryResults ? unpaginate(queryResults, "executions") : [];
  const agentRunCount = getPaginatedTotalCount(queryResults);

  const upsertAgentRun = (newAgentRun: GraphExecutionMeta) => {
    queryClient.setQueryData(
      queryKey,
      (currentQueryData: typeof queryResults) => {
        if (!currentQueryData?.pages || agentRunCount === undefined)
          return currentQueryData;

        const exists = currentQueryData.pages.some((page) => {
          if (page.status !== 200) return false;

          const response = page.data;
          return response.executions.some((run) => run.id === newAgentRun.id);
        });
        if (exists) {
          // If the run already exists, we update it
          return {
            ...currentQueryData,
            pages: currentQueryData.pages.map((page) => {
              if (page.status !== 200) return page;
              const response = page.data;
              const executions = response.executions;

              const index = executions.findIndex(
                (run) => run.id === newAgentRun.id,
              );
              if (index === -1) return page;

              const newExecutions = [...executions];
              newExecutions[index] = newAgentRun;

              return {
                ...page,
                data: {
                  ...response,
                  executions: newExecutions,
                },
              } satisfies getV1ListGraphExecutionsResponse;
            }),
          };
        }

        // If the run does not exist, we add it to the first page
        const page = currentQueryData
          .pages[0] as getV1ListGraphExecutionsResponse200 & {
          headers: Headers;
        };
        const updatedExecutions = [newAgentRun, ...page.data.executions];
        const updatedPage = {
          ...page,
          data: {
            ...page.data,
            executions: updatedExecutions,
          },
        } satisfies getV1ListGraphExecutionsResponse;
        const updatedPages = [updatedPage, ...currentQueryData.pages.slice(1)];
        return {
          ...currentQueryData,
          pages: updatedPages.map(
            // Increment the total runs count in the pagination info of all pages
            (page) =>
              page.status === 200
                ? {
                    ...page,
                    data: {
                      ...page.data,
                      pagination: {
                        ...page.data.pagination,
                        total_items: agentRunCount + 1,
                      },
                    },
                  }
                : page,
          ),
        };
      },
    );
  };

  const removeAgentRun = (runID: GraphExecutionID) => {
    queryClient.setQueryData(
      [queryKey, { page: 1, page_size: 20 }],
      (currentQueryData: typeof queryResults) => {
        if (!currentQueryData?.pages) return currentQueryData;

        let found = false;
        return {
          ...currentQueryData,
          pages: currentQueryData.pages.map((page) => {
            const response = page.data as GraphExecutionsPaginated;
            const filteredExecutions = response.executions.filter(
              (run) => run.id !== runID,
            );
            if (filteredExecutions.length < response.executions.length) {
              found = true;
            }

            return {
              ...page,
              data: {
                ...response,
                executions: filteredExecutions,
                pagination: {
                  ...response.pagination,
                  total_items:
                    response.pagination.total_items - (found ? 1 : 0),
                },
              },
            };
          }),
        };
      },
    );
  };

  return {
    agentRuns: agentRuns as GraphExecutionMeta[],
    refetchRuns,
    agentRunCount,
    agentRunsLoading: agentRunsLoading || agentRunsReloading,
    hasMoreRuns,
    fetchMoreRuns,
    isFetchingMoreRuns,
    upsertAgentRun,
    removeAgentRun,
  };
};

export type AgentRunsQuery = ReturnType<typeof useAgentRunsInfinite>;
@@ -1,7 +0,0 @@
"use client";
|
|
||||||
|
|
||||||
import { OldAgentLibraryView } from "../../agents/[id]/components/OldAgentLibraryView/OldAgentLibraryView";
|
|
||||||
|
|
||||||
export default function OldAgentLibraryPage() {
|
|
||||||
return <OldAgentLibraryView />;
|
|
||||||
}
|
|
||||||
@@ -1053,6 +1053,7 @@
           "$ref": "#/components/schemas/ClarificationNeededResponse"
         },
         { "$ref": "#/components/schemas/BlockListResponse" },
+        { "$ref": "#/components/schemas/BlockDetailsResponse" },
         { "$ref": "#/components/schemas/BlockOutputResponse" },
         { "$ref": "#/components/schemas/DocSearchResultsResponse" },
         { "$ref": "#/components/schemas/DocPageResponse" },
@@ -6958,6 +6959,58 @@
         "enum": ["run", "byte", "second"],
         "title": "BlockCostType"
       },
+      "BlockDetails": {
+        "properties": {
+          "id": { "type": "string", "title": "Id" },
+          "name": { "type": "string", "title": "Name" },
+          "description": { "type": "string", "title": "Description" },
+          "inputs": {
+            "additionalProperties": true,
+            "type": "object",
+            "title": "Inputs",
+            "default": {}
+          },
+          "outputs": {
+            "additionalProperties": true,
+            "type": "object",
+            "title": "Outputs",
+            "default": {}
+          },
+          "credentials": {
+            "items": { "$ref": "#/components/schemas/CredentialsMetaInput" },
+            "type": "array",
+            "title": "Credentials",
+            "default": []
+          }
+        },
+        "type": "object",
+        "required": ["id", "name", "description"],
+        "title": "BlockDetails",
+        "description": "Detailed block information."
+      },
+      "BlockDetailsResponse": {
+        "properties": {
+          "type": {
+            "$ref": "#/components/schemas/ResponseType",
+            "default": "block_details"
+          },
+          "message": { "type": "string", "title": "Message" },
+          "session_id": {
+            "anyOf": [{ "type": "string" }, { "type": "null" }],
+            "title": "Session Id"
+          },
+          "block": { "$ref": "#/components/schemas/BlockDetails" },
+          "user_authenticated": {
+            "type": "boolean",
+            "title": "User Authenticated",
+            "default": false
+          }
+        },
+        "type": "object",
+        "required": ["message", "block"],
+        "title": "BlockDetailsResponse",
+        "description": "Response for block details (first run_block attempt)."
+      },
       "BlockInfo": {
         "properties": {
           "id": { "type": "string", "title": "Id" },
@@ -7013,62 +7066,13 @@
         "properties": {
           "id": { "type": "string", "title": "Id" },
           "name": { "type": "string", "title": "Name" },
-          "description": { "type": "string", "title": "Description" },
+          "description": { "type": "string", "title": "Description" }
-          "categories": {
-            "items": { "type": "string" },
-            "type": "array",
-            "title": "Categories"
-          },
-          "input_schema": {
-            "additionalProperties": true,
-            "type": "object",
-            "title": "Input Schema"
-          },
-          "output_schema": {
-            "additionalProperties": true,
-            "type": "object",
-            "title": "Output Schema"
-          },
-          "required_inputs": {
-            "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
-            "type": "array",
-            "title": "Required Inputs",
-            "description": "List of required input fields for this block"
-          }
         },
         "type": "object",
-        "required": [
+        "required": ["id", "name", "description"],
-          "id",
-          "name",
-          "description",
-          "categories",
-          "input_schema",
-          "output_schema"
-        ],
         "title": "BlockInfoSummary",
         "description": "Summary of a block for search results."
       },
-      "BlockInputFieldInfo": {
-        "properties": {
-          "name": { "type": "string", "title": "Name" },
-          "type": { "type": "string", "title": "Type" },
-          "description": {
-            "type": "string",
-            "title": "Description",
-            "default": ""
-          },
-          "required": {
-            "type": "boolean",
-            "title": "Required",
-            "default": false
-          },
-          "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" }
-        },
-        "type": "object",
-        "required": ["name", "type"],
-        "title": "BlockInputFieldInfo",
-        "description": "Information about a block input field."
-      },
       "BlockListResponse": {
         "properties": {
           "type": {
@@ -7086,12 +7090,7 @@
           "title": "Blocks"
         },
         "count": { "type": "integer", "title": "Count" },
-        "query": { "type": "string", "title": "Query" },
+        "query": { "type": "string", "title": "Query" }
-        "usage_hint": {
-          "type": "string",
-          "title": "Usage Hint",
-          "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
-        }
       },
       "type": "object",
       "required": ["message", "blocks", "count", "query"],
@@ -10484,6 +10483,7 @@
         "agent_saved",
         "clarification_needed",
         "block_list",
+        "block_details",
         "block_output",
         "doc_search_results",
         "doc_page",
@@ -10495,7 +10495,9 @@
         "operation_started",
         "operation_pending",
         "operation_in_progress",
-        "input_validation_error"
+        "input_validation_error",
+        "feature_request_search",
+        "feature_request_created"
       ],
       "title": "ResponseType",
       "description": "Types of tool responses."
@@ -2,7 +2,7 @@ import { useEffect, useState } from "react";
 import { Input } from "@/components/__legacy__/ui/input";
 import { Button } from "@/components/__legacy__/ui/button";
 import { useToast } from "@/components/molecules/Toast/use-toast";
-import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler";
+import { CronScheduler } from "@/components/contextual/CronScheduler/cron-scheduler";
 import { Dialog } from "@/components/molecules/Dialog/Dialog";
 import { getTimezoneDisplayName } from "@/lib/timezone-utils";
 import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
@@ -1,6 +1,6 @@
 "use client";

-import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
+import { CronExpressionDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
 import { Form, FormField } from "@/components/__legacy__/ui/form";
 import { Button } from "@/components/atoms/Button/Button";
 import { Input } from "@/components/atoms/Input/Input";
@@ -7,7 +7,6 @@ import { useFlags } from "launchdarkly-react-client-sdk";
 export enum Flag {
   BETA_BLOCKS = "beta-blocks",
   NEW_BLOCK_MENU = "new-block-menu",
-  NEW_AGENT_RUNS = "new-agent-runs",
   GRAPH_SEARCH = "graph-search",
   ENABLE_ENHANCED_OUTPUT_HANDLING = "enable-enhanced-output-handling",
   SHARE_EXECUTION_RESULTS = "share-execution-results",
@@ -22,7 +21,6 @@ const isPwMockEnabled = process.env.NEXT_PUBLIC_PW_TEST === "true";
 const defaultFlags = {
   [Flag.BETA_BLOCKS]: [],
   [Flag.NEW_BLOCK_MENU]: false,
-  [Flag.NEW_AGENT_RUNS]: false,
   [Flag.GRAPH_SEARCH]: false,
   [Flag.ENABLE_ENHANCED_OUTPUT_HANDLING]: false,
   [Flag.SHARE_EXECUTION_RESULTS]: false,
165 plans/SECRT-1950-claude-ci-optimizations.md Normal file
@@ -0,0 +1,165 @@
# Implementation Plan: SECRT-1950 - Apply E2E CI Optimizations to Claude Code Workflows

## Ticket

[SECRT-1950](https://linear.app/autogpt/issue/SECRT-1950)

## Summary

Apply Pwuts's CI performance optimizations from PR #12090 to Claude Code workflows.

## Reference PR

https://github.com/Significant-Gravitas/AutoGPT/pull/12090

---

## Analysis

### Current State (claude.yml)

**pnpm caching (lines 104-118):**

```yaml
- name: Set up Node.js
  uses: actions/setup-node@v6
  with:
    node-version: "22"

- name: Enable corepack
  run: corepack enable

- name: Set pnpm store directory
  run: |
    pnpm config set store-dir ~/.pnpm-store
    echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV

- name: Cache frontend dependencies
  uses: actions/cache@v5
  with:
    path: ~/.pnpm-store
    key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
    restore-keys: |
      ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
      ${{ runner.os }}-pnpm-
```

**Docker setup (lines 134-165):**

- Uses `docker-buildx-action@v3`
- Has manual Docker image caching via `actions/cache`
- Runs `docker compose up` without buildx bake optimization

### Pwuts's Optimizations (PR #12090)

1. **Simplified pnpm caching** - Use `setup-node` built-in cache:

   ```yaml
   - name: Enable corepack
     run: corepack enable

   - name: Set up Node
     uses: actions/setup-node@v6
     with:
       node-version: "22.18.0"
       cache: "pnpm"
       cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
   ```

2. **Docker build caching via buildx bake**:

   ```yaml
   - name: Set up Docker Buildx
     uses: docker/setup-buildx-action@v3
     with:
       driver: docker-container
       driver-opts: network=host

   - name: Expose GHA cache to docker buildx CLI
     uses: crazy-max/ghaction-github-runtime@v3

   - name: Build Docker images (with cache)
     run: |
       pip install pyyaml
       docker compose -f docker-compose.yml config > docker-compose.resolved.yml
       python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
         --source docker-compose.resolved.yml \
         --cache-from "type=gha" \
         --cache-to "type=gha,mode=max" \
         ...
       docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
   ```

---

## Proposed Changes

### 1. Update pnpm caching in `claude.yml`

**Before:**

- Manual cache key generation
- Separate `actions/cache` step
- Manual pnpm store directory config

**After:**

- Use `setup-node` built-in `cache: "pnpm"` option
- Remove manual cache step
- Keep `corepack enable` before `setup-node`

A sketch of the resulting steps appears in the appendix below.

### 2. Update Docker build in `claude.yml`

**Before:**

- Manual Docker layer caching via `actions/cache` with `/tmp/.buildx-cache`
- Simple `docker compose build`

**After:**

- Use `crazy-max/ghaction-github-runtime@v3` to expose GHA cache
- Use `docker-ci-fix-compose-build-cache.py` script
- Build with `docker buildx bake`

See the appendix below for a sketch of these steps.

### 3. Apply same changes to other Claude workflows

- `claude-dependabot.yml` - Check if it has similar patterns
- `claude-ci-failure-auto-fix.yml` - Check if it has similar patterns
- `copilot-setup-steps.yml` - Reusable workflow, may be the source of truth

---

## Files to Modify

1. `.github/workflows/claude.yml`
2. `.github/workflows/claude-dependabot.yml` (if applicable)
3. `.github/workflows/claude-ci-failure-auto-fix.yml` (if applicable)

## Dependencies

- PR #12090 must be merged first (provides the `docker-ci-fix-compose-build-cache.py` script)
- Backend Dockerfile optimizations (already in PR #12090)

---

## Test Plan

1. Create PR with changes
2. Trigger Claude workflow manually or via `@claude` mention on a test issue
3. Compare CI runtime before/after
4. Verify Claude agent still works correctly (can checkout, build, run tests)

---

## Risk Assessment

**Low risk:**

- These are CI infrastructure changes, not code changes
- If caching fails, builds fall back to uncached (slower but works)
- Changes mirror proven patterns from PR #12090

---

## Questions for Reviewer

1. Should we wait for PR #12090 to merge before creating this PR?
2. Does `copilot-setup-steps.yml` need updating, or is it a separate concern?
3. Any concerns about cache key collisions between frontend E2E and Claude workflows?

---

## Verified

- ✅ **`claude-dependabot.yml`**: Has same pnpm caching pattern as `claude.yml` (manual `actions/cache`) — NEEDS UPDATE
- ✅ **`claude-ci-failure-auto-fix.yml`**: Simple workflow with no pnpm or Docker caching — NO CHANGES NEEDED
- ✅ **Script path**: `docker-ci-fix-compose-build-cache.py` will be at `.github/workflows/scripts/` after PR #12090 merges
- ✅ **Test seed caching**: NOT APPLICABLE — Claude workflows spin up a dev environment but don't run E2E tests with pre-seeded data. The seed caching in PR #12090 is specific to the frontend E2E test suite, which needs consistent test data. Claude just needs the services running.
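---

## Appendix: Sketches of the Updated `claude.yml` Steps

The following are minimal sketches, not final implementations: they mirror the PR #12090 patterns quoted under "Pwuts's Optimizations" above, and assume `claude.yml` keeps its current step layout and the frontend lockfile at `autogpt_platform/frontend/pnpm-lock.yaml`. The elided cache-script arguments (`...`) are defined by the reference PR and left as-is here.

Replacement for the four pnpm steps (manual store config and `actions/cache` dropped):

```yaml
# Sketch: corepack must run before setup-node so the pnpm shim exists
- name: Enable corepack
  run: corepack enable

- name: Set up Node.js
  uses: actions/setup-node@v6
  with:
    node-version: "22.18.0"
    cache: "pnpm" # built-in cache replaces the manual actions/cache step
    cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
```

Replacement for the manual `/tmp/.buildx-cache` Docker steps (GHA-backed cache via buildx bake):

```yaml
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v3
  with:
    driver: docker-container
    driver-opts: network=host

# Exposes the Actions runtime token/URL so "type=gha" cache works from the CLI
- name: Expose GHA cache to docker buildx CLI
  uses: crazy-max/ghaction-github-runtime@v3

- name: Build Docker images (with cache)
  run: |
    pip install pyyaml
    docker compose -f docker-compose.yml config > docker-compose.resolved.yml
    python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
      --source docker-compose.resolved.yml \
      --cache-from "type=gha" \
      --cache-to "type=gha,mode=max" \
      ...
    docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
```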