mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-03-17 03:00:27 -04:00

Compare commits: 18 commits (feat/workf ... feat/copil)
| Author | SHA1 | Date |
|---|---|---|
|  | da2d3418bd |  |
|  | 0faee668ab |  |
|  | 64790e769a |  |
|  | 3bc8db491f |  |
|  | f0c3eb87d1 |  |
|  | 1db18f2f0a |  |
|  | e542880660 |  |
|  | f89a8e5de0 |  |
|  | 84c58b6624 |  |
|  | 05182b8368 |  |
|  | f710bde7a9 |  |
|  | 1256e8a938 |  |
|  | 777b71d409 |  |
|  | a6ecfe6e5b |  |
|  | 9cc93d84d4 |  |
|  | 32f1e51869 |  |
|  | 67a121bbe7 |  |
|  | 3c754825aa |  |
2 .github/workflows/platform-backend-ci.yml (vendored)

@@ -5,14 +5,12 @@ on:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - ".github/workflows/scripts/get_package_version_from_lockfile.py"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-backend-ci.yml"
      - ".github/workflows/scripts/get_package_version_from_lockfile.py"
      - "autogpt_platform/backend/**"
      - "autogpt_platform/autogpt_libs/**"
  merge_group:
169 .github/workflows/platform-frontend-ci.yml (vendored)

@@ -120,6 +120,175 @@ jobs:
          token: ${{ secrets.GITHUB_TOKEN }}
          exitOnceUploaded: true

  e2e_test:
    name: end-to-end tests
    runs-on: big-boi

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          submodules: recursive

      - name: Set up Platform - Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Set up Platform - Copy backend .env and set OpenAI API key
        run: |
          cp ../backend/.env.default ../backend/.env
          echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
        env:
          # Used by E2E test data script to generate embeddings for approved store agents
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Set up Platform - Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: docker-container
          driver-opts: network=host

      - name: Set up Platform - Expose GHA cache to docker buildx CLI
        uses: crazy-max/ghaction-github-runtime@v4

      - name: Set up Platform - Build Docker images (with cache)
        working-directory: autogpt_platform
        run: |
          pip install pyyaml

          # Resolve extends and generate a flat compose file that bake can understand
          docker compose -f docker-compose.yml config > docker-compose.resolved.yml

          # Add cache configuration to the resolved compose file
          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
            --source docker-compose.resolved.yml \
            --cache-from "type=gha" \
            --cache-to "type=gha,mode=max" \
            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
            --git-ref "${{ github.ref }}"

          # Build with bake using the resolved compose file (now includes cache config)
          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Cache E2E test data
        id: e2e-data-cache
        uses: actions/cache@v5
        with:
          path: /tmp/e2e_test_data.sql
          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}

      - name: Set up Platform - Start Supabase DB + Auth
        run: |
          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
          echo "Waiting for auth service to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."

      - name: Set up Platform - Run migrations
        run: |
          echo "Running migrations..."
          docker compose -f ../docker-compose.resolved.yml run --rm migrate
          echo "✅ Migrations completed"
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Load cached E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
        run: |
          echo "✅ Found cached E2E test data, restoring..."
          {
            echo "SET session_replication_role = 'replica';"
            cat /tmp/e2e_test_data.sql
            echo "SET session_replication_role = 'origin';"
          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
          # Refresh materialized views after restore
          docker compose -f ../docker-compose.resolved.yml exec -T db \
            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true

          echo "✅ E2E test data restored from cache"

      - name: Set up Platform - Start (all other services)
        run: |
          docker compose -f ../docker-compose.resolved.yml up -d --no-build
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Create E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
        run: |
          echo "Creating E2E test data..."
          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
            echo "❌ E2E test data creation failed!"
            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
            exit 1
          }

          # Dump auth.users + platform schema for cache (two separate dumps)
          echo "Dumping database for cache..."
          {
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --table='auth.users' postgres
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --schema=platform \
              --exclude-table='platform._prisma_migrations' \
              --exclude-table='platform.apscheduler_jobs' \
              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
              postgres
          } > /tmp/e2e_test_data.sql

          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"

      - name: Set up tests - Enable corepack
        run: corepack enable

      - name: Set up tests - Set up Node
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Set up tests - Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Set up tests - Install browser 'chromium'
        run: pnpm playwright install --with-deps chromium

      - name: Run Playwright tests
        run: pnpm test:no-build
        continue-on-error: false

      - name: Upload Playwright report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-report
          path: playwright-report
          if-no-files-found: ignore
          retention-days: 3

      - name: Upload Playwright test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-test-results
          path: test-results
          if-no-files-found: ignore
          retention-days: 3

      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.resolved.yml logs

  integration_test:
    runs-on: ubuntu-latest
    needs: setup
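The `docker-ci-fix-compose-build-cache.py` helper invoked in the "Build Docker images (with cache)" step above is not included in this diff. As a rough sketch only, and an assumption rather than the repository's actual implementation, a script taking these flags could rewrite the resolved compose file so that every buildable service gets a GHA cache source and sink, scoped by the supplied content hashes:

```python
# Sketch only: NOT the real docker-ci-fix-compose-build-cache.py.
# Illustrates, under assumptions, how cache_from/cache_to could be injected
# into each service of the resolved compose file that has a `build:` section.
import argparse
import yaml  # available because the step runs `pip install pyyaml` first


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", required=True)
    parser.add_argument("--cache-from", required=True)
    parser.add_argument("--cache-to", required=True)
    parser.add_argument("--backend-hash", default="")
    parser.add_argument("--frontend-hash", default="")
    parser.add_argument("--git-ref", default="")
    args = parser.parse_args()

    with open(args.source) as f:
        compose = yaml.safe_load(f)

    for name, service in compose.get("services", {}).items():
        build = service.get("build")
        if not isinstance(build, dict):
            continue  # image-only services have nothing to build or cache
        # Hypothetical scope naming: tie the cache scope to the service name and
        # a content hash so unrelated changes do not invalidate the cache.
        content_hash = args.backend_hash if "backend" in name else args.frontend_hash
        scope = f"{name}-{content_hash[:16]}" if content_hash else name
        build["cache_from"] = [f"{args.cache_from},scope={scope}"]
        build["cache_to"] = [f"{args.cache_to},scope={scope}"]

    with open(args.source, "w") as f:
        yaml.safe_dump(compose, f, sort_keys=False)


if __name__ == "__main__":
    main()
```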
314 .github/workflows/platform-fullstack-ci.yml (vendored)

@@ -1,18 +1,14 @@
name: AutoGPT Platform - Full-stack CI
name: AutoGPT Platform - Frontend CI

on:
  push:
    branches: [master, dev]
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - ".github/workflows/scripts/docker-ci-fix-compose-build-cache.py"
      - ".github/workflows/scripts/get_package_version_from_lockfile.py"
      - "autogpt_platform/**"
  pull_request:
    paths:
      - ".github/workflows/platform-fullstack-ci.yml"
      - ".github/workflows/scripts/docker-ci-fix-compose-build-cache.py"
      - ".github/workflows/scripts/get_package_version_from_lockfile.py"
      - "autogpt_platform/**"
  merge_group:

@@ -28,28 +24,42 @@ defaults:
jobs:
  setup:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Enable corepack
        run: corepack enable

      - name: Set up Node
      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies to populate cache
      - name: Enable corepack
        run: corepack enable

      - name: Generate cache key
        id: cache-key
        run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

      - name: Cache dependencies
        uses: actions/cache@v5
        with:
          path: ~/.pnpm-store
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

  check-api-types:
    name: check API types
    runs-on: ubuntu-latest
  types:
    runs-on: big-boi
    needs: setup
    strategy:
      fail-fast: false

    steps:
      - name: Checkout repository
@@ -57,256 +67,70 @@ jobs:
        with:
          submodules: recursive

      # ------------------------ Backend setup ------------------------

      - name: Set up Backend - Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Set up Backend - Install Poetry
        working-directory: autogpt_platform/backend
        run: |
          POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
          echo "Installing Poetry version ${POETRY_VERSION}"
          curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -

      - name: Set up Backend - Set up dependency cache
        uses: actions/cache@v5
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

      - name: Set up Backend - Install dependencies
        working-directory: autogpt_platform/backend
        run: poetry install

      - name: Set up Backend - Generate Prisma client
        working-directory: autogpt_platform/backend
        run: poetry run prisma generate && poetry run gen-prisma-stub

      - name: Set up Frontend - Export OpenAPI schema from Backend
        working-directory: autogpt_platform/backend
        run: poetry run export-api-schema --output ../frontend/src/app/api/openapi.json

      # ------------------------ Frontend setup ------------------------

      - name: Set up Frontend - Enable corepack
        run: corepack enable

      - name: Set up Frontend - Set up Node
      - name: Set up Node.js
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Set up Frontend - Install dependencies
      - name: Enable corepack
        run: corepack enable

      - name: Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Copy backend .env
        run: |
          cp ../backend/.env.default ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml --profile local up -d deps_backend

      - name: Restore dependencies cache
        uses: actions/cache@v5
        with:
          path: ~/.pnpm-store
          key: ${{ needs.setup.outputs.cache-key }}
          restore-keys: |
            ${{ runner.os }}-pnpm-

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Set up Frontend - Format OpenAPI schema
        id: format-schema
        run: pnpm prettier --write ./src/app/api/openapi.json
      - name: Setup .env
        run: cp .env.default .env

      - name: Wait for services to be ready
        run: |
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

      - name: Generate API queries
        run: pnpm generate:api:force

      - name: Check for API schema changes
        run: |
          if ! git diff --exit-code src/app/api/openapi.json; then
            echo "❌ API schema changes detected in src/app/api/openapi.json"
            echo ""
            echo "The openapi.json file has been modified after exporting the API schema."
            echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
            echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
            echo "The API schema is now out of sync with the Front-end queries."
            echo ""
            echo "To fix this:"
            echo "\nIn the backend directory:"
            echo "1. Run 'poetry run export-api-schema --output ../frontend/src/app/api/openapi.json'"
            echo "\nIn the frontend directory:"
            echo "2. Run 'pnpm prettier --write src/app/api/openapi.json'"
            echo "3. Run 'pnpm generate:api'"
            echo "4. Run 'pnpm types'"
            echo "5. Fix any TypeScript errors that may have been introduced"
            echo "6. Commit and push your changes"
            echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
            echo "2. Run 'pnpm generate:api' locally"
            echo "3. Run 'pnpm types' locally"
            echo "4. Fix any TypeScript errors that may have been introduced"
            echo "5. Commit and push your changes"
            echo ""
            exit 1
          else
            echo "✅ No API schema changes detected"
          fi

      - name: Set up Frontend - Generate API client
        id: generate-api-client
        run: pnpm orval --config ./orval.config.ts
        # Continue with type generation & check even if there are schema changes
        if: success() || (steps.format-schema.outcome == 'success')

      - name: Check for TypeScript errors
      - name: Run Typescript checks
        run: pnpm types
        if: success() || (steps.generate-api-client.outcome == 'success')

  e2e_test:
    name: end-to-end tests
    runs-on: big-boi

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          submodules: recursive

      - name: Set up Platform - Copy default supabase .env
        run: |
          cp ../.env.default ../.env

      - name: Set up Platform - Copy backend .env and set OpenAI API key
        run: |
          cp ../backend/.env.default ../backend/.env
          echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
        env:
          # Used by E2E test data script to generate embeddings for approved store agents
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Set up Platform - Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: docker-container
          driver-opts: network=host

      - name: Set up Platform - Expose GHA cache to docker buildx CLI
        uses: crazy-max/ghaction-github-runtime@v4

      - name: Set up Platform - Build Docker images (with cache)
        working-directory: autogpt_platform
        run: |
          pip install pyyaml

          # Resolve extends and generate a flat compose file that bake can understand
          docker compose -f docker-compose.yml config > docker-compose.resolved.yml

          # Add cache configuration to the resolved compose file
          python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
            --source docker-compose.resolved.yml \
            --cache-from "type=gha" \
            --cache-to "type=gha,mode=max" \
            --backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend/**') }}" \
            --frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src/**') }}" \
            --git-ref "${{ github.ref }}"

          # Build with bake using the resolved compose file (now includes cache config)
          docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Cache E2E test data
        id: e2e-data-cache
        uses: actions/cache@v5
        with:
          path: /tmp/e2e_test_data.sql
          key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-fullstack-ci.yml') }}

      - name: Set up Platform - Start Supabase DB + Auth
        run: |
          docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
          echo "Waiting for database to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
          echo "Waiting for auth service to be ready..."
          timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."

      - name: Set up Platform - Run migrations
        run: |
          echo "Running migrations..."
          docker compose -f ../docker-compose.resolved.yml run --rm migrate
          echo "✅ Migrations completed"
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Load cached E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit == 'true'
        run: |
          echo "✅ Found cached E2E test data, restoring..."
          {
            echo "SET session_replication_role = 'replica';"
            cat /tmp/e2e_test_data.sql
            echo "SET session_replication_role = 'origin';"
          } | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
          # Refresh materialized views after restore
          docker compose -f ../docker-compose.resolved.yml exec -T db \
            psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true

          echo "✅ E2E test data restored from cache"

      - name: Set up Platform - Start (all other services)
        run: |
          docker compose -f ../docker-compose.resolved.yml up -d --no-build
          echo "Waiting for rest_server to be ready..."
          timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
        env:
          NEXT_PUBLIC_PW_TEST: true

      - name: Set up tests - Create E2E test data
        if: steps.e2e-data-cache.outputs.cache-hit != 'true'
        run: |
          echo "Creating E2E test data..."
          docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
          docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
            echo "❌ E2E test data creation failed!"
            docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
            exit 1
          }

          # Dump auth.users + platform schema for cache (two separate dumps)
          echo "Dumping database for cache..."
          {
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --table='auth.users' postgres
            docker compose -f ../docker-compose.resolved.yml exec -T db \
              pg_dump -U postgres --data-only --column-inserts \
              --schema=platform \
              --exclude-table='platform._prisma_migrations' \
              --exclude-table='platform.apscheduler_jobs' \
              --exclude-table='platform.apscheduler_jobs_batched_notifications' \
              postgres
          } > /tmp/e2e_test_data.sql

          echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"

      - name: Set up tests - Enable corepack
        run: corepack enable

      - name: Set up tests - Set up Node
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Set up tests - Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Set up tests - Install browser 'chromium'
        run: pnpm playwright install --with-deps chromium

      - name: Run Playwright tests
        run: pnpm test:no-build
        continue-on-error: false

      - name: Upload Playwright report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-report
          path: playwright-report
          if-no-files-found: ignore
          retention-days: 3

      - name: Upload Playwright test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-test-results
          path: test-results
          if-no-files-found: ignore
          retention-days: 3

      - name: Print Final Docker Compose logs
        if: always()
        run: docker compose -f ../docker-compose.resolved.yml logs
@@ -1,100 +0,0 @@
"""API endpoint for importing external workflows via CoPilot."""

import logging
from typing import Any

import pydantic
from autogpt_libs.auth import requires_user
from fastapi import APIRouter, HTTPException, Security

from backend.copilot.workflow_import.converter import build_copilot_prompt
from backend.copilot.workflow_import.describers import describe_workflow
from backend.copilot.workflow_import.format_detector import (
    SourcePlatform,
    detect_format,
)
from backend.copilot.workflow_import.url_fetcher import fetch_n8n_template

logger = logging.getLogger(__name__)

router = APIRouter()


class ImportWorkflowRequest(pydantic.BaseModel):
    """Request body for importing an external workflow."""

    workflow_json: dict[str, Any] | None = None
    template_url: str | None = None

    @pydantic.model_validator(mode="after")
    def check_exactly_one_source(self) -> "ImportWorkflowRequest":
        has_json = self.workflow_json is not None
        has_url = self.template_url is not None
        if not has_json and not has_url:
            raise ValueError("Provide either 'workflow_json' or 'template_url'")
        if has_json and has_url:
            raise ValueError(
                "Provide only one of 'workflow_json' or 'template_url', not both"
            )
        return self


class ImportWorkflowResponse(pydantic.BaseModel):
    """Response from parsing an external workflow.

    Returns a CoPilot prompt that the frontend uses to redirect the user
    to CoPilot, where the agentic agent-generator handles the conversion.
    """

    copilot_prompt: str
    source_format: str
    source_name: str


@router.post(
    path="/workflow",
    summary="Import a workflow from another tool (n8n, Make.com, Zapier)",
    dependencies=[Security(requires_user)],
)
async def import_workflow(
    request: ImportWorkflowRequest,
) -> ImportWorkflowResponse:
    """Parse an external workflow and return a CoPilot prompt.

    Accepts either raw workflow JSON or a template URL (n8n only for now).
    The workflow is parsed and described, then a structured prompt is returned
    for CoPilot's agent-generator to handle the actual conversion.
    """
    # Step 1: Get the raw workflow JSON
    if request.template_url is not None:
        try:
            workflow_json = await fetch_n8n_template(request.template_url)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e)) from e
        except RuntimeError as e:
            raise HTTPException(status_code=502, detail=str(e)) from e
    else:
        workflow_json = request.workflow_json
        assert workflow_json is not None  # guaranteed by validator

    # Step 2: Detect format
    fmt = detect_format(workflow_json)
    if fmt == SourcePlatform.UNKNOWN:
        raise HTTPException(
            status_code=400,
            detail="Could not detect workflow format. Supported formats: "
            "n8n, Make.com, Zapier. Ensure you're uploading a valid "
            "workflow export file.",
        )

    # Step 3: Describe the workflow
    desc = describe_workflow(workflow_json, fmt)

    # Step 4: Build CoPilot prompt
    prompt = build_copilot_prompt(desc)

    return ImportWorkflowResponse(
        copilot_prompt=prompt,
        source_format=fmt.value,
        source_name=desc.name,
    )
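As a side note (not part of the diff), the exactly-one-source rule enforced by the removed model above can be exercised directly against the Pydantic class; a minimal sketch:

```python
# Minimal sketch: exercising ImportWorkflowRequest's exactly-one-source
# validator from the removed module above.
import pydantic

from backend.api.features.workflow_import import ImportWorkflowRequest

# Valid: exactly one source provided.
req = ImportWorkflowRequest(workflow_json={"nodes": [], "connections": {}})
assert req.template_url is None

# Invalid: neither or both sources raise a validation error, which FastAPI
# surfaces as HTTP 422 (as the tests below expect).
for bad in ({}, {"workflow_json": {}, "template_url": "https://n8n.io/workflows/123"}):
    try:
        ImportWorkflowRequest(**bad)
    except pydantic.ValidationError as e:
        print(f"rejected {bad!r}: {e.errors()[0]['msg']}")
```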
@@ -1,173 +0,0 @@
"""Tests for workflow_import.py API endpoint."""

from unittest.mock import AsyncMock

import fastapi
import pytest
from autogpt_libs.auth.jwt_utils import get_jwt_payload
from fastapi.testclient import TestClient

from backend.api.features.workflow_import import router

app = fastapi.FastAPI()
app.include_router(router)
client = TestClient(app)

# Sample workflow fixtures
N8N_WORKFLOW = {
    "name": "Email on Webhook",
    "nodes": [
        {
            "name": "Webhook",
            "type": "n8n-nodes-base.webhookTrigger",
            "parameters": {"path": "/incoming"},
        },
        {
            "name": "Send Email",
            "type": "n8n-nodes-base.gmail",
            "parameters": {"resource": "message", "operation": "send"},
        },
    ],
    "connections": {
        "Webhook": {"main": [[{"node": "Send Email", "type": "main", "index": 0}]]}
    },
}

MAKE_WORKFLOW = {
    "name": "Sheets to Calendar",
    "flow": [
        {
            "module": "google-sheets:watchUpdatedCells",
            "mapper": {"spreadsheetId": "abc"},
        },
        {
            "module": "google-calendar:createAnEvent",
            "mapper": {"title": "Meeting"},
        },
    ],
}

ZAPIER_WORKFLOW = {
    "name": "Gmail to Slack",
    "steps": [
        {"app": "Gmail", "action": "new_email"},
        {"app": "Slack", "action": "send_message", "params": {"channel": "#alerts"}},
    ],
}


@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
    app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
    yield
    app.dependency_overrides.clear()


class TestImportWorkflow:
    def test_import_n8n_workflow(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": N8N_WORKFLOW},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["source_format"] == "n8n"
        assert data["source_name"] == "Email on Webhook"
        assert "copilot_prompt" in data
        assert "n8n" in data["copilot_prompt"]
        assert "Email on Webhook" in data["copilot_prompt"]

    def test_import_make_workflow(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": MAKE_WORKFLOW},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["source_format"] == "make"
        assert data["source_name"] == "Sheets to Calendar"
        assert "copilot_prompt" in data

    def test_import_zapier_workflow(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": ZAPIER_WORKFLOW},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["source_format"] == "zapier"
        assert data["source_name"] == "Gmail to Slack"
        assert "copilot_prompt" in data

    def test_prompt_includes_steps(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": N8N_WORKFLOW},
        )
        prompt = response.json()["copilot_prompt"]
        # Should include step details from the workflow
        assert "Webhook" in prompt or "webhook" in prompt
        assert "Gmail" in prompt or "gmail" in prompt

    def test_no_source_provided(self):
        response = client.post(
            "/workflow",
            json={},
        )
        assert response.status_code == 422  # Pydantic validation error

    def test_both_sources_provided(self):
        response = client.post(
            "/workflow",
            json={
                "workflow_json": N8N_WORKFLOW,
                "template_url": "https://n8n.io/workflows/123",
            },
        )
        assert response.status_code == 422

    def test_unknown_format_returns_400(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": {"foo": "bar"}},
        )
        assert response.status_code == 400
        assert "Could not detect workflow format" in response.json()["detail"]

    def test_url_fetch_bad_url_returns_400(self, mocker):
        mocker.patch(
            "backend.api.features.workflow_import.fetch_n8n_template",
            new_callable=AsyncMock,
            side_effect=ValueError("Invalid URL format"),
        )
        response = client.post(
            "/workflow",
            json={"template_url": "https://bad-url.com"},
        )
        assert response.status_code == 400
        assert "Invalid URL format" in response.json()["detail"]

    def test_url_fetch_upstream_error_returns_502(self, mocker):
        mocker.patch(
            "backend.api.features.workflow_import.fetch_n8n_template",
            new_callable=AsyncMock,
            side_effect=RuntimeError("n8n API returned 500"),
        )
        response = client.post(
            "/workflow",
            json={"template_url": "https://n8n.io/workflows/123"},
        )
        assert response.status_code == 502
        assert "n8n API returned 500" in response.json()["detail"]

    def test_response_model_shape(self):
        response = client.post(
            "/workflow",
            json={"workflow_json": N8N_WORKFLOW},
        )
        data = response.json()
        assert "copilot_prompt" in data
        assert "source_format" in data
        assert "source_name" in data
        assert isinstance(data["copilot_prompt"], str)
        assert len(data["copilot_prompt"]) > 0
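The format_detector module these tests rely on is not shown in this diff. A minimal sketch of a detection heuristic that would be consistent with the fixtures above, offered as an assumption rather than the actual implementation: n8n exports carry "nodes" plus "connections", Make.com exports carry "flow", Zapier exports carry "steps", and anything else is unknown (which the endpoint maps to HTTP 400).

```python
# Sketch only: NOT the real backend.copilot.workflow_import.format_detector.
# A heuristic consistent with the test fixtures above.
from enum import Enum
from typing import Any


class SourcePlatform(str, Enum):
    N8N = "n8n"
    MAKE = "make"
    ZAPIER = "zapier"
    UNKNOWN = "unknown"


def detect_format(workflow_json: dict[str, Any]) -> SourcePlatform:
    if "nodes" in workflow_json and "connections" in workflow_json:
        return SourcePlatform.N8N
    if "flow" in workflow_json:
        return SourcePlatform.MAKE
    if "steps" in workflow_json:
        return SourcePlatform.ZAPIER
    return SourcePlatform.UNKNOWN


# Matches test_unknown_format_returns_400's fixture.
assert detect_format({"foo": "bar"}) is SourcePlatform.UNKNOWN
```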
@@ -34,7 +34,6 @@ import backend.api.features.postmark.postmark
import backend.api.features.store.model
import backend.api.features.store.routes
import backend.api.features.v1
import backend.api.features.workflow_import
import backend.api.features.workspace.routes as workspace_routes
import backend.data.block
import backend.data.db

@@ -355,11 +354,6 @@ app.include_router(
    tags=["oauth"],
    prefix="/api/oauth",
)
app.include_router(
    backend.api.features.workflow_import.router,
    tags=["v2", "import"],
    prefix="/api/import",
)

app.mount("/external-api", external_api)
@@ -40,7 +40,7 @@ from backend.copilot.response_model import (
from backend.copilot.service import (
    _build_system_prompt,
    _generate_session_title,
    _get_openai_client,
    client,
    config,
)
from backend.copilot.tools import execute_tool, get_available_tools

@@ -89,7 +89,7 @@ async def _compress_session_messages(
        result = await compress_context(
            messages=messages_dict,
            model=config.model,
            client=_get_openai_client(),
            client=client,
        )
    except Exception as e:
        logger.warning("[Baseline] Context compression with LLM failed: %s", e)

@@ -235,7 +235,7 @@ async def stream_chat_completion_baseline(
    )
    if tools:
        create_kwargs["tools"] = tools
    response = await _get_openai_client().chat.completions.create(**create_kwargs)  # type: ignore[arg-type] # dynamic kwargs
    response = await client.chat.completions.create(**create_kwargs)  # type: ignore[arg-type] # dynamic kwargs

    # Accumulate streamed response (text + tool calls)
    round_text = ""
@@ -11,34 +11,18 @@ from backend.copilot.tools import TOOL_REGISTRY
# Shared technical notes that apply to both SDK and baseline modes
_SHARED_TOOL_NOTES = """\

### Sharing files with the user
After saving a file to the persistent workspace with `write_workspace_file`,
share it with the user by embedding the `download_url` from the response in
your message as a Markdown link or image:
### Sharing files
After `write_workspace_file`, embed the `download_url` in Markdown:
- File: `[report.csv](workspace://file_id#text/csv)`
- Image: ``
- Video: ``

- **Any file** — shows as a clickable download link:
  `[report.csv](workspace://file_id#text/csv)`
- **Image** — renders inline in chat:
  ``
- **Video** — renders inline in chat with player controls:
  ``

The `download_url` field in the `write_workspace_file` response is already
in the correct format — paste it directly after the `(` in the Markdown.

### Passing file content to tools — @@agptfile: references
Instead of copying large file contents into a tool argument, pass a file
reference and the platform will load the content for you.

Syntax: `@@agptfile:<uri>[<start>-<end>]`

- `<uri>` **must** start with `workspace://` or `/` (absolute path):
  - `workspace://<file_id>` — workspace file by ID
  - `workspace:///<path>` — workspace file by virtual path
  - `/absolute/local/path` — ephemeral or sdk_cwd file
  - E2B sandbox absolute path (e.g. `/home/user/script.py`)
- `[<start>-<end>]` is an optional 1-indexed inclusive line range.
- URIs that do not start with `workspace://` or `/` are **not** expanded.
### File references — @@agptfile:
Pass large file content to tools by reference: `@@agptfile:<uri>[<start>-<end>]`
- `workspace://<file_id>` or `workspace:///<path>` — workspace files
- `/absolute/path` — local/sandbox files
- `[start-end]` — optional 1-indexed line range
- Multiple refs per argument supported. Only `workspace://` and absolute paths are expanded.

Examples:
```
@@ -49,50 +33,16 @@ Examples:
@@agptfile:/home/user/script.py
```

You can embed a reference inside any string argument, or use it as the entire
value. Multiple references in one argument are all expanded.
**Structured data**: When the entire argument is a single file reference, the platform auto-parses by extension/MIME. Supported: JSON, JSONL, CSV, TSV, YAML, TOML, Parquet, Excel (.xlsx only). Unrecognised formats return plain string.

**Structured data**: When the **entire** argument value is a single file
reference (no surrounding text), the platform automatically parses the file
content based on its extension or MIME type. Supported formats: JSON, JSONL,
CSV, TSV, YAML, TOML, Parquet, and Excel (.xlsx — first sheet only).
For example, pass `@@agptfile:workspace://<id>` where the file is a `.csv` and
the rows will be parsed into `list[list[str]]` automatically. If the format is
unrecognised or parsing fails, the content is returned as a plain string.
Legacy `.xls` files are **not** supported — only the modern `.xlsx` format.

**Type coercion**: The platform also coerces expanded values to match the
block's expected input types. For example, if a block expects `list[list[str]]`
and the expanded value is a JSON string, it will be parsed into the correct type.
**Type coercion**: The platform auto-coerces expanded string values to match block input types (e.g. JSON string → `list[list[str]]`).

### Media file inputs (format: "file")
Some block inputs accept media files — their schema shows `"format": "file"`.
These fields accept:
- **`workspace://<file_id>`** or **`workspace://<file_id>#<mime>`** — preferred
  for large files (images, videos, PDFs). The platform passes the reference
  directly to the block without reading the content into memory.
- **`data:<mime>;base64,<payload>`** — inline base64 data URI, suitable for
  small files only.

When a block input has `format: "file"`, **pass the `workspace://` URI
directly as the value** (do NOT wrap it in `@@agptfile:`). This avoids large
payloads in tool arguments and preserves binary content (images, videos)
that would be corrupted by text encoding.

Example — committing an image file to GitHub:
```json
{
  "files": [{
    "path": "docs/hero.png",
    "content": "workspace://abc123#image/png",
    "operation": "upsert"
  }]
}
```
Inputs with `"format": "file"` accept `workspace://<file_id>` or `data:<mime>;base64,<payload>`.
Pass the `workspace://` URI directly (do NOT wrap in `@@agptfile:`). This avoids large payloads and preserves binary content.

### Sub-agent tasks
- When using the Task tool, NEVER set `run_in_background` to true.
  All tasks must run in the foreground.
- Task tool: NEVER set `run_in_background` to true.
"""


@@ -128,30 +78,18 @@ def _build_storage_supplement(

## Tool notes

### Shell commands
- The SDK built-in Bash tool is NOT available. Use the `bash_exec` MCP tool
  for shell commands — it runs {sandbox_type}.

### Working directory
- Your working directory is: `{working_dir}`
- All SDK file tools AND `bash_exec` operate on the same filesystem
- Use relative paths or absolute paths under `{working_dir}` for all file operations

### Two storage systems — CRITICAL to understand
### Shell & filesystem
- Use `bash_exec` for shell commands ({sandbox_type}). Working dir: `{working_dir}`
- All file tools share the same filesystem. Use relative or absolute paths under this dir.

### Storage — important
1. **{storage_system_1_name}** (`{working_dir}`):
   {characteristics}
   {persistence}

2. **Persistent workspace** (cloud storage):
   - Files here **survive across sessions indefinitely**

### Moving files between storages
- **{file_move_name_1_to_2}**: Copy to persistent workspace
- **{file_move_name_2_to_1}**: Download for processing

### File persistence
Important files (code, configs, outputs) should be saved to workspace to ensure they persist.
2. **Persistent workspace** (cloud) — survives across sessions.
   - {file_move_name_1_to_2}: use `write_workspace_file`
   - {file_move_name_2_to_1}: use `read_workspace_file` with save_to_path
   - Save important files to workspace for persistence.
{_SHARED_TOOL_NOTES}"""
@@ -28,24 +28,10 @@ logger = logging.getLogger(__name__)

config = ChatConfig()
settings = Settings()

_client: LangfuseAsyncOpenAI | None = None
_langfuse = None
client = LangfuseAsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


def _get_openai_client() -> LangfuseAsyncOpenAI:
    global _client
    if _client is None:
        _client = LangfuseAsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
    return _client


def _get_langfuse():
    global _langfuse
    if _langfuse is None:
        _langfuse = get_client()
    return _langfuse

langfuse = get_client()

# Default system prompt used when Langfuse is not configured
# Provides minimal baseline tone and personality - all workflow, tools, and

@@ -98,7 +84,7 @@ async def _get_system_prompt_template(context: str) -> str:
        else "latest"
    )
    prompt = await asyncio.to_thread(
        _get_langfuse().get_prompt,
        langfuse.get_prompt,
        config.langfuse_prompt_name,
        label=label,
        cache_ttl_seconds=config.langfuse_prompt_cache_ttl,

@@ -172,7 +158,7 @@ async def _generate_session_title(
        "environment": settings.config.app_env.value,
    }

    response = await _get_openai_client().chat.completions.create(
    response = await client.chat.completions.create(
        model=config.title_model,
        messages=[
            {
||||
@@ -22,13 +22,11 @@ class AddUnderstandingTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return """Capture and store information about the user's business context,
|
||||
workflows, pain points, and automation goals. Call this tool whenever the user
|
||||
shares information about their business. Each call incrementally adds to the
|
||||
existing understanding - you don't need to provide all fields at once.
|
||||
|
||||
Use this to build a comprehensive profile that helps recommend better agents
|
||||
and automations for the user's specific needs."""
|
||||
return (
|
||||
"Store user's business context, workflows, pain points, and automation goals. "
|
||||
"Call whenever the user shares business info. Each call incrementally merges "
|
||||
"with existing data — provide only the fields you have."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
|
||||
@@ -408,18 +408,11 @@ class BrowserNavigateTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Navigate to a URL using a real browser. Returns an accessibility "
            "tree snapshot listing the page's interactive elements with @ref IDs "
            "(e.g. @e3) that can be used with browser_act. "
            "Session persists — cookies and login state carry over between calls. "
            "Use this (with browser_act) for multi-step interaction: login flows, "
            "form filling, button clicks, or anything requiring page interaction. "
            "For plain static pages, prefer web_fetch — no browser overhead. "
            "For authenticated pages: navigate to the login page first, use browser_act "
            "to fill credentials and submit, then navigate to the target page. "
            "Note: for slow SPAs, the returned snapshot may reflect a partially-loaded "
            "state. If elements seem missing, use browser_act with action='wait' and a "
            "CSS selector or millisecond delay, then take a browser_screenshot to verify."
            "Navigate to a URL in a real browser. Returns accessibility tree with @ref IDs "
            "for browser_act. Session persists (cookies/auth carry over). "
            "For static pages, prefer web_fetch. "
            "For SPAs, elements may load late — use browser_act with wait + browser_screenshot to verify. "
            "For auth: navigate to login, fill creds with browser_act, then navigate to target."
        )

    @property
@@ -429,13 +422,13 @@ class BrowserNavigateTool(BaseTool):
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The HTTP/HTTPS URL to navigate to.",
                    "description": "HTTP/HTTPS URL to navigate to.",
                },
                "wait_for": {
                    "type": "string",
                    "enum": ["networkidle", "load", "domcontentloaded"],
                    "default": "networkidle",
                    "description": "When to consider navigation complete. Use 'networkidle' for SPAs (default).",
                    "description": "Navigation completion strategy (default: networkidle).",
                },
            },
            "required": ["url"],
@@ -554,14 +547,12 @@ class BrowserActTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Interact with the current browser page. Use @ref IDs from the "
            "snapshot (e.g. '@e3') to target elements. Returns an updated snapshot. "
            "Supported actions: click, dblclick, fill, type, scroll, hover, press, "
            "Interact with the current browser page using @ref IDs from the snapshot. "
            "Actions: click, dblclick, fill, type, scroll, hover, press, "
            "check, uncheck, select, wait, back, forward, reload. "
            "fill clears the field before typing; type appends without clearing. "
            "wait accepts a CSS selector (waits for element) or milliseconds string (e.g. '1000'). "
            "Example login flow: fill @e1 with email → fill @e2 with password → "
            "click @e3 (submit) → browser_navigate to the target page."
            "fill clears field first; type appends. "
            "wait accepts CSS selector or milliseconds (e.g. '1000'). "
            "Returns updated snapshot."
        )

    @property
@@ -587,30 +578,21 @@ class BrowserActTool(BaseTool):
                        "forward",
                        "reload",
                    ],
                    "description": "The action to perform.",
                    "description": "Action to perform.",
                },
                "target": {
                    "type": "string",
                    "description": (
                        "Element to target. Use @ref from snapshot (e.g. '@e3'), "
                        "a CSS selector, or a text description. "
                        "Required for: click, dblclick, fill, type, hover, check, uncheck, select. "
                        "For wait: a CSS selector to wait for, or milliseconds as a string (e.g. '1000')."
                    ),
                    "description": "@ref ID (e.g. '@e3'), CSS selector, or text description.",
                },
                "value": {
                    "type": "string",
                    "description": (
                        "For fill/type: the text to enter. "
                        "For press: key name (e.g. 'Enter', 'Tab', 'Control+a'). "
                        "For select: the option value to select."
                    ),
                    "description": "Text for fill/type, key for press (e.g. 'Enter'), option for select.",
                },
                "direction": {
                    "type": "string",
                    "enum": ["up", "down", "left", "right"],
                    "default": "down",
                    "description": "For scroll: direction to scroll.",
                    "description": "Scroll direction (default: down).",
                },
            },
            "required": ["action"],
@@ -757,12 +739,10 @@ class BrowserScreenshotTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Take a screenshot of the current browser page and save it to the workspace. "
            "IMPORTANT: After calling this tool, immediately call read_workspace_file "
            "with the returned file_id to display the image inline to the user — "
            "the screenshot is not visible until you do this. "
            "With annotate=true (default), @ref labels are overlaid on interactive "
            "elements, making it easy to see which @ref ID maps to which element on screen."
            "Screenshot the current browser page and save to workspace. "
            "annotate=true overlays @ref labels on elements. "
            "IMPORTANT: After calling, you MUST immediately call read_workspace_file with the "
            "returned file_id to display the image inline."
        )

    @property
@@ -773,12 +753,12 @@ class BrowserScreenshotTool(BaseTool):
                "annotate": {
                    "type": "boolean",
                    "default": True,
                    "description": "Overlay @ref labels on interactive elements (default: true).",
                    "description": "Overlay @ref labels (default: true).",
                },
                "filename": {
                    "type": "string",
                    "default": "screenshot.png",
                    "description": "Filename to save in the workspace.",
                    "description": "Workspace filename (default: screenshot.png).",
                },
            },
        }
@@ -108,22 +108,12 @@ class AgentOutputTool(BaseTool):

    @property
    def description(self) -> str:
        return """Retrieve execution outputs from agents in the user's library.

Identify the agent using one of:
- agent_name: Fuzzy search in user's library
- library_agent_id: Exact library agent ID
- store_slug: Marketplace format 'username/agent-name'

Select which run to retrieve using:
- execution_id: Specific execution ID
- run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'

Wait for completion (optional):
- wait_if_running: Max seconds to wait if execution is still running (0-300).
  If the execution is running/queued, waits up to this many seconds for completion.
  Returns current status on timeout. If already finished, returns immediately.
"""
        return (
            "Retrieve execution outputs from a library agent. "
            "Identify by agent_name, library_agent_id, or store_slug. "
            "Filter by execution_id or run_time. "
            "Optionally wait for running executions."
        )

    @property
    def parameters(self) -> dict[str, Any]:
@@ -132,32 +122,27 @@ class AgentOutputTool(BaseTool):
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "Agent name to search for in user's library (fuzzy match)",
                    "description": "Agent name (fuzzy match).",
                },
                "library_agent_id": {
                    "type": "string",
                    "description": "Exact library agent ID",
                    "description": "Library agent ID.",
                },
                "store_slug": {
                    "type": "string",
                    "description": "Marketplace identifier: 'username/agent-slug'",
                    "description": "Marketplace 'username/agent-slug'.",
                },
                "execution_id": {
                    "type": "string",
                    "description": "Specific execution ID to retrieve",
                    "description": "Specific execution ID.",
                },
                "run_time": {
                    "type": "string",
                    "description": (
                        "Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
                    ),
                    "description": "Time filter: 'latest', today/yesterday/last week/last 7 days/last month/last 30 days, 'YYYY-MM-DD', or ISO datetime.",
                },
                "wait_if_running": {
                    "type": "integer",
                    "description": (
                        "Max seconds to wait if execution is still running (0-300). "
                        "If running, waits for completion. Returns current state on timeout."
                    ),
                    "description": "Max seconds to wait if still running (0-300). Returns current state on timeout.",
                },
            },
            "required": [],
@@ -41,15 +41,9 @@ class BashExecTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Execute a Bash command or script. "
            "Full Bash scripting is supported (loops, conditionals, pipes, "
            "functions, etc.). "
            "The working directory is shared with the SDK Read/Write/Edit/Glob/Grep "
            "tools — files created by either are immediately visible to both. "
            "Execution is killed after the timeout (default 30s, max 120s). "
            "Returns stdout and stderr. "
            "Useful for file manipulation, data processing, running scripts, "
            "and installing packages."
            "Execute a Bash command or script. Shares filesystem with SDK file tools. "
            "Useful for scripts, data processing, and package installation. "
            "Killed after timeout (default 30s, max 120s)."
        )

    @property
@@ -59,13 +53,11 @@ class BashExecTool(BaseTool):
            "properties": {
                "command": {
                    "type": "string",
                    "description": "Bash command or script to execute.",
                    "description": "Bash command or script.",
                },
                "timeout": {
                    "type": "integer",
                    "description": (
                        "Max execution time in seconds (default 30, max 120)."
                    ),
                    "description": "Max seconds (default 30, max 120).",
                    "default": 30,
                },
            },
@@ -30,12 +30,7 @@ class ContinueRunBlockTool(BaseTool):

    @property
    def description(self) -> str:
        return (
            "Continue executing a block after human review approval. "
            "Use this after a run_block call returned review_required. "
            "Pass the review_id from the review_required response. "
            "The block will execute with the original pre-approved input data."
        )
        return "Resume block execution after human review approval. Pass the review_id."

    @property
    def parameters(self) -> dict[str, Any]:
@@ -44,10 +39,7 @@ class ContinueRunBlockTool(BaseTool):
            "properties": {
                "review_id": {
                    "type": "string",
                    "description": (
                        "The review_id from a previous review_required response. "
                        "This resumes execution with the pre-approved input data."
                    ),
                    "description": "review_id from the review_required response.",
                },
            },
            "required": ["review_id"],
@@ -23,12 +23,8 @@ class CreateAgentTool(BaseTool):
    @property
    def description(self) -> str:
        return (
            "Create a new agent workflow. Pass `agent_json` with the complete "
            "agent graph JSON you generated using block schemas from find_block. "
            "The tool validates, auto-fixes, and saves.\n\n"
            "IMPORTANT: Before calling this tool, search for relevant existing agents "
            "using find_library_agent that could be used as building blocks. "
            "Pass their IDs in the library_agent_ids parameter."
            "Create a new agent from JSON (nodes + links). Validates, auto-fixes, and saves. "
            "Before calling, search for existing agents with find_library_agent."
        )

    @property
@@ -42,34 +38,21 @@ class CreateAgentTool(BaseTool):
            "properties": {
                "agent_json": {
                    "type": "object",
                    "description": (
                        "The agent JSON to validate and save. "
                        "Must contain 'nodes' and 'links' arrays, and optionally "
                        "'name' and 'description'."
                    ),
                    "description": "Agent graph with 'nodes' and 'links' arrays.",
                },
                "library_agent_ids": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": (
                        "List of library agent IDs to use as building blocks."
                    ),
                    "description": "Library agent IDs as building blocks.",
                },
                "save": {
                    "type": "boolean",
                    "description": (
                        "Whether to save the agent. Default is true. "
                        "Set to false for preview only."
                    ),
                    "description": "Save the agent (default: true). False for preview.",
                    "default": True,
                },
                "folder_id": {
                    "type": "string",
                    "description": (
                        "Optional folder ID to save the agent into. "
                        "If not provided, the agent is saved at root level. "
                        "Use list_folders to find available folders."
                    ),
                    "description": "Folder ID to save into (default: root).",
                },
            },
            "required": ["agent_json"],
@@ -23,9 +23,7 @@ class CustomizeAgentTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Customize a marketplace or template agent. Pass `agent_json` "
|
||||
"with the complete customized agent JSON. The tool validates, "
|
||||
"auto-fixes, and saves."
|
||||
"Customize a marketplace/template agent. Validates, auto-fixes, and saves."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -39,32 +37,21 @@ class CustomizeAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"agent_json": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Complete customized agent JSON to validate and save. "
|
||||
"Optionally include 'name' and 'description'."
|
||||
),
|
||||
"description": "Customized agent JSON with nodes and links.",
|
||||
},
|
||||
"library_agent_ids": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"List of library agent IDs to use as building blocks."
|
||||
),
|
||||
"description": "Library agent IDs as building blocks.",
|
||||
},
|
||||
"save": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"Whether to save the customized agent. Default is true."
|
||||
),
|
||||
"description": "Save the agent (default: true). False for preview.",
|
||||
"default": True,
|
||||
},
|
||||
"folder_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional folder ID to save the agent into. "
|
||||
"If not provided, the agent is saved at root level. "
|
||||
"Use list_folders to find available folders."
|
||||
),
|
||||
"description": "Folder ID to save into (default: root).",
|
||||
},
|
||||
},
|
||||
"required": ["agent_json"],
|
||||
|
||||
@@ -23,12 +23,8 @@ class EditAgentTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Edit an existing agent. Pass `agent_json` with the complete "
|
||||
"updated agent JSON you generated. The tool validates, auto-fixes, "
|
||||
"and saves.\n\n"
|
||||
"IMPORTANT: Before calling this tool, if the changes involve adding new "
|
||||
"functionality, search for relevant existing agents using find_library_agent "
|
||||
"that could be used as building blocks."
|
||||
"Edit an existing agent. Validates, auto-fixes, and saves. "
|
||||
"Before calling, search for existing agents with find_library_agent."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -42,33 +38,20 @@ class EditAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"agent_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The ID of the agent to edit. "
|
||||
"Can be a graph ID or library agent ID."
|
||||
),
|
||||
"description": "Graph ID or library agent ID to edit.",
|
||||
},
|
||||
"agent_json": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Complete updated agent JSON to validate and save. "
|
||||
"Must contain 'nodes' and 'links'. "
|
||||
"Include 'name' and/or 'description' if they need "
|
||||
"to be updated."
|
||||
),
|
||||
"description": "Updated agent JSON with nodes and links.",
|
||||
},
|
||||
"library_agent_ids": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"List of library agent IDs to use as building blocks for the changes."
|
||||
),
|
||||
"description": "Library agent IDs as building blocks.",
|
||||
},
|
||||
"save": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"Whether to save the changes. "
|
||||
"Default is true. Set to false for preview only."
|
||||
),
|
||||
"description": "Save changes (default: true). False for preview.",
|
||||
"default": True,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -134,11 +134,7 @@ class SearchFeatureRequestsTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Search existing feature requests to check if a similar request "
|
||||
"already exists before creating a new one. Returns matching feature "
|
||||
"requests with their ID, title, and description."
|
||||
)
|
||||
return "Search existing feature requests. Check before creating a new one."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -234,14 +230,9 @@ class CreateFeatureRequestTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Create a new feature request or add a customer need to an existing one. "
|
||||
"Always search first with search_feature_requests to avoid duplicates. "
|
||||
"If a matching request exists, pass its ID as existing_issue_id to add "
|
||||
"the user's need to it instead of creating a duplicate. "
|
||||
"IMPORTANT: Never include personally identifiable information (PII) in "
|
||||
"the title or description — no names, emails, phone numbers, company "
|
||||
"names, or other identifying details. Write titles and descriptions in "
|
||||
"generic, feature-focused language."
|
||||
"Create a feature request or add need to existing one. "
|
||||
"Search first to avoid duplicates. Pass existing_issue_id to add to existing. "
|
||||
"Never include PII (names, emails, phone numbers, company names) in title/description."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -251,28 +242,15 @@ class CreateFeatureRequestTool(BaseTool):
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Title for the feature request. Must be generic and "
|
||||
"feature-focused — do not include any user names, emails, "
|
||||
"company names, or other PII."
|
||||
),
|
||||
"description": "Feature request title. No PII.",
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Detailed description of what the user wants and why. "
|
||||
"Must not contain any personally identifiable information "
|
||||
"(PII) — describe the feature need generically without "
|
||||
"referencing specific users, companies, or contact details."
|
||||
),
|
||||
"description": "What the user wants and why. No PII.",
|
||||
},
|
||||
"existing_issue_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"If adding a need to an existing feature request, "
|
||||
"provide its Linear issue ID (from search results). "
|
||||
"Omit to create a new feature request."
|
||||
),
|
||||
"description": "Linear issue ID to add need to (from search results).",
|
||||
},
|
||||
},
|
||||
"required": ["title", "description"],
|
||||
|
||||
@@ -18,9 +18,7 @@ class FindAgentTool(BaseTool):

@property
def description(self) -> str:
return (
"Discover agents from the marketplace based on capabilities and user needs."
)
return "Search marketplace agents by capability."

@property
def parameters(self) -> dict[str, Any]:
@@ -29,7 +27,7 @@ class FindAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query describing what the user wants to accomplish. Use single keywords for best results.",
|
||||
"description": "Search keywords (single keywords work best).",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
|
||||
@@ -51,14 +51,7 @@ class FindBlockTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Search for available blocks by name or description. "
|
||||
"Blocks are reusable components that perform specific tasks like "
|
||||
"sending emails, making API calls, processing text, etc. "
|
||||
"IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
|
||||
"The response includes each block's id, name, and description. "
|
||||
"Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
|
||||
)
|
||||
return "Search blocks by name or description. Returns block IDs for run_block. Always call this FIRST to get block IDs before using run_block."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -67,18 +60,11 @@ class FindBlockTool(BaseTool):
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Search query to find blocks by name or description. "
|
||||
"Use keywords like 'email', 'http', 'text', 'ai', etc."
|
||||
),
|
||||
"description": "Search keywords (e.g. 'email', 'http', 'ai').",
|
||||
},
|
||||
"include_schemas": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true, include full input_schema and output_schema "
|
||||
"for each block. Use when generating agent JSON that "
|
||||
"needs block schemas. Default is false."
|
||||
),
|
||||
"description": "Include full input/output schemas (for agent JSON generation).",
|
||||
"default": False,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -19,13 +19,8 @@ class FindLibraryAgentTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Search for or list agents in the user's library. Use this to find "
|
||||
"agents the user has already added to their library, including agents "
|
||||
"they created or added from the marketplace. "
|
||||
"When creating agents with sub-agent composition, use this to get "
|
||||
"the agent's graph_id, graph_version, input_schema, and output_schema "
|
||||
"needed for AgentExecutorBlock nodes. "
|
||||
"Omit the query to list all agents."
|
||||
"Search user's library agents. Returns graph_id, schemas for sub-agent composition. "
|
||||
"Omit query to list all."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -35,10 +30,7 @@ class FindLibraryAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Search query to find agents by name or description. "
|
||||
"Omit to list all agents in the library."
|
||||
),
|
||||
"description": "Search by name/description. Omit to list all.",
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
|
||||
@@ -22,20 +22,8 @@ class FixAgentGraphTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Auto-fix common issues in an agent JSON graph. Applies fixes for:\n"
|
||||
"- Missing or invalid UUIDs on nodes and links\n"
|
||||
"- StoreValueBlock prerequisites for ConditionBlock\n"
|
||||
"- Double curly brace escaping in prompt templates\n"
|
||||
"- AddToList/AddToDictionary prerequisite blocks\n"
|
||||
"- CodeExecutionBlock output field naming\n"
|
||||
"- Missing credentials configuration\n"
|
||||
"- Node X coordinate spacing (800+ units apart)\n"
|
||||
"- AI model default parameters\n"
|
||||
"- Link static properties based on input schema\n"
|
||||
"- Type mismatches (inserts conversion blocks)\n\n"
|
||||
"Returns the fixed agent JSON plus a list of fixes applied. "
|
||||
"After fixing, the agent is re-validated. If still invalid, "
|
||||
"the remaining errors are included in the response."
|
||||
"Auto-fix common agent JSON issues (UUIDs, types, credentials, spacing, etc.). "
|
||||
"Returns fixed JSON and list of fixes applied."
|
||||
)
|
||||
|
||||
@property
|
||||
|
||||
@@ -42,12 +42,7 @@ class GetAgentBuildingGuideTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Returns the complete guide for building agent JSON graphs, including "
|
||||
"block IDs, link structure, AgentInputBlock, AgentOutputBlock, "
|
||||
"AgentExecutorBlock (for sub-agent composition), and MCPToolBlock usage. "
|
||||
"Call this before generating agent JSON to ensure correct structure."
|
||||
)
|
||||
return "Get the agent JSON building guide (nodes, links, AgentExecutorBlock, MCPToolBlock usage). Call before generating agent JSON."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
|
||||
@@ -25,8 +25,7 @@ class GetDocPageTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Get the full content of a documentation page by its path. "
|
||||
"Use this after search_docs to read the complete content of a relevant page."
|
||||
"Read full documentation page content by path (from search_docs results)."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -36,10 +35,7 @@ class GetDocPageTool(BaseTool):
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The path to the documentation file, as returned by search_docs. "
|
||||
"Example: 'platform/block-sdk-guide.md'"
|
||||
),
|
||||
"description": "Doc file path (e.g. 'platform/block-sdk-guide.md').",
|
||||
},
|
||||
},
|
||||
"required": ["path"],
|
||||
|
||||
@@ -38,11 +38,7 @@ class GetMCPGuideTool(BaseTool):

@property
def description(self) -> str:
return (
"Returns the MCP tool guide: known hosted server URLs (Notion, Linear, "
"Stripe, Intercom, Cloudflare, Atlassian) and authentication workflow. "
"Call before using run_mcp_tool if you need a server URL or auth info."
)
return "Get MCP server URLs and auth guide."

@property
def parameters(self) -> dict[str, Any]:

@@ -88,10 +88,7 @@ class CreateFolderTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Create a new folder in the user's library to organize agents. "
|
||||
"Optionally nest it inside an existing folder using parent_id."
|
||||
)
|
||||
return "Create a library folder. Use parent_id to nest inside another folder."
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
@@ -104,22 +101,19 @@ class CreateFolderTool(BaseTool):
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Name for the new folder (max 100 chars).",
|
||||
"description": "Folder name (max 100 chars).",
|
||||
},
|
||||
"parent_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"ID of the parent folder to nest inside. "
|
||||
"Omit to create at root level."
|
||||
),
|
||||
"description": "Parent folder ID (omit for root).",
|
||||
},
|
||||
"icon": {
|
||||
"type": "string",
|
||||
"description": "Optional icon identifier for the folder.",
|
||||
"description": "Icon identifier.",
|
||||
},
|
||||
"color": {
|
||||
"type": "string",
|
||||
"description": "Optional hex color code (#RRGGBB).",
|
||||
"description": "Hex color (#RRGGBB).",
|
||||
},
|
||||
},
|
||||
"required": ["name"],
|
||||
@@ -175,13 +169,8 @@ class ListFoldersTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"List the user's library folders. "
|
||||
"Omit parent_id to get the full folder tree. "
|
||||
"Provide parent_id to list only direct children of that folder. "
|
||||
"Set include_agents=true to also return the agents inside each folder "
|
||||
"and root-level agents not in any folder. Always set include_agents=true "
|
||||
"when the user asks about agents, wants to see what's in their folders, "
|
||||
"or mentions agents alongside folders."
|
||||
"List library folders. Omit parent_id for full tree. "
|
||||
"Set include_agents=true when user asks about agents in folders."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -195,17 +184,11 @@ class ListFoldersTool(BaseTool):
|
||||
"properties": {
|
||||
"parent_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"List children of this folder. "
|
||||
"Omit to get the full folder tree."
|
||||
),
|
||||
"description": "List children of this folder (omit for full tree).",
|
||||
},
|
||||
"include_agents": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"Whether to include the list of agents inside each folder. "
|
||||
"Defaults to false."
|
||||
),
|
||||
"description": "Include agents in each folder (default: false).",
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
@@ -357,10 +340,7 @@ class MoveFolderTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Move a folder to a different parent folder. "
|
||||
"Set target_parent_id to null to move to root level."
|
||||
)
|
||||
return "Move a folder. Set target_parent_id to null for root."
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
@@ -373,14 +353,11 @@ class MoveFolderTool(BaseTool):
|
||||
"properties": {
|
||||
"folder_id": {
|
||||
"type": "string",
|
||||
"description": "ID of the folder to move.",
|
||||
"description": "Folder ID.",
|
||||
},
|
||||
"target_parent_id": {
|
||||
"type": ["string", "null"],
|
||||
"description": (
|
||||
"ID of the new parent folder. "
|
||||
"Use null to move to root level."
|
||||
),
|
||||
"description": "New parent folder ID (null for root).",
|
||||
},
|
||||
},
|
||||
"required": ["folder_id"],
|
||||
@@ -433,10 +410,7 @@ class DeleteFolderTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Delete a folder from the user's library. "
|
||||
"Agents inside the folder are moved to root level (not deleted)."
|
||||
)
|
||||
return "Delete a folder. Agents inside move to root (not deleted)."
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
@@ -499,10 +473,7 @@ class MoveAgentsToFolderTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Move one or more agents to a folder. "
|
||||
"Set folder_id to null to move agents to root level."
|
||||
)
|
||||
return "Move agents to a folder. Set folder_id to null for root."
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
@@ -516,13 +487,11 @@ class MoveAgentsToFolderTool(BaseTool):
|
||||
"agent_ids": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of library agent IDs to move.",
|
||||
"description": "Library agent IDs to move.",
|
||||
},
|
||||
"folder_id": {
|
||||
"type": ["string", "null"],
|
||||
"description": (
|
||||
"Target folder ID. Use null to move to root level."
|
||||
),
|
||||
"description": "Target folder ID (null for root).",
|
||||
},
|
||||
},
|
||||
"required": ["agent_ids"],
|
||||
|
||||
@@ -104,19 +104,11 @@ class RunAgentTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return """Run or schedule an agent from the marketplace or user's library.
|
||||
|
||||
The tool automatically handles the setup flow:
|
||||
- Returns missing inputs if required fields are not provided
|
||||
- Returns missing credentials if user needs to configure them
|
||||
- Executes immediately if all requirements are met
|
||||
- Schedules execution if cron expression is provided
|
||||
|
||||
Identify the agent using either:
|
||||
- username_agent_slug: Marketplace format 'username/agent-name'
|
||||
- library_agent_id: ID of an agent in the user's library
|
||||
|
||||
For scheduled execution, provide: schedule_name, cron, and optionally timezone."""
|
||||
return (
|
||||
"Run or schedule an agent. Automatically checks inputs and credentials. "
|
||||
"Identify by username_agent_slug ('user/agent') or library_agent_id. "
|
||||
"For scheduling, provide schedule_name + cron."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -125,40 +117,36 @@ class RunAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"username_agent_slug": {
|
||||
"type": "string",
|
||||
"description": "Agent identifier in format 'username/agent-name'",
|
||||
"description": "Marketplace format 'username/agent-name'.",
|
||||
},
|
||||
"library_agent_id": {
|
||||
"type": "string",
|
||||
"description": "Library agent ID from user's library",
|
||||
"description": "Library agent ID.",
|
||||
},
|
||||
"inputs": {
|
||||
"type": "object",
|
||||
"description": "Input values for the agent",
|
||||
"description": "Input values for the agent.",
|
||||
"additionalProperties": True,
|
||||
},
|
||||
"use_defaults": {
|
||||
"type": "boolean",
|
||||
"description": "Set to true to run with default values (user must confirm)",
|
||||
"description": "Run with default values (confirm with user first).",
|
||||
},
|
||||
"schedule_name": {
|
||||
"type": "string",
|
||||
"description": "Name for scheduled execution (triggers scheduling mode)",
|
||||
"description": "Name for scheduled execution.",
|
||||
},
|
||||
"cron": {
|
||||
"type": "string",
|
||||
"description": "Cron expression (5 fields: min hour day month weekday)",
|
||||
"description": "Cron expression (min hour day month weekday).",
|
||||
},
|
||||
"timezone": {
|
||||
"type": "string",
|
||||
"description": "IANA timezone for schedule (default: UTC)",
|
||||
"description": "IANA timezone (default: UTC).",
|
||||
},
|
||||
"wait_for_result": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Max seconds to wait for execution to complete (0-300). "
|
||||
"If >0, blocks until the execution finishes or times out. "
|
||||
"Returns execution outputs when complete."
|
||||
),
|
||||
"description": "Max seconds to wait for completion (0-300).",
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
|
||||
@@ -45,13 +45,10 @@ class RunBlockTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Execute a specific block with the provided input data. "
|
||||
"IMPORTANT: You MUST call find_block first to get the block's 'id' - "
|
||||
"do NOT guess or make up block IDs. "
|
||||
"On first attempt (without input_data), returns detailed schema showing "
|
||||
"required inputs and outputs. Then call again with proper input_data to execute. "
|
||||
"If a block requires human review, use continue_run_block with the "
|
||||
"review_id after the user approves."
|
||||
"Execute a block. IMPORTANT: Always get block_id from find_block first "
|
||||
"— do NOT guess or fabricate IDs. "
|
||||
"Call with empty input_data to see schema, then with data to execute. "
|
||||
"If review_required, use continue_run_block."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -61,28 +58,14 @@ class RunBlockTool(BaseTool):
|
||||
"properties": {
|
||||
"block_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The block's 'id' field from find_block results. "
|
||||
"NEVER guess this - always get it from find_block first."
|
||||
),
|
||||
},
|
||||
"block_name": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The block's human-readable name from find_block results. "
|
||||
"Used for display purposes in the UI."
|
||||
),
|
||||
"description": "Block ID from find_block results.",
|
||||
},
|
||||
"input_data": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Input values for the block. "
|
||||
"First call with empty {} to see the block's schema, "
|
||||
"then call again with proper values to execute."
|
||||
),
|
||||
"description": "Input values. Use {} first to see schema.",
|
||||
},
|
||||
},
|
||||
"required": ["block_id", "block_name", "input_data"],
|
||||
"required": ["block_id", "input_data"],
|
||||
}
|
||||
|
||||
@property
|
||||
|
||||
@@ -57,10 +57,9 @@ class RunMCPToolTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Connect to an MCP (Model Context Protocol) server to discover and execute its tools. "
|
||||
"Two-step: (1) call with server_url to list available tools, "
|
||||
"(2) call again with server_url + tool_name + tool_arguments to execute. "
|
||||
"Call get_mcp_guide for known server URLs and auth details."
|
||||
"Discover and execute MCP server tools. "
|
||||
"Call with server_url only to list tools, then with tool_name + tool_arguments to execute. "
|
||||
"Call get_mcp_guide first for server URLs and auth."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -70,24 +69,15 @@ class RunMCPToolTool(BaseTool):
|
||||
"properties": {
|
||||
"server_url": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"URL of the MCP server (Streamable HTTP endpoint), "
|
||||
"e.g. https://mcp.example.com/mcp"
|
||||
),
|
||||
"description": "MCP server URL (Streamable HTTP endpoint).",
|
||||
},
|
||||
"tool_name": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Name of the MCP tool to execute. "
|
||||
"Omit on first call to discover available tools."
|
||||
),
|
||||
"description": "Tool to execute. Omit to discover available tools.",
|
||||
},
|
||||
"tool_arguments": {
|
||||
"type": "object",
|
||||
"description": (
|
||||
"Arguments to pass to the selected tool. "
|
||||
"Must match the tool's input schema returned during discovery."
|
||||
),
|
||||
"description": "Arguments matching the tool's input schema.",
|
||||
},
|
||||
},
|
||||
"required": ["server_url"],
|
||||
|
||||
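For reference, the two-step run_mcp_tool flow described above would look roughly like this; the server URL matches the example in the old description, while the tool name and arguments are purely illustrative:

# Step 1: discovery call: only server_url, so the tool lists the server's tools.
discover_call = {
    "server_url": "https://mcp.example.com/mcp",
}

# Step 2: execution call: same server_url plus a tool_name and tool_arguments
# matching the input schema returned during discovery (names are hypothetical).
execute_call = {
    "server_url": "https://mcp.example.com/mcp",
    "tool_name": "create_issue",
    "tool_arguments": {"title": "Example issue"},
}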
@@ -38,11 +38,7 @@ class SearchDocsTool(BaseTool):

@property
def description(self) -> str:
return (
"Search the AutoGPT platform documentation for information about "
"how to use the platform, build agents, configure blocks, and more. "
"Returns relevant documentation sections. Use get_doc_page to read full content."
)
return "Search platform documentation by keyword. Use get_doc_page to read full results."

@property
def parameters(self) -> dict[str, Any]:
@@ -51,10 +47,7 @@ class SearchDocsTool(BaseTool):
"properties": {
"query": {
"type": "string",
"description": (
"Search query to find relevant documentation. "
"Use natural language to describe what you're looking for."
),
"description": "Documentation search query.",
},
},
"required": ["query"],

@@ -0,0 +1,81 @@
"""Schema regression tests for all registered CoPilot tools.

Validates that every tool in TOOL_REGISTRY produces a well-formed schema:
- description is non-empty
- all `required` fields exist in `properties`
- every property has a `type` and `description`
- total token budget does not regress past 8000 tokens
"""

import json

import pytest
import tiktoken

from backend.copilot.tools import TOOL_REGISTRY

_TOKEN_BUDGET = 8_000


def _get_all_tool_schemas() -> list[tuple[str, object]]:
    """Return (tool_name, openai_schema) pairs for every registered tool."""
    return [(name, tool.as_openai_tool()) for name, tool in TOOL_REGISTRY.items()]


_ALL_SCHEMAS = _get_all_tool_schemas()


@pytest.mark.parametrize(
    "tool_name,schema",
    _ALL_SCHEMAS,
    ids=[name for name, _ in _ALL_SCHEMAS],
)
class TestToolSchema:
    """Validate schema invariants for every registered tool."""

    def test_description_non_empty(self, tool_name: str, schema: dict) -> None:
        desc = schema["function"].get("description", "")
        assert desc, f"Tool '{tool_name}' has an empty description"

    def test_required_fields_exist_in_properties(
        self, tool_name: str, schema: dict
    ) -> None:
        params = schema["function"].get("parameters", {})
        properties = params.get("properties", {})
        required = params.get("required", [])
        for field in required:
            assert field in properties, (
                f"Tool '{tool_name}': required field '{field}' "
                f"not found in properties {list(properties.keys())}"
            )

    def test_every_property_has_type_and_description(
        self, tool_name: str, schema: dict
    ) -> None:
        params = schema["function"].get("parameters", {})
        properties = params.get("properties", {})
        for prop_name, prop_def in properties.items():
            assert (
                "type" in prop_def
            ), f"Tool '{tool_name}', property '{prop_name}' is missing 'type'"
            assert (
                "description" in prop_def
            ), f"Tool '{tool_name}', property '{prop_name}' is missing 'description'"


def test_total_schema_token_budget() -> None:
    """Assert total tool schema size stays under the token budget.

    This locks in the 34% token reduction from #12398 and prevents future
    description bloat from eroding the gains. Budget is set to 8000 tokens.
    Note: this measures tool JSON only (not the full system prompt); the actual
    baseline for tool schemas alone is ~6470 tokens, giving ~19% headroom.
    """
    schemas = [tool.as_openai_tool() for tool in TOOL_REGISTRY.values()]
    serialized = json.dumps(schemas)
    enc = tiktoken.get_encoding("cl100k_base")
    total_tokens = len(enc.encode(serialized))
    assert total_tokens < _TOKEN_BUDGET, (
        f"Tool schemas use {total_tokens} tokens, exceeding budget of {_TOKEN_BUDGET}. "
        f"Description bloat detected — trim descriptions or raise the budget intentionally."
    )
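For context on what these tests assert, a minimal tool schema in the OpenAI function-calling shape they expect would look roughly like this; the tool name and parameter mirror find_block elsewhere in this diff and are written out here only as an illustration:

# Sketch of a schema that satisfies the invariants tested above:
# non-empty description, every property typed and described,
# and every required field present in properties.
example_schema = {
    "type": "function",
    "function": {
        "name": "find_block",
        "description": "Search blocks by name or description.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search keywords.",
                },
            },
            "required": ["query"],
        },
    },
}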
@@ -21,19 +21,7 @@ class ValidateAgentGraphTool(BaseTool):

@property
def description(self) -> str:
return (
"Validate an agent JSON graph for correctness. Checks:\n"
"- All block_ids reference real blocks\n"
"- All links reference valid source/sink nodes and fields\n"
"- Required input fields are wired or have defaults\n"
"- Data types are compatible across links\n"
"- Nested sink links use correct notation\n"
"- Prompt templates use proper curly brace escaping\n"
"- AgentExecutorBlock configurations are valid\n\n"
"Call this after generating agent JSON to verify correctness. "
"If validation fails, either fix issues manually based on the error "
"descriptions, or call fix_agent_graph to auto-fix common problems."
)
return "Validate agent JSON for correctness (block_ids, links, types, schemas). On failure, use fix_agent_graph to auto-fix."

@property
def requires_auth(self) -> bool:
@@ -46,11 +34,7 @@ class ValidateAgentGraphTool(BaseTool):
"properties": {
"agent_json": {
"type": "object",
"description": (
"The agent JSON to validate. Must contain 'nodes' and 'links' arrays. "
"Each node needs: id (UUID), block_id, input_default, metadata. "
"Each link needs: id (UUID), source_id, source_name, sink_id, sink_name."
),
"description": "Agent JSON with 'nodes' and 'links' arrays.",
},
},
"required": ["agent_json"],

@@ -59,13 +59,7 @@ class WebFetchTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Fetch the content of a public web page by URL. "
|
||||
"Returns readable text extracted from HTML by default. "
|
||||
"Useful for reading documentation, articles, and API responses. "
|
||||
"Only supports HTTP/HTTPS GET requests to public URLs "
|
||||
"(private/internal network addresses are blocked)."
|
||||
)
|
||||
return "Fetch a public web page. Public URLs only — internal addresses blocked. Returns readable text from HTML by default."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -74,14 +68,11 @@ class WebFetchTool(BaseTool):
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The public HTTP/HTTPS URL to fetch.",
|
||||
"description": "Public HTTP/HTTPS URL.",
|
||||
},
|
||||
"extract_text": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true (default), extract readable text from HTML. "
|
||||
"If false, return raw content."
|
||||
),
|
||||
"description": "Extract text from HTML (default: true).",
|
||||
"default": True,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -321,13 +321,7 @@ class ListWorkspaceFilesTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"List files in the user's persistent workspace (cloud storage). "
|
||||
"These files survive across sessions. "
|
||||
"For ephemeral session files, use the SDK Read/Glob tools instead. "
|
||||
"Returns file names, paths, sizes, and metadata. "
|
||||
"Optionally filter by path prefix."
|
||||
)
|
||||
return "List persistent workspace files. For ephemeral session files, use SDK Glob/Read instead. Optionally filter by path prefix."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -336,24 +330,17 @@ class ListWorkspaceFilesTool(BaseTool):
|
||||
"properties": {
|
||||
"path_prefix": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional path prefix to filter files "
|
||||
"(e.g., '/documents/' to list only files in documents folder). "
|
||||
"By default, only files from the current session are listed."
|
||||
),
|
||||
"description": "Filter by path prefix (e.g. '/documents/').",
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"description": "Maximum number of files to return (default 50, max 100)",
|
||||
"description": "Max files to return (default 50, max 100).",
|
||||
"minimum": 1,
|
||||
"maximum": 100,
|
||||
},
|
||||
"include_all_sessions": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true, list files from all sessions. "
|
||||
"Default is false (only current session's files)."
|
||||
),
|
||||
"description": "Include files from all sessions (default: false).",
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
@@ -436,18 +423,10 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Read a file from the user's persistent workspace (cloud storage). "
|
||||
"These files survive across sessions. "
|
||||
"For ephemeral session files, use the SDK Read tool instead. "
|
||||
"Specify either file_id or path to identify the file. "
|
||||
"For small text files, returns content directly. "
|
||||
"For large or binary files, returns metadata and a download URL. "
|
||||
"Use 'save_to_path' to copy the file to the working directory "
|
||||
"(sandbox or ephemeral) for processing with bash_exec or file tools. "
|
||||
"Use 'offset' and 'length' for paginated reads of large files "
|
||||
"(e.g., persisted tool outputs). "
|
||||
"Paths are scoped to the current session by default. "
|
||||
"Use /sessions/<session_id>/... for cross-session access."
|
||||
"Read a file from persistent workspace. Specify file_id or path. "
|
||||
"Small text/image files return inline; large/binary return metadata+URL. "
|
||||
"Use save_to_path to copy to working dir for processing. "
|
||||
"Use offset/length for paginated reads."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -457,48 +436,30 @@ class ReadWorkspaceFileTool(BaseTool):
|
||||
"properties": {
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "The file's unique ID (from list_workspace_files)",
|
||||
"description": "File ID from list_workspace_files.",
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The virtual file path (e.g., '/documents/report.pdf'). "
|
||||
"Scoped to current session by default."
|
||||
),
|
||||
"description": "Virtual file path (e.g. '/documents/report.pdf').",
|
||||
},
|
||||
"save_to_path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"If provided, save the file to this path in the working "
|
||||
"directory (cloud sandbox when E2B is active, or "
|
||||
"ephemeral dir otherwise) so it can be processed with "
|
||||
"bash_exec or file tools. "
|
||||
"The file content is still returned in the response."
|
||||
),
|
||||
"description": "Copy file to this working directory path for processing.",
|
||||
},
|
||||
"force_download_url": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true, always return metadata+URL instead of inline content. "
|
||||
"Default is false (auto-selects based on file size/type)."
|
||||
),
|
||||
"description": "Always return metadata+URL instead of inline content.",
|
||||
},
|
||||
"offset": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Character offset to start reading from (0-based). "
|
||||
"Use with 'length' for paginated reads of large files."
|
||||
),
|
||||
"description": "Character offset for paginated reads (0-based).",
|
||||
},
|
||||
"length": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Maximum number of characters to return. "
|
||||
"Defaults to full file. Use with 'offset' for paginated reads."
|
||||
),
|
||||
"description": "Max characters to return for paginated reads.",
|
||||
},
|
||||
},
|
||||
"required": [], # At least one must be provided
|
||||
"required": [], # At least one of file_id or path must be provided
|
||||
}
|
||||
|
||||
@property
|
||||
@@ -653,15 +614,9 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Write or create a file in the user's persistent workspace (cloud storage). "
|
||||
"These files survive across sessions. "
|
||||
"For ephemeral session files, use the SDK Write tool instead. "
|
||||
"Provide content as plain text via 'content', OR base64-encoded via "
|
||||
"'content_base64', OR copy a file from the ephemeral working directory "
|
||||
"via 'source_path'. Exactly one of these three is required. "
|
||||
f"Maximum file size is {Config().max_file_size_mb}MB. "
|
||||
"Files are saved to the current session's folder by default. "
|
||||
"Use /sessions/<session_id>/... for cross-session access."
|
||||
"Write a file to persistent workspace (survives across sessions). "
|
||||
"Provide exactly one of: content (text), content_base64 (binary), "
|
||||
f"or source_path (copy from working dir). Max {Config().max_file_size_mb}MB."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -671,51 +626,31 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
"properties": {
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "Name for the file (e.g., 'report.pdf')",
|
||||
"description": "Filename (e.g. 'report.pdf').",
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Plain text content to write. Use this for text files "
|
||||
"(code, configs, documents, etc.). "
|
||||
"Mutually exclusive with content_base64 and source_path."
|
||||
),
|
||||
"description": "Plain text content. Mutually exclusive with content_base64/source_path.",
|
||||
},
|
||||
"content_base64": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Base64-encoded file content. Use this for binary files "
|
||||
"(images, PDFs, etc.). "
|
||||
"Mutually exclusive with content and source_path."
|
||||
),
|
||||
"description": "Base64-encoded binary content. Mutually exclusive with content/source_path.",
|
||||
},
|
||||
"source_path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Path to a file in the ephemeral working directory to "
|
||||
"copy to workspace (e.g., '/tmp/copilot-.../output.csv'). "
|
||||
"Use this to persist files created by bash_exec or SDK Write. "
|
||||
"Mutually exclusive with content and content_base64."
|
||||
),
|
||||
"description": "Working directory path to copy to workspace. Mutually exclusive with content/content_base64.",
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional virtual path where to save the file "
|
||||
"(e.g., '/documents/report.pdf'). "
|
||||
"Defaults to '/{filename}'. Scoped to current session."
|
||||
),
|
||||
"description": "Virtual path (e.g. '/documents/report.pdf'). Defaults to '/{filename}'.",
|
||||
},
|
||||
"mime_type": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional MIME type of the file. "
|
||||
"Auto-detected from filename if not provided."
|
||||
),
|
||||
"description": "MIME type. Auto-detected from filename if omitted.",
|
||||
},
|
||||
"overwrite": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to overwrite if file exists at path (default: false)",
|
||||
"description": "Overwrite if file exists (default: false).",
|
||||
},
|
||||
},
|
||||
"required": ["filename"],
|
||||
@@ -842,12 +777,7 @@ class DeleteWorkspaceFileTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Delete a file from the user's persistent workspace (cloud storage). "
|
||||
"Specify either file_id or path to identify the file. "
|
||||
"Paths are scoped to the current session by default. "
|
||||
"Use /sessions/<session_id>/... for cross-session access."
|
||||
)
|
||||
return "Delete a file from persistent workspace. Specify file_id or path."
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
@@ -856,17 +786,14 @@ class DeleteWorkspaceFileTool(BaseTool):
|
||||
"properties": {
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "The file's unique ID (from list_workspace_files)",
|
||||
"description": "File ID from list_workspace_files.",
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The virtual file path (e.g., '/documents/report.pdf'). "
|
||||
"Scoped to current session by default."
|
||||
),
|
||||
"description": "Virtual file path.",
|
||||
},
|
||||
},
|
||||
"required": [], # At least one must be provided
|
||||
"required": [], # At least one of file_id or path must be provided
|
||||
}
|
||||
|
||||
@property
|
||||
|
||||
@@ -1,16 +0,0 @@
"""Workflow import module.

Parses workflows from n8n, Make.com, and Zapier into structured descriptions,
then builds CoPilot prompts for the agentic agent-generator to handle conversion.
"""

from .converter import build_copilot_prompt
from .format_detector import SourcePlatform, detect_format
from .models import WorkflowDescription

__all__ = [
    "SourcePlatform",
    "WorkflowDescription",
    "build_copilot_prompt",
    "detect_format",
]
@@ -1,49 +0,0 @@
"""Build a CoPilot prompt from a WorkflowDescription.

Instead of a custom single-shot LLM conversion, we generate a structured
prompt that CoPilot's existing agentic agent-generator handles. This reuses
the multi-turn tool-use pipeline (find_block, create_agent, fixer, validator)
for reliable workflow-to-agent conversion.
"""

import json

from .models import WorkflowDescription


def build_copilot_prompt(desc: WorkflowDescription) -> str:
    """Build a CoPilot prompt from a parsed WorkflowDescription.

    The prompt describes the external workflow in enough detail for CoPilot's
    agent-generator to recreate it as an AutoGPT agent graph.

    Args:
        desc: Structured description of the source workflow.

    Returns:
        A user-facing prompt string for CoPilot.
    """
    steps_text = ""
    for step in desc.steps:
        conns = (
            f" → connects to steps {step.connections_to}" if step.connections_to else ""
        )
        params_str = ""
        if step.parameters:
            truncated = json.dumps(step.parameters, default=str)[:300]
            params_str = f" (params: {truncated})"
        steps_text += (
            f" {step.order}. [{step.service}] {step.action}{params_str}{conns}\n"
        )

    trigger_line = f"Trigger: {desc.trigger_type}" if desc.trigger_type else ""

    return f"""I want to import a workflow from {desc.source_format.value} and recreate it as an AutoGPT agent.

**Workflow name**: {desc.name}
**Description**: {desc.description}
{trigger_line}

**Steps** (from the original {desc.source_format.value} workflow):
{steps_text}
Please build an AutoGPT agent that replicates this workflow. Map each step to the most appropriate AutoGPT block(s), wire them together, and save it.""".strip()
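Taken together, the removed helpers formed a small deterministic pipeline in front of CoPilot. A rough sketch of how they were wired, inferred from the signatures in this diff; the input file is hypothetical and the imports use the same relative module names the package uses internally:

# Assumed usage of the removed workflow-import pipeline (illustrative only).
# describe_workflow lives in describers.py (see the hunk below); the rest is
# re-exported by the package's __init__.py.
import json

from .format_detector import detect_format
from .describers import describe_workflow
from .converter import build_copilot_prompt

with open("exported_workflow.json") as f:  # hypothetical export file
    raw = json.load(f)

fmt = detect_format(raw)              # SourcePlatform.N8N / MAKE / ZAPIER / UNKNOWN
desc = describe_workflow(raw, fmt)    # platform-agnostic WorkflowDescription
prompt = build_copilot_prompt(desc)   # prompt handed to CoPilot's agent-generator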
@@ -1,269 +0,0 @@
|
||||
"""Extract structured WorkflowDescription from external workflow JSONs.
|
||||
|
||||
Each describer is a pure function that deterministically parses the source
|
||||
format into a platform-agnostic WorkflowDescription. No LLM calls are made here.
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from .models import SourcePlatform, StepDescription, WorkflowDescription
|
||||
|
||||
|
||||
def describe_workflow(
|
||||
json_data: dict[str, Any], fmt: SourcePlatform
|
||||
) -> WorkflowDescription:
|
||||
"""Route to the appropriate describer based on detected format."""
|
||||
describers = {
|
||||
SourcePlatform.N8N: describe_n8n_workflow,
|
||||
SourcePlatform.MAKE: describe_make_workflow,
|
||||
SourcePlatform.ZAPIER: describe_zapier_workflow,
|
||||
}
|
||||
describer = describers.get(fmt)
|
||||
if not describer:
|
||||
raise ValueError(f"No describer available for format: {fmt}")
|
||||
result = describer(json_data)
|
||||
if not result.steps:
|
||||
raise ValueError(f"Workflow contains no steps (format: {fmt.value})")
|
||||
return result
|
||||
|
||||
|
||||
def describe_n8n_workflow(json_data: dict[str, Any]) -> WorkflowDescription:
|
||||
"""Extract a structured description from an n8n workflow JSON."""
|
||||
nodes = json_data.get("nodes", [])
|
||||
connections = json_data.get("connections", {})
|
||||
|
||||
# Build node index by name for connection resolution
|
||||
node_index: dict[str, int] = {}
|
||||
steps: list[StepDescription] = []
|
||||
|
||||
for i, node in enumerate(nodes):
|
||||
if not isinstance(node, dict):
|
||||
continue
|
||||
|
||||
node_name = node.get("name", f"Node {i}")
|
||||
node_index[node_name] = len(steps)
|
||||
|
||||
node_type = node.get("type", "unknown")
|
||||
# Extract service name from type (e.g., "n8n-nodes-base.gmail" -> "Gmail")
|
||||
service = _extract_n8n_service(node_type)
|
||||
|
||||
# Build action description from type and parameters
|
||||
params = node.get("parameters", {})
|
||||
if not isinstance(params, dict):
|
||||
params = {}
|
||||
action = _describe_n8n_action(node_type, node_name, params)
|
||||
|
||||
# Extract key parameters (skip large/internal ones)
|
||||
clean_params = _clean_params(params)
|
||||
|
||||
steps.append(
|
||||
StepDescription(
|
||||
order=len(steps),
|
||||
action=action,
|
||||
service=service,
|
||||
parameters=clean_params,
|
||||
connections_to=[], # filled below
|
||||
)
|
||||
)
|
||||
|
||||
# Resolve connections: n8n format is {NodeName: {main: [[{node, type, index}]]}}
|
||||
for source_name, conn_data in connections.items():
|
||||
source_idx = node_index.get(source_name)
|
||||
if source_idx is None:
|
||||
continue
|
||||
main_outputs = conn_data.get("main", [])
|
||||
for output_group in main_outputs:
|
||||
if not isinstance(output_group, list):
|
||||
continue
|
||||
for conn in output_group:
|
||||
if not isinstance(conn, dict):
|
||||
continue
|
||||
target_name = conn.get("node")
|
||||
if not isinstance(target_name, str):
|
||||
continue
|
||||
target_idx = node_index.get(target_name)
|
||||
if target_idx is not None:
|
||||
steps[source_idx].connections_to.append(target_idx)
|
||||
|
||||
# Detect trigger type
|
||||
trigger_type = None
|
||||
if nodes and isinstance(nodes[0], dict):
|
||||
first_type = nodes[0].get("type", "")
|
||||
if isinstance(first_type, str) and (
|
||||
"trigger" in first_type.lower() or "webhook" in first_type.lower()
|
||||
):
|
||||
trigger_type = _extract_n8n_service(first_type)
|
||||
|
||||
return WorkflowDescription(
|
||||
name=json_data.get("name", "Imported n8n Workflow"),
|
||||
description=_build_workflow_summary(steps),
|
||||
steps=steps,
|
||||
trigger_type=trigger_type,
|
||||
source_format=SourcePlatform.N8N,
|
||||
)
|
||||
|
||||
|
||||
def describe_make_workflow(json_data: dict[str, Any]) -> WorkflowDescription:
|
||||
"""Extract a structured description from a Make.com scenario blueprint."""
|
||||
flow = json_data.get("flow", [])
|
||||
valid_modules = [m for m in flow if isinstance(m, dict)]
|
||||
steps: list[StepDescription] = []
|
||||
|
||||
for i, module in enumerate(valid_modules):
|
||||
module_ref = module.get("module", "unknown:unknown")
|
||||
if not isinstance(module_ref, str):
|
||||
module_ref = "unknown:unknown"
|
||||
parts = module_ref.split(":", 1)
|
||||
service = parts[0].replace("-", " ").title() if parts else "Unknown"
|
||||
action_verb = parts[1] if len(parts) > 1 else "process"
|
||||
|
||||
# Build human-readable action
|
||||
action = f"{str(action_verb).replace(':', ' ').title()} via {service}"
|
||||
|
||||
params = module.get("mapper", module.get("parameters", {}))
|
||||
clean_params = _clean_params(params) if isinstance(params, dict) else {}
|
||||
|
||||
# Check for routes (branching) — routers don't connect sequentially
|
||||
routes = module.get("routes", [])
|
||||
if routes:
|
||||
# Router modules branch; don't assign sequential connections
|
||||
connections_to: list[int] = []
|
||||
clean_params["_has_routes"] = len(routes)
|
||||
else:
|
||||
# Make.com flows are sequential by default; each step connects to next
|
||||
connections_to = [i + 1] if i < len(valid_modules) - 1 else []
|
||||
|
||||
steps.append(
|
||||
StepDescription(
|
||||
order=i,
|
||||
action=action,
|
||||
service=service,
|
||||
parameters=clean_params,
|
||||
connections_to=connections_to,
|
||||
)
|
||||
)
|
||||
|
||||
# Detect trigger
|
||||
trigger_type = None
|
||||
if flow and isinstance(flow[0], dict):
|
||||
first_module = flow[0].get("module", "")
|
||||
if isinstance(first_module, str) and (
|
||||
"watch" in first_module.lower() or "trigger" in first_module.lower()
|
||||
):
|
||||
trigger_type = first_module.split(":")[0].replace("-", " ").title()
|
||||
|
||||
return WorkflowDescription(
|
||||
name=json_data.get("name", "Imported Make.com Scenario"),
|
||||
description=_build_workflow_summary(steps),
|
||||
steps=steps,
|
||||
trigger_type=trigger_type,
|
||||
source_format=SourcePlatform.MAKE,
|
||||
)
|
||||
|
||||
|
||||
def describe_zapier_workflow(json_data: dict[str, Any]) -> WorkflowDescription:
|
||||
"""Extract a structured description from a Zapier Zap JSON."""
|
||||
zap_steps = json_data.get("steps", [])
|
||||
valid_steps = [s for s in zap_steps if isinstance(s, dict)]
|
||||
steps: list[StepDescription] = []
|
||||
|
||||
for i, step in enumerate(valid_steps):
|
||||
app = step.get("app", "Unknown")
|
||||
action = step.get("action", "process")
|
||||
action_desc = f"{str(action).replace('_', ' ').title()} via {app}"
|
||||
|
||||
params = step.get("params", step.get("inputFields", {}))
|
||||
clean_params = _clean_params(params) if isinstance(params, dict) else {}
|
||||
|
||||
# Zapier zaps are linear: each step connects to next
|
||||
connections_to = [i + 1] if i < len(valid_steps) - 1 else []
|
||||
|
||||
steps.append(
|
||||
StepDescription(
|
||||
order=i,
|
||||
action=action_desc,
|
||||
service=app,
|
||||
parameters=clean_params,
|
||||
connections_to=connections_to,
|
||||
)
|
||||
)
|
||||
|
||||
trigger_type = None
|
||||
if valid_steps:
|
||||
trigger_type = valid_steps[0].get("app")
|
||||
|
||||
return WorkflowDescription(
|
||||
name=json_data.get("name", json_data.get("title", "Imported Zapier Zap")),
|
||||
description=_build_workflow_summary(steps),
|
||||
steps=steps,
|
||||
trigger_type=trigger_type,
|
||||
source_format=SourcePlatform.ZAPIER,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _extract_n8n_service(node_type: str) -> str:
|
||||
"""Extract a human-readable service name from an n8n node type.
|
||||
|
||||
Examples:
|
||||
"n8n-nodes-base.gmail" -> "Gmail"
|
||||
"@n8n/n8n-nodes-langchain.agent" -> "Langchain Agent"
|
||||
"n8n-nodes-base.httpRequest" -> "Http Request"
|
||||
"""
|
||||
# Strip common prefixes
|
||||
name = node_type
|
||||
for prefix in ("n8n-nodes-base.", "@n8n/n8n-nodes-langchain.", "@n8n/"):
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
break
|
||||
|
||||
# Convert camelCase to Title Case
|
||||
name = re.sub(r"([a-z])([A-Z])", r"\1 \2", name)
|
||||
return name.replace(".", " ").replace("-", " ").title()
|
||||
|
||||
|
||||
def _describe_n8n_action(node_type: str, node_name: str, params: dict[str, Any]) -> str:
|
||||
"""Build a human-readable action description for an n8n node."""
|
||||
service = _extract_n8n_service(node_type)
|
||||
resource = str(params.get("resource", ""))
|
||||
operation = str(params.get("operation", ""))
|
||||
|
||||
if resource and operation:
|
||||
return f"{operation.title()} {resource} via {service}"
|
||||
if operation:
|
||||
return f"{operation.title()} via {service}"
|
||||
return f"{node_name} ({service})"
|
||||
|
||||
|
||||
def _clean_params(params: dict[str, Any], max_keys: int = 10) -> dict[str, Any]:
|
||||
"""Extract key parameters, skipping large or internal values."""
|
||||
cleaned: dict[str, Any] = {}
|
||||
for key, value in list(params.items())[:max_keys]:
|
||||
if key.startswith("_") or key in ("credentials", "webhookId"):
|
||||
continue
|
||||
if isinstance(value, str) and len(value) > 500:
|
||||
cleaned[key] = value[:500] + "..."
|
||||
elif isinstance(value, (str, int, float, bool)):
|
||||
cleaned[key] = value
|
||||
elif isinstance(value, list) and len(value) <= 5:
|
||||
cleaned[key] = value
|
||||
return cleaned
|
||||
|
||||
|
||||
def _build_workflow_summary(steps: list[StepDescription]) -> str:
|
||||
"""Build a one-line summary of the workflow from its steps."""
|
||||
if not steps:
|
||||
return "Empty workflow"
|
||||
services = []
|
||||
for s in steps:
|
||||
if s.service not in services:
|
||||
services.append(s.service)
|
||||
service_chain = " -> ".join(services[:6])
|
||||
if len(services) > 6:
|
||||
service_chain += f" (and {len(services) - 6} more)"
|
||||
return f"Workflow with {len(steps)} steps: {service_chain}"
|
||||
@@ -1,135 +0,0 @@
"""Tests for describers.py."""

import pytest

from .describers import (
    describe_make_workflow,
    describe_n8n_workflow,
    describe_workflow,
    describe_zapier_workflow,
)
from .models import SourcePlatform


class TestDescribeN8nWorkflow:
    def test_basic_workflow(self):
        data = {
            "name": "Email on Webhook",
            "nodes": [
                {
                    "name": "Webhook",
                    "type": "n8n-nodes-base.webhookTrigger",
                    "parameters": {"path": "/incoming"},
                },
                {
                    "name": "Send Email",
                    "type": "n8n-nodes-base.gmail",
                    "parameters": {"resource": "message", "operation": "send"},
                },
            ],
            "connections": {
                "Webhook": {
                    "main": [[{"node": "Send Email", "type": "main", "index": 0}]]
                }
            },
        }
        desc = describe_n8n_workflow(data)
        assert desc.name == "Email on Webhook"
        assert desc.source_format == SourcePlatform.N8N
        assert len(desc.steps) == 2
        assert desc.steps[0].connections_to == [1]
        assert desc.steps[1].connections_to == []
        assert desc.trigger_type is not None

    def test_step_extraction(self):
        data = {
            "name": "Test",
            "nodes": [
                {
                    "name": "HTTP",
                    "type": "n8n-nodes-base.httpRequest",
                    "parameters": {"url": "https://example.com", "method": "GET"},
                },
            ],
            "connections": {},
        }
        desc = describe_n8n_workflow(data)
        step = desc.steps[0]
        assert step.service == "Http Request"
        assert step.order == 0
        assert "url" in step.parameters

    def test_empty_nodes(self):
        data = {"name": "Empty", "nodes": [], "connections": {}}
        desc = describe_n8n_workflow(data)
        assert len(desc.steps) == 0
        assert desc.trigger_type is None


class TestDescribeMakeWorkflow:
    def test_basic_scenario(self):
        data = {
            "name": "Sheets to Calendar",
            "flow": [
                {
                    "module": "google-sheets:watchUpdatedCells",
                    "mapper": {"spreadsheetId": "abc"},
                },
                {
                    "module": "google-calendar:createAnEvent",
                    "mapper": {"title": "Meeting"},
                },
            ],
        }
        desc = describe_make_workflow(data)
        assert desc.name == "Sheets to Calendar"
        assert desc.source_format == SourcePlatform.MAKE
        assert len(desc.steps) == 2
        # Sequential: step 0 connects to step 1
        assert desc.steps[0].connections_to == [1]
        assert desc.steps[1].connections_to == []
        assert desc.trigger_type is not None  # "watch" in module name

    def test_service_extraction(self):
        data = {
            "flow": [{"module": "slack:sendMessage", "mapper": {"text": "hello"}}],
        }
        desc = describe_make_workflow(data)
        assert desc.steps[0].service == "Slack"


class TestDescribeZapierWorkflow:
    def test_basic_zap(self):
        data = {
            "name": "Gmail to Slack",
            "steps": [
                {"app": "Gmail", "action": "new_email"},
                {
                    "app": "Slack",
                    "action": "send_message",
                    "params": {"channel": "#alerts"},
                },
            ],
        }
        desc = describe_zapier_workflow(data)
        assert desc.name == "Gmail to Slack"
        assert desc.source_format == SourcePlatform.ZAPIER
        assert len(desc.steps) == 2
        assert desc.steps[0].connections_to == [1]
        assert desc.trigger_type == "Gmail"


class TestDescribeWorkflowRouter:
    def test_routes_to_n8n(self):
        data = {
            "nodes": [
                {"name": "N", "type": "n8n-nodes-base.webhook", "parameters": {}}
            ],
            "connections": {},
        }
        desc = describe_workflow(data, SourcePlatform.N8N)
        assert desc.source_format == SourcePlatform.N8N

    def test_unknown_raises(self):
        with pytest.raises(ValueError, match="No describer"):
            describe_workflow({}, SourcePlatform.UNKNOWN)
@@ -1,71 +0,0 @@
"""Detect the source platform of a workflow JSON."""

import re
from typing import Any

from .models import SourcePlatform

_N8N_TYPE_RE = re.compile(r"^(n8n-nodes-base\.|@n8n/)")


def detect_format(json_data: dict[str, Any]) -> SourcePlatform:
    """Inspect a workflow JSON and determine which platform it came from.

    Args:
        json_data: The parsed JSON data from a workflow export file.

    Returns:
        The detected SourcePlatform.
    """
    if _is_n8n(json_data):
        return SourcePlatform.N8N
    if _is_make(json_data):
        return SourcePlatform.MAKE
    if _is_zapier(json_data):
        return SourcePlatform.ZAPIER
    return SourcePlatform.UNKNOWN


def _is_n8n(data: dict[str, Any]) -> bool:
    """n8n workflows have a `nodes` array with items containing `type` fields
    matching patterns like `n8n-nodes-base.*` or `@n8n/*`, plus a `connections`
    object."""
    nodes = data.get("nodes")
    connections = data.get("connections")
    if not isinstance(nodes, list) or not isinstance(connections, dict):
        return False
    if not nodes:
        return False
    # Check if at least one node has an n8n-style type
    return any(
        isinstance(n, dict)
        and isinstance(n.get("type"), str)
        and _N8N_TYPE_RE.match(n["type"])
        for n in nodes
    )


def _is_make(data: dict[str, Any]) -> bool:
    """Make.com scenarios have a `flow` array with items containing `module`
    fields in `service:action` URI format."""
    flow = data.get("flow")
    if not isinstance(flow, list) or not flow:
        return False
    # Check if at least one module has `service:action` pattern
    return any(
        isinstance(item, dict)
        and isinstance(item.get("module"), str)
        and ":" in item["module"]
        for item in flow
    )


def _is_zapier(data: dict[str, Any]) -> bool:
    """Zapier Zaps have a `steps` array with items containing `app` and
    `action` fields."""
    steps = data.get("steps")
    if not isinstance(steps, list) or not steps:
        return False
    return any(
        isinstance(step, dict) and "app" in step and "action" in step for step in steps
    )
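For reference, a minimal usage sketch of the removed detector; the export file name is hypothetical:

import json

from .format_detector import detect_format
from .models import SourcePlatform

# Hypothetical export file produced by n8n, Make.com, or Zapier.
with open("exported_workflow.json") as f:
    data = json.load(f)

platform = detect_format(data)
if platform is SourcePlatform.UNKNOWN:
    raise ValueError("Unrecognised workflow export format")
print(f"Detected platform: {platform.value}")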
@@ -1,101 +0,0 @@
"""Tests for format_detector.py."""

from .format_detector import detect_format
from .models import SourcePlatform


class TestDetectFormat:
    def test_n8n_workflow(self):
        data = {
            "name": "My n8n Workflow",
            "nodes": [
                {
                    "name": "Webhook",
                    "type": "n8n-nodes-base.webhook",
                    "parameters": {"path": "/hook"},
                },
                {
                    "name": "HTTP Request",
                    "type": "n8n-nodes-base.httpRequest",
                    "parameters": {"url": "https://api.example.com"},
                },
            ],
            "connections": {
                "Webhook": {
                    "main": [[{"node": "HTTP Request", "type": "main", "index": 0}]]
                }
            },
        }
        assert detect_format(data) == SourcePlatform.N8N

    def test_n8n_langchain_nodes(self):
        data = {
            "nodes": [
                {
                    "name": "Agent",
                    "type": "@n8n/n8n-nodes-langchain.agent",
                    "parameters": {},
                },
            ],
            "connections": {},
        }
        assert detect_format(data) == SourcePlatform.N8N

    def test_make_scenario(self):
        data = {
            "name": "My Make Scenario",
            "flow": [
                {
                    "module": "google-sheets:watchUpdatedCells",
                    "mapper": {"spreadsheetId": "123"},
                },
                {
                    "module": "google-calendar:createAnEvent",
                    "mapper": {"title": "Test"},
                },
            ],
        }
        assert detect_format(data) == SourcePlatform.MAKE

    def test_zapier_zap(self):
        data = {
            "name": "My Zap",
            "steps": [
                {"app": "gmail", "action": "new_email"},
                {
                    "app": "slack",
                    "action": "send_message",
                    "params": {"channel": "#general"},
                },
            ],
        }
        assert detect_format(data) == SourcePlatform.ZAPIER

    def test_unknown_format(self):
        data = {"foo": "bar", "nodes": []}
        assert detect_format(data) == SourcePlatform.UNKNOWN

    def test_empty_dict(self):
        assert detect_format({}) == SourcePlatform.UNKNOWN

    def test_autogpt_graph_not_detected_as_n8n(self):
        """AutoGPT graphs have nodes but not n8n-style types."""
        data = {
            "nodes": [
                {"id": "abc", "block_id": "some-uuid", "input_default": {}},
            ],
            "connections": {},
        }
        assert detect_format(data) == SourcePlatform.UNKNOWN

    def test_make_without_colon_not_detected(self):
        data = {
            "flow": [{"module": "simplemodule", "mapper": {}}],
        }
        assert detect_format(data) == SourcePlatform.UNKNOWN

    def test_zapier_without_action_not_detected(self):
        data = {
            "steps": [{"app": "gmail"}],
        }
        assert detect_format(data) == SourcePlatform.UNKNOWN
@@ -1,33 +0,0 @@
"""Data models for external workflow import."""

from enum import Enum
from typing import Any

import pydantic


class SourcePlatform(str, Enum):
    N8N = "n8n"
    MAKE = "make"
    ZAPIER = "zapier"
    UNKNOWN = "unknown"


class StepDescription(pydantic.BaseModel):
    """A single step/node extracted from an external workflow."""

    order: int
    action: str
    service: str
    parameters: dict[str, Any] = pydantic.Field(default_factory=dict)
    connections_to: list[int] = pydantic.Field(default_factory=list)


class WorkflowDescription(pydantic.BaseModel):
    """Structured description of an external workflow."""

    name: str
    description: str
    steps: list[StepDescription]
    trigger_type: str | None = None
    source_format: SourcePlatform
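For reference, a minimal sketch of constructing the removed models by hand, mirroring the Zapier fixture used in the tests above:

from .models import SourcePlatform, StepDescription, WorkflowDescription

desc = WorkflowDescription(
    name="Gmail to Slack",
    description="Forward new emails to a Slack channel",
    steps=[
        # Step 0 is the trigger and feeds step 1, matching connections_to semantics.
        StepDescription(order=0, action="new_email", service="Gmail", connections_to=[1]),
        StepDescription(order=1, action="send_message", service="Slack"),
    ],
    trigger_type="Gmail",
    source_format=SourcePlatform.ZAPIER,
)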
@@ -1,74 +0,0 @@
"""Fetch workflow templates by URL."""

import logging
import re
from typing import Any

from backend.util.request import HTTPClientError, Requests

logger = logging.getLogger(__name__)

# Patterns for extracting template IDs from n8n URLs
_N8N_WORKFLOW_URL_RE = re.compile(
    r"https?://(?:www\.)?n8n\.io/workflows/(\d+)", re.IGNORECASE
)
_N8N_TEMPLATES_API = "https://api.n8n.io/api/templates/workflows/{id}"


async def fetch_n8n_template(url: str) -> dict[str, Any]:
    """Fetch an n8n workflow template by its URL.

    Supports URLs like:
    - https://n8n.io/workflows/1234
    - https://n8n.io/workflows/1234-some-slug

    Args:
        url: The n8n template URL.

    Returns:
        The n8n workflow JSON.

    Raises:
        ValueError: If the URL is not a valid n8n template URL.
        RuntimeError: If the fetch fails.
    """
    match = _N8N_WORKFLOW_URL_RE.match(url.strip())
    if not match:
        raise ValueError(
            "Not a valid n8n workflow URL. Expected format: "
            "https://n8n.io/workflows/<id>"
        )

    template_id = match.group(1)
    api_url = _N8N_TEMPLATES_API.format(id=template_id)

    client = Requests(raise_for_status=True)
    try:
        response = await client.get(api_url)
        data = response.json()
    except HTTPClientError as e:
        # 4xx from n8n API (e.g. 404 template not found) → bad user input
        raise ValueError(
            f"n8n template {template_id} not found or inaccessible: {e}"
        ) from e
    except Exception as e:
        raise RuntimeError(f"Failed to fetch n8n template {template_id}: {e}") from e

    if not isinstance(data, dict):
        raise RuntimeError(
            f"Unexpected response format from n8n API for template {template_id}: "
            "expected JSON object"
        )

    # n8n API wraps the workflow in a `workflow` key
    workflow = data.get("workflow", data)
    if not isinstance(workflow, dict):
        raise RuntimeError(
            f"Unexpected response format from n8n API for template {template_id}"
        )

    # Preserve the workflow name from the template metadata
    if "name" not in workflow and "name" in data:
        workflow["name"] = data["name"]

    return workflow
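For reference, a minimal async usage sketch of the removed fetcher combined with the detector; the fetcher's module name in the import is assumed, since the diff omits file paths:

import asyncio

from .format_detector import detect_format
from .template_fetcher import fetch_n8n_template  # module name assumed

async def main() -> None:
    # Fetches the template JSON via the public n8n templates API, then classifies it.
    workflow = await fetch_n8n_template("https://n8n.io/workflows/1234-some-slug")
    print(workflow.get("name"), detect_format(workflow).value)

asyncio.run(main())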
@@ -1,440 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { screen, cleanup } from "@testing-library/react";
|
||||
import { render } from "@/tests/integrations/test-utils";
|
||||
import React from "react";
|
||||
import { BlockUIType } from "../components/types";
|
||||
import type {
|
||||
CustomNodeData,
|
||||
CustomNode as CustomNodeType,
|
||||
} from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { NodeProps } from "@xyflow/react";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
|
||||
// ---- Mock sub-components ----
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer",
|
||||
() => ({
|
||||
NodeContainer: ({
|
||||
children,
|
||||
hasErrors,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
hasErrors: boolean;
|
||||
}) => (
|
||||
<div data-testid="node-container" data-has-errors={String(!!hasErrors)}>
|
||||
{children}
|
||||
</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeHeader",
|
||||
() => ({
|
||||
NodeHeader: ({ data }: { data: CustomNodeData }) => (
|
||||
<div data-testid="node-header">{data.title}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/StickyNoteBlock",
|
||||
() => ({
|
||||
StickyNoteBlock: ({ data }: { data: CustomNodeData }) => (
|
||||
<div data-testid="sticky-note-block">{data.title}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeAdvancedToggle",
|
||||
() => ({
|
||||
NodeAdvancedToggle: () => <div data-testid="node-advanced-toggle" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput",
|
||||
() => ({
|
||||
NodeDataRenderer: () => <div data-testid="node-data-renderer" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge",
|
||||
() => ({
|
||||
NodeExecutionBadge: () => <div data-testid="node-execution-badge" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeRightClickMenu",
|
||||
() => ({
|
||||
NodeRightClickMenu: ({ children }: { children: React.ReactNode }) => (
|
||||
<div data-testid="node-right-click-menu">{children}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/WebhookDisclaimer",
|
||||
() => ({
|
||||
WebhookDisclaimer: () => <div data-testid="webhook-disclaimer" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/SubAgentUpdateFeature",
|
||||
() => ({
|
||||
SubAgentUpdateFeature: () => <div data-testid="sub-agent-update" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/AyrshareConnectButton",
|
||||
() => ({
|
||||
AyrshareConnectButton: () => <div data-testid="ayrshare-connect-button" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/FormCreator",
|
||||
() => ({
|
||||
FormCreator: () => <div data-testid="form-creator" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/OutputHandler",
|
||||
() => ({
|
||||
OutputHandler: () => <div data-testid="output-handler" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/components/renderers/InputRenderer/utils/input-schema-pre-processor",
|
||||
() => ({
|
||||
preprocessInputSchema: (schema: unknown) => schema,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/useCustomNode",
|
||||
() => ({
|
||||
useCustomNode: ({ data }: { data: CustomNodeData }) => ({
|
||||
inputSchema: data.inputSchema,
|
||||
outputSchema: data.outputSchema,
|
||||
isMCPWithTool: false,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: () => ({
|
||||
getNodes: () => [],
|
||||
getEdges: () => [],
|
||||
setNodes: vi.fn(),
|
||||
setEdges: vi.fn(),
|
||||
getNode: vi.fn(),
|
||||
}),
|
||||
useNodeId: () => "test-node-id",
|
||||
useUpdateNodeInternals: () => vi.fn(),
|
||||
Handle: ({ children }: { children: React.ReactNode }) => (
|
||||
<div>{children}</div>
|
||||
),
|
||||
Position: { Left: "left", Right: "right", Top: "top", Bottom: "bottom" },
|
||||
};
|
||||
});
|
||||
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
|
||||
// ---- Helpers ----
|
||||
|
||||
function buildNodeData(
|
||||
overrides: Partial<CustomNodeData> = {},
|
||||
): CustomNodeData {
|
||||
return {
|
||||
hardcodedValues: {},
|
||||
title: "Test Block",
|
||||
description: "A test block",
|
||||
inputSchema: { type: "object", properties: {} },
|
||||
outputSchema: { type: "object", properties: {} },
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: "block-123",
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function buildNodeProps(
|
||||
dataOverrides: Partial<CustomNodeData> = {},
|
||||
propsOverrides: Partial<NodeProps<CustomNodeType>> = {},
|
||||
): NodeProps<CustomNodeType> {
|
||||
return {
|
||||
id: "node-1",
|
||||
data: buildNodeData(dataOverrides),
|
||||
selected: false,
|
||||
type: "custom",
|
||||
isConnectable: true,
|
||||
positionAbsoluteX: 0,
|
||||
positionAbsoluteY: 0,
|
||||
zIndex: 0,
|
||||
dragging: false,
|
||||
dragHandle: undefined,
|
||||
draggable: true,
|
||||
selectable: true,
|
||||
deletable: true,
|
||||
parentId: undefined,
|
||||
width: undefined,
|
||||
height: undefined,
|
||||
sourcePosition: undefined,
|
||||
targetPosition: undefined,
|
||||
...propsOverrides,
|
||||
};
|
||||
}
|
||||
|
||||
function renderCustomNode(
|
||||
dataOverrides: Partial<CustomNodeData> = {},
|
||||
propsOverrides: Partial<NodeProps<CustomNodeType>> = {},
|
||||
) {
|
||||
const props = buildNodeProps(dataOverrides, propsOverrides);
|
||||
return render(<CustomNode {...props} />);
|
||||
}
|
||||
|
||||
function createExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult> = {},
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
node_exec_id: overrides.node_exec_id ?? "exec-1",
|
||||
node_id: overrides.node_id ?? "node-1",
|
||||
graph_exec_id: overrides.graph_exec_id ?? "graph-exec-1",
|
||||
graph_id: overrides.graph_id ?? "graph-1",
|
||||
graph_version: overrides.graph_version ?? 1,
|
||||
user_id: overrides.user_id ?? "test-user",
|
||||
block_id: overrides.block_id ?? "block-1",
|
||||
status: overrides.status ?? "COMPLETED",
|
||||
input_data: overrides.input_data ?? {},
|
||||
output_data: overrides.output_data ?? {},
|
||||
add_time: overrides.add_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
queue_time: overrides.queue_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
start_time: overrides.start_time ?? new Date("2024-01-01T00:00:01Z"),
|
||||
end_time: overrides.end_time ?? new Date("2024-01-01T00:00:02Z"),
|
||||
};
|
||||
}
|
||||
|
||||
// ---- Tests ----
|
||||
|
||||
beforeEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("CustomNode", () => {
|
||||
describe("STANDARD type rendering", () => {
|
||||
it("renders NodeHeader with the block title", () => {
|
||||
renderCustomNode({ title: "My Standard Block" });
|
||||
|
||||
const header = screen.getByTestId("node-header");
|
||||
expect(header).toBeDefined();
|
||||
expect(header.textContent).toContain("My Standard Block");
|
||||
});
|
||||
|
||||
it("renders NodeContainer, FormCreator, OutputHandler, and NodeExecutionBadge", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.getByTestId("node-container")).toBeDefined();
|
||||
expect(screen.getByTestId("form-creator")).toBeDefined();
|
||||
expect(screen.getByTestId("output-handler")).toBeDefined();
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
expect(screen.getByTestId("node-data-renderer")).toBeDefined();
|
||||
expect(screen.getByTestId("node-advanced-toggle")).toBeDefined();
|
||||
});
|
||||
|
||||
it("wraps content in NodeRightClickMenu", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.getByTestId("node-right-click-menu")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render StickyNoteBlock for STANDARD type", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.queryByTestId("sticky-note-block")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("NOTE type rendering", () => {
|
||||
it("renders StickyNoteBlock instead of main UI", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.NOTE, title: "My Note" });
|
||||
|
||||
const note = screen.getByTestId("sticky-note-block");
|
||||
expect(note).toBeDefined();
|
||||
expect(note.textContent).toContain("My Note");
|
||||
});
|
||||
|
||||
it("does not render NodeContainer or other standard components", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.NOTE });
|
||||
|
||||
expect(screen.queryByTestId("node-container")).toBeNull();
|
||||
expect(screen.queryByTestId("node-header")).toBeNull();
|
||||
expect(screen.queryByTestId("form-creator")).toBeNull();
|
||||
expect(screen.queryByTestId("output-handler")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("WEBHOOK type rendering", () => {
|
||||
it("renders WebhookDisclaimer for WEBHOOK type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.WEBHOOK });
|
||||
|
||||
expect(screen.getByTestId("webhook-disclaimer")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders WebhookDisclaimer for WEBHOOK_MANUAL type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.WEBHOOK_MANUAL });
|
||||
|
||||
expect(screen.getByTestId("webhook-disclaimer")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("AGENT type rendering", () => {
|
||||
it("renders SubAgentUpdateFeature for AGENT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AGENT });
|
||||
|
||||
expect(screen.getByTestId("sub-agent-update")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render SubAgentUpdateFeature for non-AGENT types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
|
||||
expect(screen.queryByTestId("sub-agent-update")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("OUTPUT type rendering", () => {
|
||||
it("does not render OutputHandler for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
|
||||
expect(screen.queryByTestId("output-handler")).toBeNull();
|
||||
});
|
||||
|
||||
it("still renders FormCreator and other components for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
|
||||
expect(screen.getByTestId("form-creator")).toBeDefined();
|
||||
expect(screen.getByTestId("node-header")).toBeDefined();
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("AYRSHARE type rendering", () => {
|
||||
it("renders AyrshareConnectButton for AYRSHARE type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AYRSHARE });
|
||||
|
||||
expect(screen.getByTestId("ayrshare-connect-button")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render AyrshareConnectButton for non-AYRSHARE types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
|
||||
expect(screen.queryByTestId("ayrshare-connect-button")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("error states", () => {
|
||||
it("sets hasErrors on NodeContainer when data.errors has non-empty values", () => {
|
||||
renderCustomNode({
|
||||
errors: { field1: "This field is required" },
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("true");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when data.errors is empty", () => {
|
||||
renderCustomNode({ errors: {} });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when data.errors values are all empty strings", () => {
|
||||
renderCustomNode({ errors: { field1: "" } });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("sets hasErrors when last execution result has error in output_data", () => {
|
||||
renderCustomNode({
|
||||
nodeExecutionResults: [
|
||||
createExecutionResult({
|
||||
output_data: { error: ["Something went wrong"] },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("true");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when execution results have no error", () => {
|
||||
renderCustomNode({
|
||||
nodeExecutionResults: [
|
||||
createExecutionResult({
|
||||
output_data: { result: ["success"] },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
});
|
||||
|
||||
describe("NodeExecutionBadge", () => {
|
||||
it("always renders NodeExecutionBadge for non-NOTE types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders NodeExecutionBadge for AGENT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AGENT });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders NodeExecutionBadge for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("renders without nodeExecutionResults", () => {
|
||||
renderCustomNode({ nodeExecutionResults: undefined });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("renders without errors property", () => {
|
||||
renderCustomNode({ errors: undefined });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("renders with empty execution results array", () => {
|
||||
renderCustomNode({ nodeExecutionResults: [] });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,342 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
import {
|
||||
render,
|
||||
screen,
|
||||
fireEvent,
|
||||
waitFor,
|
||||
cleanup,
|
||||
} from "@/tests/integrations/test-utils";
|
||||
import { useBlockMenuStore } from "../stores/blockMenuStore";
|
||||
import { useControlPanelStore } from "../stores/controlPanelStore";
|
||||
import { DefaultStateType } from "../components/NewControlPanel/NewBlockMenu/types";
|
||||
import { SearchEntryFilterAnyOfItem } from "@/app/api/__generated__/models/searchEntryFilterAnyOfItem";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mocks for heavy child components
|
||||
// ---------------------------------------------------------------------------
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewBlockMenu/BlockMenuDefault/BlockMenuDefault",
|
||||
() => ({
|
||||
BlockMenuDefault: () => (
|
||||
<div data-testid="block-menu-default">Default Content</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewBlockMenu/BlockMenuSearch/BlockMenuSearch",
|
||||
() => ({
|
||||
BlockMenuSearch: () => (
|
||||
<div data-testid="block-menu-search">Search Results</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
// Mock query client used by the search bar hook
|
||||
vi.mock("@/lib/react-query/queryClient", () => ({
|
||||
getQueryClient: () => ({
|
||||
invalidateQueries: vi.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Reset stores before each test
|
||||
// ---------------------------------------------------------------------------
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
useBlockMenuStore.getState().reset();
|
||||
useBlockMenuStore.setState({
|
||||
filters: [],
|
||||
creators: [],
|
||||
creators_list: [],
|
||||
categoryCounts: {
|
||||
blocks: 0,
|
||||
integrations: 0,
|
||||
marketplace_agents: 0,
|
||||
my_agents: 0,
|
||||
},
|
||||
});
|
||||
useControlPanelStore.getState().reset();
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 1: blockMenuStore unit tests
|
||||
// ===========================================================================
|
||||
describe("blockMenuStore", () => {
|
||||
describe("searchQuery", () => {
|
||||
it("defaults to an empty string", () => {
|
||||
expect(useBlockMenuStore.getState().searchQuery).toBe("");
|
||||
});
|
||||
|
||||
it("sets the search query", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("timer");
|
||||
expect(useBlockMenuStore.getState().searchQuery).toBe("timer");
|
||||
});
|
||||
});
|
||||
|
||||
describe("defaultState", () => {
|
||||
it("defaults to SUGGESTION", () => {
|
||||
expect(useBlockMenuStore.getState().defaultState).toBe(
|
||||
DefaultStateType.SUGGESTION,
|
||||
);
|
||||
});
|
||||
|
||||
it("sets the default state", () => {
|
||||
useBlockMenuStore.getState().setDefaultState(DefaultStateType.ALL_BLOCKS);
|
||||
expect(useBlockMenuStore.getState().defaultState).toBe(
|
||||
DefaultStateType.ALL_BLOCKS,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("filters", () => {
|
||||
it("defaults to an empty array", () => {
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([]);
|
||||
});
|
||||
|
||||
it("adds a filter", () => {
|
||||
useBlockMenuStore.getState().addFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
]);
|
||||
});
|
||||
|
||||
it("removes a filter", () => {
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
SearchEntryFilterAnyOfItem.integrations,
|
||||
]);
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.removeFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.integrations,
|
||||
]);
|
||||
});
|
||||
|
||||
it("replaces all filters with setFilters", () => {
|
||||
useBlockMenuStore.getState().addFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([SearchEntryFilterAnyOfItem.marketplace_agents]);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.marketplace_agents,
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("creators", () => {
|
||||
it("adds a creator", () => {
|
||||
useBlockMenuStore.getState().addCreator("alice");
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["alice"]);
|
||||
});
|
||||
|
||||
it("removes a creator", () => {
|
||||
useBlockMenuStore.getState().setCreators(["alice", "bob"]);
|
||||
useBlockMenuStore.getState().removeCreator("alice");
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["bob"]);
|
||||
});
|
||||
|
||||
it("replaces all creators with setCreators", () => {
|
||||
useBlockMenuStore.getState().addCreator("alice");
|
||||
useBlockMenuStore.getState().setCreators(["charlie"]);
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["charlie"]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("categoryCounts", () => {
|
||||
it("sets category counts", () => {
|
||||
const counts = {
|
||||
blocks: 10,
|
||||
integrations: 5,
|
||||
marketplace_agents: 3,
|
||||
my_agents: 2,
|
||||
};
|
||||
useBlockMenuStore.getState().setCategoryCounts(counts);
|
||||
expect(useBlockMenuStore.getState().categoryCounts).toEqual(counts);
|
||||
});
|
||||
});
|
||||
|
||||
describe("searchId", () => {
|
||||
it("defaults to undefined", () => {
|
||||
expect(useBlockMenuStore.getState().searchId).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets and clears searchId", () => {
|
||||
useBlockMenuStore.getState().setSearchId("search-123");
|
||||
expect(useBlockMenuStore.getState().searchId).toBe("search-123");
|
||||
|
||||
useBlockMenuStore.getState().setSearchId(undefined);
|
||||
expect(useBlockMenuStore.getState().searchId).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("integration", () => {
|
||||
it("defaults to undefined", () => {
|
||||
expect(useBlockMenuStore.getState().integration).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets the integration", () => {
|
||||
useBlockMenuStore.getState().setIntegration("slack");
|
||||
expect(useBlockMenuStore.getState().integration).toBe("slack");
|
||||
});
|
||||
});
|
||||
|
||||
describe("reset", () => {
|
||||
it("resets searchQuery, searchId, defaultState, and integration", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("hello");
|
||||
useBlockMenuStore.getState().setSearchId("id-1");
|
||||
useBlockMenuStore.getState().setDefaultState(DefaultStateType.ALL_BLOCKS);
|
||||
useBlockMenuStore.getState().setIntegration("github");
|
||||
|
||||
useBlockMenuStore.getState().reset();
|
||||
|
||||
const state = useBlockMenuStore.getState();
|
||||
expect(state.searchQuery).toBe("");
|
||||
expect(state.searchId).toBeUndefined();
|
||||
expect(state.defaultState).toBe(DefaultStateType.SUGGESTION);
|
||||
expect(state.integration).toBeUndefined();
|
||||
});
|
||||
|
||||
it("does not reset filters or creators (by design)", () => {
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([SearchEntryFilterAnyOfItem.blocks]);
|
||||
useBlockMenuStore.getState().setCreators(["alice"]);
|
||||
|
||||
useBlockMenuStore.getState().reset();
|
||||
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
]);
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["alice"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 2: controlPanelStore unit tests
|
||||
// ===========================================================================
|
||||
describe("controlPanelStore", () => {
|
||||
it("defaults blockMenuOpen to false", () => {
|
||||
expect(useControlPanelStore.getState().blockMenuOpen).toBe(false);
|
||||
});
|
||||
|
||||
it("sets blockMenuOpen", () => {
|
||||
useControlPanelStore.getState().setBlockMenuOpen(true);
|
||||
expect(useControlPanelStore.getState().blockMenuOpen).toBe(true);
|
||||
});
|
||||
|
||||
it("sets forceOpenBlockMenu", () => {
|
||||
useControlPanelStore.getState().setForceOpenBlockMenu(true);
|
||||
expect(useControlPanelStore.getState().forceOpenBlockMenu).toBe(true);
|
||||
});
|
||||
|
||||
it("resets all control panel state", () => {
|
||||
useControlPanelStore.getState().setBlockMenuOpen(true);
|
||||
useControlPanelStore.getState().setForceOpenBlockMenu(true);
|
||||
useControlPanelStore.getState().setSaveControlOpen(true);
|
||||
useControlPanelStore.getState().setForceOpenSave(true);
|
||||
|
||||
useControlPanelStore.getState().reset();
|
||||
|
||||
const state = useControlPanelStore.getState();
|
||||
expect(state.blockMenuOpen).toBe(false);
|
||||
expect(state.forceOpenBlockMenu).toBe(false);
|
||||
expect(state.saveControlOpen).toBe(false);
|
||||
expect(state.forceOpenSave).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 3: BlockMenuContent integration tests
|
||||
// ===========================================================================
|
||||
// We import BlockMenuContent directly to avoid dealing with the Popover wrapper.
|
||||
import { BlockMenuContent } from "../components/NewControlPanel/NewBlockMenu/BlockMenuContent/BlockMenuContent";
|
||||
|
||||
describe("BlockMenuContent", () => {
|
||||
it("shows BlockMenuDefault when there is no search query", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("");
|
||||
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-search")).toBeNull();
|
||||
});
|
||||
|
||||
it("shows BlockMenuSearch when a search query is present", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("timer");
|
||||
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-default")).toBeNull();
|
||||
});
|
||||
|
||||
it("renders the search bar", () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(
|
||||
screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
),
|
||||
).toBeDefined();
|
||||
});
|
||||
|
||||
it("switches from default to search view when store query changes", () => {
|
||||
const { rerender } = render(<BlockMenuContent />);
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
|
||||
// Simulate typing by setting the store directly
|
||||
useBlockMenuStore.getState().setSearchQuery("webhook");
|
||||
rerender(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-default")).toBeNull();
|
||||
});
|
||||
|
||||
it("switches back to default view when search query is cleared", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("something");
|
||||
const { rerender } = render(<BlockMenuContent />);
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
|
||||
useBlockMenuStore.getState().setSearchQuery("");
|
||||
rerender(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-search")).toBeNull();
|
||||
});
|
||||
|
||||
it("typing in the search bar updates the local input value", async () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
const input = screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
);
|
||||
fireEvent.change(input, { target: { value: "slack" } });
|
||||
|
||||
expect((input as HTMLInputElement).value).toBe("slack");
|
||||
});
|
||||
|
||||
it("shows clear button when input has text and clears on click", async () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
const input = screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
);
|
||||
fireEvent.change(input, { target: { value: "test" } });
|
||||
|
||||
// The clear button should appear
|
||||
const clearButton = screen.getByRole("button");
|
||||
fireEvent.click(clearButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect((input as HTMLInputElement).value).toBe("");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,270 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
render,
|
||||
screen,
|
||||
fireEvent,
|
||||
waitFor,
|
||||
cleanup,
|
||||
} from "@/tests/integrations/test-utils";
|
||||
import { UseFormReturn, useForm } from "react-hook-form";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import * as z from "zod";
|
||||
import { renderHook } from "@testing-library/react";
|
||||
import { useControlPanelStore } from "../stores/controlPanelStore";
|
||||
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { NewSaveControl } from "../components/NewControlPanel/NewSaveControl/NewSaveControl";
|
||||
import { useNewSaveControl } from "../components/NewControlPanel/NewSaveControl/useNewSaveControl";
|
||||
|
||||
const formSchema = z.object({
|
||||
name: z.string().min(1, "Name is required").max(100),
|
||||
description: z.string().max(500),
|
||||
});
|
||||
|
||||
type SaveableGraphFormValues = z.infer<typeof formSchema>;
|
||||
|
||||
const mockHandleSave = vi.fn();
|
||||
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewSaveControl/useNewSaveControl",
|
||||
() => ({
|
||||
useNewSaveControl: vi.fn(),
|
||||
}),
|
||||
);
|
||||
|
||||
const mockUseNewSaveControl = vi.mocked(useNewSaveControl);
|
||||
|
||||
function createMockForm(
|
||||
defaults: SaveableGraphFormValues = { name: "", description: "" },
|
||||
): UseFormReturn<SaveableGraphFormValues> {
|
||||
const { result } = renderHook(() =>
|
||||
useForm<SaveableGraphFormValues>({
|
||||
resolver: zodResolver(formSchema),
|
||||
defaultValues: defaults,
|
||||
}),
|
||||
);
|
||||
return result.current;
|
||||
}
|
||||
|
||||
function setupMock(overrides: {
|
||||
isSaving?: boolean;
|
||||
graphVersion?: number;
|
||||
name?: string;
|
||||
description?: string;
|
||||
}) {
|
||||
const form = createMockForm({
|
||||
name: overrides.name ?? "",
|
||||
description: overrides.description ?? "",
|
||||
});
|
||||
|
||||
mockUseNewSaveControl.mockReturnValue({
|
||||
form,
|
||||
isSaving: overrides.isSaving ?? false,
|
||||
graphVersion: overrides.graphVersion,
|
||||
handleSave: mockHandleSave,
|
||||
});
|
||||
|
||||
return form;
|
||||
}
|
||||
|
||||
function resetStore() {
|
||||
useControlPanelStore.setState({
|
||||
blockMenuOpen: false,
|
||||
saveControlOpen: false,
|
||||
forceOpenBlockMenu: false,
|
||||
forceOpenSave: false,
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
cleanup();
|
||||
resetStore();
|
||||
mockHandleSave.mockReset();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("NewSaveControl", () => {
|
||||
it("renders save button trigger", () => {
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-save-button")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders name and description inputs when popover is open", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-name-input")).toBeDefined();
|
||||
expect(screen.getByTestId("save-control-description-input")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render popover content when closed", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: false });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.queryByTestId("save-control-name-input")).toBeNull();
|
||||
expect(screen.queryByTestId("save-control-description-input")).toBeNull();
|
||||
});
|
||||
|
||||
it("shows version output when graphVersion is set", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ graphVersion: 3 });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const versionInput = screen.getByTestId("save-control-version-output");
|
||||
expect(versionInput).toBeDefined();
|
||||
expect((versionInput as HTMLInputElement).disabled).toBe(true);
|
||||
});
|
||||
|
||||
it("hides version output when graphVersion is undefined", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ graphVersion: undefined });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.queryByTestId("save-control-version-output")).toBeNull();
|
||||
});
|
||||
|
||||
it("enables save button when isSaving is false", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ isSaving: false });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
expect((saveButton as HTMLButtonElement).disabled).toBe(false);
|
||||
});
|
||||
|
||||
it("disables save button when isSaving is true", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ isSaving: true });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByRole("button", { name: /save agent/i });
|
||||
expect((saveButton as HTMLButtonElement).disabled).toBe(true);
|
||||
});
|
||||
|
||||
it("calls handleSave on form submission with valid data", async () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
const form = setupMock({ name: "My Agent", description: "A description" });
|
||||
|
||||
form.setValue("name", "My Agent");
|
||||
form.setValue("description", "A description");
|
||||
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
fireEvent.click(saveButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleSave).toHaveBeenCalledWith(
|
||||
{ name: "My Agent", description: "A description" },
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("does not call handleSave when name is empty (validation fails)", async () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ name: "", description: "" });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
fireEvent.click(saveButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleSave).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
it("popover stays open when forceOpenSave is true", () => {
|
||||
useControlPanelStore.setState({
|
||||
saveControlOpen: false,
|
||||
forceOpenSave: true,
|
||||
});
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-name-input")).toBeDefined();
|
||||
});
|
||||
|
||||
it("allows typing in name and description inputs", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const nameInput = screen.getByTestId(
|
||||
"save-control-name-input",
|
||||
) as HTMLInputElement;
|
||||
const descriptionInput = screen.getByTestId(
|
||||
"save-control-description-input",
|
||||
) as HTMLInputElement;
|
||||
|
||||
fireEvent.change(nameInput, { target: { value: "Test Agent" } });
|
||||
fireEvent.change(descriptionInput, {
|
||||
target: { value: "Test Description" },
|
||||
});
|
||||
|
||||
expect(nameInput.value).toBe("Test Agent");
|
||||
expect(descriptionInput.value).toBe("Test Description");
|
||||
});
|
||||
|
||||
it("displays save button text", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByText("Save Agent")).toBeDefined();
|
||||
});
|
||||
});
|
||||
@@ -1,147 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { screen, fireEvent, cleanup } from "@testing-library/react";
import { render } from "@/tests/integrations/test-utils";
import React from "react";
import { useGraphStore } from "../stores/graphStore";

vi.mock(
  "@/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph",
  () => ({
    useRunGraph: vi.fn(),
  }),
);

vi.mock(
  "@/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog",
  () => ({
    RunInputDialog: ({ isOpen }: { isOpen: boolean }) =>
      isOpen ? <div data-testid="run-input-dialog">Dialog</div> : null,
  }),
);

// Must import after mocks
import { useRunGraph } from "../components/BuilderActions/components/RunGraph/useRunGraph";
import { RunGraph } from "../components/BuilderActions/components/RunGraph/RunGraph";

const mockUseRunGraph = vi.mocked(useRunGraph);

function createMockReturnValue(
  overrides: Partial<ReturnType<typeof useRunGraph>> = {},
) {
  return {
    handleRunGraph: vi.fn(),
    handleStopGraph: vi.fn(),
    openRunInputDialog: false,
    setOpenRunInputDialog: vi.fn(),
    isExecutingGraph: false,
    isTerminatingGraph: false,
    isSaving: false,
    ...overrides,
  };
}

// RunGraph uses Tooltip which requires TooltipProvider
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";

function renderRunGraph(flowID: string | null = "test-flow-id") {
  return render(
    <TooltipProvider>
      <RunGraph flowID={flowID} />
    </TooltipProvider>,
  );
}

describe("RunGraph", () => {
  beforeEach(() => {
    cleanup();
    mockUseRunGraph.mockReturnValue(createMockReturnValue());
    useGraphStore.setState({ isGraphRunning: false });
  });

  afterEach(() => {
    cleanup();
  });

  it("renders an enabled button when flowID is provided", () => {
    renderRunGraph("test-flow-id");
    const button = screen.getByRole("button");
    expect((button as HTMLButtonElement).disabled).toBe(false);
  });

  it("renders a disabled button when flowID is null", () => {
    renderRunGraph(null);
    const button = screen.getByRole("button");
    expect((button as HTMLButtonElement).disabled).toBe(true);
  });

  it("disables the button when isExecutingGraph is true", () => {
    mockUseRunGraph.mockReturnValue(
      createMockReturnValue({ isExecutingGraph: true }),
    );
    renderRunGraph();
    expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
      true,
    );
  });

  it("disables the button when isTerminatingGraph is true", () => {
    mockUseRunGraph.mockReturnValue(
      createMockReturnValue({ isTerminatingGraph: true }),
    );
    renderRunGraph();
    expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
      true,
    );
  });

  it("disables the button when isSaving is true", () => {
    mockUseRunGraph.mockReturnValue(createMockReturnValue({ isSaving: true }));
    renderRunGraph();
    expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
      true,
    );
  });

  it("uses data-id run-graph-button when not running", () => {
    renderRunGraph();
    const button = screen.getByRole("button");
    expect(button.getAttribute("data-id")).toBe("run-graph-button");
  });

  it("uses data-id stop-graph-button when running", () => {
    useGraphStore.setState({ isGraphRunning: true });
    renderRunGraph();
    const button = screen.getByRole("button");
    expect(button.getAttribute("data-id")).toBe("stop-graph-button");
  });

  it("calls handleRunGraph when clicked and graph is not running", () => {
    const handleRunGraph = vi.fn();
    mockUseRunGraph.mockReturnValue(createMockReturnValue({ handleRunGraph }));
    renderRunGraph();
    fireEvent.click(screen.getByRole("button"));
    expect(handleRunGraph).toHaveBeenCalledOnce();
  });

  it("calls handleStopGraph when clicked and graph is running", () => {
    const handleStopGraph = vi.fn();
    mockUseRunGraph.mockReturnValue(createMockReturnValue({ handleStopGraph }));
    useGraphStore.setState({ isGraphRunning: true });
    renderRunGraph();
    fireEvent.click(screen.getByRole("button"));
    expect(handleStopGraph).toHaveBeenCalledOnce();
  });

  it("renders RunInputDialog hidden by default", () => {
    renderRunGraph();
    expect(screen.queryByTestId("run-input-dialog")).toBeNull();
  });

  it("renders RunInputDialog when openRunInputDialog is true", () => {
    mockUseRunGraph.mockReturnValue(
      createMockReturnValue({ openRunInputDialog: true }),
    );
    renderRunGraph();
    expect(screen.getByTestId("run-input-dialog")).toBeDefined();
  });
});
@@ -1,257 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { BlockUIType } from "../components/types";
|
||||
|
||||
vi.mock("@/services/storage/local-storage", () => {
|
||||
const store: Record<string, string> = {};
|
||||
return {
|
||||
Key: { COPIED_FLOW_DATA: "COPIED_FLOW_DATA" },
|
||||
storage: {
|
||||
get: (key: string) => store[key] ?? null,
|
||||
set: (key: string, value: string) => {
|
||||
store[key] = value;
|
||||
},
|
||||
clean: (key: string) => {
|
||||
delete store[key];
|
||||
},
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
import { useCopyPasteStore } from "../stores/copyPasteStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { storage, Key } from "@/services/storage/local-storage";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 100, y: 200 },
|
||||
selected: overrides.selected,
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "test node",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides.data,
|
||||
},
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
describe("useCopyPasteStore", () => {
|
||||
beforeEach(() => {
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.getState().clear();
|
||||
storage.clean(Key.COPIED_FLOW_DATA);
|
||||
});
|
||||
|
||||
describe("copySelectedNodes", () => {
|
||||
it("copies a single selected node to localStorage", () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const stored = storage.get(Key.COPIED_FLOW_DATA);
|
||||
expect(stored).not.toBeNull();
|
||||
|
||||
const parsed = JSON.parse(stored!);
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.nodes[0].id).toBe("1");
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("copies only edges between selected nodes", () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: true });
|
||||
const nodeC = createTestNode("c", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB, nodeC] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
{
|
||||
id: "e-ab",
|
||||
source: "a",
|
||||
target: "b",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
{
|
||||
id: "e-bc",
|
||||
source: "b",
|
||||
target: "c",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
{
|
||||
id: "e-ac",
|
||||
source: "a",
|
||||
target: "c",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const parsed = JSON.parse(storage.get(Key.COPIED_FLOW_DATA)!);
|
||||
expect(parsed.nodes).toHaveLength(2);
|
||||
expect(parsed.edges).toHaveLength(1);
|
||||
expect(parsed.edges[0].id).toBe("e-ab");
|
||||
});
|
||||
|
||||
it("stores empty data when no nodes are selected", () => {
|
||||
const node = createTestNode("1", { selected: false });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const parsed = JSON.parse(storage.get(Key.COPIED_FLOW_DATA)!);
|
||||
expect(parsed.nodes).toHaveLength(0);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pasteNodes", () => {
|
||||
it("creates new nodes with new IDs via incrementNodeCounter", () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 5 });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
|
||||
const pastedNode = nodes.find((n) => n.id !== "orig");
|
||||
expect(pastedNode).toBeDefined();
|
||||
expect(pastedNode!.id).not.toBe("orig");
|
||||
});
|
||||
|
||||
it("offsets pasted node positions by +50 x/y", () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 5 });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const pastedNode = nodes.find((n) => n.id !== "orig");
|
||||
expect(pastedNode).toBeDefined();
|
||||
expect(pastedNode!.position).toEqual({ x: 150, y: 250 });
|
||||
});
|
||||
|
||||
it("preserves internal connections with remapped IDs", () => {
|
||||
const nodeA = createTestNode("a", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeB = createTestNode("b", {
|
||||
selected: true,
|
||||
position: { x: 200, y: 0 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB], nodeCounter: 0 });
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
{
|
||||
id: "e-ab",
|
||||
source: "a",
|
||||
target: "b",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { edges } = useEdgeStore.getState();
|
||||
const newEdges = edges.filter((e) => e.id !== "e-ab");
|
||||
expect(newEdges).toHaveLength(1);
|
||||
|
||||
const newEdge = newEdges[0];
|
||||
expect(newEdge.source).not.toBe("a");
|
||||
expect(newEdge.target).not.toBe("b");
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const pastedNodeIDs = nodes
|
||||
.filter((n) => n.id !== "a" && n.id !== "b")
|
||||
.map((n) => n.id);
|
||||
|
||||
expect(pastedNodeIDs).toContain(newEdge.source);
|
||||
expect(pastedNodeIDs).toContain(newEdge.target);
|
||||
});
|
||||
|
||||
it("deselects existing nodes and selects pasted ones", () => {
|
||||
const existingNode = createTestNode("existing", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeToCopy = createTestNode("copy-me", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
useNodeStore.setState({
|
||||
nodes: [existingNode, nodeToCopy],
|
||||
nodeCounter: 0,
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
// Deselect nodeToCopy, keep existingNode selected to verify deselection on paste
|
||||
useNodeStore.setState({
|
||||
nodes: [
|
||||
{ ...existingNode, selected: true },
|
||||
{ ...nodeToCopy, selected: false },
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const originalNodes = nodes.filter(
|
||||
(n) => n.id === "existing" || n.id === "copy-me",
|
||||
);
|
||||
const pastedNodes = nodes.filter(
|
||||
(n) => n.id !== "existing" && n.id !== "copy-me",
|
||||
);
|
||||
|
||||
originalNodes.forEach((n) => {
|
||||
expect(n.selected).toBe(false);
|
||||
});
|
||||
pastedNodes.forEach((n) => {
|
||||
expect(n.selected).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("does nothing when clipboard is empty", () => {
|
||||
const node = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 0 });
|
||||
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,751 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { MarkerType } from "@xyflow/react";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import type { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
import type { Link } from "@/app/api/__generated__/models/link";
|
||||
|
||||
function makeEdge(overrides: Partial<CustomEdge> & { id: string }): CustomEdge {
|
||||
return {
|
||||
type: "custom",
|
||||
source: "node-a",
|
||||
target: "node-b",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult>,
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
user_id: "user-1",
|
||||
graph_id: "graph-1",
|
||||
graph_version: 1,
|
||||
graph_exec_id: "gexec-1",
|
||||
node_exec_id: "nexec-1",
|
||||
node_id: "node-1",
|
||||
block_id: "block-1",
|
||||
status: "INCOMPLETE",
|
||||
input_data: {},
|
||||
output_data: {},
|
||||
add_time: new Date(),
|
||||
queue_time: null,
|
||||
start_time: null,
|
||||
end_time: null,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
useHistoryStore.setState({ past: [], future: [] });
|
||||
});
|
||||
|
||||
describe("edgeStore", () => {
|
||||
describe("setEdges", () => {
|
||||
it("replaces all edges", () => {
|
||||
const edges = [
|
||||
makeEdge({ id: "e1" }),
|
||||
makeEdge({ id: "e2", source: "node-c" }),
|
||||
];
|
||||
|
||||
useEdgeStore.getState().setEdges(edges);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("e1");
|
||||
expect(useEdgeStore.getState().edges[1].id).toBe("e2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("addEdge", () => {
|
||||
it("adds an edge and auto-generates an ID", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.id).toBe("n1:out->n2:in");
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("n1:out->n2:in");
|
||||
});
|
||||
|
||||
it("uses provided ID when given", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
id: "custom-id",
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.id).toBe("custom-id");
|
||||
});
|
||||
|
||||
it("sets type to custom and adds arrow marker", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.type).toBe("custom");
|
||||
expect(result.markerEnd).toEqual({
|
||||
type: MarkerType.ArrowClosed,
|
||||
strokeWidth: 2,
|
||||
color: "#555",
|
||||
});
|
||||
});
|
||||
|
||||
it("rejects duplicate edges without adding", () => {
|
||||
useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
const duplicate = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(duplicate.id).toBe("n1:out->n2:in");
|
||||
expect(pushSpy).not.toHaveBeenCalled();
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("pushes previous state to history store", () => {
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(pushSpy).toHaveBeenCalledWith({
|
||||
nodes: [],
|
||||
edges: [],
|
||||
});
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe("removeEdge", () => {
|
||||
it("removes an edge by ID", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1" }), makeEdge({ id: "e2" })],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdge("e1");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("e2");
|
||||
});
|
||||
|
||||
it("does nothing when removing a non-existent edge", () => {
|
||||
useEdgeStore.setState({ edges: [makeEdge({ id: "e1" })] });
|
||||
|
||||
useEdgeStore.getState().removeEdge("nonexistent");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("pushes previous state to history store", () => {
|
||||
const existingEdges = [makeEdge({ id: "e1" })];
|
||||
useEdgeStore.setState({ edges: existingEdges });
|
||||
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
useEdgeStore.getState().removeEdge("e1");
|
||||
|
||||
expect(pushSpy).toHaveBeenCalledWith({
|
||||
nodes: [],
|
||||
edges: existingEdges,
|
||||
});
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe("upsertMany", () => {
|
||||
it("inserts new edges", () => {
|
||||
useEdgeStore.setState({ edges: [makeEdge({ id: "e1" })] });
|
||||
|
||||
useEdgeStore.getState().upsertMany([makeEdge({ id: "e2" })]);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("updates existing edges by ID", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "old-source" })],
|
||||
});
|
||||
|
||||
useEdgeStore
|
||||
.getState()
|
||||
.upsertMany([makeEdge({ id: "e1", source: "new-source" })]);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].source).toBe("new-source");
|
||||
});
|
||||
|
||||
it("handles mixed inserts and updates", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "old" })],
|
||||
});
|
||||
|
||||
useEdgeStore
|
||||
.getState()
|
||||
.upsertMany([
|
||||
makeEdge({ id: "e1", source: "updated" }),
|
||||
makeEdge({ id: "e2", source: "new" }),
|
||||
]);
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(2);
|
||||
expect(edges.find((e) => e.id === "e1")?.source).toBe("updated");
|
||||
expect(edges.find((e) => e.id === "e2")?.source).toBe("new");
|
||||
});
|
||||
});
|
||||
|
||||
describe("removeEdgesByHandlePrefix", () => {
|
||||
it("removes edges targeting a node with matching handle prefix", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", target: "node-b", targetHandle: "input_foo" }),
|
||||
makeEdge({ id: "e2", target: "node-b", targetHandle: "input_bar" }),
|
||||
makeEdge({
|
||||
id: "e3",
|
||||
target: "node-b",
|
||||
targetHandle: "other_handle",
|
||||
}),
|
||||
makeEdge({ id: "e4", target: "node-c", targetHandle: "input_foo" }),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdgesByHandlePrefix("node-b", "input_");
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(2);
|
||||
expect(edges.map((e) => e.id).sort()).toEqual(["e3", "e4"]);
|
||||
});
|
||||
|
||||
it("does not remove edges where target does not match nodeId", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-b",
|
||||
target: "node-c",
|
||||
targetHandle: "input_x",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdgesByHandlePrefix("node-b", "input_");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getNodeEdges", () => {
|
||||
it("returns edges where node is source", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-c", target: "node-d" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-a");
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("e1");
|
||||
});
|
||||
|
||||
it("returns edges where node is target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-c", target: "node-d" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-b");
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("e1");
|
||||
});
|
||||
|
||||
it("returns edges for both source and target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-b", target: "node-c" }),
|
||||
makeEdge({ id: "e3", source: "node-d", target: "node-e" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-b");
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result.map((e) => e.id).sort()).toEqual(["e1", "e2"]);
|
||||
});
|
||||
|
||||
it("returns empty array for unconnected node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "node-a", target: "node-b" })],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getNodeEdges("node-z")).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isInputConnected", () => {
|
||||
it("returns true when target handle is connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "input")).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("returns false when target handle is not connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "other")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("returns false when node is source not target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-b",
|
||||
target: "node-c",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "output")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isOutputConnected", () => {
|
||||
it("returns true when source handle is connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-a",
|
||||
sourceHandle: "output",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(
|
||||
useEdgeStore.getState().isOutputConnected("node-a", "output"),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when source handle is not connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-a",
|
||||
sourceHandle: "output",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isOutputConnected("node-a", "other")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getBackendLinks", () => {
|
||||
it("converts edges to Link format", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
data: { isStatic: true },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const links = useEdgeStore.getState().getBackendLinks();
|
||||
|
||||
expect(links).toHaveLength(1);
|
||||
expect(links[0]).toEqual({
|
||||
id: "e1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
is_static: true,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("addLinks", () => {
|
||||
it("converts Links to edges and adds them", () => {
|
||||
const links: Link[] = [
|
||||
{
|
||||
id: "link-1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
is_static: false,
|
||||
},
|
||||
];
|
||||
|
||||
useEdgeStore.getState().addLinks(links);
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(1);
|
||||
expect(edges[0].source).toBe("n1");
|
||||
expect(edges[0].target).toBe("n2");
|
||||
expect(edges[0].sourceHandle).toBe("out");
|
||||
expect(edges[0].targetHandle).toBe("in");
|
||||
expect(edges[0].data?.isStatic).toBe(false);
|
||||
});
|
||||
|
||||
it("adds multiple links", () => {
|
||||
const links: Link[] = [
|
||||
{
|
||||
id: "link-1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
},
|
||||
{
|
||||
id: "link-2",
|
||||
source_id: "n3",
|
||||
sink_id: "n4",
|
||||
source_name: "result",
|
||||
sink_name: "value",
|
||||
},
|
||||
];
|
||||
|
||||
useEdgeStore.getState().addLinks(links);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllHandleIdsOfANode", () => {
|
||||
it("returns targetHandle values for edges targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", target: "node-b", targetHandle: "input_a" }),
|
||||
makeEdge({ id: "e2", target: "node-b", targetHandle: "input_b" }),
|
||||
makeEdge({ id: "e3", target: "node-c", targetHandle: "input_c" }),
|
||||
],
|
||||
});
|
||||
|
||||
const handles = useEdgeStore.getState().getAllHandleIdsOfANode("node-b");
|
||||
expect(handles).toEqual(["input_a", "input_b"]);
|
||||
});
|
||||
|
||||
it("returns empty array when no edges target the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "node-b", target: "node-c" })],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getAllHandleIdsOfANode("node-b")).toEqual(
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("returns empty string for edges with no targetHandle", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: undefined,
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getAllHandleIdsOfANode("node-b")).toEqual([
|
||||
"",
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("updateEdgeBeads", () => {
|
||||
it("updates bead counts for edges targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "some-value" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(1);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
|
||||
it("counts INCOMPLETE status in beadUp but not beadDown", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "INCOMPLETE",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(1);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("does not modify edges not targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-c",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("does not update edge when input_data has no matching handle", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { other_handle: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("accumulates beads across multiple executions", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data1" },
|
||||
}),
|
||||
);
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
status: "INCOMPLETE",
|
||||
input_data: { input: "data2" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(2);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
|
||||
it("handles static edges by setting beadUp to beadDown + 1", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: {
|
||||
isStatic: true,
|
||||
beadUp: 0,
|
||||
beadDown: 0,
|
||||
beadData: new Map(),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(2);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resetEdgeBeads", () => {
|
||||
it("resets all bead data on all edges", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
data: {
|
||||
beadUp: 5,
|
||||
beadDown: 3,
|
||||
beadData: new Map([["exec-1", "COMPLETED"]]),
|
||||
},
|
||||
}),
|
||||
makeEdge({
|
||||
id: "e2",
|
||||
data: {
|
||||
beadUp: 2,
|
||||
beadDown: 1,
|
||||
beadData: new Map([["exec-2", "INCOMPLETE"]]),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().resetEdgeBeads();
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
for (const edge of edges) {
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
expect(edge.data?.beadData?.size).toBe(0);
|
||||
}
|
||||
});
|
||||
|
||||
it("preserves other edge data when resetting beads", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
data: {
|
||||
isStatic: true,
|
||||
edgeColorClass: "text-red-500",
|
||||
beadUp: 3,
|
||||
beadDown: 2,
|
||||
beadData: new Map(),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().resetEdgeBeads();
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.isStatic).toBe(true);
|
||||
expect(edge.data?.edgeColorClass).toBe("text-red-500");
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,347 +0,0 @@
|
||||
import { describe, it, expect, beforeEach } from "vitest";
|
||||
import { useGraphStore } from "../stores/graphStore";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
|
||||
|
||||
function createTestGraphMeta(
|
||||
overrides: Partial<GraphMeta> & { id: string; name: string },
|
||||
): GraphMeta {
|
||||
return {
|
||||
version: 1,
|
||||
description: "",
|
||||
is_active: true,
|
||||
user_id: "test-user",
|
||||
created_at: new Date("2024-01-01T00:00:00Z"),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function resetStore() {
|
||||
useGraphStore.setState({
|
||||
graphExecutionStatus: undefined,
|
||||
isGraphRunning: false,
|
||||
inputSchema: null,
|
||||
credentialsInputSchema: null,
|
||||
outputSchema: null,
|
||||
availableSubGraphs: [],
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
resetStore();
|
||||
});
|
||||
|
||||
describe("graphStore", () => {
|
||||
describe("execution status transitions", () => {
|
||||
it("handles QUEUED -> RUNNING -> COMPLETED transition", () => {
|
||||
const { setGraphExecutionStatus } = useGraphStore.getState();
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.QUEUED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.RUNNING,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.COMPLETED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.COMPLETED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("handles QUEUED -> RUNNING -> FAILED transition", () => {
|
||||
const { setGraphExecutionStatus } = useGraphStore.getState();
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.FAILED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.FAILED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setGraphExecutionStatus auto-sets isGraphRunning", () => {
|
||||
it("sets isGraphRunning to true for RUNNING", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to true for QUEUED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for COMPLETED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.COMPLETED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for FAILED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.FAILED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for TERMINATED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.TERMINATED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for INCOMPLETE", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.INCOMPLETE);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for undefined", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore.getState().setGraphExecutionStatus(undefined);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBeUndefined();
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setIsGraphRunning", () => {
|
||||
it("sets isGraphRunning independently of status", () => {
|
||||
useGraphStore.getState().setIsGraphRunning(true);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore.getState().setIsGraphRunning(false);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("schema management", () => {
|
||||
it("sets all three schemas via setGraphSchemas", () => {
|
||||
const input = { properties: { prompt: { type: "string" } } };
|
||||
const credentials = { properties: { apiKey: { type: "string" } } };
|
||||
const output = { properties: { result: { type: "string" } } };
|
||||
|
||||
useGraphStore.getState().setGraphSchemas(input, credentials, output);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(input);
|
||||
expect(state.credentialsInputSchema).toEqual(credentials);
|
||||
expect(state.outputSchema).toEqual(output);
|
||||
});
|
||||
|
||||
it("sets schemas to null", () => {
|
||||
const input = { properties: { prompt: { type: "string" } } };
|
||||
useGraphStore.getState().setGraphSchemas(input, null, null);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(input);
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
expect(state.outputSchema).toBeNull();
|
||||
});
|
||||
|
||||
it("overwrites previous schemas", () => {
|
||||
const first = { properties: { a: { type: "string" } } };
|
||||
const second = { properties: { b: { type: "number" } } };
|
||||
|
||||
useGraphStore.getState().setGraphSchemas(first, first, first);
|
||||
useGraphStore.getState().setGraphSchemas(second, null, second);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(second);
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
expect(state.outputSchema).toEqual(second);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasInputs", () => {
|
||||
it("returns false when inputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when inputSchema has no properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas({}, null, null);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when inputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas({ properties: {} }, null, null);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when inputSchema has properties", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
{ properties: { prompt: { type: "string" } } },
|
||||
null,
|
||||
null,
|
||||
);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasCredentials", () => {
|
||||
it("returns false when credentialsInputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when credentialsInputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, { properties: {} }, null);
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when credentialsInputSchema has properties", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
null,
|
||||
{ properties: { apiKey: { type: "string" } } },
|
||||
null,
|
||||
);
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasOutputs", () => {
|
||||
it("returns false when outputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when outputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, null, { properties: {} });
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when outputSchema has properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, null, {
|
||||
properties: { result: { type: "string" } },
|
||||
});
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("reset", () => {
|
||||
it("clears execution status and schemas but preserves outputSchema and availableSubGraphs", () => {
|
||||
const subGraphs: GraphMeta[] = [
|
||||
createTestGraphMeta({
|
||||
id: "sub-1",
|
||||
name: "Sub Graph",
|
||||
description: "A sub graph",
|
||||
}),
|
||||
];
|
||||
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
{ properties: { a: {} } },
|
||||
{ properties: { b: {} } },
|
||||
{ properties: { c: {} } },
|
||||
);
|
||||
useGraphStore.getState().setAvailableSubGraphs(subGraphs);
|
||||
|
||||
useGraphStore.getState().reset();
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.graphExecutionStatus).toBeUndefined();
|
||||
expect(state.isGraphRunning).toBe(false);
|
||||
expect(state.inputSchema).toBeNull();
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
// reset does not clear outputSchema or availableSubGraphs
|
||||
expect(state.outputSchema).toEqual({ properties: { c: {} } });
|
||||
expect(state.availableSubGraphs).toEqual(subGraphs);
|
||||
});
|
||||
|
||||
it("is idempotent on fresh state", () => {
|
||||
useGraphStore.getState().reset();
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.graphExecutionStatus).toBeUndefined();
|
||||
expect(state.isGraphRunning).toBe(false);
|
||||
expect(state.inputSchema).toBeNull();
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("setAvailableSubGraphs", () => {
|
||||
it("sets sub-graphs list", () => {
|
||||
const graphs: GraphMeta[] = [
|
||||
createTestGraphMeta({
|
||||
id: "graph-1",
|
||||
name: "Graph One",
|
||||
description: "First graph",
|
||||
}),
|
||||
createTestGraphMeta({
|
||||
id: "graph-2",
|
||||
version: 2,
|
||||
name: "Graph Two",
|
||||
description: "Second graph",
|
||||
}),
|
||||
];
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(graphs);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual(graphs);
|
||||
});
|
||||
|
||||
it("replaces previous sub-graphs", () => {
|
||||
const first: GraphMeta[] = [createTestGraphMeta({ id: "a", name: "A" })];
|
||||
const second: GraphMeta[] = [
|
||||
createTestGraphMeta({ id: "b", name: "B" }),
|
||||
createTestGraphMeta({ id: "c", name: "C" }),
|
||||
];
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(first);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toHaveLength(1);
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(second);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toHaveLength(2);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual(second);
|
||||
});
|
||||
|
||||
it("can set empty sub-graphs list", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setAvailableSubGraphs([createTestGraphMeta({ id: "x", name: "X" })]);
|
||||
useGraphStore.getState().setAvailableSubGraphs([]);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,407 +0,0 @@
|
||||
import { describe, it, expect, beforeEach } from "vitest";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom" as const,
|
||||
position: { x: 0, y: 0 },
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: "STANDARD" as never,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
},
|
||||
...overrides,
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
function createTestEdge(
|
||||
id: string,
|
||||
source: string,
|
||||
target: string,
|
||||
): CustomEdge {
|
||||
return {
|
||||
id,
|
||||
source,
|
||||
target,
|
||||
type: "custom" as const,
|
||||
} as CustomEdge;
|
||||
}
|
||||
|
||||
async function flushMicrotasks() {
|
||||
await new Promise<void>((resolve) => queueMicrotask(resolve));
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
useHistoryStore.getState().clear();
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
});
|
||||
|
||||
describe("historyStore", () => {
|
||||
describe("undo/redo single action", () => {
|
||||
it("undoes a single pushed state", async () => {
|
||||
const node = createTestNode("1");
|
||||
|
||||
// Initialize history with node present as baseline
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Simulate a change: clear nodes
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
|
||||
// Undo should restore to [node]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
expect(useHistoryStore.getState().future).toHaveLength(1);
|
||||
expect(useHistoryStore.getState().future[0].nodes).toEqual([]);
|
||||
});
|
||||
|
||||
it("redoes after undo", async () => {
|
||||
const node = createTestNode("1");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Change: clear nodes
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
|
||||
// Undo → back to [node]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
|
||||
// Redo → back to []
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("undo/redo multiple actions", () => {
|
||||
it("undoes through multiple states in order", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
// Initialize with [node1] as baseline
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Second change: add node2, push pre-change state
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Third change: add node3, push pre-change state
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
useHistoryStore
|
||||
.getState()
|
||||
.pushState({ nodes: [node1, node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Undo 1: back to [node1, node2]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
// Undo 2: back to [node1]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("undo past empty history", () => {
|
||||
it("does nothing when there is no history to undo", () => {
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
expect(useEdgeStore.getState().edges).toEqual([]);
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("does nothing when current state equals last past entry", () => {
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(false);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
expect(useHistoryStore.getState().future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("state consistency: undo after node add restores previous, redo restores added", () => {
|
||||
it("undo removes added node, redo restores it", async () => {
|
||||
const node = createTestNode("added");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("history limits", () => {
|
||||
it("does not grow past MAX_HISTORY (50)", async () => {
|
||||
for (let i = 0; i < 60; i++) {
|
||||
const node = createTestNode(`node-${i}`);
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({
|
||||
nodes: [createTestNode(`node-${i - 1}`)],
|
||||
edges: [],
|
||||
});
|
||||
await flushMicrotasks();
|
||||
}
|
||||
|
||||
expect(useHistoryStore.getState().past.length).toBeLessThanOrEqual(50);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("redo does nothing when future is empty", () => {
|
||||
const nodesBefore = useNodeStore.getState().nodes;
|
||||
const edgesBefore = useEdgeStore.getState().edges;
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
|
||||
expect(useNodeStore.getState().nodes).toEqual(nodesBefore);
|
||||
expect(useEdgeStore.getState().edges).toEqual(edgesBefore);
|
||||
});
|
||||
|
||||
it("interleaved undo/redo sequence", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
useHistoryStore.getState().pushState({
|
||||
nodes: [node1, node2],
|
||||
edges: [],
|
||||
});
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2, node3]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("canUndo / canRedo", () => {
|
||||
it("canUndo is false on fresh store", () => {
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(false);
|
||||
});
|
||||
|
||||
it("canUndo is true when current state differs from last past entry", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(true);
|
||||
});
|
||||
|
||||
it("canRedo is false on fresh store", () => {
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(false);
|
||||
});
|
||||
|
||||
it("canRedo is true after undo", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(true);
|
||||
});
|
||||
|
||||
it("canRedo becomes false after redo exhausts future", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
useHistoryStore.getState().redo();
|
||||
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pushState deduplication", () => {
|
||||
it("does not push a state identical to the last past entry", async () => {
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("does not push if state matches current node/edge store state", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("initializeHistory", () => {
|
||||
it("resets history with current node/edge store state", async () => {
|
||||
const node = createTestNode("1");
|
||||
const edge = createTestEdge("e1", "1", "2");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useEdgeStore.setState({ edges: [edge] });
|
||||
|
||||
useNodeStore.setState({ nodes: [node, createTestNode("2")] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node], edges: [edge] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
const { past, future } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(1);
|
||||
expect(past[0].nodes).toEqual(useNodeStore.getState().nodes);
|
||||
expect(past[0].edges).toEqual(useEdgeStore.getState().edges);
|
||||
expect(future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("clear", () => {
|
||||
it("resets to empty initial state", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().clear();
|
||||
|
||||
const { past, future } = useHistoryStore.getState();
|
||||
expect(past).toEqual([{ nodes: [], edges: [] }]);
|
||||
expect(future).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("microtask batching", () => {
|
||||
it("only commits the first state when multiple pushState calls happen in the same tick", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node2], edges: [] });
|
||||
useHistoryStore
|
||||
.getState()
|
||||
.pushState({ nodes: [node1, node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
const { past } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(2);
|
||||
expect(past[1].nodes).toEqual([node1]);
|
||||
});
|
||||
|
||||
it("commits separately when pushState calls are in different ticks", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
const { past } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(3);
|
||||
expect(past[1].nodes).toEqual([node1]);
|
||||
expect(past[2].nodes).toEqual([node2]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edges in undo/redo", () => {
|
||||
it("restores edges on undo and redo", async () => {
|
||||
const edge = createTestEdge("e1", "1", "2");
|
||||
useEdgeStore.setState({ edges: [edge] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useEdgeStore.getState().edges).toEqual([]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useEdgeStore.getState().edges).toEqual([edge]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pushState clears future", () => {
|
||||
it("clears future when a new state is pushed after undo", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
// Initialize empty
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// First change: set [node1]
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
|
||||
// Second change: set [node1, node2], push pre-change [node1]
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Undo: back to [node1]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useHistoryStore.getState().future).toHaveLength(1);
|
||||
|
||||
// New diverging change: add node3 instead of node2
|
||||
useNodeStore.setState({ nodes: [node1, node3] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,791 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { BlockUIType } from "../components/types";
|
||||
import type { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { CustomNodeData } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
|
||||
function createTestNode(overrides: {
|
||||
id: string;
|
||||
position?: { x: number; y: number };
|
||||
data?: Partial<CustomNodeData>;
|
||||
}): CustomNode {
|
||||
const defaults: CustomNodeData = {
|
||||
hardcodedValues: {},
|
||||
title: "Test Block",
|
||||
description: "A test block",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: "test-block-id",
|
||||
costs: [],
|
||||
categories: [],
|
||||
};
|
||||
|
||||
return {
|
||||
id: overrides.id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 0, y: 0 },
|
||||
data: { ...defaults, ...overrides.data },
|
||||
};
|
||||
}
|
||||
|
||||
function createExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult> = {},
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
node_exec_id: overrides.node_exec_id ?? "exec-1",
|
||||
node_id: overrides.node_id ?? "1",
|
||||
graph_exec_id: overrides.graph_exec_id ?? "graph-exec-1",
|
||||
graph_id: overrides.graph_id ?? "graph-1",
|
||||
graph_version: overrides.graph_version ?? 1,
|
||||
user_id: overrides.user_id ?? "test-user",
|
||||
block_id: overrides.block_id ?? "block-1",
|
||||
status: overrides.status ?? "COMPLETED",
|
||||
input_data: overrides.input_data ?? { input_key: "input_value" },
|
||||
output_data: overrides.output_data ?? { output_key: ["output_value"] },
|
||||
add_time: overrides.add_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
queue_time: overrides.queue_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
start_time: overrides.start_time ?? new Date("2024-01-01T00:00:01Z"),
|
||||
end_time: overrides.end_time ?? new Date("2024-01-01T00:00:02Z"),
|
||||
};
|
||||
}
|
||||
|
||||
function resetStores() {
|
||||
useNodeStore.setState({
|
||||
nodes: [],
|
||||
nodeCounter: 0,
|
||||
nodeAdvancedStates: {},
|
||||
latestNodeInputData: {},
|
||||
latestNodeOutputData: {},
|
||||
accumulatedNodeInputData: {},
|
||||
accumulatedNodeOutputData: {},
|
||||
nodesInResolutionMode: new Set(),
|
||||
brokenEdgeIDs: new Map(),
|
||||
nodeResolutionData: new Map(),
|
||||
});
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.setState({ past: [], future: [] });
|
||||
}
|
||||
|
||||
describe("nodeStore", () => {
|
||||
beforeEach(() => {
|
||||
resetStores();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe("node lifecycle", () => {
|
||||
it("starts with empty nodes", () => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toEqual([]);
|
||||
});
|
||||
|
||||
it("adds a single node with addNode", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().addNode(node);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
|
||||
it("sets nodes with setNodes, replacing existing ones", () => {
|
||||
const node1 = createTestNode({ id: "1" });
|
||||
const node2 = createTestNode({ id: "2" });
|
||||
useNodeStore.getState().addNode(node1);
|
||||
|
||||
useNodeStore.getState().setNodes([node2]);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("2");
|
||||
});
|
||||
|
||||
it("removes nodes via onNodesChange", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().setNodes([node]);
|
||||
|
||||
useNodeStore.getState().onNodesChange([{ type: "remove", id: "1" }]);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("updates node data with updateNodeData", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().addNode(node);
|
||||
|
||||
useNodeStore.getState().updateNodeData("1", { title: "Updated Title" });
|
||||
|
||||
const updated = useNodeStore.getState().nodes[0];
|
||||
expect(updated.data.title).toBe("Updated Title");
|
||||
expect(updated.data.block_id).toBe("test-block-id");
|
||||
});
|
||||
|
||||
it("updateNodeData does not affect other nodes", () => {
|
||||
const node1 = createTestNode({ id: "1" });
|
||||
const node2 = createTestNode({
|
||||
id: "2",
|
||||
data: { title: "Node 2" },
|
||||
});
|
||||
useNodeStore.getState().setNodes([node1, node2]);
|
||||
|
||||
useNodeStore.getState().updateNodeData("1", { title: "Changed" });
|
||||
|
||||
expect(useNodeStore.getState().nodes[1].data.title).toBe("Node 2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("bulk operations", () => {
|
||||
it("adds multiple nodes with addNodes", () => {
|
||||
const nodes = [
|
||||
createTestNode({ id: "1" }),
|
||||
createTestNode({ id: "2" }),
|
||||
createTestNode({ id: "3" }),
|
||||
];
|
||||
useNodeStore.getState().addNodes(nodes);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(3);
|
||||
});
|
||||
|
||||
it("removes multiple nodes via onNodesChange", () => {
|
||||
const nodes = [
|
||||
createTestNode({ id: "1" }),
|
||||
createTestNode({ id: "2" }),
|
||||
createTestNode({ id: "3" }),
|
||||
];
|
||||
useNodeStore.getState().setNodes(nodes);
|
||||
|
||||
useNodeStore.getState().onNodesChange([
|
||||
{ type: "remove", id: "1" },
|
||||
{ type: "remove", id: "3" },
|
||||
]);
|
||||
|
||||
const remaining = useNodeStore.getState().nodes;
|
||||
expect(remaining).toHaveLength(1);
|
||||
expect(remaining[0].id).toBe("2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("nodeCounter", () => {
|
||||
it("starts at zero", () => {
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(0);
|
||||
});
|
||||
|
||||
it("increments the counter", () => {
|
||||
useNodeStore.getState().incrementNodeCounter();
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(1);
|
||||
|
||||
useNodeStore.getState().incrementNodeCounter();
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(2);
|
||||
});
|
||||
|
||||
it("sets the counter to a specific value", () => {
|
||||
useNodeStore.getState().setNodeCounter(42);
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(42);
|
||||
});
|
||||
});
|
||||
|
||||
describe("advanced states", () => {
|
||||
it("defaults to false for unknown node IDs", () => {
|
||||
expect(useNodeStore.getState().getShowAdvanced("unknown")).toBe(false);
|
||||
});
|
||||
|
||||
it("toggles advanced state", () => {
|
||||
useNodeStore.getState().toggleAdvanced("node-1");
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().toggleAdvanced("node-1");
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(false);
|
||||
});
|
||||
|
||||
it("sets advanced state explicitly", () => {
|
||||
useNodeStore.getState().setShowAdvanced("node-1", true);
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().setShowAdvanced("node-1", false);
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("convertCustomNodeToBackendNode", () => {
|
||||
it("converts a node with minimal data", () => {
|
||||
const node = createTestNode({
|
||||
id: "42",
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.id).toBe("42");
|
||||
expect(backend.block_id).toBe("test-block-id");
|
||||
expect(backend.input_default).toEqual({});
|
||||
expect(backend.metadata).toEqual({ position: { x: 100, y: 200 } });
|
||||
});
|
||||
|
||||
it("includes customized_name when present in metadata", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
metadata: { customized_name: "My Custom Name" },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.metadata).toHaveProperty(
|
||||
"customized_name",
|
||||
"My Custom Name",
|
||||
);
|
||||
});
|
||||
|
||||
it("includes credentials_optional when present in metadata", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
metadata: { credentials_optional: true },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.metadata).toHaveProperty("credentials_optional", true);
|
||||
});
|
||||
|
||||
it("prunes empty values from hardcodedValues", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
hardcodedValues: { filled: "value", empty: "" },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.input_default).toEqual({ filled: "value" });
|
||||
expect(backend.input_default).not.toHaveProperty("empty");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getBackendNodes", () => {
|
||||
it("converts all nodes to backend format", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([
|
||||
createTestNode({ id: "1", position: { x: 0, y: 0 } }),
|
||||
createTestNode({ id: "2", position: { x: 100, y: 100 } }),
|
||||
]);
|
||||
|
||||
const backendNodes = useNodeStore.getState().getBackendNodes();
|
||||
|
||||
expect(backendNodes).toHaveLength(2);
|
||||
expect(backendNodes[0].id).toBe("1");
|
||||
expect(backendNodes[1].id).toBe("2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("node status", () => {
|
||||
it("returns undefined for a node with no status", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("updates node status", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBe("RUNNING");
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "COMPLETED");
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBe("COMPLETED");
|
||||
});
|
||||
|
||||
it("cleans all node statuses", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
useNodeStore.getState().updateNodeStatus("2", "COMPLETED");
|
||||
|
||||
useNodeStore.getState().cleanNodesStatuses();
|
||||
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeStatus("2")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("updating status for non-existent node does not crash", () => {
|
||||
useNodeStore.getState().updateNodeStatus("nonexistent", "RUNNING");
|
||||
expect(
|
||||
useNodeStore.getState().getNodeStatus("nonexistent"),
|
||||
).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("execution result tracking", () => {
|
||||
it("returns empty array for node with no results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toEqual([]);
|
||||
});
|
||||
|
||||
it("tracks a single execution result", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
const result = createExecutionResult({ node_id: "1" });
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult("1", result);
|
||||
|
||||
const results = useNodeStore.getState().getNodeExecutionResults("1");
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].node_exec_id).toBe("exec-1");
|
||||
});
|
||||
|
||||
it("accumulates multiple execution results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "val1" },
|
||||
output_data: { key: ["out1"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "val2" },
|
||||
output_data: { key: ["out2"] },
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toHaveLength(
|
||||
2,
|
||||
);
|
||||
});
|
||||
|
||||
it("updates latest input/output data", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "first" },
|
||||
output_data: { key: ["first_out"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "second" },
|
||||
output_data: { key: ["second_out"] },
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getLatestNodeInputData("1")).toEqual({
|
||||
key: "second",
|
||||
});
|
||||
expect(useNodeStore.getState().getLatestNodeOutputData("1")).toEqual({
|
||||
key: ["second_out"],
|
||||
});
|
||||
});
|
||||
|
||||
it("accumulates input/output data across results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "val1" },
|
||||
output_data: { key: ["out1"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "val2" },
|
||||
output_data: { key: ["out2"] },
|
||||
}),
|
||||
);
|
||||
|
||||
const accInput = useNodeStore.getState().getAccumulatedNodeInputData("1");
|
||||
expect(accInput.key).toEqual(["val1", "val2"]);
|
||||
|
||||
const accOutput = useNodeStore
|
||||
.getState()
|
||||
.getAccumulatedNodeOutputData("1");
|
||||
expect(accOutput.key).toEqual(["out1", "out2"]);
|
||||
});
|
||||
|
||||
it("deduplicates execution results by node_exec_id", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "original" },
|
||||
output_data: { key: ["original_out"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "updated" },
|
||||
output_data: { key: ["updated_out"] },
|
||||
}),
|
||||
);
|
||||
|
||||
const results = useNodeStore.getState().getNodeExecutionResults("1");
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].input_data).toEqual({ key: "updated" });
|
||||
});
|
||||
|
||||
it("returns the latest execution result", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-2" }),
|
||||
);
|
||||
|
||||
const latest = useNodeStore.getState().getLatestNodeExecutionResult("1");
|
||||
expect(latest?.node_exec_id).toBe("exec-2");
|
||||
});
|
||||
|
||||
it("returns undefined for latest result on unknown node", () => {
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeExecutionResult("unknown"),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("clears all execution results", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"2",
|
||||
createExecutionResult({ node_exec_id: "exec-2" }),
|
||||
);
|
||||
|
||||
useNodeStore.getState().clearAllNodeExecutionResults();
|
||||
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toEqual([]);
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("2")).toEqual([]);
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeInputData("1"),
|
||||
).toBeUndefined();
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeOutputData("1"),
|
||||
).toBeUndefined();
|
||||
expect(useNodeStore.getState().getAccumulatedNodeInputData("1")).toEqual(
|
||||
{},
|
||||
);
|
||||
expect(useNodeStore.getState().getAccumulatedNodeOutputData("1")).toEqual(
|
||||
{},
|
||||
);
|
||||
});
|
||||
|
||||
it("returns empty object for accumulated data on unknown node", () => {
|
||||
expect(
|
||||
useNodeStore.getState().getAccumulatedNodeInputData("unknown"),
|
||||
).toEqual({});
|
||||
expect(
|
||||
useNodeStore.getState().getAccumulatedNodeOutputData("unknown"),
|
||||
).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("getNodeBlockUIType", () => {
|
||||
it("returns the node UI type", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.INPUT,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getNodeBlockUIType("1")).toBe(
|
||||
BlockUIType.INPUT,
|
||||
);
|
||||
});
|
||||
|
||||
it("defaults to STANDARD for unknown node IDs", () => {
|
||||
expect(useNodeStore.getState().getNodeBlockUIType("unknown")).toBe(
|
||||
BlockUIType.STANDARD,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasWebhookNodes", () => {
|
||||
it("returns false when there are no webhook nodes", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when a WEBHOOK node exists", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.WEBHOOK,
|
||||
},
|
||||
}),
|
||||
);
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true when a WEBHOOK_MANUAL node exists", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.WEBHOOK_MANUAL,
|
||||
},
|
||||
}),
|
||||
);
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("node errors", () => {
|
||||
it("returns undefined for a node with no errors", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets and retrieves node errors", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
const errors = { field1: "required", field2: "invalid" };
|
||||
useNodeStore.getState().updateNodeErrors("1", errors);
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toEqual(errors);
|
||||
});
|
||||
|
||||
it("clears errors for a specific node", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeErrors("1", { f: "err" });
|
||||
useNodeStore.getState().updateNodeErrors("2", { g: "err2" });
|
||||
|
||||
useNodeStore.getState().clearNodeErrors("1");
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeErrors("2")).toEqual({ g: "err2" });
|
||||
});
|
||||
|
||||
it("clears all node errors", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeErrors("1", { a: "err1" });
|
||||
useNodeStore.getState().updateNodeErrors("2", { b: "err2" });
|
||||
|
||||
useNodeStore.getState().clearAllNodeErrors();
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeErrors("2")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets errors by backend ID matching node id", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "backend-1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodeErrorsForBackendId("backend-1", { x: "error" });
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("backend-1")).toEqual({
|
||||
x: "error",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("getHardCodedValues", () => {
|
||||
it("returns hardcoded values for a node", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
hardcodedValues: { key: "value" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getHardCodedValues("1")).toEqual({
|
||||
key: "value",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns empty object for unknown node", () => {
|
||||
expect(useNodeStore.getState().getHardCodedValues("unknown")).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("credentials optional", () => {
|
||||
it("sets credentials_optional in node metadata", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().setCredentialsOptional("1", true);
|
||||
|
||||
const node = useNodeStore.getState().nodes[0];
|
||||
expect(node.data.metadata?.credentials_optional).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolution mode", () => {
|
||||
it("defaults to not in resolution mode", () => {
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
});
|
||||
|
||||
it("enters and exits resolution mode", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().setNodeResolutionMode("1", false);
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
});
|
||||
|
||||
it("tracks broken edge IDs", () => {
|
||||
useNodeStore.getState().setBrokenEdgeIDs("node-1", ["edge-1", "edge-2"]);
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(true);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-2")).toBe(true);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-3")).toBe(false);
|
||||
});
|
||||
|
||||
it("removes individual broken edge IDs", () => {
|
||||
useNodeStore.getState().setBrokenEdgeIDs("node-1", ["edge-1", "edge-2"]);
|
||||
useNodeStore.getState().removeBrokenEdgeID("node-1", "edge-1");
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-2")).toBe(true);
|
||||
});
|
||||
|
||||
it("clears all resolution state", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
useNodeStore.getState().setBrokenEdgeIDs("1", ["edge-1"]);
|
||||
|
||||
useNodeStore.getState().clearResolutionState();
|
||||
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
});
|
||||
|
||||
it("cleans up broken edges when exiting resolution mode", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
useNodeStore.getState().setBrokenEdgeIDs("1", ["edge-1"]);
|
||||
|
||||
useNodeStore.getState().setNodeResolutionMode("1", false);
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("handles updating data on a non-existent node gracefully", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeData("nonexistent", { title: "New Title" });
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("handles removing a non-existent node gracefully", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.onNodesChange([{ type: "remove", id: "nonexistent" }]);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("handles duplicate node IDs in addNodes", () => {
|
||||
useNodeStore.getState().addNodes([
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: { title: "First" },
|
||||
}),
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: { title: "Second" },
|
||||
}),
|
||||
]);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
expect(nodes[0].data.title).toBe("First");
|
||||
expect(nodes[1].data.title).toBe("Second");
|
||||
});
|
||||
|
||||
it("updating node status mid-execution preserves other data", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
title: "My Node",
|
||||
hardcodedValues: { key: "val" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
|
||||
const node = useNodeStore.getState().nodes[0];
|
||||
expect(node.data.status).toBe("RUNNING");
|
||||
expect(node.data.title).toBe("My Node");
|
||||
expect(node.data.hardcodedValues).toEqual({ key: "val" });
|
||||
});
|
||||
|
||||
it("execution result for non-existent node does not add it", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"nonexistent",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
expect(
|
||||
useNodeStore.getState().getNodeExecutionResults("nonexistent"),
|
||||
).toEqual([]);
|
||||
});
|
||||
|
||||
it("getBackendNodes returns empty array when no nodes exist", () => {
|
||||
expect(useNodeStore.getState().getBackendNodes()).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,567 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { renderHook, act } from "@testing-library/react";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { BlockUIType } from "../components/types";
|
||||
|
||||
// ---- Mocks ----
|
||||
|
||||
const mockGetViewport = vi.fn(() => ({ x: 0, y: 0, zoom: 1 }));
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: vi.fn(() => ({
|
||||
getViewport: mockGetViewport,
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const mockToast = vi.fn();
|
||||
|
||||
vi.mock("@/components/molecules/Toast/use-toast", () => ({
|
||||
useToast: vi.fn(() => ({ toast: mockToast })),
|
||||
}));
|
||||
|
||||
let uuidCounter = 0;
|
||||
vi.mock("uuid", () => ({
|
||||
v4: vi.fn(() => `new-uuid-${++uuidCounter}`),
|
||||
}));
|
||||
|
||||
// Mock navigator.clipboard
|
||||
const mockWriteText = vi.fn(() => Promise.resolve());
|
||||
const mockReadText = vi.fn(() => Promise.resolve(""));
|
||||
|
||||
Object.defineProperty(navigator, "clipboard", {
|
||||
value: {
|
||||
writeText: mockWriteText,
|
||||
readText: mockReadText,
|
||||
},
|
||||
writable: true,
|
||||
configurable: true,
|
||||
});
|
||||
|
||||
// Mock window.innerWidth / innerHeight for viewport centering calculations
|
||||
Object.defineProperty(window, "innerWidth", { value: 1000, writable: true });
|
||||
Object.defineProperty(window, "innerHeight", { value: 800, writable: true });
|
||||
|
||||
import { useCopyPaste } from "../components/FlowEditor/Flow/useCopyPaste";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
|
||||
const CLIPBOARD_PREFIX = "autogpt-flow-data:";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 100, y: 200 },
|
||||
selected: overrides.selected,
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "test node",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides.data,
|
||||
},
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
function createTestEdge(
|
||||
id: string,
|
||||
source: string,
|
||||
target: string,
|
||||
sourceHandle = "out",
|
||||
targetHandle = "in",
|
||||
): CustomEdge {
|
||||
return {
|
||||
id,
|
||||
source,
|
||||
target,
|
||||
sourceHandle,
|
||||
targetHandle,
|
||||
} as CustomEdge;
|
||||
}
|
||||
|
||||
function makeCopyEvent(): KeyboardEvent {
|
||||
return new KeyboardEvent("keydown", {
|
||||
key: "c",
|
||||
ctrlKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
}
|
||||
|
||||
function makePasteEvent(): KeyboardEvent {
|
||||
return new KeyboardEvent("keydown", {
|
||||
key: "v",
|
||||
ctrlKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
}
|
||||
|
||||
function clipboardPayload(nodes: CustomNode[], edges: CustomEdge[]): string {
|
||||
return `${CLIPBOARD_PREFIX}${JSON.stringify({ nodes, edges })}`;
|
||||
}
|
||||
|
||||
describe("useCopyPaste", () => {
|
||||
beforeEach(() => {
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.getState().clear();
|
||||
mockWriteText.mockClear();
|
||||
mockReadText.mockClear();
|
||||
mockToast.mockClear();
|
||||
mockGetViewport.mockReturnValue({ x: 0, y: 0, zoom: 1 });
|
||||
uuidCounter = 0;
|
||||
|
||||
// Ensure no input element is focused
|
||||
if (document.activeElement && document.activeElement !== document.body) {
|
||||
(document.activeElement as HTMLElement).blur();
|
||||
}
|
||||
});
|
||||
|
||||
describe("copy (Ctrl+C)", () => {
|
||||
it("copies a single selected node to clipboard with prefix", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const written = (mockWriteText.mock.calls as string[][])[0][0];
|
||||
expect(written.startsWith(CLIPBOARD_PREFIX)).toBe(true);
|
||||
|
||||
const parsed = JSON.parse(written.slice(CLIPBOARD_PREFIX.length));
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.nodes[0].id).toBe("1");
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("shows a success toast after copying", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockToast).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
title: "Copied successfully",
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("copies multiple connected nodes and preserves internal edges", async () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: true });
|
||||
const nodeC = createTestNode("c", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB, nodeC] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
createTestEdge("e-ab", "a", "b"),
|
||||
createTestEdge("e-bc", "b", "c"),
|
||||
],
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(2);
|
||||
expect(parsed.edges).toHaveLength(1);
|
||||
expect(parsed.edges[0].id).toBe("e-ab");
|
||||
});
|
||||
|
||||
it("drops external edges where one endpoint is not selected", async () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [createTestEdge("e-ab", "a", "b")],
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("copies nothing when no nodes are selected", async () => {
|
||||
const node = createTestNode("1", { selected: false });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(0);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("paste (Ctrl+V)", () => {
|
||||
it("creates new nodes with new UUIDs", async () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes[0].id).toBe("new-uuid-1");
|
||||
expect(nodes[0].id).not.toBe("orig");
|
||||
});
|
||||
|
||||
it("centers pasted nodes in the current viewport", async () => {
|
||||
// Viewport at origin, zoom 1 => center = (500, 400)
|
||||
mockGetViewport.mockReturnValue({ x: 0, y: 0, zoom: 1 });
|
||||
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
// Single node: center of bounds = (100, 100)
|
||||
// Viewport center = (500, 400)
|
||||
// Offset = (400, 300)
|
||||
// New position = (100 + 400, 100 + 300) = (500, 400)
|
||||
expect(nodes[0].position).toEqual({ x: 500, y: 400 });
|
||||
});
|
||||
|
||||
it("deselects existing nodes and selects pasted nodes", async () => {
|
||||
const existingNode = createTestNode("existing", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const nodeToPaste = createTestNode("paste-me", {
|
||||
selected: false,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([nodeToPaste], []));
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const originalNode = nodes.find((n) => n.id === "existing");
|
||||
const pastedNode = nodes.find((n) => n.id !== "existing");
|
||||
|
||||
expect(originalNode!.selected).toBe(false);
|
||||
expect(pastedNode!.selected).toBe(true);
|
||||
});
|
||||
|
||||
it("remaps edge source/target IDs to newly created node IDs", async () => {
|
||||
const nodeA = createTestNode("a", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeB = createTestNode("b", {
|
||||
selected: true,
|
||||
position: { x: 200, y: 0 },
|
||||
});
|
||||
const edge = createTestEdge("e-ab", "a", "b", "output", "input");
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([nodeA, nodeB], [edge]));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
});
|
||||
|
||||
// Wait for edges to be added too
|
||||
await vi.waitFor(() => {
|
||||
const { edges } = useEdgeStore.getState();
|
||||
expect(edges).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { edges } = useEdgeStore.getState();
|
||||
const newEdge = edges[0];
|
||||
|
||||
// Edge source/target should be remapped to new UUIDs, not "a"/"b"
|
||||
expect(newEdge.source).not.toBe("a");
|
||||
expect(newEdge.target).not.toBe("b");
|
||||
expect(newEdge.source).toBe("new-uuid-1");
|
||||
expect(newEdge.target).toBe("new-uuid-2");
|
||||
expect(newEdge.sourceHandle).toBe("output");
|
||||
expect(newEdge.targetHandle).toBe("input");
|
||||
});
|
||||
|
||||
it("does nothing when clipboard does not have the expected prefix", async () => {
|
||||
mockReadText.mockResolvedValue("some random text");
|
||||
|
||||
const existingNode = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
// Give async operations time to settle
|
||||
await vi.waitFor(() => {
|
||||
expect(mockReadText).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// Ensure no state changes happen after clipboard read
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
|
||||
it("does nothing when clipboard is empty", async () => {
|
||||
mockReadText.mockResolvedValue("");
|
||||
|
||||
const existingNode = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockReadText).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// Ensure no state changes happen after clipboard read
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("input field focus guard", () => {
|
||||
it("ignores Ctrl+C when an input element is focused", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const input = document.createElement("input");
|
||||
document.body.appendChild(input);
|
||||
input.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
// Clipboard write should NOT be called
|
||||
expect(mockWriteText).not.toHaveBeenCalled();
|
||||
|
||||
document.body.removeChild(input);
|
||||
});
|
||||
|
||||
it("ignores Ctrl+V when a textarea element is focused", async () => {
|
||||
mockReadText.mockResolvedValue(
|
||||
clipboardPayload(
|
||||
[createTestNode("a", { position: { x: 0, y: 0 } })],
|
||||
[],
|
||||
),
|
||||
);
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const textarea = document.createElement("textarea");
|
||||
document.body.appendChild(textarea);
|
||||
textarea.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
expect(mockReadText).not.toHaveBeenCalled();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(0);
|
||||
|
||||
document.body.removeChild(textarea);
|
||||
});
|
||||
|
||||
it("ignores keypresses when a contenteditable element is focused", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const div = document.createElement("div");
|
||||
div.setAttribute("contenteditable", "true");
|
||||
document.body.appendChild(div);
|
||||
div.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
expect(mockWriteText).not.toHaveBeenCalled();
|
||||
|
||||
document.body.removeChild(div);
|
||||
});
|
||||
});
|
||||
|
||||
describe("meta key support (macOS)", () => {
|
||||
it("handles Cmd+C (metaKey) the same as Ctrl+C", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
const metaCopyEvent = new KeyboardEvent("keydown", {
|
||||
key: "c",
|
||||
metaKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
|
||||
act(() => {
|
||||
result.current(metaCopyEvent);
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
it("handles Cmd+V (metaKey) the same as Ctrl+V", async () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
const metaPasteEvent = new KeyboardEvent("keydown", {
|
||||
key: "v",
|
||||
metaKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
|
||||
act(() => {
|
||||
result.current(metaPasteEvent);
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,134 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { renderHook, act } from "@testing-library/react";
|
||||
|
||||
const mockScreenToFlowPosition = vi.fn((pos: { x: number; y: number }) => pos);
|
||||
const mockFitView = vi.fn();
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: () => ({
|
||||
screenToFlowPosition: mockScreenToFlowPosition,
|
||||
fitView: mockFitView,
|
||||
}),
|
||||
};
|
||||
});
|
||||
|
||||
const mockSetQueryStates = vi.fn();
|
||||
let mockQueryStateValues: {
|
||||
flowID: string | null;
|
||||
flowVersion: number | null;
|
||||
flowExecutionID: string | null;
|
||||
} = {
|
||||
flowID: null,
|
||||
flowVersion: null,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
vi.mock("nuqs", () => ({
|
||||
parseAsString: {},
|
||||
parseAsInteger: {},
|
||||
useQueryStates: vi.fn(() => [mockQueryStateValues, mockSetQueryStates]),
|
||||
}));
|
||||
|
||||
let mockGraphLoading = false;
|
||||
let mockBlocksLoading = false;
|
||||
|
||||
vi.mock("@/app/api/__generated__/endpoints/graphs/graphs", () => ({
|
||||
useGetV1GetSpecificGraph: vi.fn(() => ({
|
||||
data: undefined,
|
||||
isLoading: mockGraphLoading,
|
||||
})),
|
||||
useGetV1GetExecutionDetails: vi.fn(() => ({
|
||||
data: undefined,
|
||||
})),
|
||||
useGetV1ListUserGraphs: vi.fn(() => ({
|
||||
data: undefined,
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@/app/api/__generated__/endpoints/default/default", () => ({
|
||||
useGetV2GetSpecificBlocks: vi.fn(() => ({
|
||||
data: undefined,
|
||||
isLoading: mockBlocksLoading,
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@/app/api/helpers", () => ({
|
||||
okData: (res: { data: unknown }) => res?.data,
|
||||
}));
|
||||
|
||||
vi.mock("../components/helper", () => ({
|
||||
convertNodesPlusBlockInfoIntoCustomNodes: vi.fn(),
|
||||
}));
|
||||
|
||||
describe("useFlow", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.useFakeTimers({ shouldAdvanceTime: true });
|
||||
mockGraphLoading = false;
|
||||
mockBlocksLoading = false;
|
||||
mockQueryStateValues = {
|
||||
flowID: null,
|
||||
flowVersion: null,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe("loading states", () => {
|
||||
it("returns isFlowContentLoading true when graph is loading", async () => {
|
||||
mockGraphLoading = true;
|
||||
mockQueryStateValues = {
|
||||
flowID: "test-flow",
|
||||
flowVersion: 1,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(true);
|
||||
});
|
||||
|
||||
it("returns isFlowContentLoading true when blocks are loading", async () => {
|
||||
mockBlocksLoading = true;
|
||||
mockQueryStateValues = {
|
||||
flowID: "test-flow",
|
||||
flowVersion: 1,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(true);
|
||||
});
|
||||
|
||||
it("returns isFlowContentLoading false when neither is loading", async () => {
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("initial load completion", () => {
|
||||
it("marks initial load complete for new flows without flowID", async () => {
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isInitialLoadComplete).toBe(false);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(300);
|
||||
});
|
||||
|
||||
expect(result.current.isInitialLoadComplete).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,4 +1,3 @@
import { useCopilotUIStore } from "@/app/(platform)/copilot/store";
import { ChangeEvent, FormEvent, useEffect, useState } from "react";

interface Args {
@@ -17,16 +16,6 @@ export function useChatInput({
}: Args) {
  const [value, setValue] = useState("");
  const [isSending, setIsSending] = useState(false);
  const { initialPrompt, setInitialPrompt } = useCopilotUIStore();

  useEffect(
    function consumeInitialPrompt() {
      if (!initialPrompt) return;
      setValue((prev) => (prev.length === 0 ? initialPrompt : prev));
      setInitialPrompt(null);
    },
    [initialPrompt, setInitialPrompt],
  );

  useEffect(
    function focusOnMount() {

@@ -7,10 +7,6 @@ export interface DeleteTarget {
}

interface CopilotUIState {
  /** Prompt extracted from URL hash (e.g. /copilot#prompt=...) for input prefill. */
  initialPrompt: string | null;
  setInitialPrompt: (prompt: string | null) => void;

  sessionToDelete: DeleteTarget | null;
  setSessionToDelete: (target: DeleteTarget | null) => void;

@@ -35,9 +31,6 @@ interface CopilotUIState {
}

export const useCopilotUIStore = create<CopilotUIState>((set) => ({
  initialPrompt: null,
  setInitialPrompt: (prompt) => set({ initialPrompt: prompt }),

  sessionToDelete: null,
  setSessionToDelete: (target) => set({ sessionToDelete: target }),

@@ -19,42 +19,6 @@ import { useCopilotStream } from "./useCopilotStream";
const TITLE_POLL_INTERVAL_MS = 2_000;
const TITLE_POLL_MAX_ATTEMPTS = 5;

/**
 * Extract a prompt from the URL hash fragment.
 * Supports: /copilot#prompt=URL-encoded-text
 * Optionally auto-submits if ?autosubmit=true is in the query string.
 * Returns null if no prompt is present.
 */
function extractPromptFromUrl(): {
  prompt: string;
  autosubmit: boolean;
} | null {
  if (typeof window === "undefined") return null;

  const hash = window.location.hash;
  if (!hash) return null;

  const hashParams = new URLSearchParams(hash.slice(1));
  const prompt = hashParams.get("prompt");

  if (!prompt || !prompt.trim()) return null;

  const searchParams = new URLSearchParams(window.location.search);
  const autosubmit = searchParams.get("autosubmit") === "true";

  // Clean up hash + autosubmit param only (preserve other query params)
  const cleanURL = new URL(window.location.href);
  cleanURL.hash = "";
  cleanURL.searchParams.delete("autosubmit");
  window.history.replaceState(
    null,
    "",
    `${cleanURL.pathname}${cleanURL.search}`,
  );

  return { prompt: prompt.trim(), autosubmit };
}

interface UploadedFile {
  file_id: string;
  name: string;
@@ -163,28 +127,6 @@ export function useCopilotPage() {
    }
  }, [sessionId, pendingMessage, sendMessage]);

  // --- Extract prompt from URL hash on mount (e.g. /copilot#prompt=Hello) ---
  const { setInitialPrompt } = useCopilotUIStore();
  const hasProcessedUrlPrompt = useRef(false);
  useEffect(() => {
    if (hasProcessedUrlPrompt.current) return;

    const urlPrompt = extractPromptFromUrl();
    if (!urlPrompt) return;

    hasProcessedUrlPrompt.current = true;

    if (urlPrompt.autosubmit) {
      setPendingMessage(urlPrompt.prompt);
      void createSession().catch(() => {
        setPendingMessage(null);
        setInitialPrompt(urlPrompt.prompt);
      });
    } else {
      setInitialPrompt(urlPrompt.prompt);
    }
  }, [createSession, setInitialPrompt]);

  async function uploadFiles(
    files: File[],
    sid: string,

@@ -1,4 +1,3 @@
import LibraryImportWorkflowDialog from "../LibraryImportWorkflowDialog/LibraryImportWorkflowDialog";
import { LibrarySearchBar } from "../LibrarySearchBar/LibrarySearchBar";
import LibraryUploadAgentDialog from "../LibraryUploadAgentDialog/LibraryUploadAgentDialog";

@@ -12,14 +11,12 @@ export function LibraryActionHeader({ setSearchTerm }: Props) {
      <div className="mb-[32px] hidden items-center justify-center gap-4 md:flex">
        <LibrarySearchBar setSearchTerm={setSearchTerm} />
        <LibraryUploadAgentDialog />
        <LibraryImportWorkflowDialog />
      </div>

      {/* Mobile and tablet */}
      <div className="flex flex-col gap-4 p-4 pt-[52px] md:hidden">
        <div className="flex w-full justify-between gap-2">
          <div className="flex w-full justify-between">
            <LibraryUploadAgentDialog />
            <LibraryImportWorkflowDialog />
          </div>

          <div className="flex items-center justify-center">

@@ -1,156 +0,0 @@
|
||||
"use client";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import {
|
||||
Form,
|
||||
FormControl,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormMessage,
|
||||
} from "@/components/molecules/Form/Form";
|
||||
import { ArrowsClockwiseIcon } from "@phosphor-icons/react";
|
||||
import { z } from "zod";
|
||||
import { useLibraryImportWorkflowDialog } from "./useLibraryImportWorkflowDialog";
|
||||
|
||||
export const importWorkflowFormSchema = z.object({
|
||||
workflowFile: z.string(),
|
||||
templateUrl: z.string(),
|
||||
});
|
||||
|
||||
export default function LibraryImportWorkflowDialog() {
|
||||
const {
|
||||
onSubmit,
|
||||
isConverting,
|
||||
isOpen,
|
||||
setIsOpen,
|
||||
form,
|
||||
importMode,
|
||||
setImportMode,
|
||||
} = useLibraryImportWorkflowDialog();
|
||||
|
||||
const hasInput =
|
||||
importMode === "url"
|
||||
? !!form.watch("templateUrl")
|
||||
: !!form.watch("workflowFile");
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Import Workflow"
|
||||
styling={{ maxWidth: "32rem" }}
|
||||
controlled={{
|
||||
isOpen,
|
||||
set: setIsOpen,
|
||||
}}
|
||||
onClose={() => {
|
||||
setIsOpen(false);
|
||||
form.reset();
|
||||
}}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
data-testid="import-workflow-button"
|
||||
variant="primary"
|
||||
className="h-[2.78rem] w-full md:w-[14rem]"
|
||||
size="small"
|
||||
>
|
||||
<ArrowsClockwiseIcon width={18} height={18} />
|
||||
<span>Import workflow</span>
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
{/* Mode toggle */}
|
||||
<div className="mb-4 flex gap-2">
|
||||
<Button
|
||||
variant={importMode === "file" ? "primary" : "outline"}
|
||||
size="small"
|
||||
onClick={() => setImportMode("file")}
|
||||
type="button"
|
||||
>
|
||||
Upload file
|
||||
</Button>
|
||||
<Button
|
||||
variant={importMode === "url" ? "primary" : "outline"}
|
||||
size="small"
|
||||
onClick={() => setImportMode("url")}
|
||||
type="button"
|
||||
>
|
||||
Paste URL
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<p className="mb-4 text-sm text-neutral-500">
|
||||
Import workflows from n8n, Make.com, or Zapier. The workflow will be
|
||||
automatically converted to an AutoGPT agent.
|
||||
</p>
|
||||
|
||||
<Form
|
||||
form={form}
|
||||
onSubmit={onSubmit}
|
||||
className="flex flex-col justify-center gap-0 px-1"
|
||||
>
|
||||
{importMode === "file" ? (
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="workflowFile"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormControl>
|
||||
<FileInput
|
||||
mode="base64"
|
||||
value={field.value}
|
||||
onChange={field.onChange}
|
||||
accept=".json,application/json"
|
||||
placeholder="Workflow JSON file (n8n, Make.com, or Zapier export)"
|
||||
maxFileSize={10 * 1024 * 1024}
|
||||
showStorageNote={false}
|
||||
className="mb-4 mt-2"
|
||||
/>
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
) : (
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="templateUrl"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormControl>
|
||||
<Input
|
||||
{...field}
|
||||
id={field.name}
|
||||
label="n8n template URL"
|
||||
placeholder="https://n8n.io/workflows/1234"
|
||||
className="mb-4 mt-2 w-full rounded-[10px]"
|
||||
/>
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
)}
|
||||
|
||||
<Button
|
||||
type="submit"
|
||||
variant="primary"
|
||||
className="min-w-[18rem]"
|
||||
disabled={!hasInput || isConverting}
|
||||
>
|
||||
{isConverting ? (
|
||||
<div className="flex items-center gap-2">
|
||||
<LoadingSpinner size="small" className="text-white" />
|
||||
<span>Parsing workflow...</span>
|
||||
</div>
|
||||
) : (
|
||||
"Import to AutoPilot"
|
||||
)}
|
||||
</Button>
|
||||
</Form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,89 +0,0 @@
import { usePostV2ImportAWorkflowFromAnotherToolN8nMakeComZapier } from "@/app/api/__generated__/endpoints/import/import";
import type { ImportWorkflowRequest } from "@/app/api/__generated__/models/importWorkflowRequest";
import type { ImportWorkflowResponse } from "@/app/api/__generated__/models/importWorkflowResponse";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { zodResolver } from "@hookform/resolvers/zod";
import { useRouter } from "next/navigation";
import { useState } from "react";
import { useForm } from "react-hook-form";
import { z } from "zod";
import { importWorkflowFormSchema } from "./LibraryImportWorkflowDialog";

export function useLibraryImportWorkflowDialog() {
  const [isOpen, setIsOpen] = useState(false);
  const { toast } = useToast();
  const router = useRouter();
  const [importMode, setImportMode] = useState<"file" | "url">("file");

  const { mutateAsync: importWorkflow, isPending: isConverting } =
    usePostV2ImportAWorkflowFromAnotherToolN8nMakeComZapier();

  const form = useForm<z.infer<typeof importWorkflowFormSchema>>({
    resolver: zodResolver(importWorkflowFormSchema),
    defaultValues: {
      workflowFile: "",
      templateUrl: "",
    },
  });

  const onSubmit = async (values: z.infer<typeof importWorkflowFormSchema>) => {
    try {
      let body: ImportWorkflowRequest;

      if (importMode === "url" && values.templateUrl) {
        body = { template_url: values.templateUrl };
      } else if (importMode === "file" && values.workflowFile) {
        // Decode base64 file to JSON
        const base64Match = values.workflowFile.match(
          /^data:[^;]+;base64,(.+)$/,
        );
        if (!base64Match) {
          throw new Error("Invalid file format");
        }
        const jsonString = atob(base64Match[1]);
        const workflowJson = JSON.parse(jsonString);
        body = { workflow_json: workflowJson };
      } else {
        throw new Error("Please provide a workflow file or template URL");
      }

      const response = await importWorkflow({ data: body });
      // Cast needed: generated client returns union with error types,
      // but errors throw before reaching here
      const data = response.data as ImportWorkflowResponse;

      setIsOpen(false);
      form.reset();

      toast({
        title: "Workflow Parsed",
        description: `Detected ${data.source_format} workflow "${data.source_name}". Redirecting to AutoPilot...`,
      });

      // Redirect to AutoPilot with the prompt pre-filled and auto-submitted
      const encodedPrompt = encodeURIComponent(data.copilot_prompt);
      router.push(`/copilot?autosubmit=true#prompt=${encodedPrompt}`);
    } catch (error) {
      console.error("Import failed:", error);
      toast({
        title: "Import Failed",
        description:
          error instanceof Error
            ? error.message
            : "Failed to parse workflow. Please check the file format.",
        variant: "destructive",
        duration: 5000,
      });
    }
  };

  return {
    onSubmit,
    isConverting,
    isOpen,
    setIsOpen,
    form,
    importMode,
    setImportMode,
  };
}
@@ -2920,46 +2920,6 @@
        }
      }
    },
    "/api/import/workflow": {
      "post": {
        "tags": ["v2", "import"],
        "summary": "Import a workflow from another tool (n8n, Make.com, Zapier)",
        "description": "Parse an external workflow and return a CoPilot prompt.\n\nAccepts either raw workflow JSON or a template URL (n8n only for now).\nThe workflow is parsed and described, then a structured prompt is returned\nfor CoPilot's agent-generator to handle the actual conversion.",
        "operationId": "postV2Import a workflow from another tool (n8n, make.com, zapier)",
        "requestBody": {
          "content": {
            "application/json": {
              "schema": { "$ref": "#/components/schemas/ImportWorkflowRequest" }
            }
          },
          "required": true
        },
        "responses": {
          "200": {
            "description": "Successful Response",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ImportWorkflowResponse"
                }
              }
            }
          },
          "401": {
            "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
          },
          "422": {
            "description": "Validation Error",
            "content": {
              "application/json": {
                "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
              }
            }
          }
        },
        "security": [{ "HTTPBearerJWT": [] }]
      }
    },
    "/api/integrations/ayrshare/sso_url": {
      "get": {
        "tags": ["v1", "integrations"],
@@ -10011,35 +9971,6 @@
        "required": ["image_url"],
        "title": "ImageURLResponse"
      },
      "ImportWorkflowRequest": {
        "properties": {
          "workflow_json": {
            "anyOf": [
              { "additionalProperties": true, "type": "object" },
              { "type": "null" }
            ],
            "title": "Workflow Json"
          },
          "template_url": {
            "anyOf": [{ "type": "string" }, { "type": "null" }],
            "title": "Template Url"
          }
        },
        "type": "object",
        "title": "ImportWorkflowRequest",
        "description": "Request body for importing an external workflow."
      },
      "ImportWorkflowResponse": {
        "properties": {
          "copilot_prompt": { "type": "string", "title": "Copilot Prompt" },
          "source_format": { "type": "string", "title": "Source Format" },
          "source_name": { "type": "string", "title": "Source Name" }
        },
        "type": "object",
        "required": ["copilot_prompt", "source_format", "source_name"],
        "title": "ImportWorkflowResponse",
        "description": "Response from parsing an external workflow.\n\nReturns a CoPilot prompt that the frontend uses to redirect the user\nto CoPilot, where the agentic agent-generator handles the conversion."
      },
      "InputValidationErrorResponse": {
        "properties": {
          "type": {