mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-03-17 03:00:27 -04:00
Compare commits
26 Commits
fix/copilo
...
abhi/add-a
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c6e7a28325 | ||
|
|
48ff8300a4 | ||
|
|
79dcfed31b | ||
|
|
404c0fdc0d | ||
|
|
cf1ba67d86 | ||
|
|
c3855c0910 | ||
|
|
cd0ff8d386 | ||
|
|
5670938d42 | ||
|
|
f6ccc8cac7 | ||
|
|
cd3cad1fef | ||
|
|
c0ebb5c719 | ||
|
|
1c6cfa5e78 | ||
|
|
a3651ad682 | ||
|
|
78ef378ca8 | ||
|
|
45897957a5 | ||
|
|
553cac94ca | ||
|
|
00945ae33b | ||
|
|
d2d599c89b | ||
|
|
ed0ca6e102 | ||
|
|
669c73ff0e | ||
|
|
68e346f1cb | ||
|
|
556198afb7 | ||
|
|
0d21f9b19f | ||
|
|
c268fc6464 | ||
|
|
aff3fb44af | ||
|
|
0b594a219c |
2
.github/workflows/platform-backend-ci.yml
vendored
2
.github/workflows/platform-backend-ci.yml
vendored
@@ -5,12 +5,14 @@ on:
|
||||
branches: [master, dev, ci-test*]
|
||||
paths:
|
||||
- ".github/workflows/platform-backend-ci.yml"
|
||||
- ".github/workflows/scripts/get_package_version_from_lockfile.py"
|
||||
- "autogpt_platform/backend/**"
|
||||
- "autogpt_platform/autogpt_libs/**"
|
||||
pull_request:
|
||||
branches: [master, dev, release-*]
|
||||
paths:
|
||||
- ".github/workflows/platform-backend-ci.yml"
|
||||
- ".github/workflows/scripts/get_package_version_from_lockfile.py"
|
||||
- "autogpt_platform/backend/**"
|
||||
- "autogpt_platform/autogpt_libs/**"
|
||||
merge_group:
|
||||
|
||||
169
.github/workflows/platform-frontend-ci.yml
vendored
169
.github/workflows/platform-frontend-ci.yml
vendored
@@ -120,175 +120,6 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
exitOnceUploaded: true
|
||||
|
||||
e2e_test:
|
||||
name: end-to-end tests
|
||||
runs-on: big-boi
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Platform - Copy default supabase .env
|
||||
run: |
|
||||
cp ../.env.default ../.env
|
||||
|
||||
- name: Set up Platform - Copy backend .env and set OpenAI API key
|
||||
run: |
|
||||
cp ../backend/.env.default ../backend/.env
|
||||
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
|
||||
env:
|
||||
# Used by E2E test data script to generate embeddings for approved store agents
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
- name: Set up Platform - Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Set up Platform - Expose GHA cache to docker buildx CLI
|
||||
uses: crazy-max/ghaction-github-runtime@v4
|
||||
|
||||
- name: Set up Platform - Build Docker images (with cache)
|
||||
working-directory: autogpt_platform
|
||||
run: |
|
||||
pip install pyyaml
|
||||
|
||||
# Resolve extends and generate a flat compose file that bake can understand
|
||||
docker compose -f docker-compose.yml config > docker-compose.resolved.yml
|
||||
|
||||
# Add cache configuration to the resolved compose file
|
||||
python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
|
||||
--source docker-compose.resolved.yml \
|
||||
--cache-from "type=gha" \
|
||||
--cache-to "type=gha,mode=max" \
|
||||
--backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
|
||||
--frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
|
||||
--git-ref "${{ github.ref }}"
|
||||
|
||||
# Build with bake using the resolved compose file (now includes cache config)
|
||||
docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Cache E2E test data
|
||||
id: e2e-data-cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: /tmp/e2e_test_data.sql
|
||||
key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
|
||||
|
||||
- name: Set up Platform - Start Supabase DB + Auth
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
|
||||
echo "Waiting for auth service to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
|
||||
|
||||
- name: Set up Platform - Run migrations
|
||||
run: |
|
||||
echo "Running migrations..."
|
||||
docker compose -f ../docker-compose.resolved.yml run --rm migrate
|
||||
echo "✅ Migrations completed"
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Load cached E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit == 'true'
|
||||
run: |
|
||||
echo "✅ Found cached E2E test data, restoring..."
|
||||
{
|
||||
echo "SET session_replication_role = 'replica';"
|
||||
cat /tmp/e2e_test_data.sql
|
||||
echo "SET session_replication_role = 'origin';"
|
||||
} | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
|
||||
# Refresh materialized views after restore
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
|
||||
|
||||
echo "✅ E2E test data restored from cache"
|
||||
|
||||
- name: Set up Platform - Start (all other services)
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d --no-build
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Create E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
echo "Creating E2E test data..."
|
||||
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Dump auth.users + platform schema for cache (two separate dumps)
|
||||
echo "Dumping database for cache..."
|
||||
{
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--table='auth.users' postgres
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--schema=platform \
|
||||
--exclude-table='platform._prisma_migrations' \
|
||||
--exclude-table='platform.apscheduler_jobs' \
|
||||
--exclude-table='platform.apscheduler_jobs_batched_notifications' \
|
||||
postgres
|
||||
} > /tmp/e2e_test_data.sql
|
||||
|
||||
echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
|
||||
|
||||
- name: Set up tests - Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up tests - Set up Node
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Set up tests - Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Set up tests - Install browser 'chromium'
|
||||
run: pnpm playwright install --with-deps chromium
|
||||
|
||||
- name: Run Playwright tests
|
||||
run: pnpm test:no-build
|
||||
continue-on-error: false
|
||||
|
||||
- name: Upload Playwright report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-report
|
||||
path: playwright-report
|
||||
if-no-files-found: ignore
|
||||
retention-days: 3
|
||||
|
||||
- name: Upload Playwright test results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-test-results
|
||||
path: test-results
|
||||
if-no-files-found: ignore
|
||||
retention-days: 3
|
||||
|
||||
- name: Print Final Docker Compose logs
|
||||
if: always()
|
||||
run: docker compose -f ../docker-compose.resolved.yml logs
|
||||
|
||||
integration_test:
|
||||
runs-on: ubuntu-latest
|
||||
needs: setup
|
||||
|
||||
312
.github/workflows/platform-fullstack-ci.yml
vendored
312
.github/workflows/platform-fullstack-ci.yml
vendored
@@ -1,14 +1,18 @@
|
||||
name: AutoGPT Platform - Frontend CI
|
||||
name: AutoGPT Platform - Full-stack CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, dev]
|
||||
paths:
|
||||
- ".github/workflows/platform-fullstack-ci.yml"
|
||||
- ".github/workflows/scripts/docker-ci-fix-compose-build-cache.py"
|
||||
- ".github/workflows/scripts/get_package_version_from_lockfile.py"
|
||||
- "autogpt_platform/**"
|
||||
pull_request:
|
||||
paths:
|
||||
- ".github/workflows/platform-fullstack-ci.yml"
|
||||
- ".github/workflows/scripts/docker-ci-fix-compose-build-cache.py"
|
||||
- ".github/workflows/scripts/get_package_version_from_lockfile.py"
|
||||
- "autogpt_platform/**"
|
||||
merge_group:
|
||||
|
||||
@@ -24,42 +28,28 @@ defaults:
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
cache-key: ${{ steps.cache-key.outputs.key }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Generate cache key
|
||||
id: cache-key
|
||||
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache dependencies
|
||||
uses: actions/cache@v5
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ steps.cache-key.outputs.key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
|
||||
${{ runner.os }}-pnpm-
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Install dependencies
|
||||
- name: Install dependencies to populate cache
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
types:
|
||||
runs-on: big-boi
|
||||
check-api-types:
|
||||
name: check API types
|
||||
runs-on: ubuntu-latest
|
||||
needs: setup
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -67,70 +57,256 @@ jobs:
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Node.js
|
||||
# ------------------------ Backend setup ------------------------
|
||||
|
||||
- name: Set up Backend - Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Set up Backend - Install Poetry
|
||||
working-directory: autogpt_platform/backend
|
||||
run: |
|
||||
POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
|
||||
echo "Installing Poetry version ${POETRY_VERSION}"
|
||||
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
|
||||
|
||||
- name: Set up Backend - Set up dependency cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cache/pypoetry
|
||||
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
|
||||
|
||||
- name: Set up Backend - Install dependencies
|
||||
working-directory: autogpt_platform/backend
|
||||
run: poetry install
|
||||
|
||||
- name: Set up Backend - Generate Prisma client
|
||||
working-directory: autogpt_platform/backend
|
||||
run: poetry run prisma generate && poetry run gen-prisma-stub
|
||||
|
||||
- name: Set up Frontend - Export OpenAPI schema from Backend
|
||||
working-directory: autogpt_platform/backend
|
||||
run: poetry run export-api-schema --output ../frontend/src/app/api/openapi.json
|
||||
|
||||
# ------------------------ Frontend setup ------------------------
|
||||
|
||||
- name: Set up Frontend - Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up Frontend - Set up Node
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Copy default supabase .env
|
||||
run: |
|
||||
cp ../.env.default ../.env
|
||||
|
||||
- name: Copy backend .env
|
||||
run: |
|
||||
cp ../backend/.env.default ../backend/.env
|
||||
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
docker compose -f ../docker-compose.yml --profile local up -d deps_backend
|
||||
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.pnpm-store
|
||||
key: ${{ needs.setup.outputs.cache-key }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
- name: Set up Frontend - Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Setup .env
|
||||
run: cp .env.default .env
|
||||
|
||||
- name: Wait for services to be ready
|
||||
run: |
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
|
||||
|
||||
- name: Generate API queries
|
||||
run: pnpm generate:api:force
|
||||
- name: Set up Frontend - Format OpenAPI schema
|
||||
id: format-schema
|
||||
run: pnpm prettier --write ./src/app/api/openapi.json
|
||||
|
||||
- name: Check for API schema changes
|
||||
run: |
|
||||
if ! git diff --exit-code src/app/api/openapi.json; then
|
||||
echo "❌ API schema changes detected in src/app/api/openapi.json"
|
||||
echo ""
|
||||
echo "The openapi.json file has been modified after running 'pnpm generate:api-all'."
|
||||
echo "The openapi.json file has been modified after exporting the API schema."
|
||||
echo "This usually means changes have been made in the BE endpoints without updating the Frontend."
|
||||
echo "The API schema is now out of sync with the Front-end queries."
|
||||
echo ""
|
||||
echo "To fix this:"
|
||||
echo "1. Pull the backend 'docker compose pull && docker compose up -d --build --force-recreate'"
|
||||
echo "2. Run 'pnpm generate:api' locally"
|
||||
echo "3. Run 'pnpm types' locally"
|
||||
echo "4. Fix any TypeScript errors that may have been introduced"
|
||||
echo "5. Commit and push your changes"
|
||||
echo "\nIn the backend directory:"
|
||||
echo "1. Run 'poetry run export-api-schema --output ../frontend/src/app/api/openapi.json'"
|
||||
echo "\nIn the frontend directory:"
|
||||
echo "2. Run 'pnpm prettier --write src/app/api/openapi.json'"
|
||||
echo "3. Run 'pnpm generate:api'"
|
||||
echo "4. Run 'pnpm types'"
|
||||
echo "5. Fix any TypeScript errors that may have been introduced"
|
||||
echo "6. Commit and push your changes"
|
||||
echo ""
|
||||
exit 1
|
||||
else
|
||||
echo "✅ No API schema changes detected"
|
||||
fi
|
||||
|
||||
- name: Run Typescript checks
|
||||
- name: Set up Frontend - Generate API client
|
||||
id: generate-api-client
|
||||
run: pnpm orval --config ./orval.config.ts
|
||||
# Continue with type generation & check even if there are schema changes
|
||||
if: success() || (steps.format-schema.outcome == 'success')
|
||||
|
||||
- name: Check for TypeScript errors
|
||||
run: pnpm types
|
||||
if: success() || (steps.generate-api-client.outcome == 'success')
|
||||
|
||||
e2e_test:
|
||||
name: end-to-end tests
|
||||
runs-on: big-boi
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Platform - Copy default supabase .env
|
||||
run: |
|
||||
cp ../.env.default ../.env
|
||||
|
||||
- name: Set up Platform - Copy backend .env and set OpenAI API key
|
||||
run: |
|
||||
cp ../backend/.env.default ../backend/.env
|
||||
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
|
||||
env:
|
||||
# Used by E2E test data script to generate embeddings for approved store agents
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
- name: Set up Platform - Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Set up Platform - Expose GHA cache to docker buildx CLI
|
||||
uses: crazy-max/ghaction-github-runtime@v4
|
||||
|
||||
- name: Set up Platform - Build Docker images (with cache)
|
||||
working-directory: autogpt_platform
|
||||
run: |
|
||||
pip install pyyaml
|
||||
|
||||
# Resolve extends and generate a flat compose file that bake can understand
|
||||
docker compose -f docker-compose.yml config > docker-compose.resolved.yml
|
||||
|
||||
# Add cache configuration to the resolved compose file
|
||||
python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
|
||||
--source docker-compose.resolved.yml \
|
||||
--cache-from "type=gha" \
|
||||
--cache-to "type=gha,mode=max" \
|
||||
--backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend/**') }}" \
|
||||
--frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src/**') }}" \
|
||||
--git-ref "${{ github.ref }}"
|
||||
|
||||
# Build with bake using the resolved compose file (now includes cache config)
|
||||
docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Cache E2E test data
|
||||
id: e2e-data-cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: /tmp/e2e_test_data.sql
|
||||
key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-fullstack-ci.yml') }}
|
||||
|
||||
- name: Set up Platform - Start Supabase DB + Auth
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
|
||||
echo "Waiting for auth service to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."
|
||||
|
||||
- name: Set up Platform - Run migrations
|
||||
run: |
|
||||
echo "Running migrations..."
|
||||
docker compose -f ../docker-compose.resolved.yml run --rm migrate
|
||||
echo "✅ Migrations completed"
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Load cached E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit == 'true'
|
||||
run: |
|
||||
echo "✅ Found cached E2E test data, restoring..."
|
||||
{
|
||||
echo "SET session_replication_role = 'replica';"
|
||||
cat /tmp/e2e_test_data.sql
|
||||
echo "SET session_replication_role = 'origin';"
|
||||
} | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
|
||||
# Refresh materialized views after restore
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
|
||||
|
||||
echo "✅ E2E test data restored from cache"
|
||||
|
||||
- name: Set up Platform - Start (all other services)
|
||||
run: |
|
||||
docker compose -f ../docker-compose.resolved.yml up -d --no-build
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
env:
|
||||
NEXT_PUBLIC_PW_TEST: true
|
||||
|
||||
- name: Set up tests - Create E2E test data
|
||||
if: steps.e2e-data-cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
echo "Creating E2E test data..."
|
||||
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
|
||||
echo "❌ E2E test data creation failed!"
|
||||
docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Dump auth.users + platform schema for cache (two separate dumps)
|
||||
echo "Dumping database for cache..."
|
||||
{
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--table='auth.users' postgres
|
||||
docker compose -f ../docker-compose.resolved.yml exec -T db \
|
||||
pg_dump -U postgres --data-only --column-inserts \
|
||||
--schema=platform \
|
||||
--exclude-table='platform._prisma_migrations' \
|
||||
--exclude-table='platform.apscheduler_jobs' \
|
||||
--exclude-table='platform.apscheduler_jobs_batched_notifications' \
|
||||
postgres
|
||||
} > /tmp/e2e_test_data.sql
|
||||
|
||||
echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"
|
||||
|
||||
- name: Set up tests - Enable corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Set up tests - Set up Node
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: "22.18.0"
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
|
||||
|
||||
- name: Set up tests - Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Set up tests - Install browser 'chromium'
|
||||
run: pnpm playwright install --with-deps chromium
|
||||
|
||||
- name: Run Playwright tests
|
||||
run: pnpm test:no-build
|
||||
continue-on-error: false
|
||||
|
||||
- name: Upload Playwright report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-report
|
||||
path: playwright-report
|
||||
if-no-files-found: ignore
|
||||
retention-days: 3
|
||||
|
||||
- name: Upload Playwright test results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-test-results
|
||||
path: test-results
|
||||
if-no-files-found: ignore
|
||||
retention-days: 3
|
||||
|
||||
- name: Print Final Docker Compose logs
|
||||
if: always()
|
||||
run: docker compose -f ../docker-compose.resolved.yml logs
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
"""
|
||||
Shared configuration for all AgentMail blocks.
|
||||
"""
|
||||
|
||||
from backend.sdk import APIKeyCredentials, ProviderBuilder, SecretStr
|
||||
|
||||
agent_mail = (
|
||||
ProviderBuilder("agent_mail")
|
||||
.with_api_key("AGENTMAIL_API_KEY", "AgentMail API Key")
|
||||
.build()
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="agent_mail",
|
||||
title="Mock AgentMail API Key",
|
||||
api_key=SecretStr("mock-agentmail-api-key"),
|
||||
expires_at=None,
|
||||
)
|
||||
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
AgentMail Attachment blocks — download file attachments from messages and threads.
|
||||
|
||||
Attachments are files associated with messages (PDFs, CSVs, images, etc.).
|
||||
To send attachments, include them in the attachments parameter when using
|
||||
AgentMailSendMessageBlock or AgentMailReplyToMessageBlock.
|
||||
|
||||
To download, first get the attachment_id from a message's attachments array,
|
||||
then use these blocks to retrieve the file content as base64.
|
||||
"""
|
||||
|
||||
import base64
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailGetMessageAttachmentBlock(Block):
|
||||
"""
|
||||
Download a file attachment from a specific email message.
|
||||
|
||||
Retrieves the raw file content and returns it as base64-encoded data.
|
||||
First get the attachment_id from a message object's attachments array,
|
||||
then use this block to download the file.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the message belongs to"
|
||||
)
|
||||
message_id: str = SchemaField(
|
||||
description="Message ID containing the attachment"
|
||||
)
|
||||
attachment_id: str = SchemaField(
|
||||
description="Attachment ID to download (from the message's attachments array)"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
content_base64: str = SchemaField(
|
||||
description="File content encoded as a base64 string. Decode with base64.b64decode() to get raw bytes."
|
||||
)
|
||||
attachment_id: str = SchemaField(
|
||||
description="The attachment ID that was downloaded"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a283ffc4-8087-4c3d-9135-8f26b86742ec",
|
||||
description="Download a file attachment from an email message. Returns base64-encoded file content.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
client = _client(credentials)
|
||||
data = await client.inboxes.messages.get_attachment(
|
||||
inbox_id=input_data.inbox_id,
|
||||
message_id=input_data.message_id,
|
||||
attachment_id=input_data.attachment_id,
|
||||
)
|
||||
if isinstance(data, bytes):
|
||||
encoded = base64.b64encode(data).decode()
|
||||
elif isinstance(data, str):
|
||||
encoded = base64.b64encode(data.encode("utf-8")).decode()
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Unexpected attachment data type: {type(data).__name__}"
|
||||
)
|
||||
|
||||
yield "content_base64", encoded
|
||||
yield "attachment_id", input_data.attachment_id
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class AgentMailGetThreadAttachmentBlock(Block):
|
||||
"""
|
||||
Download a file attachment from a conversation thread.
|
||||
|
||||
Same as GetMessageAttachment but looks up by thread ID instead of
|
||||
message ID. Useful when you know the thread but not the specific
|
||||
message containing the attachment.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the thread belongs to"
|
||||
)
|
||||
thread_id: str = SchemaField(description="Thread ID containing the attachment")
|
||||
attachment_id: str = SchemaField(
|
||||
description="Attachment ID to download (from a message's attachments array within the thread)"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
content_base64: str = SchemaField(
|
||||
description="File content encoded as a base64 string. Decode with base64.b64decode() to get raw bytes."
|
||||
)
|
||||
attachment_id: str = SchemaField(
|
||||
description="The attachment ID that was downloaded"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="06b6a4c4-9d71-4992-9e9c-cf3b352763b5",
|
||||
description="Download a file attachment from a conversation thread. Returns base64-encoded file content.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
client = _client(credentials)
|
||||
data = await client.inboxes.threads.get_attachment(
|
||||
inbox_id=input_data.inbox_id,
|
||||
thread_id=input_data.thread_id,
|
||||
attachment_id=input_data.attachment_id,
|
||||
)
|
||||
if isinstance(data, bytes):
|
||||
encoded = base64.b64encode(data).decode()
|
||||
elif isinstance(data, str):
|
||||
encoded = base64.b64encode(data.encode("utf-8")).decode()
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Unexpected attachment data type: {type(data).__name__}"
|
||||
)
|
||||
|
||||
yield "content_base64", encoded
|
||||
yield "attachment_id", input_data.attachment_id
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
501
autogpt_platform/backend/backend/blocks/agent_mail/drafts.py
Normal file
501
autogpt_platform/backend/backend/blocks/agent_mail/drafts.py
Normal file
@@ -0,0 +1,501 @@
|
||||
"""
|
||||
AgentMail Draft blocks — create, get, list, update, send, and delete drafts.
|
||||
|
||||
A Draft is an unsent message that can be reviewed, edited, and sent later.
|
||||
Drafts enable human-in-the-loop review, scheduled sending (via send_at),
|
||||
and complex multi-step email composition workflows.
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailCreateDraftBlock(Block):
|
||||
"""
|
||||
Create a draft email in an AgentMail inbox for review or scheduled sending.
|
||||
|
||||
Drafts let agents prepare emails without sending immediately. Use send_at
|
||||
to schedule automatic sending at a future time (ISO 8601 format).
|
||||
Scheduled drafts are auto-labeled 'scheduled' and can be cancelled by
|
||||
deleting the draft.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to create the draft in"
|
||||
)
|
||||
to: list[str] = SchemaField(
|
||||
description="Recipient email addresses (e.g. ['user@example.com'])"
|
||||
)
|
||||
subject: str = SchemaField(description="Email subject line", default="")
|
||||
text: str = SchemaField(description="Plain text body of the draft", default="")
|
||||
html: str = SchemaField(
|
||||
description="Rich HTML body of the draft", default="", advanced=True
|
||||
)
|
||||
cc: list[str] = SchemaField(
|
||||
description="CC recipient email addresses",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
bcc: list[str] = SchemaField(
|
||||
description="BCC recipient email addresses",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
in_reply_to: str = SchemaField(
|
||||
description="Message ID this draft replies to, for threading follow-up drafts",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
send_at: str = SchemaField(
|
||||
description="Schedule automatic sending at this ISO 8601 datetime (e.g. '2025-01-15T09:00:00Z'). Leave empty for manual send.",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
draft_id: str = SchemaField(
|
||||
description="Unique identifier of the created draft"
|
||||
)
|
||||
send_status: str = SchemaField(
|
||||
description="'scheduled' if send_at was set, empty otherwise. Values: scheduled, sending, failed.",
|
||||
default="",
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete draft object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="25ac9086-69fd-48b8-b910-9dbe04b8f3bd",
|
||||
description="Create a draft email for review or scheduled sending. Use send_at for automatic future delivery.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"to": input_data.to}
|
||||
if input_data.subject:
|
||||
params["subject"] = input_data.subject
|
||||
if input_data.text:
|
||||
params["text"] = input_data.text
|
||||
if input_data.html:
|
||||
params["html"] = input_data.html
|
||||
if input_data.cc:
|
||||
params["cc"] = input_data.cc
|
||||
if input_data.bcc:
|
||||
params["bcc"] = input_data.bcc
|
||||
if input_data.in_reply_to:
|
||||
params["in_reply_to"] = input_data.in_reply_to
|
||||
if input_data.send_at:
|
||||
params["send_at"] = input_data.send_at
|
||||
|
||||
draft = await client.inboxes.drafts.create(input_data.inbox_id, **params)
|
||||
result = draft.__dict__ if hasattr(draft, "__dict__") else {}
|
||||
|
||||
yield "draft_id", draft.draft_id
|
||||
yield "send_status", getattr(draft, "send_status", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailGetDraftBlock(Block):
|
||||
"""
|
||||
Retrieve a specific draft from an AgentMail inbox.
|
||||
|
||||
Returns the draft contents including recipients, subject, body, and
|
||||
scheduled send status. Use this to review a draft before approving it.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the draft belongs to"
|
||||
)
|
||||
draft_id: str = SchemaField(description="Draft ID to retrieve")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
draft_id: str = SchemaField(description="Unique identifier of the draft")
|
||||
subject: str = SchemaField(description="Draft subject line", default="")
|
||||
send_status: str = SchemaField(
|
||||
description="Scheduled send status: 'scheduled', 'sending', 'failed', or empty",
|
||||
default="",
|
||||
)
|
||||
send_at: str = SchemaField(
|
||||
description="Scheduled send time (ISO 8601) if set", default=""
|
||||
)
|
||||
result: dict = SchemaField(description="Complete draft object with all fields")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="8e57780d-dc25-43d4-a0f4-1f02877b09fb",
|
||||
description="Retrieve a draft email to review its contents, recipients, and scheduled send status.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
draft = await client.inboxes.drafts.get(
|
||||
inbox_id=input_data.inbox_id,
|
||||
draft_id=input_data.draft_id,
|
||||
)
|
||||
result = draft.__dict__ if hasattr(draft, "__dict__") else {}
|
||||
|
||||
yield "draft_id", draft.draft_id
|
||||
yield "subject", getattr(draft, "subject", None) or ""
|
||||
yield "send_status", getattr(draft, "send_status", None) or ""
|
||||
yield "send_at", getattr(draft, "send_at", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailListDraftsBlock(Block):
|
||||
"""
|
||||
List all drafts in an AgentMail inbox with optional label filtering.
|
||||
|
||||
Use labels=['scheduled'] to find all drafts queued for future sending.
|
||||
Useful for building approval dashboards or monitoring pending outreach.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to list drafts from"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of drafts to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Filter drafts by labels (e.g. ['scheduled'] for pending sends)",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
drafts: list[dict] = SchemaField(
|
||||
description="List of draft objects with subject, recipients, send_status, etc."
|
||||
)
|
||||
count: int = SchemaField(description="Number of drafts returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="e84883b7-7c39-4c5c-88e8-0a72b078ea63",
|
||||
description="List drafts in an AgentMail inbox. Filter by labels=['scheduled'] to find pending sends.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
response = await client.inboxes.drafts.list(input_data.inbox_id, **params)
|
||||
drafts = [
|
||||
d.__dict__ if hasattr(d, "__dict__") else d
|
||||
for d in getattr(response, "drafts", [])
|
||||
]
|
||||
|
||||
yield "drafts", drafts
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(drafts)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailUpdateDraftBlock(Block):
|
||||
"""
|
||||
Update an existing draft's content, recipients, or scheduled send time.
|
||||
|
||||
Use this to reschedule a draft (change send_at), modify recipients,
|
||||
or edit the subject/body before sending. To cancel a scheduled send,
|
||||
delete the draft instead.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the draft belongs to"
|
||||
)
|
||||
draft_id: str = SchemaField(description="Draft ID to update")
|
||||
to: Optional[list[str]] = SchemaField(
|
||||
description="Updated recipient email addresses (replaces existing list). Omit to keep current value.",
|
||||
default=None,
|
||||
)
|
||||
subject: Optional[str] = SchemaField(
|
||||
description="Updated subject line. Omit to keep current value.",
|
||||
default=None,
|
||||
)
|
||||
text: Optional[str] = SchemaField(
|
||||
description="Updated plain text body. Omit to keep current value.",
|
||||
default=None,
|
||||
)
|
||||
html: Optional[str] = SchemaField(
|
||||
description="Updated HTML body. Omit to keep current value.",
|
||||
default=None,
|
||||
advanced=True,
|
||||
)
|
||||
send_at: Optional[str] = SchemaField(
|
||||
description="Reschedule: new ISO 8601 send time (e.g. '2025-01-20T14:00:00Z'). Omit to keep current value.",
|
||||
default=None,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
draft_id: str = SchemaField(description="The updated draft ID")
|
||||
send_status: str = SchemaField(description="Updated send status", default="")
|
||||
result: dict = SchemaField(description="Complete updated draft object")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="351f6e51-695a-421a-9032-46a587b10336",
|
||||
description="Update a draft's content, recipients, or scheduled send time. Use to reschedule or edit before sending.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {}
|
||||
if input_data.to is not None:
|
||||
params["to"] = input_data.to
|
||||
if input_data.subject is not None:
|
||||
params["subject"] = input_data.subject
|
||||
if input_data.text is not None:
|
||||
params["text"] = input_data.text
|
||||
if input_data.html is not None:
|
||||
params["html"] = input_data.html
|
||||
if input_data.send_at is not None:
|
||||
params["send_at"] = input_data.send_at
|
||||
|
||||
draft = await client.inboxes.drafts.update(
|
||||
inbox_id=input_data.inbox_id,
|
||||
draft_id=input_data.draft_id,
|
||||
**params,
|
||||
)
|
||||
result = draft.__dict__ if hasattr(draft, "__dict__") else {}
|
||||
|
||||
yield "draft_id", draft.draft_id
|
||||
yield "send_status", getattr(draft, "send_status", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailSendDraftBlock(Block):
|
||||
"""
|
||||
Send a draft immediately, converting it into a delivered message.
|
||||
|
||||
The draft is deleted after successful sending and becomes a regular
|
||||
message with a message_id. Use this for human-in-the-loop approval
|
||||
workflows: agent creates draft, human reviews, then this block sends it.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the draft belongs to"
|
||||
)
|
||||
draft_id: str = SchemaField(description="Draft ID to send now")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(
|
||||
description="Message ID of the now-sent email (draft is deleted)"
|
||||
)
|
||||
thread_id: str = SchemaField(
|
||||
description="Thread ID the sent message belongs to"
|
||||
)
|
||||
result: dict = SchemaField(description="Complete sent message object")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="37c39e83-475d-4b3d-843a-d923d001b85a",
|
||||
description="Send a draft immediately, converting it into a delivered message. The draft is deleted after sending.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
msg = await client.inboxes.drafts.send(
|
||||
inbox_id=input_data.inbox_id,
|
||||
draft_id=input_data.draft_id,
|
||||
)
|
||||
result = msg.__dict__ if hasattr(msg, "__dict__") else {}
|
||||
|
||||
yield "message_id", msg.message_id
|
||||
yield "thread_id", getattr(msg, "thread_id", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailDeleteDraftBlock(Block):
|
||||
"""
|
||||
Delete a draft from an AgentMail inbox. Also cancels any scheduled send.
|
||||
|
||||
If the draft was scheduled with send_at, deleting it cancels the
|
||||
scheduled delivery. This is the way to cancel a scheduled email.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the draft belongs to"
|
||||
)
|
||||
draft_id: str = SchemaField(
|
||||
description="Draft ID to delete (also cancels scheduled sends)"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
success: bool = SchemaField(
|
||||
description="True if the draft was successfully deleted/cancelled"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="9023eb99-3e2f-4def-808b-d9c584b3d9e7",
|
||||
description="Delete a draft or cancel a scheduled email. Removes the draft permanently.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
await client.inboxes.drafts.delete(
|
||||
inbox_id=input_data.inbox_id,
|
||||
draft_id=input_data.draft_id,
|
||||
)
|
||||
yield "success", True
|
||||
|
||||
|
||||
class AgentMailListOrgDraftsBlock(Block):
|
||||
"""
|
||||
List all drafts across every inbox in your organization.
|
||||
|
||||
Returns drafts from all inboxes in one query. Perfect for building
|
||||
a central approval dashboard where a human supervisor can review
|
||||
and approve any draft created by any agent.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of drafts to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
drafts: list[dict] = SchemaField(
|
||||
description="List of draft objects from all inboxes in the organization"
|
||||
)
|
||||
count: int = SchemaField(description="Number of drafts returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ed7558ae-3a07-45f5-af55-a25fe88c9971",
|
||||
description="List all drafts across every inbox in your organization. Use for central approval dashboards.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.drafts.list(**params)
|
||||
drafts = [
|
||||
d.__dict__ if hasattr(d, "__dict__") else d
|
||||
for d in getattr(response, "drafts", [])
|
||||
]
|
||||
|
||||
yield "drafts", drafts
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(drafts)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
302
autogpt_platform/backend/backend/blocks/agent_mail/inbox.py
Normal file
302
autogpt_platform/backend/backend/blocks/agent_mail/inbox.py
Normal file
@@ -0,0 +1,302 @@
|
||||
"""
|
||||
AgentMail Inbox blocks — create, get, list, update, and delete inboxes.
|
||||
|
||||
An Inbox is a fully programmable email account for AI agents. Each inbox gets
|
||||
a unique email address and can send, receive, and manage emails via the
|
||||
AgentMail API. You can create thousands of inboxes on demand.
|
||||
"""
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
from agentmail.inboxes.types import CreateInboxRequest
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailCreateInboxBlock(Block):
|
||||
"""
|
||||
Create a new email inbox for an AI agent via AgentMail.
|
||||
|
||||
Each inbox gets a unique email address (e.g. username@agentmail.to).
|
||||
If username and domain are not provided, AgentMail auto-generates them.
|
||||
Use custom domains by specifying the domain field.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
username: str = SchemaField(
|
||||
description="Local part of the email address (e.g. 'support' for support@domain.com). Leave empty to auto-generate.",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
domain: str = SchemaField(
|
||||
description="Email domain (e.g. 'mydomain.com'). Defaults to agentmail.to if empty.",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
display_name: str = SchemaField(
|
||||
description="Friendly name shown in the 'From' field of sent emails (e.g. 'Support Agent')",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inbox_id: str = SchemaField(
|
||||
description="Unique identifier for the created inbox (also the email address)"
|
||||
)
|
||||
email_address: str = SchemaField(
|
||||
description="Full email address of the inbox (e.g. support@agentmail.to)"
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete inbox object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="7a8ac219-c6ec-4eec-a828-81af283ce04c",
|
||||
description="Create a new email inbox for an AI agent via AgentMail. Each inbox gets a unique address and can send/receive emails.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {}
|
||||
if input_data.username:
|
||||
params["username"] = input_data.username
|
||||
if input_data.domain:
|
||||
params["domain"] = input_data.domain
|
||||
if input_data.display_name:
|
||||
params["display_name"] = input_data.display_name
|
||||
|
||||
inbox = await client.inboxes.create(request=CreateInboxRequest(**params))
|
||||
result = inbox.__dict__ if hasattr(inbox, "__dict__") else {}
|
||||
|
||||
yield "inbox_id", inbox.inbox_id
|
||||
yield "email_address", getattr(inbox, "email_address", inbox.inbox_id)
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailGetInboxBlock(Block):
|
||||
"""
|
||||
Retrieve details of an existing AgentMail inbox by its ID or email address.
|
||||
|
||||
Returns the inbox metadata including email address, display name, and
|
||||
configuration. Use this to check if an inbox exists or get its properties.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to look up (e.g. 'support@agentmail.to')"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inbox_id: str = SchemaField(description="Unique identifier of the inbox")
|
||||
email_address: str = SchemaField(description="Full email address of the inbox")
|
||||
display_name: str = SchemaField(
|
||||
description="Friendly name shown in the 'From' field", default=""
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete inbox object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b858f62b-6c12-4736-aaf2-dbc5a9281320",
|
||||
description="Retrieve details of an existing AgentMail inbox including its email address, display name, and configuration.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
inbox = await client.inboxes.get(inbox_id=input_data.inbox_id)
|
||||
result = inbox.__dict__ if hasattr(inbox, "__dict__") else {}
|
||||
|
||||
yield "inbox_id", inbox.inbox_id
|
||||
yield "email_address", getattr(inbox, "email_address", inbox.inbox_id)
|
||||
yield "display_name", getattr(inbox, "display_name", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailListInboxesBlock(Block):
|
||||
"""
|
||||
List all email inboxes in your AgentMail organization.
|
||||
|
||||
Returns a paginated list of all inboxes with their metadata.
|
||||
Use page_token for pagination when you have many inboxes.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of inboxes to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page of results",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inboxes: list[dict] = SchemaField(
|
||||
description="List of inbox objects, each containing inbox_id, email_address, display_name, etc."
|
||||
)
|
||||
count: int = SchemaField(
|
||||
description="Total number of inboxes in your organization"
|
||||
)
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token to pass as page_token to get the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="cfd84a06-2121-4cef-8d14-8badf52d22f0",
|
||||
description="List all email inboxes in your AgentMail organization with pagination support.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.inboxes.list(**params)
|
||||
inboxes = [
|
||||
inbox.__dict__ if hasattr(inbox, "__dict__") else inbox
|
||||
for inbox in getattr(response, "inboxes", [])
|
||||
]
|
||||
|
||||
yield "inboxes", inboxes
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(inboxes)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailUpdateInboxBlock(Block):
|
||||
"""
|
||||
Update the display name of an existing AgentMail inbox.
|
||||
|
||||
Changes the friendly name shown in the 'From' field when emails are sent
|
||||
from this inbox. The email address itself cannot be changed.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to update (e.g. 'support@agentmail.to')"
|
||||
)
|
||||
display_name: str = SchemaField(
|
||||
description="New display name for the inbox (e.g. 'Customer Support Bot')"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inbox_id: str = SchemaField(description="The updated inbox ID")
|
||||
result: dict = SchemaField(
|
||||
description="Complete updated inbox object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="59b49f59-a6d1-4203-94c0-3908adac50b6",
|
||||
description="Update the display name of an AgentMail inbox. Changes the 'From' name shown when emails are sent.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
inbox = await client.inboxes.update(
|
||||
inbox_id=input_data.inbox_id,
|
||||
display_name=input_data.display_name,
|
||||
)
|
||||
result = inbox.__dict__ if hasattr(inbox, "__dict__") else {}
|
||||
|
||||
yield "inbox_id", inbox.inbox_id
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailDeleteInboxBlock(Block):
|
||||
"""
|
||||
Permanently delete an AgentMail inbox and all its data.
|
||||
|
||||
This removes the inbox, all its messages, threads, and drafts.
|
||||
This action cannot be undone. The email address will no longer
|
||||
receive or send emails.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to permanently delete"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
success: bool = SchemaField(
|
||||
description="True if the inbox was successfully deleted"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ade970ae-8428-4a7b-9278-b52054dbf535",
|
||||
description="Permanently delete an AgentMail inbox and all its messages, threads, and drafts. This action cannot be undone.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
await client.inboxes.delete(inbox_id=input_data.inbox_id)
|
||||
yield "success", True
|
||||
278
autogpt_platform/backend/backend/blocks/agent_mail/lists.py
Normal file
278
autogpt_platform/backend/backend/blocks/agent_mail/lists.py
Normal file
@@ -0,0 +1,278 @@
|
||||
"""
|
||||
AgentMail List blocks — manage allow/block lists for email filtering.
|
||||
|
||||
Lists let you control which email addresses and domains your agents can
|
||||
send to or receive from. There are four list types based on two dimensions:
|
||||
direction (send/receive) and type (allow/block).
|
||||
|
||||
- receive + allow: Only accept emails from these addresses/domains
|
||||
- receive + block: Reject emails from these addresses/domains
|
||||
- send + allow: Only send emails to these addresses/domains
|
||||
- send + block: Prevent sending emails to these addresses/domains
|
||||
"""
|
||||
|
||||
from enum import Enum
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class ListDirection(str, Enum):
|
||||
SEND = "send"
|
||||
RECEIVE = "receive"
|
||||
|
||||
|
||||
class ListType(str, Enum):
|
||||
ALLOW = "allow"
|
||||
BLOCK = "block"
|
||||
|
||||
|
||||
class AgentMailListEntriesBlock(Block):
|
||||
"""
|
||||
List all entries in an AgentMail allow/block list.
|
||||
|
||||
Retrieves email addresses and domains that are currently allowed
|
||||
or blocked for sending or receiving. Use direction and list_type
|
||||
to select which of the four lists to query.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
direction: ListDirection = SchemaField(
|
||||
description="'send' to filter outgoing emails, 'receive' to filter incoming emails"
|
||||
)
|
||||
list_type: ListType = SchemaField(
|
||||
description="'allow' for whitelist (only permit these), 'block' for blacklist (reject these)"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of entries to return per page",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
entries: list[dict] = SchemaField(
|
||||
description="List of entries, each with an email address or domain"
|
||||
)
|
||||
count: int = SchemaField(description="Number of entries returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="01489100-35da-45aa-8a01-9540ba0e9a21",
|
||||
description="List all entries in an AgentMail allow/block list. Choose send/receive direction and allow/block type.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.lists.list(
|
||||
input_data.direction.value, input_data.list_type.value, **params
|
||||
)
|
||||
entries = [
|
||||
e.__dict__ if hasattr(e, "__dict__") else e
|
||||
for e in getattr(response, "entries", [])
|
||||
]
|
||||
|
||||
yield "entries", entries
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(entries)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailCreateListEntryBlock(Block):
|
||||
"""
|
||||
Add an email address or domain to an AgentMail allow/block list.
|
||||
|
||||
Entries can be full email addresses (e.g. 'partner@example.com') or
|
||||
entire domains (e.g. 'example.com'). For block lists, you can optionally
|
||||
provide a reason (e.g. 'spam', 'competitor').
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
direction: ListDirection = SchemaField(
|
||||
description="'send' for outgoing email rules, 'receive' for incoming email rules"
|
||||
)
|
||||
list_type: ListType = SchemaField(
|
||||
description="'allow' to whitelist, 'block' to blacklist"
|
||||
)
|
||||
entry: str = SchemaField(
|
||||
description="Email address (user@example.com) or domain (example.com) to add"
|
||||
)
|
||||
reason: str = SchemaField(
|
||||
description="Reason for blocking (only used with block lists, e.g. 'spam', 'competitor')",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
entry: str = SchemaField(
|
||||
description="The email address or domain that was added"
|
||||
)
|
||||
result: dict = SchemaField(description="Complete entry object")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b6650a0a-b113-40cf-8243-ff20f684f9b8",
|
||||
description="Add an email address or domain to an allow/block list. Block spam senders or whitelist trusted domains.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"entry": input_data.entry}
|
||||
if input_data.reason and input_data.list_type == ListType.BLOCK:
|
||||
params["reason"] = input_data.reason
|
||||
|
||||
result = await client.lists.create(
|
||||
input_data.direction.value, input_data.list_type.value, **params
|
||||
)
|
||||
result_dict = result.__dict__ if hasattr(result, "__dict__") else {}
|
||||
|
||||
yield "entry", input_data.entry
|
||||
yield "result", result_dict
|
||||
|
||||
|
||||
class AgentMailGetListEntryBlock(Block):
|
||||
"""
|
||||
Check if an email address or domain exists in an AgentMail allow/block list.
|
||||
|
||||
Returns the entry details if found. Use this to verify whether a specific
|
||||
address or domain is currently allowed or blocked.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
direction: ListDirection = SchemaField(
|
||||
description="'send' for outgoing rules, 'receive' for incoming rules"
|
||||
)
|
||||
list_type: ListType = SchemaField(
|
||||
description="'allow' for whitelist, 'block' for blacklist"
|
||||
)
|
||||
entry: str = SchemaField(description="Email address or domain to look up")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
entry: str = SchemaField(
|
||||
description="The email address or domain that was found"
|
||||
)
|
||||
result: dict = SchemaField(description="Complete entry object with metadata")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="fb117058-ab27-40d1-9231-eb1dd526fc7a",
|
||||
description="Check if an email address or domain is in an allow/block list. Verify filtering rules.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
result = await client.lists.get(
|
||||
input_data.direction.value,
|
||||
input_data.list_type.value,
|
||||
entry=input_data.entry,
|
||||
)
|
||||
result_dict = result.__dict__ if hasattr(result, "__dict__") else {}
|
||||
|
||||
yield "entry", input_data.entry
|
||||
yield "result", result_dict
|
||||
|
||||
|
||||
class AgentMailDeleteListEntryBlock(Block):
|
||||
"""
|
||||
Remove an email address or domain from an AgentMail allow/block list.
|
||||
|
||||
After removal, the address/domain will no longer be filtered by this list.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
direction: ListDirection = SchemaField(
|
||||
description="'send' for outgoing rules, 'receive' for incoming rules"
|
||||
)
|
||||
list_type: ListType = SchemaField(
|
||||
description="'allow' for whitelist, 'block' for blacklist"
|
||||
)
|
||||
entry: str = SchemaField(
|
||||
description="Email address or domain to remove from the list"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
success: bool = SchemaField(
|
||||
description="True if the entry was successfully removed"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="2b8d57f1-1c9e-470f-a70b-5991c80fad5f",
|
||||
description="Remove an email address or domain from an allow/block list to stop filtering it.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
await client.lists.delete(
|
||||
input_data.direction.value,
|
||||
input_data.list_type.value,
|
||||
entry=input_data.entry,
|
||||
)
|
||||
yield "success", True
|
||||
492
autogpt_platform/backend/backend/blocks/agent_mail/messages.py
Normal file
492
autogpt_platform/backend/backend/blocks/agent_mail/messages.py
Normal file
@@ -0,0 +1,492 @@
|
||||
"""
|
||||
AgentMail Message blocks — send, list, get, reply, forward, and update messages.
|
||||
|
||||
A Message is an individual email within a Thread. Agents can send new messages
|
||||
(which create threads), reply to existing messages, forward them, and manage
|
||||
labels for state tracking (e.g. read/unread, campaign tags).
|
||||
"""
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailSendMessageBlock(Block):
|
||||
"""
|
||||
Send a new email from an AgentMail inbox, automatically creating a new thread.
|
||||
|
||||
Supports plain text and HTML bodies, CC/BCC recipients, and labels for
|
||||
organizing messages (e.g. campaign tracking, state management).
|
||||
Max 50 combined recipients across to, cc, and bcc.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to send from (e.g. 'agent@agentmail.to')"
|
||||
)
|
||||
to: list[str] = SchemaField(
|
||||
description="Recipient email addresses (e.g. ['user@example.com'])"
|
||||
)
|
||||
subject: str = SchemaField(description="Email subject line")
|
||||
text: str = SchemaField(
|
||||
description="Plain text body of the email. Always provide this as a fallback for email clients that don't render HTML."
|
||||
)
|
||||
html: str = SchemaField(
|
||||
description="Rich HTML body of the email. Embed CSS in a <style> tag for best compatibility across email clients.",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
cc: list[str] = SchemaField(
|
||||
description="CC recipient email addresses for human-in-the-loop oversight",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
bcc: list[str] = SchemaField(
|
||||
description="BCC recipient email addresses (hidden from other recipients)",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Labels to tag the message for filtering and state management (e.g. ['outreach', 'q4-campaign'])",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(
|
||||
description="Unique identifier of the sent message"
|
||||
)
|
||||
thread_id: str = SchemaField(
|
||||
description="Thread ID grouping this message and any future replies"
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete sent message object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b67469b2-7748-4d81-a223-4ebd332cca89",
|
||||
description="Send a new email from an AgentMail inbox. Creates a new conversation thread. Supports HTML, CC/BCC, and labels.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
total = len(input_data.to) + len(input_data.cc) + len(input_data.bcc)
|
||||
if total > 50:
|
||||
raise ValueError(
|
||||
f"Max 50 combined recipients across to, cc, and bcc (got {total})"
|
||||
)
|
||||
|
||||
client = _client(credentials)
|
||||
params: dict = {
|
||||
"to": input_data.to,
|
||||
"subject": input_data.subject,
|
||||
"text": input_data.text,
|
||||
}
|
||||
if input_data.html:
|
||||
params["html"] = input_data.html
|
||||
if input_data.cc:
|
||||
params["cc"] = input_data.cc
|
||||
if input_data.bcc:
|
||||
params["bcc"] = input_data.bcc
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
msg = await client.inboxes.messages.send(input_data.inbox_id, **params)
|
||||
result = msg.__dict__ if hasattr(msg, "__dict__") else {}
|
||||
|
||||
yield "message_id", msg.message_id
|
||||
yield "thread_id", getattr(msg, "thread_id", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailListMessagesBlock(Block):
|
||||
"""
|
||||
List all messages in an AgentMail inbox with optional label filtering.
|
||||
|
||||
Returns a paginated list of messages. Use labels to filter (e.g.
|
||||
labels=['unread'] to only get unprocessed messages). Useful for
|
||||
polling workflows or building inbox views.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to list messages from"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of messages to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Only return messages with ALL of these labels (e.g. ['unread'] or ['q4-campaign', 'follow-up'])",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
messages: list[dict] = SchemaField(
|
||||
description="List of message objects with subject, sender, text, html, labels, etc."
|
||||
)
|
||||
count: int = SchemaField(description="Number of messages returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="721234df-c7a2-4927-b205-744badbd5844",
|
||||
description="List messages in an AgentMail inbox. Filter by labels to find unread, campaign-tagged, or categorized messages.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
response = await client.inboxes.messages.list(input_data.inbox_id, **params)
|
||||
messages = [
|
||||
m.__dict__ if hasattr(m, "__dict__") else m
|
||||
for m in getattr(response, "messages", [])
|
||||
]
|
||||
|
||||
yield "messages", messages
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(messages)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailGetMessageBlock(Block):
|
||||
"""
|
||||
Retrieve a specific email message by ID from an AgentMail inbox.
|
||||
|
||||
Returns the full message including subject, body (text and HTML),
|
||||
sender, recipients, and attachments. Use extracted_text to get
|
||||
only the new reply content without quoted history.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the message belongs to"
|
||||
)
|
||||
message_id: str = SchemaField(
|
||||
description="Message ID to retrieve (e.g. '<abc123@agentmail.to>')"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(description="Unique identifier of the message")
|
||||
thread_id: str = SchemaField(description="Thread this message belongs to")
|
||||
subject: str = SchemaField(description="Email subject line")
|
||||
text: str = SchemaField(
|
||||
description="Full plain text body (may include quoted reply history)"
|
||||
)
|
||||
extracted_text: str = SchemaField(
|
||||
description="Just the new reply content with quoted history stripped. Best for AI processing.",
|
||||
default="",
|
||||
)
|
||||
html: str = SchemaField(description="HTML body of the email", default="")
|
||||
result: dict = SchemaField(
|
||||
description="Complete message object with all fields including sender, recipients, attachments, labels"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="2788bdfa-1527-4603-a5e4-a455c05c032f",
|
||||
description="Retrieve a specific email message by ID. Includes extracted_text for clean reply content without quoted history.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
msg = await client.inboxes.messages.get(
|
||||
inbox_id=input_data.inbox_id,
|
||||
message_id=input_data.message_id,
|
||||
)
|
||||
result = msg.__dict__ if hasattr(msg, "__dict__") else {}
|
||||
|
||||
yield "message_id", msg.message_id
|
||||
yield "thread_id", getattr(msg, "thread_id", None) or ""
|
||||
yield "subject", getattr(msg, "subject", None) or ""
|
||||
yield "text", getattr(msg, "text", None) or ""
|
||||
yield "extracted_text", getattr(msg, "extracted_text", None) or ""
|
||||
yield "html", getattr(msg, "html", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailReplyToMessageBlock(Block):
|
||||
"""
|
||||
Reply to an existing email message, keeping the reply in the same thread.
|
||||
|
||||
The reply is automatically added to the same conversation thread as the
|
||||
original message. Use this for multi-turn agent conversations.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to send the reply from"
|
||||
)
|
||||
message_id: str = SchemaField(
|
||||
description="Message ID to reply to (e.g. '<abc123@agentmail.to>')"
|
||||
)
|
||||
text: str = SchemaField(description="Plain text body of the reply")
|
||||
html: str = SchemaField(
|
||||
description="Rich HTML body of the reply",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(
|
||||
description="Unique identifier of the reply message"
|
||||
)
|
||||
thread_id: str = SchemaField(description="Thread ID the reply was added to")
|
||||
result: dict = SchemaField(
|
||||
description="Complete reply message object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b9fe53fa-5026-4547-9570-b54ccb487229",
|
||||
description="Reply to an existing email in the same conversation thread. Use for multi-turn agent conversations.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"text": input_data.text}
|
||||
if input_data.html:
|
||||
params["html"] = input_data.html
|
||||
|
||||
reply = await client.inboxes.messages.reply(
|
||||
inbox_id=input_data.inbox_id,
|
||||
message_id=input_data.message_id,
|
||||
**params,
|
||||
)
|
||||
result = reply.__dict__ if hasattr(reply, "__dict__") else {}
|
||||
|
||||
yield "message_id", reply.message_id
|
||||
yield "thread_id", getattr(reply, "thread_id", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailForwardMessageBlock(Block):
|
||||
"""
|
||||
Forward an existing email message to one or more recipients.
|
||||
|
||||
Sends the original message content to different email addresses.
|
||||
Optionally prepend additional text or override the subject line.
|
||||
Max 50 combined recipients across to, cc, and bcc.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to forward from"
|
||||
)
|
||||
message_id: str = SchemaField(description="Message ID to forward")
|
||||
to: list[str] = SchemaField(
|
||||
description="Recipient email addresses to forward the message to (e.g. ['user@example.com'])"
|
||||
)
|
||||
cc: list[str] = SchemaField(
|
||||
description="CC recipient email addresses",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
bcc: list[str] = SchemaField(
|
||||
description="BCC recipient email addresses (hidden from other recipients)",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
subject: str = SchemaField(
|
||||
description="Override the subject line (defaults to 'Fwd: <original subject>')",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
text: str = SchemaField(
|
||||
description="Additional plain text to prepend before the forwarded content",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
html: str = SchemaField(
|
||||
description="Additional HTML to prepend before the forwarded content",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(
|
||||
description="Unique identifier of the forwarded message"
|
||||
)
|
||||
thread_id: str = SchemaField(description="Thread ID of the forward")
|
||||
result: dict = SchemaField(
|
||||
description="Complete forwarded message object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b70c7e33-5d66-4f8e-897f-ac73a7bfce82",
|
||||
description="Forward an email message to one or more recipients. Supports CC/BCC and optional extra text or subject override.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
total = len(input_data.to) + len(input_data.cc) + len(input_data.bcc)
|
||||
if total > 50:
|
||||
raise ValueError(
|
||||
f"Max 50 combined recipients across to, cc, and bcc (got {total})"
|
||||
)
|
||||
|
||||
client = _client(credentials)
|
||||
params: dict = {"to": input_data.to}
|
||||
if input_data.cc:
|
||||
params["cc"] = input_data.cc
|
||||
if input_data.bcc:
|
||||
params["bcc"] = input_data.bcc
|
||||
if input_data.subject:
|
||||
params["subject"] = input_data.subject
|
||||
if input_data.text:
|
||||
params["text"] = input_data.text
|
||||
if input_data.html:
|
||||
params["html"] = input_data.html
|
||||
|
||||
fwd = await client.inboxes.messages.forward(
|
||||
inbox_id=input_data.inbox_id,
|
||||
message_id=input_data.message_id,
|
||||
**params,
|
||||
)
|
||||
result = fwd.__dict__ if hasattr(fwd, "__dict__") else {}
|
||||
|
||||
yield "message_id", fwd.message_id
|
||||
yield "thread_id", getattr(fwd, "thread_id", None) or ""
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailUpdateMessageBlock(Block):
|
||||
"""
|
||||
Add or remove labels on an email message for state management.
|
||||
|
||||
Labels are string tags used to track message state (read/unread),
|
||||
categorize messages (billing, support), or tag campaigns (q4-outreach).
|
||||
Common pattern: add 'read' and remove 'unread' after processing a message.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the message belongs to"
|
||||
)
|
||||
message_id: str = SchemaField(description="Message ID to update labels on")
|
||||
add_labels: list[str] = SchemaField(
|
||||
description="Labels to add (e.g. ['read', 'processed', 'high-priority'])",
|
||||
default_factory=list,
|
||||
)
|
||||
remove_labels: list[str] = SchemaField(
|
||||
description="Labels to remove (e.g. ['unread', 'pending'])",
|
||||
default_factory=list,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
message_id: str = SchemaField(description="The updated message ID")
|
||||
result: dict = SchemaField(
|
||||
description="Complete updated message object with current labels"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="694ff816-4c89-4a5e-a552-8c31be187735",
|
||||
description="Add or remove labels on an email message. Use for read/unread tracking, campaign tagging, or state management.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {}
|
||||
if input_data.add_labels:
|
||||
params["add_labels"] = input_data.add_labels
|
||||
if input_data.remove_labels:
|
||||
params["remove_labels"] = input_data.remove_labels
|
||||
|
||||
msg = await client.inboxes.messages.update(
|
||||
inbox_id=input_data.inbox_id,
|
||||
message_id=input_data.message_id,
|
||||
**params,
|
||||
)
|
||||
result = msg.__dict__ if hasattr(msg, "__dict__") else {}
|
||||
|
||||
yield "message_id", msg.message_id
|
||||
yield "result", result
|
||||
486
autogpt_platform/backend/backend/blocks/agent_mail/pods.py
Normal file
486
autogpt_platform/backend/backend/blocks/agent_mail/pods.py
Normal file
@@ -0,0 +1,486 @@
|
||||
"""
|
||||
AgentMail Pod blocks — create, get, list, delete pods and list pod-scoped resources.
|
||||
|
||||
Pods provide multi-tenant isolation between your customers. Each pod acts as
|
||||
an isolated workspace containing its own inboxes, domains, threads, and drafts.
|
||||
Use pods when building SaaS platforms, agency tools, or AI agent fleets that
|
||||
serve multiple customers.
|
||||
"""
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailCreatePodBlock(Block):
|
||||
"""
|
||||
Create a new pod for multi-tenant customer isolation.
|
||||
|
||||
Each pod acts as an isolated workspace for one customer or tenant.
|
||||
Use client_id to map pods to your internal tenant IDs for idempotent
|
||||
creation (safe to retry without creating duplicates).
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
client_id: str = SchemaField(
|
||||
description="Your internal tenant/customer ID for idempotent mapping. Lets you access the pod by your own ID instead of AgentMail's pod_id.",
|
||||
default="",
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
pod_id: str = SchemaField(description="Unique identifier of the created pod")
|
||||
result: dict = SchemaField(description="Complete pod object with all metadata")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a2db9784-2d17-4f8f-9d6b-0214e6f22101",
|
||||
description="Create a new pod for multi-tenant customer isolation. Use client_id to map to your internal tenant IDs.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {}
|
||||
if input_data.client_id:
|
||||
params["client_id"] = input_data.client_id
|
||||
|
||||
pod = await client.pods.create(**params)
|
||||
result = pod.__dict__ if hasattr(pod, "__dict__") else {}
|
||||
|
||||
yield "pod_id", pod.pod_id
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailGetPodBlock(Block):
|
||||
"""
|
||||
Retrieve details of an existing pod by its ID.
|
||||
|
||||
Returns the pod metadata including its client_id mapping and
|
||||
creation timestamp.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(description="Pod ID to retrieve")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
pod_id: str = SchemaField(description="Unique identifier of the pod")
|
||||
result: dict = SchemaField(description="Complete pod object with all metadata")
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="553361bc-bb1b-4322-9ad4-0c226200217e",
|
||||
description="Retrieve details of an existing pod including its client_id mapping and metadata.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
pod = await client.pods.get(pod_id=input_data.pod_id)
|
||||
result = pod.__dict__ if hasattr(pod, "__dict__") else {}
|
||||
|
||||
yield "pod_id", pod.pod_id
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailListPodsBlock(Block):
|
||||
"""
|
||||
List all pods in your AgentMail organization.
|
||||
|
||||
Returns a paginated list of all tenant pods with their metadata.
|
||||
Use this to see all customer workspaces at a glance.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of pods to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
pods: list[dict] = SchemaField(
|
||||
description="List of pod objects with pod_id, client_id, creation time, etc."
|
||||
)
|
||||
count: int = SchemaField(description="Number of pods returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="9d3725ee-2968-431a-a816-857ab41e1420",
|
||||
description="List all tenant pods in your organization. See all customer workspaces at a glance.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.pods.list(**params)
|
||||
pods = [
|
||||
p.__dict__ if hasattr(p, "__dict__") else p
|
||||
for p in getattr(response, "pods", [])
|
||||
]
|
||||
|
||||
yield "pods", pods
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(pods)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailDeletePodBlock(Block):
|
||||
"""
|
||||
Permanently delete a pod. All inboxes and domains must be removed first.
|
||||
|
||||
You cannot delete a pod that still contains inboxes or domains.
|
||||
Delete all child resources first, then delete the pod.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(
|
||||
description="Pod ID to permanently delete (must have no inboxes or domains)"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
success: bool = SchemaField(
|
||||
description="True if the pod was successfully deleted"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="f371f8cd-682d-4f5f-905c-529c74a8fb35",
|
||||
description="Permanently delete a pod. All inboxes and domains must be removed first.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
await client.pods.delete(pod_id=input_data.pod_id)
|
||||
yield "success", True
|
||||
|
||||
|
||||
class AgentMailListPodInboxesBlock(Block):
|
||||
"""
|
||||
List all inboxes within a specific pod (customer workspace).
|
||||
|
||||
Returns only the inboxes belonging to this pod, providing
|
||||
tenant-scoped visibility.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(description="Pod ID to list inboxes from")
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of inboxes to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inboxes: list[dict] = SchemaField(
|
||||
description="List of inbox objects within this pod"
|
||||
)
|
||||
count: int = SchemaField(description="Number of inboxes returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a8c17ce0-b7c1-4bc3-ae39-680e1952e5d0",
|
||||
description="List all inboxes within a pod. View email accounts scoped to a specific customer.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.pods.inboxes.list(pod_id=input_data.pod_id, **params)
|
||||
inboxes = [
|
||||
i.__dict__ if hasattr(i, "__dict__") else i
|
||||
for i in getattr(response, "inboxes", [])
|
||||
]
|
||||
|
||||
yield "inboxes", inboxes
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(inboxes)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailListPodThreadsBlock(Block):
|
||||
"""
|
||||
List all conversation threads across all inboxes within a pod.
|
||||
|
||||
Returns threads from every inbox in the pod. Use for building
|
||||
per-customer dashboards showing all email activity, or for
|
||||
supervisor agents monitoring a customer's conversations.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(description="Pod ID to list threads from")
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of threads to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Only return threads matching ALL of these labels",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
threads: list[dict] = SchemaField(
|
||||
description="List of thread objects from all inboxes in this pod"
|
||||
)
|
||||
count: int = SchemaField(description="Number of threads returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="80214f08-8b85-4533-a6b8-f8123bfcb410",
|
||||
description="List all conversation threads across all inboxes within a pod. View all email activity for a customer.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
response = await client.pods.threads.list(pod_id=input_data.pod_id, **params)
|
||||
threads = [
|
||||
t.__dict__ if hasattr(t, "__dict__") else t
|
||||
for t in getattr(response, "threads", [])
|
||||
]
|
||||
|
||||
yield "threads", threads
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(threads)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailListPodDraftsBlock(Block):
|
||||
"""
|
||||
List all drafts across all inboxes within a pod.
|
||||
|
||||
Returns pending drafts from every inbox in the pod. Use for
|
||||
per-customer approval dashboards or monitoring scheduled sends.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(description="Pod ID to list drafts from")
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of drafts to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
drafts: list[dict] = SchemaField(
|
||||
description="List of draft objects from all inboxes in this pod"
|
||||
)
|
||||
count: int = SchemaField(description="Number of drafts returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="12fd7a3e-51ad-4b20-97c1-0391f207f517",
|
||||
description="List all drafts across all inboxes within a pod. View pending emails for a customer.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
|
||||
response = await client.pods.drafts.list(pod_id=input_data.pod_id, **params)
|
||||
drafts = [
|
||||
d.__dict__ if hasattr(d, "__dict__") else d
|
||||
for d in getattr(response, "drafts", [])
|
||||
]
|
||||
|
||||
yield "drafts", drafts
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(drafts)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailCreatePodInboxBlock(Block):
|
||||
"""
|
||||
Create a new email inbox within a specific pod (customer workspace).
|
||||
|
||||
The inbox is automatically scoped to the pod and inherits its
|
||||
isolation guarantees. If username/domain are not provided,
|
||||
AgentMail auto-generates a unique address.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
pod_id: str = SchemaField(description="Pod ID to create the inbox in")
|
||||
username: str = SchemaField(
|
||||
description="Local part of the email address (e.g. 'support'). Leave empty to auto-generate.",
|
||||
default="",
|
||||
)
|
||||
domain: str = SchemaField(
|
||||
description="Email domain (e.g. 'mydomain.com'). Defaults to agentmail.to if empty.",
|
||||
default="",
|
||||
)
|
||||
display_name: str = SchemaField(
|
||||
description="Friendly name shown in the 'From' field (e.g. 'Customer Support')",
|
||||
default="",
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
inbox_id: str = SchemaField(
|
||||
description="Unique identifier of the created inbox"
|
||||
)
|
||||
email_address: str = SchemaField(description="Full email address of the inbox")
|
||||
result: dict = SchemaField(
|
||||
description="Complete inbox object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c6862373-1ac6-402e-89e6-7db1fea882af",
|
||||
description="Create a new email inbox within a pod. The inbox is scoped to the customer workspace.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {}
|
||||
if input_data.username:
|
||||
params["username"] = input_data.username
|
||||
if input_data.domain:
|
||||
params["domain"] = input_data.domain
|
||||
if input_data.display_name:
|
||||
params["display_name"] = input_data.display_name
|
||||
|
||||
inbox = await client.pods.inboxes.create(pod_id=input_data.pod_id, **params)
|
||||
result = inbox.__dict__ if hasattr(inbox, "__dict__") else {}
|
||||
|
||||
yield "inbox_id", inbox.inbox_id
|
||||
yield "email_address", getattr(inbox, "email_address", inbox.inbox_id)
|
||||
yield "result", result
|
||||
329
autogpt_platform/backend/backend/blocks/agent_mail/threads.py
Normal file
329
autogpt_platform/backend/backend/blocks/agent_mail/threads.py
Normal file
@@ -0,0 +1,329 @@
|
||||
"""
|
||||
AgentMail Thread blocks — list, get, and delete conversation threads.
|
||||
|
||||
A Thread groups related messages into a single conversation. Threads are
|
||||
created automatically when a new message is sent and grow as replies are added.
|
||||
Threads can be queried per-inbox or across the entire organization.
|
||||
"""
|
||||
|
||||
from agentmail import AsyncAgentMail
|
||||
|
||||
from backend.sdk import (
|
||||
APIKeyCredentials,
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
from ._config import agent_mail
|
||||
|
||||
|
||||
def _client(credentials: APIKeyCredentials) -> AsyncAgentMail:
|
||||
return AsyncAgentMail(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
|
||||
class AgentMailListInboxThreadsBlock(Block):
|
||||
"""
|
||||
List all conversation threads within a specific AgentMail inbox.
|
||||
|
||||
Returns a paginated list of threads with optional label filtering.
|
||||
Use labels to find threads by campaign, status, or custom tags.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address to list threads from"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of threads to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Only return threads matching ALL of these labels (e.g. ['q4-campaign', 'follow-up'])",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
threads: list[dict] = SchemaField(
|
||||
description="List of thread objects with thread_id, subject, message count, labels, etc."
|
||||
)
|
||||
count: int = SchemaField(description="Number of threads returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="63dd9e2d-ef81-405c-b034-c031f0437334",
|
||||
description="List all conversation threads in an AgentMail inbox. Filter by labels for campaign tracking or status management.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
response = await client.inboxes.threads.list(
|
||||
inbox_id=input_data.inbox_id, **params
|
||||
)
|
||||
threads = [
|
||||
t.__dict__ if hasattr(t, "__dict__") else t
|
||||
for t in getattr(response, "threads", [])
|
||||
]
|
||||
|
||||
yield "threads", threads
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(threads)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailGetInboxThreadBlock(Block):
|
||||
"""
|
||||
Retrieve a single conversation thread from an AgentMail inbox.
|
||||
|
||||
Returns the thread with all its messages in chronological order.
|
||||
Use this to get the full conversation history for context when
|
||||
composing replies.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the thread belongs to"
|
||||
)
|
||||
thread_id: str = SchemaField(description="Thread ID to retrieve")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
thread_id: str = SchemaField(description="Unique identifier of the thread")
|
||||
messages: list[dict] = SchemaField(
|
||||
description="All messages in the thread, in chronological order"
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete thread object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="42866290-1479-4153-83e7-550b703e9da2",
|
||||
description="Retrieve a conversation thread with all its messages. Use for getting full conversation context before replying.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
thread = await client.inboxes.threads.get(
|
||||
inbox_id=input_data.inbox_id,
|
||||
thread_id=input_data.thread_id,
|
||||
)
|
||||
messages = [
|
||||
m.__dict__ if hasattr(m, "__dict__") else m
|
||||
for m in getattr(thread, "messages", [])
|
||||
]
|
||||
result = dict(thread.__dict__) if hasattr(thread, "__dict__") else {}
|
||||
if "messages" in result:
|
||||
result["messages"] = messages
|
||||
|
||||
yield "thread_id", thread.thread_id
|
||||
yield "messages", messages
|
||||
yield "result", result
|
||||
|
||||
|
||||
class AgentMailDeleteInboxThreadBlock(Block):
|
||||
"""
|
||||
Permanently delete a conversation thread and all its messages from an inbox.
|
||||
|
||||
This removes the thread and every message within it. This action
|
||||
cannot be undone.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
inbox_id: str = SchemaField(
|
||||
description="Inbox ID or email address the thread belongs to"
|
||||
)
|
||||
thread_id: str = SchemaField(description="Thread ID to permanently delete")
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
success: bool = SchemaField(
|
||||
description="True if the thread was successfully deleted"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="18cd5f6f-4ff6-45da-8300-25a50ea7fb75",
|
||||
description="Permanently delete a conversation thread and all its messages. This action cannot be undone.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
is_sensitive_action=True,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
await client.inboxes.threads.delete(
|
||||
inbox_id=input_data.inbox_id,
|
||||
thread_id=input_data.thread_id,
|
||||
)
|
||||
yield "success", True
|
||||
|
||||
|
||||
class AgentMailListOrgThreadsBlock(Block):
|
||||
"""
|
||||
List conversation threads across ALL inboxes in your organization.
|
||||
|
||||
Unlike per-inbox listing, this returns threads from every inbox.
|
||||
Ideal for building supervisor agents that monitor all conversations,
|
||||
analytics dashboards, or cross-agent routing workflows.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
limit: int = SchemaField(
|
||||
description="Maximum number of threads to return per page (1-100)",
|
||||
default=20,
|
||||
advanced=True,
|
||||
)
|
||||
page_token: str = SchemaField(
|
||||
description="Token from a previous response to fetch the next page",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
labels: list[str] = SchemaField(
|
||||
description="Only return threads matching ALL of these labels",
|
||||
default_factory=list,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
threads: list[dict] = SchemaField(
|
||||
description="List of thread objects from all inboxes in the organization"
|
||||
)
|
||||
count: int = SchemaField(description="Number of threads returned")
|
||||
next_page_token: str = SchemaField(
|
||||
description="Token for the next page. Empty if no more results.",
|
||||
default="",
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="d7a0657b-58ab-48b2-898b-7bd94f44a708",
|
||||
description="List threads across ALL inboxes in your organization. Use for supervisor agents, dashboards, or cross-agent monitoring.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
params: dict = {"limit": input_data.limit}
|
||||
if input_data.page_token:
|
||||
params["page_token"] = input_data.page_token
|
||||
if input_data.labels:
|
||||
params["labels"] = input_data.labels
|
||||
|
||||
response = await client.threads.list(**params)
|
||||
threads = [
|
||||
t.__dict__ if hasattr(t, "__dict__") else t
|
||||
for t in getattr(response, "threads", [])
|
||||
]
|
||||
|
||||
yield "threads", threads
|
||||
yield "count", (
|
||||
c if (c := getattr(response, "count", None)) is not None else len(threads)
|
||||
)
|
||||
yield "next_page_token", getattr(response, "next_page_token", "") or ""
|
||||
|
||||
|
||||
class AgentMailGetOrgThreadBlock(Block):
|
||||
"""
|
||||
Retrieve a single conversation thread by ID from anywhere in the organization.
|
||||
|
||||
Works without needing to know which inbox the thread belongs to.
|
||||
Returns the thread with all its messages in chronological order.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: CredentialsMetaInput = agent_mail.credentials_field(
|
||||
description="AgentMail API key from https://console.agentmail.to"
|
||||
)
|
||||
thread_id: str = SchemaField(
|
||||
description="Thread ID to retrieve (works across all inboxes)"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
thread_id: str = SchemaField(description="Unique identifier of the thread")
|
||||
messages: list[dict] = SchemaField(
|
||||
description="All messages in the thread, in chronological order"
|
||||
)
|
||||
result: dict = SchemaField(
|
||||
description="Complete thread object with all metadata"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the operation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="39aaae31-3eb1-44c6-9e37-5a44a4529649",
|
||||
description="Retrieve a conversation thread by ID from anywhere in the organization, without needing the inbox ID.",
|
||||
categories={BlockCategory.COMMUNICATION},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
client = _client(credentials)
|
||||
thread = await client.threads.get(thread_id=input_data.thread_id)
|
||||
messages = [
|
||||
m.__dict__ if hasattr(m, "__dict__") else m
|
||||
for m in getattr(thread, "messages", [])
|
||||
]
|
||||
result = dict(thread.__dict__) if hasattr(thread, "__dict__") else {}
|
||||
if "messages" in result:
|
||||
result["messages"] = messages
|
||||
|
||||
yield "thread_id", thread.thread_id
|
||||
yield "messages", messages
|
||||
yield "result", result
|
||||
@@ -40,7 +40,7 @@ from backend.copilot.response_model import (
|
||||
from backend.copilot.service import (
|
||||
_build_system_prompt,
|
||||
_generate_session_title,
|
||||
client,
|
||||
_get_openai_client,
|
||||
config,
|
||||
)
|
||||
from backend.copilot.tools import execute_tool, get_available_tools
|
||||
@@ -89,7 +89,7 @@ async def _compress_session_messages(
|
||||
result = await compress_context(
|
||||
messages=messages_dict,
|
||||
model=config.model,
|
||||
client=client,
|
||||
client=_get_openai_client(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning("[Baseline] Context compression with LLM failed: %s", e)
|
||||
@@ -235,7 +235,7 @@ async def stream_chat_completion_baseline(
|
||||
)
|
||||
if tools:
|
||||
create_kwargs["tools"] = tools
|
||||
response = await client.chat.completions.create(**create_kwargs) # type: ignore[arg-type] # dynamic kwargs
|
||||
response = await _get_openai_client().chat.completions.create(**create_kwargs) # type: ignore[arg-type] # dynamic kwargs
|
||||
|
||||
# Accumulate streamed response (text + tool calls)
|
||||
round_text = ""
|
||||
|
||||
@@ -28,10 +28,24 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
config = ChatConfig()
|
||||
settings = Settings()
|
||||
client = LangfuseAsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
|
||||
|
||||
_client: LangfuseAsyncOpenAI | None = None
|
||||
_langfuse = None
|
||||
|
||||
|
||||
langfuse = get_client()
|
||||
def _get_openai_client() -> LangfuseAsyncOpenAI:
|
||||
global _client
|
||||
if _client is None:
|
||||
_client = LangfuseAsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
|
||||
return _client
|
||||
|
||||
|
||||
def _get_langfuse():
|
||||
global _langfuse
|
||||
if _langfuse is None:
|
||||
_langfuse = get_client()
|
||||
return _langfuse
|
||||
|
||||
|
||||
# Default system prompt used when Langfuse is not configured
|
||||
# Provides minimal baseline tone and personality - all workflow, tools, and
|
||||
@@ -84,7 +98,7 @@ async def _get_system_prompt_template(context: str) -> str:
|
||||
else "latest"
|
||||
)
|
||||
prompt = await asyncio.to_thread(
|
||||
langfuse.get_prompt,
|
||||
_get_langfuse().get_prompt,
|
||||
config.langfuse_prompt_name,
|
||||
label=label,
|
||||
cache_ttl_seconds=config.langfuse_prompt_cache_ttl,
|
||||
@@ -158,7 +172,7 @@ async def _generate_session_title(
|
||||
"environment": settings.config.app_env.value,
|
||||
}
|
||||
|
||||
response = await client.chat.completions.create(
|
||||
response = await _get_openai_client().chat.completions.create(
|
||||
model=config.title_model,
|
||||
messages=[
|
||||
{
|
||||
|
||||
21
autogpt_platform/backend/poetry.lock
generated
21
autogpt_platform/backend/poetry.lock
generated
@@ -1,5 +1,24 @@
|
||||
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "agentmail"
|
||||
version = "0.4.7"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "agentmail-0.4.7-py3-none-any.whl", hash = "sha256:059fcd1bb6ed0d9c8aea33419e0592d2a1d7fce810c9756b43e7fb40da484eb0"},
|
||||
{file = "agentmail-0.4.7.tar.gz", hash = "sha256:5648dffb5bedd8f165d633271873de61362d5a76e8b5256e8916ecdd859539b9"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.21.2"
|
||||
pydantic = ">=1.9.2"
|
||||
pydantic-core = ">=2.18.2"
|
||||
typing_extensions = ">=4.0.0"
|
||||
websockets = ">=12.0"
|
||||
|
||||
[[package]]
|
||||
name = "aio-pika"
|
||||
version = "9.5.8"
|
||||
@@ -8969,4 +8988,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "86dab25684dd46e635a33bd33281a926e5626a874ecc048c34389fecf34a87d8"
|
||||
content-hash = "b5888ff2e8bf296540994d9a620d67bd2a4a50b418c2159fa38f0b7af1e7b538"
|
||||
|
||||
@@ -12,6 +12,7 @@ python = ">=3.10,<3.14"
|
||||
aio-pika = "^9.5.5"
|
||||
aiohttp = "^3.10.0"
|
||||
aiodns = "^3.5.0"
|
||||
agentmail = "^0.4.5"
|
||||
anthropic = "^0.79.0"
|
||||
apscheduler = "^3.11.1"
|
||||
autogpt-libs = { path = "../autogpt_libs", develop = true }
|
||||
|
||||
BIN
autogpt_platform/frontend/public/integrations/agent_mail.png
Normal file
BIN
autogpt_platform/frontend/public/integrations/agent_mail.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
@@ -0,0 +1,440 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { screen, cleanup } from "@testing-library/react";
|
||||
import { render } from "@/tests/integrations/test-utils";
|
||||
import React from "react";
|
||||
import { BlockUIType } from "../components/types";
|
||||
import type {
|
||||
CustomNodeData,
|
||||
CustomNode as CustomNodeType,
|
||||
} from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { NodeProps } from "@xyflow/react";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
|
||||
// ---- Mock sub-components ----
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer",
|
||||
() => ({
|
||||
NodeContainer: ({
|
||||
children,
|
||||
hasErrors,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
hasErrors: boolean;
|
||||
}) => (
|
||||
<div data-testid="node-container" data-has-errors={String(!!hasErrors)}>
|
||||
{children}
|
||||
</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeHeader",
|
||||
() => ({
|
||||
NodeHeader: ({ data }: { data: CustomNodeData }) => (
|
||||
<div data-testid="node-header">{data.title}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/StickyNoteBlock",
|
||||
() => ({
|
||||
StickyNoteBlock: ({ data }: { data: CustomNodeData }) => (
|
||||
<div data-testid="sticky-note-block">{data.title}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeAdvancedToggle",
|
||||
() => ({
|
||||
NodeAdvancedToggle: () => <div data-testid="node-advanced-toggle" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/NodeOutput",
|
||||
() => ({
|
||||
NodeDataRenderer: () => <div data-testid="node-data-renderer" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge",
|
||||
() => ({
|
||||
NodeExecutionBadge: () => <div data-testid="node-execution-badge" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeRightClickMenu",
|
||||
() => ({
|
||||
NodeRightClickMenu: ({ children }: { children: React.ReactNode }) => (
|
||||
<div data-testid="node-right-click-menu">{children}</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/WebhookDisclaimer",
|
||||
() => ({
|
||||
WebhookDisclaimer: () => <div data-testid="webhook-disclaimer" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/SubAgentUpdate/SubAgentUpdateFeature",
|
||||
() => ({
|
||||
SubAgentUpdateFeature: () => <div data-testid="sub-agent-update" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/AyrshareConnectButton",
|
||||
() => ({
|
||||
AyrshareConnectButton: () => <div data-testid="ayrshare-connect-button" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/FormCreator",
|
||||
() => ({
|
||||
FormCreator: () => <div data-testid="form-creator" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/OutputHandler",
|
||||
() => ({
|
||||
OutputHandler: () => <div data-testid="output-handler" />,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/components/renderers/InputRenderer/utils/input-schema-pre-processor",
|
||||
() => ({
|
||||
preprocessInputSchema: (schema: unknown) => schema,
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/FlowEditor/nodes/CustomNode/useCustomNode",
|
||||
() => ({
|
||||
useCustomNode: ({ data }: { data: CustomNodeData }) => ({
|
||||
inputSchema: data.inputSchema,
|
||||
outputSchema: data.outputSchema,
|
||||
isMCPWithTool: false,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: () => ({
|
||||
getNodes: () => [],
|
||||
getEdges: () => [],
|
||||
setNodes: vi.fn(),
|
||||
setEdges: vi.fn(),
|
||||
getNode: vi.fn(),
|
||||
}),
|
||||
useNodeId: () => "test-node-id",
|
||||
useUpdateNodeInternals: () => vi.fn(),
|
||||
Handle: ({ children }: { children: React.ReactNode }) => (
|
||||
<div>{children}</div>
|
||||
),
|
||||
Position: { Left: "left", Right: "right", Top: "top", Bottom: "bottom" },
|
||||
};
|
||||
});
|
||||
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
|
||||
// ---- Helpers ----
|
||||
|
||||
function buildNodeData(
|
||||
overrides: Partial<CustomNodeData> = {},
|
||||
): CustomNodeData {
|
||||
return {
|
||||
hardcodedValues: {},
|
||||
title: "Test Block",
|
||||
description: "A test block",
|
||||
inputSchema: { type: "object", properties: {} },
|
||||
outputSchema: { type: "object", properties: {} },
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: "block-123",
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function buildNodeProps(
|
||||
dataOverrides: Partial<CustomNodeData> = {},
|
||||
propsOverrides: Partial<NodeProps<CustomNodeType>> = {},
|
||||
): NodeProps<CustomNodeType> {
|
||||
return {
|
||||
id: "node-1",
|
||||
data: buildNodeData(dataOverrides),
|
||||
selected: false,
|
||||
type: "custom",
|
||||
isConnectable: true,
|
||||
positionAbsoluteX: 0,
|
||||
positionAbsoluteY: 0,
|
||||
zIndex: 0,
|
||||
dragging: false,
|
||||
dragHandle: undefined,
|
||||
draggable: true,
|
||||
selectable: true,
|
||||
deletable: true,
|
||||
parentId: undefined,
|
||||
width: undefined,
|
||||
height: undefined,
|
||||
sourcePosition: undefined,
|
||||
targetPosition: undefined,
|
||||
...propsOverrides,
|
||||
};
|
||||
}
|
||||
|
||||
function renderCustomNode(
|
||||
dataOverrides: Partial<CustomNodeData> = {},
|
||||
propsOverrides: Partial<NodeProps<CustomNodeType>> = {},
|
||||
) {
|
||||
const props = buildNodeProps(dataOverrides, propsOverrides);
|
||||
return render(<CustomNode {...props} />);
|
||||
}
|
||||
|
||||
function createExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult> = {},
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
node_exec_id: overrides.node_exec_id ?? "exec-1",
|
||||
node_id: overrides.node_id ?? "node-1",
|
||||
graph_exec_id: overrides.graph_exec_id ?? "graph-exec-1",
|
||||
graph_id: overrides.graph_id ?? "graph-1",
|
||||
graph_version: overrides.graph_version ?? 1,
|
||||
user_id: overrides.user_id ?? "test-user",
|
||||
block_id: overrides.block_id ?? "block-1",
|
||||
status: overrides.status ?? "COMPLETED",
|
||||
input_data: overrides.input_data ?? {},
|
||||
output_data: overrides.output_data ?? {},
|
||||
add_time: overrides.add_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
queue_time: overrides.queue_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
start_time: overrides.start_time ?? new Date("2024-01-01T00:00:01Z"),
|
||||
end_time: overrides.end_time ?? new Date("2024-01-01T00:00:02Z"),
|
||||
};
|
||||
}
|
||||
|
||||
// ---- Tests ----
|
||||
|
||||
beforeEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("CustomNode", () => {
|
||||
describe("STANDARD type rendering", () => {
|
||||
it("renders NodeHeader with the block title", () => {
|
||||
renderCustomNode({ title: "My Standard Block" });
|
||||
|
||||
const header = screen.getByTestId("node-header");
|
||||
expect(header).toBeDefined();
|
||||
expect(header.textContent).toContain("My Standard Block");
|
||||
});
|
||||
|
||||
it("renders NodeContainer, FormCreator, OutputHandler, and NodeExecutionBadge", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.getByTestId("node-container")).toBeDefined();
|
||||
expect(screen.getByTestId("form-creator")).toBeDefined();
|
||||
expect(screen.getByTestId("output-handler")).toBeDefined();
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
expect(screen.getByTestId("node-data-renderer")).toBeDefined();
|
||||
expect(screen.getByTestId("node-advanced-toggle")).toBeDefined();
|
||||
});
|
||||
|
||||
it("wraps content in NodeRightClickMenu", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.getByTestId("node-right-click-menu")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render StickyNoteBlock for STANDARD type", () => {
|
||||
renderCustomNode();
|
||||
|
||||
expect(screen.queryByTestId("sticky-note-block")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("NOTE type rendering", () => {
|
||||
it("renders StickyNoteBlock instead of main UI", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.NOTE, title: "My Note" });
|
||||
|
||||
const note = screen.getByTestId("sticky-note-block");
|
||||
expect(note).toBeDefined();
|
||||
expect(note.textContent).toContain("My Note");
|
||||
});
|
||||
|
||||
it("does not render NodeContainer or other standard components", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.NOTE });
|
||||
|
||||
expect(screen.queryByTestId("node-container")).toBeNull();
|
||||
expect(screen.queryByTestId("node-header")).toBeNull();
|
||||
expect(screen.queryByTestId("form-creator")).toBeNull();
|
||||
expect(screen.queryByTestId("output-handler")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("WEBHOOK type rendering", () => {
|
||||
it("renders WebhookDisclaimer for WEBHOOK type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.WEBHOOK });
|
||||
|
||||
expect(screen.getByTestId("webhook-disclaimer")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders WebhookDisclaimer for WEBHOOK_MANUAL type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.WEBHOOK_MANUAL });
|
||||
|
||||
expect(screen.getByTestId("webhook-disclaimer")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("AGENT type rendering", () => {
|
||||
it("renders SubAgentUpdateFeature for AGENT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AGENT });
|
||||
|
||||
expect(screen.getByTestId("sub-agent-update")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render SubAgentUpdateFeature for non-AGENT types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
|
||||
expect(screen.queryByTestId("sub-agent-update")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("OUTPUT type rendering", () => {
|
||||
it("does not render OutputHandler for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
|
||||
expect(screen.queryByTestId("output-handler")).toBeNull();
|
||||
});
|
||||
|
||||
it("still renders FormCreator and other components for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
|
||||
expect(screen.getByTestId("form-creator")).toBeDefined();
|
||||
expect(screen.getByTestId("node-header")).toBeDefined();
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("AYRSHARE type rendering", () => {
|
||||
it("renders AyrshareConnectButton for AYRSHARE type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AYRSHARE });
|
||||
|
||||
expect(screen.getByTestId("ayrshare-connect-button")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render AyrshareConnectButton for non-AYRSHARE types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
|
||||
expect(screen.queryByTestId("ayrshare-connect-button")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("error states", () => {
|
||||
it("sets hasErrors on NodeContainer when data.errors has non-empty values", () => {
|
||||
renderCustomNode({
|
||||
errors: { field1: "This field is required" },
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("true");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when data.errors is empty", () => {
|
||||
renderCustomNode({ errors: {} });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when data.errors values are all empty strings", () => {
|
||||
renderCustomNode({ errors: { field1: "" } });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("sets hasErrors when last execution result has error in output_data", () => {
|
||||
renderCustomNode({
|
||||
nodeExecutionResults: [
|
||||
createExecutionResult({
|
||||
output_data: { error: ["Something went wrong"] },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("true");
|
||||
});
|
||||
|
||||
it("does not set hasErrors when execution results have no error", () => {
|
||||
renderCustomNode({
|
||||
nodeExecutionResults: [
|
||||
createExecutionResult({
|
||||
output_data: { result: ["success"] },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
});
|
||||
|
||||
describe("NodeExecutionBadge", () => {
|
||||
it("always renders NodeExecutionBadge for non-NOTE types", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.STANDARD });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders NodeExecutionBadge for AGENT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.AGENT });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders NodeExecutionBadge for OUTPUT type", () => {
|
||||
renderCustomNode({ uiType: BlockUIType.OUTPUT });
|
||||
expect(screen.getByTestId("node-execution-badge")).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("renders without nodeExecutionResults", () => {
|
||||
renderCustomNode({ nodeExecutionResults: undefined });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("renders without errors property", () => {
|
||||
renderCustomNode({ errors: undefined });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
|
||||
it("renders with empty execution results array", () => {
|
||||
renderCustomNode({ nodeExecutionResults: [] });
|
||||
|
||||
const container = screen.getByTestId("node-container");
|
||||
expect(container).toBeDefined();
|
||||
expect(container.getAttribute("data-has-errors")).toBe("false");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,342 @@
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
import {
|
||||
render,
|
||||
screen,
|
||||
fireEvent,
|
||||
waitFor,
|
||||
cleanup,
|
||||
} from "@/tests/integrations/test-utils";
|
||||
import { useBlockMenuStore } from "../stores/blockMenuStore";
|
||||
import { useControlPanelStore } from "../stores/controlPanelStore";
|
||||
import { DefaultStateType } from "../components/NewControlPanel/NewBlockMenu/types";
|
||||
import { SearchEntryFilterAnyOfItem } from "@/app/api/__generated__/models/searchEntryFilterAnyOfItem";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mocks for heavy child components
|
||||
// ---------------------------------------------------------------------------
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewBlockMenu/BlockMenuDefault/BlockMenuDefault",
|
||||
() => ({
|
||||
BlockMenuDefault: () => (
|
||||
<div data-testid="block-menu-default">Default Content</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewBlockMenu/BlockMenuSearch/BlockMenuSearch",
|
||||
() => ({
|
||||
BlockMenuSearch: () => (
|
||||
<div data-testid="block-menu-search">Search Results</div>
|
||||
),
|
||||
}),
|
||||
);
|
||||
|
||||
// Mock query client used by the search bar hook
|
||||
vi.mock("@/lib/react-query/queryClient", () => ({
|
||||
getQueryClient: () => ({
|
||||
invalidateQueries: vi.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Reset stores before each test
|
||||
// ---------------------------------------------------------------------------
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
useBlockMenuStore.getState().reset();
|
||||
useBlockMenuStore.setState({
|
||||
filters: [],
|
||||
creators: [],
|
||||
creators_list: [],
|
||||
categoryCounts: {
|
||||
blocks: 0,
|
||||
integrations: 0,
|
||||
marketplace_agents: 0,
|
||||
my_agents: 0,
|
||||
},
|
||||
});
|
||||
useControlPanelStore.getState().reset();
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 1: blockMenuStore unit tests
|
||||
// ===========================================================================
|
||||
describe("blockMenuStore", () => {
|
||||
describe("searchQuery", () => {
|
||||
it("defaults to an empty string", () => {
|
||||
expect(useBlockMenuStore.getState().searchQuery).toBe("");
|
||||
});
|
||||
|
||||
it("sets the search query", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("timer");
|
||||
expect(useBlockMenuStore.getState().searchQuery).toBe("timer");
|
||||
});
|
||||
});
|
||||
|
||||
describe("defaultState", () => {
|
||||
it("defaults to SUGGESTION", () => {
|
||||
expect(useBlockMenuStore.getState().defaultState).toBe(
|
||||
DefaultStateType.SUGGESTION,
|
||||
);
|
||||
});
|
||||
|
||||
it("sets the default state", () => {
|
||||
useBlockMenuStore.getState().setDefaultState(DefaultStateType.ALL_BLOCKS);
|
||||
expect(useBlockMenuStore.getState().defaultState).toBe(
|
||||
DefaultStateType.ALL_BLOCKS,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("filters", () => {
|
||||
it("defaults to an empty array", () => {
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([]);
|
||||
});
|
||||
|
||||
it("adds a filter", () => {
|
||||
useBlockMenuStore.getState().addFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
]);
|
||||
});
|
||||
|
||||
it("removes a filter", () => {
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
SearchEntryFilterAnyOfItem.integrations,
|
||||
]);
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.removeFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.integrations,
|
||||
]);
|
||||
});
|
||||
|
||||
it("replaces all filters with setFilters", () => {
|
||||
useBlockMenuStore.getState().addFilter(SearchEntryFilterAnyOfItem.blocks);
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([SearchEntryFilterAnyOfItem.marketplace_agents]);
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.marketplace_agents,
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("creators", () => {
|
||||
it("adds a creator", () => {
|
||||
useBlockMenuStore.getState().addCreator("alice");
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["alice"]);
|
||||
});
|
||||
|
||||
it("removes a creator", () => {
|
||||
useBlockMenuStore.getState().setCreators(["alice", "bob"]);
|
||||
useBlockMenuStore.getState().removeCreator("alice");
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["bob"]);
|
||||
});
|
||||
|
||||
it("replaces all creators with setCreators", () => {
|
||||
useBlockMenuStore.getState().addCreator("alice");
|
||||
useBlockMenuStore.getState().setCreators(["charlie"]);
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["charlie"]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("categoryCounts", () => {
|
||||
it("sets category counts", () => {
|
||||
const counts = {
|
||||
blocks: 10,
|
||||
integrations: 5,
|
||||
marketplace_agents: 3,
|
||||
my_agents: 2,
|
||||
};
|
||||
useBlockMenuStore.getState().setCategoryCounts(counts);
|
||||
expect(useBlockMenuStore.getState().categoryCounts).toEqual(counts);
|
||||
});
|
||||
});
|
||||
|
||||
describe("searchId", () => {
|
||||
it("defaults to undefined", () => {
|
||||
expect(useBlockMenuStore.getState().searchId).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets and clears searchId", () => {
|
||||
useBlockMenuStore.getState().setSearchId("search-123");
|
||||
expect(useBlockMenuStore.getState().searchId).toBe("search-123");
|
||||
|
||||
useBlockMenuStore.getState().setSearchId(undefined);
|
||||
expect(useBlockMenuStore.getState().searchId).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("integration", () => {
|
||||
it("defaults to undefined", () => {
|
||||
expect(useBlockMenuStore.getState().integration).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets the integration", () => {
|
||||
useBlockMenuStore.getState().setIntegration("slack");
|
||||
expect(useBlockMenuStore.getState().integration).toBe("slack");
|
||||
});
|
||||
});
|
||||
|
||||
describe("reset", () => {
|
||||
it("resets searchQuery, searchId, defaultState, and integration", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("hello");
|
||||
useBlockMenuStore.getState().setSearchId("id-1");
|
||||
useBlockMenuStore.getState().setDefaultState(DefaultStateType.ALL_BLOCKS);
|
||||
useBlockMenuStore.getState().setIntegration("github");
|
||||
|
||||
useBlockMenuStore.getState().reset();
|
||||
|
||||
const state = useBlockMenuStore.getState();
|
||||
expect(state.searchQuery).toBe("");
|
||||
expect(state.searchId).toBeUndefined();
|
||||
expect(state.defaultState).toBe(DefaultStateType.SUGGESTION);
|
||||
expect(state.integration).toBeUndefined();
|
||||
});
|
||||
|
||||
it("does not reset filters or creators (by design)", () => {
|
||||
useBlockMenuStore
|
||||
.getState()
|
||||
.setFilters([SearchEntryFilterAnyOfItem.blocks]);
|
||||
useBlockMenuStore.getState().setCreators(["alice"]);
|
||||
|
||||
useBlockMenuStore.getState().reset();
|
||||
|
||||
expect(useBlockMenuStore.getState().filters).toEqual([
|
||||
SearchEntryFilterAnyOfItem.blocks,
|
||||
]);
|
||||
expect(useBlockMenuStore.getState().creators).toEqual(["alice"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 2: controlPanelStore unit tests
|
||||
// ===========================================================================
|
||||
describe("controlPanelStore", () => {
|
||||
it("defaults blockMenuOpen to false", () => {
|
||||
expect(useControlPanelStore.getState().blockMenuOpen).toBe(false);
|
||||
});
|
||||
|
||||
it("sets blockMenuOpen", () => {
|
||||
useControlPanelStore.getState().setBlockMenuOpen(true);
|
||||
expect(useControlPanelStore.getState().blockMenuOpen).toBe(true);
|
||||
});
|
||||
|
||||
it("sets forceOpenBlockMenu", () => {
|
||||
useControlPanelStore.getState().setForceOpenBlockMenu(true);
|
||||
expect(useControlPanelStore.getState().forceOpenBlockMenu).toBe(true);
|
||||
});
|
||||
|
||||
it("resets all control panel state", () => {
|
||||
useControlPanelStore.getState().setBlockMenuOpen(true);
|
||||
useControlPanelStore.getState().setForceOpenBlockMenu(true);
|
||||
useControlPanelStore.getState().setSaveControlOpen(true);
|
||||
useControlPanelStore.getState().setForceOpenSave(true);
|
||||
|
||||
useControlPanelStore.getState().reset();
|
||||
|
||||
const state = useControlPanelStore.getState();
|
||||
expect(state.blockMenuOpen).toBe(false);
|
||||
expect(state.forceOpenBlockMenu).toBe(false);
|
||||
expect(state.saveControlOpen).toBe(false);
|
||||
expect(state.forceOpenSave).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
// ===========================================================================
|
||||
// Section 3: BlockMenuContent integration tests
|
||||
// ===========================================================================
|
||||
// We import BlockMenuContent directly to avoid dealing with the Popover wrapper.
|
||||
import { BlockMenuContent } from "../components/NewControlPanel/NewBlockMenu/BlockMenuContent/BlockMenuContent";
|
||||
|
||||
describe("BlockMenuContent", () => {
|
||||
it("shows BlockMenuDefault when there is no search query", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("");
|
||||
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-search")).toBeNull();
|
||||
});
|
||||
|
||||
it("shows BlockMenuSearch when a search query is present", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("timer");
|
||||
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-default")).toBeNull();
|
||||
});
|
||||
|
||||
it("renders the search bar", () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
expect(
|
||||
screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
),
|
||||
).toBeDefined();
|
||||
});
|
||||
|
||||
it("switches from default to search view when store query changes", () => {
|
||||
const { rerender } = render(<BlockMenuContent />);
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
|
||||
// Simulate typing by setting the store directly
|
||||
useBlockMenuStore.getState().setSearchQuery("webhook");
|
||||
rerender(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-default")).toBeNull();
|
||||
});
|
||||
|
||||
it("switches back to default view when search query is cleared", () => {
|
||||
useBlockMenuStore.getState().setSearchQuery("something");
|
||||
const { rerender } = render(<BlockMenuContent />);
|
||||
expect(screen.getByTestId("block-menu-search")).toBeDefined();
|
||||
|
||||
useBlockMenuStore.getState().setSearchQuery("");
|
||||
rerender(<BlockMenuContent />);
|
||||
|
||||
expect(screen.getByTestId("block-menu-default")).toBeDefined();
|
||||
expect(screen.queryByTestId("block-menu-search")).toBeNull();
|
||||
});
|
||||
|
||||
it("typing in the search bar updates the local input value", async () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
const input = screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
);
|
||||
fireEvent.change(input, { target: { value: "slack" } });
|
||||
|
||||
expect((input as HTMLInputElement).value).toBe("slack");
|
||||
});
|
||||
|
||||
it("shows clear button when input has text and clears on click", async () => {
|
||||
render(<BlockMenuContent />);
|
||||
|
||||
const input = screen.getByPlaceholderText(
|
||||
"Blocks, Agents, Integrations or Keywords...",
|
||||
);
|
||||
fireEvent.change(input, { target: { value: "test" } });
|
||||
|
||||
// The clear button should appear
|
||||
const clearButton = screen.getByRole("button");
|
||||
fireEvent.click(clearButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect((input as HTMLInputElement).value).toBe("");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,270 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
render,
|
||||
screen,
|
||||
fireEvent,
|
||||
waitFor,
|
||||
cleanup,
|
||||
} from "@/tests/integrations/test-utils";
|
||||
import { UseFormReturn, useForm } from "react-hook-form";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import * as z from "zod";
|
||||
import { renderHook } from "@testing-library/react";
|
||||
import { useControlPanelStore } from "../stores/controlPanelStore";
|
||||
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { NewSaveControl } from "../components/NewControlPanel/NewSaveControl/NewSaveControl";
|
||||
import { useNewSaveControl } from "../components/NewControlPanel/NewSaveControl/useNewSaveControl";
|
||||
|
||||
const formSchema = z.object({
|
||||
name: z.string().min(1, "Name is required").max(100),
|
||||
description: z.string().max(500),
|
||||
});
|
||||
|
||||
type SaveableGraphFormValues = z.infer<typeof formSchema>;
|
||||
|
||||
const mockHandleSave = vi.fn();
|
||||
|
||||
vi.mock(
|
||||
"../components/NewControlPanel/NewSaveControl/useNewSaveControl",
|
||||
() => ({
|
||||
useNewSaveControl: vi.fn(),
|
||||
}),
|
||||
);
|
||||
|
||||
const mockUseNewSaveControl = vi.mocked(useNewSaveControl);
|
||||
|
||||
function createMockForm(
|
||||
defaults: SaveableGraphFormValues = { name: "", description: "" },
|
||||
): UseFormReturn<SaveableGraphFormValues> {
|
||||
const { result } = renderHook(() =>
|
||||
useForm<SaveableGraphFormValues>({
|
||||
resolver: zodResolver(formSchema),
|
||||
defaultValues: defaults,
|
||||
}),
|
||||
);
|
||||
return result.current;
|
||||
}
|
||||
|
||||
function setupMock(overrides: {
|
||||
isSaving?: boolean;
|
||||
graphVersion?: number;
|
||||
name?: string;
|
||||
description?: string;
|
||||
}) {
|
||||
const form = createMockForm({
|
||||
name: overrides.name ?? "",
|
||||
description: overrides.description ?? "",
|
||||
});
|
||||
|
||||
mockUseNewSaveControl.mockReturnValue({
|
||||
form,
|
||||
isSaving: overrides.isSaving ?? false,
|
||||
graphVersion: overrides.graphVersion,
|
||||
handleSave: mockHandleSave,
|
||||
});
|
||||
|
||||
return form;
|
||||
}
|
||||
|
||||
function resetStore() {
|
||||
useControlPanelStore.setState({
|
||||
blockMenuOpen: false,
|
||||
saveControlOpen: false,
|
||||
forceOpenBlockMenu: false,
|
||||
forceOpenSave: false,
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
cleanup();
|
||||
resetStore();
|
||||
mockHandleSave.mockReset();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("NewSaveControl", () => {
|
||||
it("renders save button trigger", () => {
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-save-button")).toBeDefined();
|
||||
});
|
||||
|
||||
it("renders name and description inputs when popover is open", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-name-input")).toBeDefined();
|
||||
expect(screen.getByTestId("save-control-description-input")).toBeDefined();
|
||||
});
|
||||
|
||||
it("does not render popover content when closed", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: false });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.queryByTestId("save-control-name-input")).toBeNull();
|
||||
expect(screen.queryByTestId("save-control-description-input")).toBeNull();
|
||||
});
|
||||
|
||||
it("shows version output when graphVersion is set", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ graphVersion: 3 });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const versionInput = screen.getByTestId("save-control-version-output");
|
||||
expect(versionInput).toBeDefined();
|
||||
expect((versionInput as HTMLInputElement).disabled).toBe(true);
|
||||
});
|
||||
|
||||
it("hides version output when graphVersion is undefined", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ graphVersion: undefined });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.queryByTestId("save-control-version-output")).toBeNull();
|
||||
});
|
||||
|
||||
it("enables save button when isSaving is false", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ isSaving: false });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
expect((saveButton as HTMLButtonElement).disabled).toBe(false);
|
||||
});
|
||||
|
||||
it("disables save button when isSaving is true", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ isSaving: true });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByRole("button", { name: /save agent/i });
|
||||
expect((saveButton as HTMLButtonElement).disabled).toBe(true);
|
||||
});
|
||||
|
||||
it("calls handleSave on form submission with valid data", async () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
const form = setupMock({ name: "My Agent", description: "A description" });
|
||||
|
||||
form.setValue("name", "My Agent");
|
||||
form.setValue("description", "A description");
|
||||
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
fireEvent.click(saveButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleSave).toHaveBeenCalledWith(
|
||||
{ name: "My Agent", description: "A description" },
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("does not call handleSave when name is empty (validation fails)", async () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({ name: "", description: "" });
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const saveButton = screen.getByTestId("save-control-save-agent-button");
|
||||
fireEvent.click(saveButton);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleSave).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
it("popover stays open when forceOpenSave is true", () => {
|
||||
useControlPanelStore.setState({
|
||||
saveControlOpen: false,
|
||||
forceOpenSave: true,
|
||||
});
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId("save-control-name-input")).toBeDefined();
|
||||
});
|
||||
|
||||
it("allows typing in name and description inputs", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
const nameInput = screen.getByTestId(
|
||||
"save-control-name-input",
|
||||
) as HTMLInputElement;
|
||||
const descriptionInput = screen.getByTestId(
|
||||
"save-control-description-input",
|
||||
) as HTMLInputElement;
|
||||
|
||||
fireEvent.change(nameInput, { target: { value: "Test Agent" } });
|
||||
fireEvent.change(descriptionInput, {
|
||||
target: { value: "Test Description" },
|
||||
});
|
||||
|
||||
expect(nameInput.value).toBe("Test Agent");
|
||||
expect(descriptionInput.value).toBe("Test Description");
|
||||
});
|
||||
|
||||
it("displays save button text", () => {
|
||||
useControlPanelStore.setState({ saveControlOpen: true });
|
||||
setupMock({});
|
||||
render(
|
||||
<TooltipProvider>
|
||||
<NewSaveControl />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
|
||||
expect(screen.getByText("Save Agent")).toBeDefined();
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,147 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { screen, fireEvent, cleanup } from "@testing-library/react";
|
||||
import { render } from "@/tests/integrations/test-utils";
|
||||
import React from "react";
|
||||
import { useGraphStore } from "../stores/graphStore";
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph",
|
||||
() => ({
|
||||
useRunGraph: vi.fn(),
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock(
|
||||
"@/app/(platform)/build/components/BuilderActions/components/RunInputDialog/RunInputDialog",
|
||||
() => ({
|
||||
RunInputDialog: ({ isOpen }: { isOpen: boolean }) =>
|
||||
isOpen ? <div data-testid="run-input-dialog">Dialog</div> : null,
|
||||
}),
|
||||
);
|
||||
|
||||
// Must import after mocks
|
||||
import { useRunGraph } from "../components/BuilderActions/components/RunGraph/useRunGraph";
|
||||
import { RunGraph } from "../components/BuilderActions/components/RunGraph/RunGraph";
|
||||
|
||||
const mockUseRunGraph = vi.mocked(useRunGraph);
|
||||
|
||||
function createMockReturnValue(
|
||||
overrides: Partial<ReturnType<typeof useRunGraph>> = {},
|
||||
) {
|
||||
return {
|
||||
handleRunGraph: vi.fn(),
|
||||
handleStopGraph: vi.fn(),
|
||||
openRunInputDialog: false,
|
||||
setOpenRunInputDialog: vi.fn(),
|
||||
isExecutingGraph: false,
|
||||
isTerminatingGraph: false,
|
||||
isSaving: false,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
// RunGraph uses Tooltip which requires TooltipProvider
|
||||
import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
|
||||
function renderRunGraph(flowID: string | null = "test-flow-id") {
|
||||
return render(
|
||||
<TooltipProvider>
|
||||
<RunGraph flowID={flowID} />
|
||||
</TooltipProvider>,
|
||||
);
|
||||
}
|
||||
|
||||
describe("RunGraph", () => {
|
||||
beforeEach(() => {
|
||||
cleanup();
|
||||
mockUseRunGraph.mockReturnValue(createMockReturnValue());
|
||||
useGraphStore.setState({ isGraphRunning: false });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it("renders an enabled button when flowID is provided", () => {
|
||||
renderRunGraph("test-flow-id");
|
||||
const button = screen.getByRole("button");
|
||||
expect((button as HTMLButtonElement).disabled).toBe(false);
|
||||
});
|
||||
|
||||
it("renders a disabled button when flowID is null", () => {
|
||||
renderRunGraph(null);
|
||||
const button = screen.getByRole("button");
|
||||
expect((button as HTMLButtonElement).disabled).toBe(true);
|
||||
});
|
||||
|
||||
it("disables the button when isExecutingGraph is true", () => {
|
||||
mockUseRunGraph.mockReturnValue(
|
||||
createMockReturnValue({ isExecutingGraph: true }),
|
||||
);
|
||||
renderRunGraph();
|
||||
expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("disables the button when isTerminatingGraph is true", () => {
|
||||
mockUseRunGraph.mockReturnValue(
|
||||
createMockReturnValue({ isTerminatingGraph: true }),
|
||||
);
|
||||
renderRunGraph();
|
||||
expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("disables the button when isSaving is true", () => {
|
||||
mockUseRunGraph.mockReturnValue(createMockReturnValue({ isSaving: true }));
|
||||
renderRunGraph();
|
||||
expect((screen.getByRole("button") as HTMLButtonElement).disabled).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("uses data-id run-graph-button when not running", () => {
|
||||
renderRunGraph();
|
||||
const button = screen.getByRole("button");
|
||||
expect(button.getAttribute("data-id")).toBe("run-graph-button");
|
||||
});
|
||||
|
||||
it("uses data-id stop-graph-button when running", () => {
|
||||
useGraphStore.setState({ isGraphRunning: true });
|
||||
renderRunGraph();
|
||||
const button = screen.getByRole("button");
|
||||
expect(button.getAttribute("data-id")).toBe("stop-graph-button");
|
||||
});
|
||||
|
||||
it("calls handleRunGraph when clicked and graph is not running", () => {
|
||||
const handleRunGraph = vi.fn();
|
||||
mockUseRunGraph.mockReturnValue(createMockReturnValue({ handleRunGraph }));
|
||||
renderRunGraph();
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
expect(handleRunGraph).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("calls handleStopGraph when clicked and graph is running", () => {
|
||||
const handleStopGraph = vi.fn();
|
||||
mockUseRunGraph.mockReturnValue(createMockReturnValue({ handleStopGraph }));
|
||||
useGraphStore.setState({ isGraphRunning: true });
|
||||
renderRunGraph();
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
expect(handleStopGraph).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("renders RunInputDialog hidden by default", () => {
|
||||
renderRunGraph();
|
||||
expect(screen.queryByTestId("run-input-dialog")).toBeNull();
|
||||
});
|
||||
|
||||
it("renders RunInputDialog when openRunInputDialog is true", () => {
|
||||
mockUseRunGraph.mockReturnValue(
|
||||
createMockReturnValue({ openRunInputDialog: true }),
|
||||
);
|
||||
renderRunGraph();
|
||||
expect(screen.getByTestId("run-input-dialog")).toBeDefined();
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,257 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { BlockUIType } from "../components/types";
|
||||
|
||||
vi.mock("@/services/storage/local-storage", () => {
|
||||
const store: Record<string, string> = {};
|
||||
return {
|
||||
Key: { COPIED_FLOW_DATA: "COPIED_FLOW_DATA" },
|
||||
storage: {
|
||||
get: (key: string) => store[key] ?? null,
|
||||
set: (key: string, value: string) => {
|
||||
store[key] = value;
|
||||
},
|
||||
clean: (key: string) => {
|
||||
delete store[key];
|
||||
},
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
import { useCopyPasteStore } from "../stores/copyPasteStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { storage, Key } from "@/services/storage/local-storage";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 100, y: 200 },
|
||||
selected: overrides.selected,
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "test node",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides.data,
|
||||
},
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
describe("useCopyPasteStore", () => {
|
||||
beforeEach(() => {
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.getState().clear();
|
||||
storage.clean(Key.COPIED_FLOW_DATA);
|
||||
});
|
||||
|
||||
describe("copySelectedNodes", () => {
|
||||
it("copies a single selected node to localStorage", () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const stored = storage.get(Key.COPIED_FLOW_DATA);
|
||||
expect(stored).not.toBeNull();
|
||||
|
||||
const parsed = JSON.parse(stored!);
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.nodes[0].id).toBe("1");
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("copies only edges between selected nodes", () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: true });
|
||||
const nodeC = createTestNode("c", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB, nodeC] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
{
|
||||
id: "e-ab",
|
||||
source: "a",
|
||||
target: "b",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
{
|
||||
id: "e-bc",
|
||||
source: "b",
|
||||
target: "c",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
{
|
||||
id: "e-ac",
|
||||
source: "a",
|
||||
target: "c",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const parsed = JSON.parse(storage.get(Key.COPIED_FLOW_DATA)!);
|
||||
expect(parsed.nodes).toHaveLength(2);
|
||||
expect(parsed.edges).toHaveLength(1);
|
||||
expect(parsed.edges[0].id).toBe("e-ab");
|
||||
});
|
||||
|
||||
it("stores empty data when no nodes are selected", () => {
|
||||
const node = createTestNode("1", { selected: false });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
const parsed = JSON.parse(storage.get(Key.COPIED_FLOW_DATA)!);
|
||||
expect(parsed.nodes).toHaveLength(0);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pasteNodes", () => {
|
||||
it("creates new nodes with new IDs via incrementNodeCounter", () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 5 });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
|
||||
const pastedNode = nodes.find((n) => n.id !== "orig");
|
||||
expect(pastedNode).toBeDefined();
|
||||
expect(pastedNode!.id).not.toBe("orig");
|
||||
});
|
||||
|
||||
it("offsets pasted node positions by +50 x/y", () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 5 });
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const pastedNode = nodes.find((n) => n.id !== "orig");
|
||||
expect(pastedNode).toBeDefined();
|
||||
expect(pastedNode!.position).toEqual({ x: 150, y: 250 });
|
||||
});
|
||||
|
||||
it("preserves internal connections with remapped IDs", () => {
|
||||
const nodeA = createTestNode("a", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeB = createTestNode("b", {
|
||||
selected: true,
|
||||
position: { x: 200, y: 0 },
|
||||
});
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB], nodeCounter: 0 });
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
{
|
||||
id: "e-ab",
|
||||
source: "a",
|
||||
target: "b",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { edges } = useEdgeStore.getState();
|
||||
const newEdges = edges.filter((e) => e.id !== "e-ab");
|
||||
expect(newEdges).toHaveLength(1);
|
||||
|
||||
const newEdge = newEdges[0];
|
||||
expect(newEdge.source).not.toBe("a");
|
||||
expect(newEdge.target).not.toBe("b");
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const pastedNodeIDs = nodes
|
||||
.filter((n) => n.id !== "a" && n.id !== "b")
|
||||
.map((n) => n.id);
|
||||
|
||||
expect(pastedNodeIDs).toContain(newEdge.source);
|
||||
expect(pastedNodeIDs).toContain(newEdge.target);
|
||||
});
|
||||
|
||||
it("deselects existing nodes and selects pasted ones", () => {
|
||||
const existingNode = createTestNode("existing", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeToCopy = createTestNode("copy-me", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
useNodeStore.setState({
|
||||
nodes: [existingNode, nodeToCopy],
|
||||
nodeCounter: 0,
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().copySelectedNodes();
|
||||
|
||||
// Deselect nodeToCopy, keep existingNode selected to verify deselection on paste
|
||||
useNodeStore.setState({
|
||||
nodes: [
|
||||
{ ...existingNode, selected: true },
|
||||
{ ...nodeToCopy, selected: false },
|
||||
],
|
||||
});
|
||||
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const originalNodes = nodes.filter(
|
||||
(n) => n.id === "existing" || n.id === "copy-me",
|
||||
);
|
||||
const pastedNodes = nodes.filter(
|
||||
(n) => n.id !== "existing" && n.id !== "copy-me",
|
||||
);
|
||||
|
||||
originalNodes.forEach((n) => {
|
||||
expect(n.selected).toBe(false);
|
||||
});
|
||||
pastedNodes.forEach((n) => {
|
||||
expect(n.selected).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("does nothing when clipboard is empty", () => {
|
||||
const node = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [node], nodeCounter: 0 });
|
||||
|
||||
useCopyPasteStore.getState().pasteNodes();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,751 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { MarkerType } from "@xyflow/react";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import type { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
import type { Link } from "@/app/api/__generated__/models/link";
|
||||
|
||||
function makeEdge(overrides: Partial<CustomEdge> & { id: string }): CustomEdge {
|
||||
return {
|
||||
type: "custom",
|
||||
source: "node-a",
|
||||
target: "node-b",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult>,
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
user_id: "user-1",
|
||||
graph_id: "graph-1",
|
||||
graph_version: 1,
|
||||
graph_exec_id: "gexec-1",
|
||||
node_exec_id: "nexec-1",
|
||||
node_id: "node-1",
|
||||
block_id: "block-1",
|
||||
status: "INCOMPLETE",
|
||||
input_data: {},
|
||||
output_data: {},
|
||||
add_time: new Date(),
|
||||
queue_time: null,
|
||||
start_time: null,
|
||||
end_time: null,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
useHistoryStore.setState({ past: [], future: [] });
|
||||
});
|
||||
|
||||
describe("edgeStore", () => {
|
||||
describe("setEdges", () => {
|
||||
it("replaces all edges", () => {
|
||||
const edges = [
|
||||
makeEdge({ id: "e1" }),
|
||||
makeEdge({ id: "e2", source: "node-c" }),
|
||||
];
|
||||
|
||||
useEdgeStore.getState().setEdges(edges);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("e1");
|
||||
expect(useEdgeStore.getState().edges[1].id).toBe("e2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("addEdge", () => {
|
||||
it("adds an edge and auto-generates an ID", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.id).toBe("n1:out->n2:in");
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("n1:out->n2:in");
|
||||
});
|
||||
|
||||
it("uses provided ID when given", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
id: "custom-id",
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.id).toBe("custom-id");
|
||||
});
|
||||
|
||||
it("sets type to custom and adds arrow marker", () => {
|
||||
const result = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(result.type).toBe("custom");
|
||||
expect(result.markerEnd).toEqual({
|
||||
type: MarkerType.ArrowClosed,
|
||||
strokeWidth: 2,
|
||||
color: "#555",
|
||||
});
|
||||
});
|
||||
|
||||
it("rejects duplicate edges without adding", () => {
|
||||
useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
const duplicate = useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(duplicate.id).toBe("n1:out->n2:in");
|
||||
expect(pushSpy).not.toHaveBeenCalled();
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("pushes previous state to history store", () => {
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
useEdgeStore.getState().addEdge({
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
});
|
||||
|
||||
expect(pushSpy).toHaveBeenCalledWith({
|
||||
nodes: [],
|
||||
edges: [],
|
||||
});
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe("removeEdge", () => {
|
||||
it("removes an edge by ID", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1" }), makeEdge({ id: "e2" })],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdge("e1");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].id).toBe("e2");
|
||||
});
|
||||
|
||||
it("does nothing when removing a non-existent edge", () => {
|
||||
useEdgeStore.setState({ edges: [makeEdge({ id: "e1" })] });
|
||||
|
||||
useEdgeStore.getState().removeEdge("nonexistent");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("pushes previous state to history store", () => {
|
||||
const existingEdges = [makeEdge({ id: "e1" })];
|
||||
useEdgeStore.setState({ edges: existingEdges });
|
||||
|
||||
const pushSpy = vi.spyOn(useHistoryStore.getState(), "pushState");
|
||||
|
||||
useEdgeStore.getState().removeEdge("e1");
|
||||
|
||||
expect(pushSpy).toHaveBeenCalledWith({
|
||||
nodes: [],
|
||||
edges: existingEdges,
|
||||
});
|
||||
|
||||
pushSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe("upsertMany", () => {
|
||||
it("inserts new edges", () => {
|
||||
useEdgeStore.setState({ edges: [makeEdge({ id: "e1" })] });
|
||||
|
||||
useEdgeStore.getState().upsertMany([makeEdge({ id: "e2" })]);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("updates existing edges by ID", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "old-source" })],
|
||||
});
|
||||
|
||||
useEdgeStore
|
||||
.getState()
|
||||
.upsertMany([makeEdge({ id: "e1", source: "new-source" })]);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
expect(useEdgeStore.getState().edges[0].source).toBe("new-source");
|
||||
});
|
||||
|
||||
it("handles mixed inserts and updates", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "old" })],
|
||||
});
|
||||
|
||||
useEdgeStore
|
||||
.getState()
|
||||
.upsertMany([
|
||||
makeEdge({ id: "e1", source: "updated" }),
|
||||
makeEdge({ id: "e2", source: "new" }),
|
||||
]);
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(2);
|
||||
expect(edges.find((e) => e.id === "e1")?.source).toBe("updated");
|
||||
expect(edges.find((e) => e.id === "e2")?.source).toBe("new");
|
||||
});
|
||||
});
|
||||
|
||||
describe("removeEdgesByHandlePrefix", () => {
|
||||
it("removes edges targeting a node with matching handle prefix", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", target: "node-b", targetHandle: "input_foo" }),
|
||||
makeEdge({ id: "e2", target: "node-b", targetHandle: "input_bar" }),
|
||||
makeEdge({
|
||||
id: "e3",
|
||||
target: "node-b",
|
||||
targetHandle: "other_handle",
|
||||
}),
|
||||
makeEdge({ id: "e4", target: "node-c", targetHandle: "input_foo" }),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdgesByHandlePrefix("node-b", "input_");
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(2);
|
||||
expect(edges.map((e) => e.id).sort()).toEqual(["e3", "e4"]);
|
||||
});
|
||||
|
||||
it("does not remove edges where target does not match nodeId", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-b",
|
||||
target: "node-c",
|
||||
targetHandle: "input_x",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().removeEdgesByHandlePrefix("node-b", "input_");
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getNodeEdges", () => {
|
||||
it("returns edges where node is source", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-c", target: "node-d" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-a");
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("e1");
|
||||
});
|
||||
|
||||
it("returns edges where node is target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-c", target: "node-d" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-b");
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("e1");
|
||||
});
|
||||
|
||||
it("returns edges for both source and target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", source: "node-a", target: "node-b" }),
|
||||
makeEdge({ id: "e2", source: "node-b", target: "node-c" }),
|
||||
makeEdge({ id: "e3", source: "node-d", target: "node-e" }),
|
||||
],
|
||||
});
|
||||
|
||||
const result = useEdgeStore.getState().getNodeEdges("node-b");
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result.map((e) => e.id).sort()).toEqual(["e1", "e2"]);
|
||||
});
|
||||
|
||||
it("returns empty array for unconnected node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "node-a", target: "node-b" })],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getNodeEdges("node-z")).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isInputConnected", () => {
|
||||
it("returns true when target handle is connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "input")).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("returns false when target handle is not connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "other")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("returns false when node is source not target", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-b",
|
||||
target: "node-c",
|
||||
sourceHandle: "output",
|
||||
targetHandle: "input",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isInputConnected("node-b", "output")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isOutputConnected", () => {
|
||||
it("returns true when source handle is connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-a",
|
||||
sourceHandle: "output",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(
|
||||
useEdgeStore.getState().isOutputConnected("node-a", "output"),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("returns false when source handle is not connected", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "node-a",
|
||||
sourceHandle: "output",
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().isOutputConnected("node-a", "other")).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getBackendLinks", () => {
|
||||
it("converts edges to Link format", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
source: "n1",
|
||||
target: "n2",
|
||||
sourceHandle: "out",
|
||||
targetHandle: "in",
|
||||
data: { isStatic: true },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const links = useEdgeStore.getState().getBackendLinks();
|
||||
|
||||
expect(links).toHaveLength(1);
|
||||
expect(links[0]).toEqual({
|
||||
id: "e1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
is_static: true,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("addLinks", () => {
|
||||
it("converts Links to edges and adds them", () => {
|
||||
const links: Link[] = [
|
||||
{
|
||||
id: "link-1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
is_static: false,
|
||||
},
|
||||
];
|
||||
|
||||
useEdgeStore.getState().addLinks(links);
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
expect(edges).toHaveLength(1);
|
||||
expect(edges[0].source).toBe("n1");
|
||||
expect(edges[0].target).toBe("n2");
|
||||
expect(edges[0].sourceHandle).toBe("out");
|
||||
expect(edges[0].targetHandle).toBe("in");
|
||||
expect(edges[0].data?.isStatic).toBe(false);
|
||||
});
|
||||
|
||||
it("adds multiple links", () => {
|
||||
const links: Link[] = [
|
||||
{
|
||||
id: "link-1",
|
||||
source_id: "n1",
|
||||
sink_id: "n2",
|
||||
source_name: "out",
|
||||
sink_name: "in",
|
||||
},
|
||||
{
|
||||
id: "link-2",
|
||||
source_id: "n3",
|
||||
sink_id: "n4",
|
||||
source_name: "result",
|
||||
sink_name: "value",
|
||||
},
|
||||
];
|
||||
|
||||
useEdgeStore.getState().addLinks(links);
|
||||
|
||||
expect(useEdgeStore.getState().edges).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllHandleIdsOfANode", () => {
|
||||
it("returns targetHandle values for edges targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({ id: "e1", target: "node-b", targetHandle: "input_a" }),
|
||||
makeEdge({ id: "e2", target: "node-b", targetHandle: "input_b" }),
|
||||
makeEdge({ id: "e3", target: "node-c", targetHandle: "input_c" }),
|
||||
],
|
||||
});
|
||||
|
||||
const handles = useEdgeStore.getState().getAllHandleIdsOfANode("node-b");
|
||||
expect(handles).toEqual(["input_a", "input_b"]);
|
||||
});
|
||||
|
||||
it("returns empty array when no edges target the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [makeEdge({ id: "e1", source: "node-b", target: "node-c" })],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getAllHandleIdsOfANode("node-b")).toEqual(
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("returns empty string for edges with no targetHandle", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: undefined,
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
expect(useEdgeStore.getState().getAllHandleIdsOfANode("node-b")).toEqual([
|
||||
"",
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("updateEdgeBeads", () => {
|
||||
it("updates bead counts for edges targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "some-value" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(1);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
|
||||
it("counts INCOMPLETE status in beadUp but not beadDown", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "INCOMPLETE",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(1);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("does not modify edges not targeting the node", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-c",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("does not update edge when input_data has no matching handle", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { other_handle: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
});
|
||||
|
||||
it("accumulates beads across multiple executions", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: { beadUp: 0, beadDown: 0, beadData: new Map() },
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data1" },
|
||||
}),
|
||||
);
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
status: "INCOMPLETE",
|
||||
input_data: { input: "data2" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(2);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
|
||||
it("handles static edges by setting beadUp to beadDown + 1", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
target: "node-b",
|
||||
targetHandle: "input",
|
||||
data: {
|
||||
isStatic: true,
|
||||
beadUp: 0,
|
||||
beadDown: 0,
|
||||
beadData: new Map(),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().updateEdgeBeads(
|
||||
"node-b",
|
||||
makeExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
status: "COMPLETED",
|
||||
input_data: { input: "data" },
|
||||
}),
|
||||
);
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.beadUp).toBe(2);
|
||||
expect(edge.data?.beadDown).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resetEdgeBeads", () => {
|
||||
it("resets all bead data on all edges", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
data: {
|
||||
beadUp: 5,
|
||||
beadDown: 3,
|
||||
beadData: new Map([["exec-1", "COMPLETED"]]),
|
||||
},
|
||||
}),
|
||||
makeEdge({
|
||||
id: "e2",
|
||||
data: {
|
||||
beadUp: 2,
|
||||
beadDown: 1,
|
||||
beadData: new Map([["exec-2", "INCOMPLETE"]]),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().resetEdgeBeads();
|
||||
|
||||
const edges = useEdgeStore.getState().edges;
|
||||
for (const edge of edges) {
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
expect(edge.data?.beadDown).toBe(0);
|
||||
expect(edge.data?.beadData?.size).toBe(0);
|
||||
}
|
||||
});
|
||||
|
||||
it("preserves other edge data when resetting beads", () => {
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
makeEdge({
|
||||
id: "e1",
|
||||
data: {
|
||||
isStatic: true,
|
||||
edgeColorClass: "text-red-500",
|
||||
beadUp: 3,
|
||||
beadDown: 2,
|
||||
beadData: new Map(),
|
||||
},
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
useEdgeStore.getState().resetEdgeBeads();
|
||||
|
||||
const edge = useEdgeStore.getState().edges[0];
|
||||
expect(edge.data?.isStatic).toBe(true);
|
||||
expect(edge.data?.edgeColorClass).toBe("text-red-500");
|
||||
expect(edge.data?.beadUp).toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,347 @@
|
||||
import { describe, it, expect, beforeEach } from "vitest";
|
||||
import { useGraphStore } from "../stores/graphStore";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
|
||||
|
||||
function createTestGraphMeta(
|
||||
overrides: Partial<GraphMeta> & { id: string; name: string },
|
||||
): GraphMeta {
|
||||
return {
|
||||
version: 1,
|
||||
description: "",
|
||||
is_active: true,
|
||||
user_id: "test-user",
|
||||
created_at: new Date("2024-01-01T00:00:00Z"),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function resetStore() {
|
||||
useGraphStore.setState({
|
||||
graphExecutionStatus: undefined,
|
||||
isGraphRunning: false,
|
||||
inputSchema: null,
|
||||
credentialsInputSchema: null,
|
||||
outputSchema: null,
|
||||
availableSubGraphs: [],
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
resetStore();
|
||||
});
|
||||
|
||||
describe("graphStore", () => {
|
||||
describe("execution status transitions", () => {
|
||||
it("handles QUEUED -> RUNNING -> COMPLETED transition", () => {
|
||||
const { setGraphExecutionStatus } = useGraphStore.getState();
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.QUEUED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.RUNNING,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.COMPLETED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.COMPLETED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("handles QUEUED -> RUNNING -> FAILED transition", () => {
|
||||
const { setGraphExecutionStatus } = useGraphStore.getState();
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
setGraphExecutionStatus(AgentExecutionStatus.FAILED);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBe(
|
||||
AgentExecutionStatus.FAILED,
|
||||
);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setGraphExecutionStatus auto-sets isGraphRunning", () => {
|
||||
it("sets isGraphRunning to true for RUNNING", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to true for QUEUED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.QUEUED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for COMPLETED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.COMPLETED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for FAILED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.FAILED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for TERMINATED", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.TERMINATED);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for INCOMPLETE", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.INCOMPLETE);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
|
||||
it("sets isGraphRunning to false for undefined", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore.getState().setGraphExecutionStatus(undefined);
|
||||
expect(useGraphStore.getState().graphExecutionStatus).toBeUndefined();
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setIsGraphRunning", () => {
|
||||
it("sets isGraphRunning independently of status", () => {
|
||||
useGraphStore.getState().setIsGraphRunning(true);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(true);
|
||||
|
||||
useGraphStore.getState().setIsGraphRunning(false);
|
||||
expect(useGraphStore.getState().isGraphRunning).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("schema management", () => {
|
||||
it("sets all three schemas via setGraphSchemas", () => {
|
||||
const input = { properties: { prompt: { type: "string" } } };
|
||||
const credentials = { properties: { apiKey: { type: "string" } } };
|
||||
const output = { properties: { result: { type: "string" } } };
|
||||
|
||||
useGraphStore.getState().setGraphSchemas(input, credentials, output);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(input);
|
||||
expect(state.credentialsInputSchema).toEqual(credentials);
|
||||
expect(state.outputSchema).toEqual(output);
|
||||
});
|
||||
|
||||
it("sets schemas to null", () => {
|
||||
const input = { properties: { prompt: { type: "string" } } };
|
||||
useGraphStore.getState().setGraphSchemas(input, null, null);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(input);
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
expect(state.outputSchema).toBeNull();
|
||||
});
|
||||
|
||||
it("overwrites previous schemas", () => {
|
||||
const first = { properties: { a: { type: "string" } } };
|
||||
const second = { properties: { b: { type: "number" } } };
|
||||
|
||||
useGraphStore.getState().setGraphSchemas(first, first, first);
|
||||
useGraphStore.getState().setGraphSchemas(second, null, second);
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.inputSchema).toEqual(second);
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
expect(state.outputSchema).toEqual(second);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasInputs", () => {
|
||||
it("returns false when inputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when inputSchema has no properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas({}, null, null);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when inputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas({ properties: {} }, null, null);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when inputSchema has properties", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
{ properties: { prompt: { type: "string" } } },
|
||||
null,
|
||||
null,
|
||||
);
|
||||
expect(useGraphStore.getState().hasInputs()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasCredentials", () => {
|
||||
it("returns false when credentialsInputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when credentialsInputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, { properties: {} }, null);
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when credentialsInputSchema has properties", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
null,
|
||||
{ properties: { apiKey: { type: "string" } } },
|
||||
null,
|
||||
);
|
||||
expect(useGraphStore.getState().hasCredentials()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasOutputs", () => {
|
||||
it("returns false when outputSchema is null", () => {
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns false when outputSchema has empty properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, null, { properties: {} });
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when outputSchema has properties", () => {
|
||||
useGraphStore.getState().setGraphSchemas(null, null, {
|
||||
properties: { result: { type: "string" } },
|
||||
});
|
||||
expect(useGraphStore.getState().hasOutputs()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("reset", () => {
|
||||
it("clears execution status and schemas but preserves outputSchema and availableSubGraphs", () => {
|
||||
const subGraphs: GraphMeta[] = [
|
||||
createTestGraphMeta({
|
||||
id: "sub-1",
|
||||
name: "Sub Graph",
|
||||
description: "A sub graph",
|
||||
}),
|
||||
];
|
||||
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphExecutionStatus(AgentExecutionStatus.RUNNING);
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setGraphSchemas(
|
||||
{ properties: { a: {} } },
|
||||
{ properties: { b: {} } },
|
||||
{ properties: { c: {} } },
|
||||
);
|
||||
useGraphStore.getState().setAvailableSubGraphs(subGraphs);
|
||||
|
||||
useGraphStore.getState().reset();
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.graphExecutionStatus).toBeUndefined();
|
||||
expect(state.isGraphRunning).toBe(false);
|
||||
expect(state.inputSchema).toBeNull();
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
// reset does not clear outputSchema or availableSubGraphs
|
||||
expect(state.outputSchema).toEqual({ properties: { c: {} } });
|
||||
expect(state.availableSubGraphs).toEqual(subGraphs);
|
||||
});
|
||||
|
||||
it("is idempotent on fresh state", () => {
|
||||
useGraphStore.getState().reset();
|
||||
|
||||
const state = useGraphStore.getState();
|
||||
expect(state.graphExecutionStatus).toBeUndefined();
|
||||
expect(state.isGraphRunning).toBe(false);
|
||||
expect(state.inputSchema).toBeNull();
|
||||
expect(state.credentialsInputSchema).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("setAvailableSubGraphs", () => {
|
||||
it("sets sub-graphs list", () => {
|
||||
const graphs: GraphMeta[] = [
|
||||
createTestGraphMeta({
|
||||
id: "graph-1",
|
||||
name: "Graph One",
|
||||
description: "First graph",
|
||||
}),
|
||||
createTestGraphMeta({
|
||||
id: "graph-2",
|
||||
version: 2,
|
||||
name: "Graph Two",
|
||||
description: "Second graph",
|
||||
}),
|
||||
];
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(graphs);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual(graphs);
|
||||
});
|
||||
|
||||
it("replaces previous sub-graphs", () => {
|
||||
const first: GraphMeta[] = [createTestGraphMeta({ id: "a", name: "A" })];
|
||||
const second: GraphMeta[] = [
|
||||
createTestGraphMeta({ id: "b", name: "B" }),
|
||||
createTestGraphMeta({ id: "c", name: "C" }),
|
||||
];
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(first);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toHaveLength(1);
|
||||
|
||||
useGraphStore.getState().setAvailableSubGraphs(second);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toHaveLength(2);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual(second);
|
||||
});
|
||||
|
||||
it("can set empty sub-graphs list", () => {
|
||||
useGraphStore
|
||||
.getState()
|
||||
.setAvailableSubGraphs([createTestGraphMeta({ id: "x", name: "X" })]);
|
||||
useGraphStore.getState().setAvailableSubGraphs([]);
|
||||
expect(useGraphStore.getState().availableSubGraphs).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,407 @@
|
||||
import { describe, it, expect, beforeEach } from "vitest";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom" as const,
|
||||
position: { x: 0, y: 0 },
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: "STANDARD" as never,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
},
|
||||
...overrides,
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
function createTestEdge(
|
||||
id: string,
|
||||
source: string,
|
||||
target: string,
|
||||
): CustomEdge {
|
||||
return {
|
||||
id,
|
||||
source,
|
||||
target,
|
||||
type: "custom" as const,
|
||||
} as CustomEdge;
|
||||
}
|
||||
|
||||
async function flushMicrotasks() {
|
||||
await new Promise<void>((resolve) => queueMicrotask(resolve));
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
useHistoryStore.getState().clear();
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
});
|
||||
|
||||
describe("historyStore", () => {
|
||||
describe("undo/redo single action", () => {
|
||||
it("undoes a single pushed state", async () => {
|
||||
const node = createTestNode("1");
|
||||
|
||||
// Initialize history with node present as baseline
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Simulate a change: clear nodes
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
|
||||
// Undo should restore to [node]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
expect(useHistoryStore.getState().future).toHaveLength(1);
|
||||
expect(useHistoryStore.getState().future[0].nodes).toEqual([]);
|
||||
});
|
||||
|
||||
it("redoes after undo", async () => {
|
||||
const node = createTestNode("1");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Change: clear nodes
|
||||
useNodeStore.setState({ nodes: [] });
|
||||
|
||||
// Undo → back to [node]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
|
||||
// Redo → back to []
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("undo/redo multiple actions", () => {
|
||||
it("undoes through multiple states in order", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
// Initialize with [node1] as baseline
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// Second change: add node2, push pre-change state
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Third change: add node3, push pre-change state
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
useHistoryStore
|
||||
.getState()
|
||||
.pushState({ nodes: [node1, node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Undo 1: back to [node1, node2]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
// Undo 2: back to [node1]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("undo past empty history", () => {
|
||||
it("does nothing when there is no history to undo", () => {
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
expect(useEdgeStore.getState().edges).toEqual([]);
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("does nothing when current state equals last past entry", () => {
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(false);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
expect(useHistoryStore.getState().future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("state consistency: undo after node add restores previous, redo restores added", () => {
|
||||
it("undo removes added node, redo restores it", async () => {
|
||||
const node = createTestNode("added");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("history limits", () => {
|
||||
it("does not grow past MAX_HISTORY (50)", async () => {
|
||||
for (let i = 0; i < 60; i++) {
|
||||
const node = createTestNode(`node-${i}`);
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({
|
||||
nodes: [createTestNode(`node-${i - 1}`)],
|
||||
edges: [],
|
||||
});
|
||||
await flushMicrotasks();
|
||||
}
|
||||
|
||||
expect(useHistoryStore.getState().past.length).toBeLessThanOrEqual(50);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("redo does nothing when future is empty", () => {
|
||||
const nodesBefore = useNodeStore.getState().nodes;
|
||||
const edgesBefore = useEdgeStore.getState().edges;
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
|
||||
expect(useNodeStore.getState().nodes).toEqual(nodesBefore);
|
||||
expect(useEdgeStore.getState().edges).toEqual(edgesBefore);
|
||||
});
|
||||
|
||||
it("interleaved undo/redo sequence", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
useHistoryStore.getState().pushState({
|
||||
nodes: [node1, node2],
|
||||
edges: [],
|
||||
});
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2]);
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useNodeStore.getState().nodes).toEqual([node1, node2, node3]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("canUndo / canRedo", () => {
|
||||
it("canUndo is false on fresh store", () => {
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(false);
|
||||
});
|
||||
|
||||
it("canUndo is true when current state differs from last past entry", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().canUndo()).toBe(true);
|
||||
});
|
||||
|
||||
it("canRedo is false on fresh store", () => {
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(false);
|
||||
});
|
||||
|
||||
it("canRedo is true after undo", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(true);
|
||||
});
|
||||
|
||||
it("canRedo becomes false after redo exhausts future", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
useHistoryStore.getState().redo();
|
||||
|
||||
expect(useHistoryStore.getState().canRedo()).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pushState deduplication", () => {
|
||||
it("does not push a state identical to the last past entry", async () => {
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("does not push if state matches current node/edge store state", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().past).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("initializeHistory", () => {
|
||||
it("resets history with current node/edge store state", async () => {
|
||||
const node = createTestNode("1");
|
||||
const edge = createTestEdge("e1", "1", "2");
|
||||
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useEdgeStore.setState({ edges: [edge] });
|
||||
|
||||
useNodeStore.setState({ nodes: [node, createTestNode("2")] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node], edges: [edge] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
const { past, future } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(1);
|
||||
expect(past[0].nodes).toEqual(useNodeStore.getState().nodes);
|
||||
expect(past[0].edges).toEqual(useEdgeStore.getState().edges);
|
||||
expect(future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("clear", () => {
|
||||
it("resets to empty initial state", async () => {
|
||||
const node = createTestNode("1");
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().clear();
|
||||
|
||||
const { past, future } = useHistoryStore.getState();
|
||||
expect(past).toEqual([{ nodes: [], edges: [] }]);
|
||||
expect(future).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("microtask batching", () => {
|
||||
it("only commits the first state when multiple pushState calls happen in the same tick", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2, node3] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node2], edges: [] });
|
||||
useHistoryStore
|
||||
.getState()
|
||||
.pushState({ nodes: [node1, node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
const { past } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(2);
|
||||
expect(past[1].nodes).toEqual([node1]);
|
||||
});
|
||||
|
||||
it("commits separately when pushState calls are in different ticks", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [node2], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
const { past } = useHistoryStore.getState();
|
||||
expect(past).toHaveLength(3);
|
||||
expect(past[1].nodes).toEqual([node1]);
|
||||
expect(past[2].nodes).toEqual([node2]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edges in undo/redo", () => {
|
||||
it("restores edges on undo and redo", async () => {
|
||||
const edge = createTestEdge("e1", "1", "2");
|
||||
useEdgeStore.setState({ edges: [edge] });
|
||||
|
||||
useHistoryStore.getState().pushState({ nodes: [], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useEdgeStore.getState().edges).toEqual([]);
|
||||
|
||||
useHistoryStore.getState().redo();
|
||||
expect(useEdgeStore.getState().edges).toEqual([edge]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("pushState clears future", () => {
|
||||
it("clears future when a new state is pushed after undo", async () => {
|
||||
const node1 = createTestNode("1");
|
||||
const node2 = createTestNode("2");
|
||||
const node3 = createTestNode("3");
|
||||
|
||||
// Initialize empty
|
||||
useHistoryStore.getState().initializeHistory();
|
||||
|
||||
// First change: set [node1]
|
||||
useNodeStore.setState({ nodes: [node1] });
|
||||
|
||||
// Second change: set [node1, node2], push pre-change [node1]
|
||||
useNodeStore.setState({ nodes: [node1, node2] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
// Undo: back to [node1]
|
||||
useHistoryStore.getState().undo();
|
||||
expect(useHistoryStore.getState().future).toHaveLength(1);
|
||||
|
||||
// New diverging change: add node3 instead of node2
|
||||
useNodeStore.setState({ nodes: [node1, node3] });
|
||||
useHistoryStore.getState().pushState({ nodes: [node1], edges: [] });
|
||||
await flushMicrotasks();
|
||||
|
||||
expect(useHistoryStore.getState().future).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,791 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { BlockUIType } from "../components/types";
|
||||
import type { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { CustomNodeData } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
|
||||
function createTestNode(overrides: {
|
||||
id: string;
|
||||
position?: { x: number; y: number };
|
||||
data?: Partial<CustomNodeData>;
|
||||
}): CustomNode {
|
||||
const defaults: CustomNodeData = {
|
||||
hardcodedValues: {},
|
||||
title: "Test Block",
|
||||
description: "A test block",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: "test-block-id",
|
||||
costs: [],
|
||||
categories: [],
|
||||
};
|
||||
|
||||
return {
|
||||
id: overrides.id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 0, y: 0 },
|
||||
data: { ...defaults, ...overrides.data },
|
||||
};
|
||||
}
|
||||
|
||||
function createExecutionResult(
|
||||
overrides: Partial<NodeExecutionResult> = {},
|
||||
): NodeExecutionResult {
|
||||
return {
|
||||
node_exec_id: overrides.node_exec_id ?? "exec-1",
|
||||
node_id: overrides.node_id ?? "1",
|
||||
graph_exec_id: overrides.graph_exec_id ?? "graph-exec-1",
|
||||
graph_id: overrides.graph_id ?? "graph-1",
|
||||
graph_version: overrides.graph_version ?? 1,
|
||||
user_id: overrides.user_id ?? "test-user",
|
||||
block_id: overrides.block_id ?? "block-1",
|
||||
status: overrides.status ?? "COMPLETED",
|
||||
input_data: overrides.input_data ?? { input_key: "input_value" },
|
||||
output_data: overrides.output_data ?? { output_key: ["output_value"] },
|
||||
add_time: overrides.add_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
queue_time: overrides.queue_time ?? new Date("2024-01-01T00:00:00Z"),
|
||||
start_time: overrides.start_time ?? new Date("2024-01-01T00:00:01Z"),
|
||||
end_time: overrides.end_time ?? new Date("2024-01-01T00:00:02Z"),
|
||||
};
|
||||
}
|
||||
|
||||
function resetStores() {
|
||||
useNodeStore.setState({
|
||||
nodes: [],
|
||||
nodeCounter: 0,
|
||||
nodeAdvancedStates: {},
|
||||
latestNodeInputData: {},
|
||||
latestNodeOutputData: {},
|
||||
accumulatedNodeInputData: {},
|
||||
accumulatedNodeOutputData: {},
|
||||
nodesInResolutionMode: new Set(),
|
||||
brokenEdgeIDs: new Map(),
|
||||
nodeResolutionData: new Map(),
|
||||
});
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.setState({ past: [], future: [] });
|
||||
}
|
||||
|
||||
describe("nodeStore", () => {
|
||||
beforeEach(() => {
|
||||
resetStores();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe("node lifecycle", () => {
|
||||
it("starts with empty nodes", () => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toEqual([]);
|
||||
});
|
||||
|
||||
it("adds a single node with addNode", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().addNode(node);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
|
||||
it("sets nodes with setNodes, replacing existing ones", () => {
|
||||
const node1 = createTestNode({ id: "1" });
|
||||
const node2 = createTestNode({ id: "2" });
|
||||
useNodeStore.getState().addNode(node1);
|
||||
|
||||
useNodeStore.getState().setNodes([node2]);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("2");
|
||||
});
|
||||
|
||||
it("removes nodes via onNodesChange", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().setNodes([node]);
|
||||
|
||||
useNodeStore.getState().onNodesChange([{ type: "remove", id: "1" }]);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("updates node data with updateNodeData", () => {
|
||||
const node = createTestNode({ id: "1" });
|
||||
useNodeStore.getState().addNode(node);
|
||||
|
||||
useNodeStore.getState().updateNodeData("1", { title: "Updated Title" });
|
||||
|
||||
const updated = useNodeStore.getState().nodes[0];
|
||||
expect(updated.data.title).toBe("Updated Title");
|
||||
expect(updated.data.block_id).toBe("test-block-id");
|
||||
});
|
||||
|
||||
it("updateNodeData does not affect other nodes", () => {
|
||||
const node1 = createTestNode({ id: "1" });
|
||||
const node2 = createTestNode({
|
||||
id: "2",
|
||||
data: { title: "Node 2" },
|
||||
});
|
||||
useNodeStore.getState().setNodes([node1, node2]);
|
||||
|
||||
useNodeStore.getState().updateNodeData("1", { title: "Changed" });
|
||||
|
||||
expect(useNodeStore.getState().nodes[1].data.title).toBe("Node 2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("bulk operations", () => {
|
||||
it("adds multiple nodes with addNodes", () => {
|
||||
const nodes = [
|
||||
createTestNode({ id: "1" }),
|
||||
createTestNode({ id: "2" }),
|
||||
createTestNode({ id: "3" }),
|
||||
];
|
||||
useNodeStore.getState().addNodes(nodes);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(3);
|
||||
});
|
||||
|
||||
it("removes multiple nodes via onNodesChange", () => {
|
||||
const nodes = [
|
||||
createTestNode({ id: "1" }),
|
||||
createTestNode({ id: "2" }),
|
||||
createTestNode({ id: "3" }),
|
||||
];
|
||||
useNodeStore.getState().setNodes(nodes);
|
||||
|
||||
useNodeStore.getState().onNodesChange([
|
||||
{ type: "remove", id: "1" },
|
||||
{ type: "remove", id: "3" },
|
||||
]);
|
||||
|
||||
const remaining = useNodeStore.getState().nodes;
|
||||
expect(remaining).toHaveLength(1);
|
||||
expect(remaining[0].id).toBe("2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("nodeCounter", () => {
|
||||
it("starts at zero", () => {
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(0);
|
||||
});
|
||||
|
||||
it("increments the counter", () => {
|
||||
useNodeStore.getState().incrementNodeCounter();
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(1);
|
||||
|
||||
useNodeStore.getState().incrementNodeCounter();
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(2);
|
||||
});
|
||||
|
||||
it("sets the counter to a specific value", () => {
|
||||
useNodeStore.getState().setNodeCounter(42);
|
||||
expect(useNodeStore.getState().nodeCounter).toBe(42);
|
||||
});
|
||||
});
|
||||
|
||||
describe("advanced states", () => {
|
||||
it("defaults to false for unknown node IDs", () => {
|
||||
expect(useNodeStore.getState().getShowAdvanced("unknown")).toBe(false);
|
||||
});
|
||||
|
||||
it("toggles advanced state", () => {
|
||||
useNodeStore.getState().toggleAdvanced("node-1");
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().toggleAdvanced("node-1");
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(false);
|
||||
});
|
||||
|
||||
it("sets advanced state explicitly", () => {
|
||||
useNodeStore.getState().setShowAdvanced("node-1", true);
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().setShowAdvanced("node-1", false);
|
||||
expect(useNodeStore.getState().getShowAdvanced("node-1")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("convertCustomNodeToBackendNode", () => {
|
||||
it("converts a node with minimal data", () => {
|
||||
const node = createTestNode({
|
||||
id: "42",
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.id).toBe("42");
|
||||
expect(backend.block_id).toBe("test-block-id");
|
||||
expect(backend.input_default).toEqual({});
|
||||
expect(backend.metadata).toEqual({ position: { x: 100, y: 200 } });
|
||||
});
|
||||
|
||||
it("includes customized_name when present in metadata", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
metadata: { customized_name: "My Custom Name" },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.metadata).toHaveProperty(
|
||||
"customized_name",
|
||||
"My Custom Name",
|
||||
);
|
||||
});
|
||||
|
||||
it("includes credentials_optional when present in metadata", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
metadata: { credentials_optional: true },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.metadata).toHaveProperty("credentials_optional", true);
|
||||
});
|
||||
|
||||
it("prunes empty values from hardcodedValues", () => {
|
||||
const node = createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
hardcodedValues: { filled: "value", empty: "" },
|
||||
},
|
||||
});
|
||||
|
||||
const backend = useNodeStore
|
||||
.getState()
|
||||
.convertCustomNodeToBackendNode(node);
|
||||
|
||||
expect(backend.input_default).toEqual({ filled: "value" });
|
||||
expect(backend.input_default).not.toHaveProperty("empty");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getBackendNodes", () => {
|
||||
it("converts all nodes to backend format", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([
|
||||
createTestNode({ id: "1", position: { x: 0, y: 0 } }),
|
||||
createTestNode({ id: "2", position: { x: 100, y: 100 } }),
|
||||
]);
|
||||
|
||||
const backendNodes = useNodeStore.getState().getBackendNodes();
|
||||
|
||||
expect(backendNodes).toHaveLength(2);
|
||||
expect(backendNodes[0].id).toBe("1");
|
||||
expect(backendNodes[1].id).toBe("2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("node status", () => {
|
||||
it("returns undefined for a node with no status", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("updates node status", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBe("RUNNING");
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "COMPLETED");
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBe("COMPLETED");
|
||||
});
|
||||
|
||||
it("cleans all node statuses", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
useNodeStore.getState().updateNodeStatus("2", "COMPLETED");
|
||||
|
||||
useNodeStore.getState().cleanNodesStatuses();
|
||||
|
||||
expect(useNodeStore.getState().getNodeStatus("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeStatus("2")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("updating status for non-existent node does not crash", () => {
|
||||
useNodeStore.getState().updateNodeStatus("nonexistent", "RUNNING");
|
||||
expect(
|
||||
useNodeStore.getState().getNodeStatus("nonexistent"),
|
||||
).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("execution result tracking", () => {
|
||||
it("returns empty array for node with no results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toEqual([]);
|
||||
});
|
||||
|
||||
it("tracks a single execution result", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
const result = createExecutionResult({ node_id: "1" });
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult("1", result);
|
||||
|
||||
const results = useNodeStore.getState().getNodeExecutionResults("1");
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].node_exec_id).toBe("exec-1");
|
||||
});
|
||||
|
||||
it("accumulates multiple execution results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "val1" },
|
||||
output_data: { key: ["out1"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "val2" },
|
||||
output_data: { key: ["out2"] },
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toHaveLength(
|
||||
2,
|
||||
);
|
||||
});
|
||||
|
||||
it("updates latest input/output data", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "first" },
|
||||
output_data: { key: ["first_out"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "second" },
|
||||
output_data: { key: ["second_out"] },
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getLatestNodeInputData("1")).toEqual({
|
||||
key: "second",
|
||||
});
|
||||
expect(useNodeStore.getState().getLatestNodeOutputData("1")).toEqual({
|
||||
key: ["second_out"],
|
||||
});
|
||||
});
|
||||
|
||||
it("accumulates input/output data across results", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "val1" },
|
||||
output_data: { key: ["out1"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-2",
|
||||
input_data: { key: "val2" },
|
||||
output_data: { key: ["out2"] },
|
||||
}),
|
||||
);
|
||||
|
||||
const accInput = useNodeStore.getState().getAccumulatedNodeInputData("1");
|
||||
expect(accInput.key).toEqual(["val1", "val2"]);
|
||||
|
||||
const accOutput = useNodeStore
|
||||
.getState()
|
||||
.getAccumulatedNodeOutputData("1");
|
||||
expect(accOutput.key).toEqual(["out1", "out2"]);
|
||||
});
|
||||
|
||||
it("deduplicates execution results by node_exec_id", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "original" },
|
||||
output_data: { key: ["original_out"] },
|
||||
}),
|
||||
);
|
||||
useNodeStore.getState().updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({
|
||||
node_exec_id: "exec-1",
|
||||
input_data: { key: "updated" },
|
||||
output_data: { key: ["updated_out"] },
|
||||
}),
|
||||
);
|
||||
|
||||
const results = useNodeStore.getState().getNodeExecutionResults("1");
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].input_data).toEqual({ key: "updated" });
|
||||
});
|
||||
|
||||
it("returns the latest execution result", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-2" }),
|
||||
);
|
||||
|
||||
const latest = useNodeStore.getState().getLatestNodeExecutionResult("1");
|
||||
expect(latest?.node_exec_id).toBe("exec-2");
|
||||
});
|
||||
|
||||
it("returns undefined for latest result on unknown node", () => {
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeExecutionResult("unknown"),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("clears all execution results", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"1",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"2",
|
||||
createExecutionResult({ node_exec_id: "exec-2" }),
|
||||
);
|
||||
|
||||
useNodeStore.getState().clearAllNodeExecutionResults();
|
||||
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("1")).toEqual([]);
|
||||
expect(useNodeStore.getState().getNodeExecutionResults("2")).toEqual([]);
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeInputData("1"),
|
||||
).toBeUndefined();
|
||||
expect(
|
||||
useNodeStore.getState().getLatestNodeOutputData("1"),
|
||||
).toBeUndefined();
|
||||
expect(useNodeStore.getState().getAccumulatedNodeInputData("1")).toEqual(
|
||||
{},
|
||||
);
|
||||
expect(useNodeStore.getState().getAccumulatedNodeOutputData("1")).toEqual(
|
||||
{},
|
||||
);
|
||||
});
|
||||
|
||||
it("returns empty object for accumulated data on unknown node", () => {
|
||||
expect(
|
||||
useNodeStore.getState().getAccumulatedNodeInputData("unknown"),
|
||||
).toEqual({});
|
||||
expect(
|
||||
useNodeStore.getState().getAccumulatedNodeOutputData("unknown"),
|
||||
).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("getNodeBlockUIType", () => {
|
||||
it("returns the node UI type", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.INPUT,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getNodeBlockUIType("1")).toBe(
|
||||
BlockUIType.INPUT,
|
||||
);
|
||||
});
|
||||
|
||||
it("defaults to STANDARD for unknown node IDs", () => {
|
||||
expect(useNodeStore.getState().getNodeBlockUIType("unknown")).toBe(
|
||||
BlockUIType.STANDARD,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasWebhookNodes", () => {
|
||||
it("returns false when there are no webhook nodes", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(false);
|
||||
});
|
||||
|
||||
it("returns true when a WEBHOOK node exists", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.WEBHOOK,
|
||||
},
|
||||
}),
|
||||
);
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(true);
|
||||
});
|
||||
|
||||
it("returns true when a WEBHOOK_MANUAL node exists", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
uiType: BlockUIType.WEBHOOK_MANUAL,
|
||||
},
|
||||
}),
|
||||
);
|
||||
expect(useNodeStore.getState().hasWebhookNodes()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("node errors", () => {
|
||||
it("returns undefined for a node with no errors", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets and retrieves node errors", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
const errors = { field1: "required", field2: "invalid" };
|
||||
useNodeStore.getState().updateNodeErrors("1", errors);
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toEqual(errors);
|
||||
});
|
||||
|
||||
it("clears errors for a specific node", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeErrors("1", { f: "err" });
|
||||
useNodeStore.getState().updateNodeErrors("2", { g: "err2" });
|
||||
|
||||
useNodeStore.getState().clearNodeErrors("1");
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeErrors("2")).toEqual({ g: "err2" });
|
||||
});
|
||||
|
||||
it("clears all node errors", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodes([createTestNode({ id: "1" }), createTestNode({ id: "2" })]);
|
||||
useNodeStore.getState().updateNodeErrors("1", { a: "err1" });
|
||||
useNodeStore.getState().updateNodeErrors("2", { b: "err2" });
|
||||
|
||||
useNodeStore.getState().clearAllNodeErrors();
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("1")).toBeUndefined();
|
||||
expect(useNodeStore.getState().getNodeErrors("2")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("sets errors by backend ID matching node id", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "backend-1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.setNodeErrorsForBackendId("backend-1", { x: "error" });
|
||||
|
||||
expect(useNodeStore.getState().getNodeErrors("backend-1")).toEqual({
|
||||
x: "error",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("getHardCodedValues", () => {
|
||||
it("returns hardcoded values for a node", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
hardcodedValues: { key: "value" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().getHardCodedValues("1")).toEqual({
|
||||
key: "value",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns empty object for unknown node", () => {
|
||||
expect(useNodeStore.getState().getHardCodedValues("unknown")).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("credentials optional", () => {
|
||||
it("sets credentials_optional in node metadata", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore.getState().setCredentialsOptional("1", true);
|
||||
|
||||
const node = useNodeStore.getState().nodes[0];
|
||||
expect(node.data.metadata?.credentials_optional).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolution mode", () => {
|
||||
it("defaults to not in resolution mode", () => {
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
});
|
||||
|
||||
it("enters and exits resolution mode", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(true);
|
||||
|
||||
useNodeStore.getState().setNodeResolutionMode("1", false);
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
});
|
||||
|
||||
it("tracks broken edge IDs", () => {
|
||||
useNodeStore.getState().setBrokenEdgeIDs("node-1", ["edge-1", "edge-2"]);
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(true);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-2")).toBe(true);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-3")).toBe(false);
|
||||
});
|
||||
|
||||
it("removes individual broken edge IDs", () => {
|
||||
useNodeStore.getState().setBrokenEdgeIDs("node-1", ["edge-1", "edge-2"]);
|
||||
useNodeStore.getState().removeBrokenEdgeID("node-1", "edge-1");
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-2")).toBe(true);
|
||||
});
|
||||
|
||||
it("clears all resolution state", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
useNodeStore.getState().setBrokenEdgeIDs("1", ["edge-1"]);
|
||||
|
||||
useNodeStore.getState().clearResolutionState();
|
||||
|
||||
expect(useNodeStore.getState().isNodeInResolutionMode("1")).toBe(false);
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
});
|
||||
|
||||
it("cleans up broken edges when exiting resolution mode", () => {
|
||||
useNodeStore.getState().setNodeResolutionMode("1", true);
|
||||
useNodeStore.getState().setBrokenEdgeIDs("1", ["edge-1"]);
|
||||
|
||||
useNodeStore.getState().setNodeResolutionMode("1", false);
|
||||
|
||||
expect(useNodeStore.getState().isEdgeBroken("edge-1")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("handles updating data on a non-existent node gracefully", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeData("nonexistent", { title: "New Title" });
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("handles removing a non-existent node gracefully", () => {
|
||||
useNodeStore.getState().addNode(createTestNode({ id: "1" }));
|
||||
|
||||
useNodeStore
|
||||
.getState()
|
||||
.onNodesChange([{ type: "remove", id: "nonexistent" }]);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("handles duplicate node IDs in addNodes", () => {
|
||||
useNodeStore.getState().addNodes([
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: { title: "First" },
|
||||
}),
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: { title: "Second" },
|
||||
}),
|
||||
]);
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
expect(nodes[0].data.title).toBe("First");
|
||||
expect(nodes[1].data.title).toBe("Second");
|
||||
});
|
||||
|
||||
it("updating node status mid-execution preserves other data", () => {
|
||||
useNodeStore.getState().addNode(
|
||||
createTestNode({
|
||||
id: "1",
|
||||
data: {
|
||||
title: "My Node",
|
||||
hardcodedValues: { key: "val" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
useNodeStore.getState().updateNodeStatus("1", "RUNNING");
|
||||
|
||||
const node = useNodeStore.getState().nodes[0];
|
||||
expect(node.data.status).toBe("RUNNING");
|
||||
expect(node.data.title).toBe("My Node");
|
||||
expect(node.data.hardcodedValues).toEqual({ key: "val" });
|
||||
});
|
||||
|
||||
it("execution result for non-existent node does not add it", () => {
|
||||
useNodeStore
|
||||
.getState()
|
||||
.updateNodeExecutionResult(
|
||||
"nonexistent",
|
||||
createExecutionResult({ node_exec_id: "exec-1" }),
|
||||
);
|
||||
|
||||
expect(useNodeStore.getState().nodes).toHaveLength(0);
|
||||
expect(
|
||||
useNodeStore.getState().getNodeExecutionResults("nonexistent"),
|
||||
).toEqual([]);
|
||||
});
|
||||
|
||||
it("getBackendNodes returns empty array when no nodes exist", () => {
|
||||
expect(useNodeStore.getState().getBackendNodes()).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,567 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import { renderHook, act } from "@testing-library/react";
|
||||
import { CustomNode } from "../components/FlowEditor/nodes/CustomNode/CustomNode";
|
||||
import { BlockUIType } from "../components/types";
|
||||
|
||||
// ---- Mocks ----
|
||||
|
||||
const mockGetViewport = vi.fn(() => ({ x: 0, y: 0, zoom: 1 }));
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: vi.fn(() => ({
|
||||
getViewport: mockGetViewport,
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const mockToast = vi.fn();
|
||||
|
||||
vi.mock("@/components/molecules/Toast/use-toast", () => ({
|
||||
useToast: vi.fn(() => ({ toast: mockToast })),
|
||||
}));
|
||||
|
||||
let uuidCounter = 0;
|
||||
vi.mock("uuid", () => ({
|
||||
v4: vi.fn(() => `new-uuid-${++uuidCounter}`),
|
||||
}));
|
||||
|
||||
// Mock navigator.clipboard
|
||||
const mockWriteText = vi.fn(() => Promise.resolve());
|
||||
const mockReadText = vi.fn(() => Promise.resolve(""));
|
||||
|
||||
Object.defineProperty(navigator, "clipboard", {
|
||||
value: {
|
||||
writeText: mockWriteText,
|
||||
readText: mockReadText,
|
||||
},
|
||||
writable: true,
|
||||
configurable: true,
|
||||
});
|
||||
|
||||
// Mock window.innerWidth / innerHeight for viewport centering calculations
|
||||
Object.defineProperty(window, "innerWidth", { value: 1000, writable: true });
|
||||
Object.defineProperty(window, "innerHeight", { value: 800, writable: true });
|
||||
|
||||
import { useCopyPaste } from "../components/FlowEditor/Flow/useCopyPaste";
|
||||
import { useNodeStore } from "../stores/nodeStore";
|
||||
import { useEdgeStore } from "../stores/edgeStore";
|
||||
import { useHistoryStore } from "../stores/historyStore";
|
||||
import { CustomEdge } from "../components/FlowEditor/edges/CustomEdge";
|
||||
|
||||
const CLIPBOARD_PREFIX = "autogpt-flow-data:";
|
||||
|
||||
function createTestNode(
|
||||
id: string,
|
||||
overrides: Partial<CustomNode> = {},
|
||||
): CustomNode {
|
||||
return {
|
||||
id,
|
||||
type: "custom",
|
||||
position: overrides.position ?? { x: 100, y: 200 },
|
||||
selected: overrides.selected,
|
||||
data: {
|
||||
hardcodedValues: {},
|
||||
title: `Node ${id}`,
|
||||
description: "test node",
|
||||
inputSchema: {},
|
||||
outputSchema: {},
|
||||
uiType: BlockUIType.STANDARD,
|
||||
block_id: `block-${id}`,
|
||||
costs: [],
|
||||
categories: [],
|
||||
...overrides.data,
|
||||
},
|
||||
} as CustomNode;
|
||||
}
|
||||
|
||||
function createTestEdge(
|
||||
id: string,
|
||||
source: string,
|
||||
target: string,
|
||||
sourceHandle = "out",
|
||||
targetHandle = "in",
|
||||
): CustomEdge {
|
||||
return {
|
||||
id,
|
||||
source,
|
||||
target,
|
||||
sourceHandle,
|
||||
targetHandle,
|
||||
} as CustomEdge;
|
||||
}
|
||||
|
||||
function makeCopyEvent(): KeyboardEvent {
|
||||
return new KeyboardEvent("keydown", {
|
||||
key: "c",
|
||||
ctrlKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
}
|
||||
|
||||
function makePasteEvent(): KeyboardEvent {
|
||||
return new KeyboardEvent("keydown", {
|
||||
key: "v",
|
||||
ctrlKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
}
|
||||
|
||||
function clipboardPayload(nodes: CustomNode[], edges: CustomEdge[]): string {
|
||||
return `${CLIPBOARD_PREFIX}${JSON.stringify({ nodes, edges })}`;
|
||||
}
|
||||
|
||||
describe("useCopyPaste", () => {
|
||||
beforeEach(() => {
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
useHistoryStore.getState().clear();
|
||||
mockWriteText.mockClear();
|
||||
mockReadText.mockClear();
|
||||
mockToast.mockClear();
|
||||
mockGetViewport.mockReturnValue({ x: 0, y: 0, zoom: 1 });
|
||||
uuidCounter = 0;
|
||||
|
||||
// Ensure no input element is focused
|
||||
if (document.activeElement && document.activeElement !== document.body) {
|
||||
(document.activeElement as HTMLElement).blur();
|
||||
}
|
||||
});
|
||||
|
||||
describe("copy (Ctrl+C)", () => {
|
||||
it("copies a single selected node to clipboard with prefix", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const written = (mockWriteText.mock.calls as string[][])[0][0];
|
||||
expect(written.startsWith(CLIPBOARD_PREFIX)).toBe(true);
|
||||
|
||||
const parsed = JSON.parse(written.slice(CLIPBOARD_PREFIX.length));
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.nodes[0].id).toBe("1");
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("shows a success toast after copying", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockToast).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
title: "Copied successfully",
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("copies multiple connected nodes and preserves internal edges", async () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: true });
|
||||
const nodeC = createTestNode("c", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB, nodeC] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [
|
||||
createTestEdge("e-ab", "a", "b"),
|
||||
createTestEdge("e-bc", "b", "c"),
|
||||
],
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(2);
|
||||
expect(parsed.edges).toHaveLength(1);
|
||||
expect(parsed.edges[0].id).toBe("e-ab");
|
||||
});
|
||||
|
||||
it("drops external edges where one endpoint is not selected", async () => {
|
||||
const nodeA = createTestNode("a", { selected: true });
|
||||
const nodeB = createTestNode("b", { selected: false });
|
||||
useNodeStore.setState({ nodes: [nodeA, nodeB] });
|
||||
|
||||
useEdgeStore.setState({
|
||||
edges: [createTestEdge("e-ab", "a", "b")],
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(1);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("copies nothing when no nodes are selected", async () => {
|
||||
const node = createTestNode("1", { selected: false });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
const parsed = JSON.parse(
|
||||
(mockWriteText.mock.calls as string[][])[0][0].slice(
|
||||
CLIPBOARD_PREFIX.length,
|
||||
),
|
||||
);
|
||||
expect(parsed.nodes).toHaveLength(0);
|
||||
expect(parsed.edges).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("paste (Ctrl+V)", () => {
|
||||
it("creates new nodes with new UUIDs", async () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 200 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes[0].id).toBe("new-uuid-1");
|
||||
expect(nodes[0].id).not.toBe("orig");
|
||||
});
|
||||
|
||||
it("centers pasted nodes in the current viewport", async () => {
|
||||
// Viewport at origin, zoom 1 => center = (500, 400)
|
||||
mockGetViewport.mockReturnValue({ x: 0, y: 0, zoom: 1 });
|
||||
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
// Single node: center of bounds = (100, 100)
|
||||
// Viewport center = (500, 400)
|
||||
// Offset = (400, 300)
|
||||
// New position = (100 + 400, 100 + 300) = (500, 400)
|
||||
expect(nodes[0].position).toEqual({ x: 500, y: 400 });
|
||||
});
|
||||
|
||||
it("deselects existing nodes and selects pasted nodes", async () => {
|
||||
const existingNode = createTestNode("existing", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const nodeToPaste = createTestNode("paste-me", {
|
||||
selected: false,
|
||||
position: { x: 100, y: 100 },
|
||||
});
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([nodeToPaste], []));
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
});
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const originalNode = nodes.find((n) => n.id === "existing");
|
||||
const pastedNode = nodes.find((n) => n.id !== "existing");
|
||||
|
||||
expect(originalNode!.selected).toBe(false);
|
||||
expect(pastedNode!.selected).toBe(true);
|
||||
});
|
||||
|
||||
it("remaps edge source/target IDs to newly created node IDs", async () => {
|
||||
const nodeA = createTestNode("a", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
const nodeB = createTestNode("b", {
|
||||
selected: true,
|
||||
position: { x: 200, y: 0 },
|
||||
});
|
||||
const edge = createTestEdge("e-ab", "a", "b", "output", "input");
|
||||
|
||||
mockReadText.mockResolvedValue(clipboardPayload([nodeA, nodeB], [edge]));
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
useEdgeStore.setState({ edges: [] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(2);
|
||||
});
|
||||
|
||||
// Wait for edges to be added too
|
||||
await vi.waitFor(() => {
|
||||
const { edges } = useEdgeStore.getState();
|
||||
expect(edges).toHaveLength(1);
|
||||
});
|
||||
|
||||
const { edges } = useEdgeStore.getState();
|
||||
const newEdge = edges[0];
|
||||
|
||||
// Edge source/target should be remapped to new UUIDs, not "a"/"b"
|
||||
expect(newEdge.source).not.toBe("a");
|
||||
expect(newEdge.target).not.toBe("b");
|
||||
expect(newEdge.source).toBe("new-uuid-1");
|
||||
expect(newEdge.target).toBe("new-uuid-2");
|
||||
expect(newEdge.sourceHandle).toBe("output");
|
||||
expect(newEdge.targetHandle).toBe("input");
|
||||
});
|
||||
|
||||
it("does nothing when clipboard does not have the expected prefix", async () => {
|
||||
mockReadText.mockResolvedValue("some random text");
|
||||
|
||||
const existingNode = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
// Give async operations time to settle
|
||||
await vi.waitFor(() => {
|
||||
expect(mockReadText).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// Ensure no state changes happen after clipboard read
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
|
||||
it("does nothing when clipboard is empty", async () => {
|
||||
mockReadText.mockResolvedValue("");
|
||||
|
||||
const existingNode = createTestNode("1", { position: { x: 0, y: 0 } });
|
||||
useNodeStore.setState({ nodes: [existingNode], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockReadText).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// Ensure no state changes happen after clipboard read
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
expect(nodes[0].id).toBe("1");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("input field focus guard", () => {
|
||||
it("ignores Ctrl+C when an input element is focused", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const input = document.createElement("input");
|
||||
document.body.appendChild(input);
|
||||
input.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
// Clipboard write should NOT be called
|
||||
expect(mockWriteText).not.toHaveBeenCalled();
|
||||
|
||||
document.body.removeChild(input);
|
||||
});
|
||||
|
||||
it("ignores Ctrl+V when a textarea element is focused", async () => {
|
||||
mockReadText.mockResolvedValue(
|
||||
clipboardPayload(
|
||||
[createTestNode("a", { position: { x: 0, y: 0 } })],
|
||||
[],
|
||||
),
|
||||
);
|
||||
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const textarea = document.createElement("textarea");
|
||||
document.body.appendChild(textarea);
|
||||
textarea.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makePasteEvent());
|
||||
});
|
||||
|
||||
expect(mockReadText).not.toHaveBeenCalled();
|
||||
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(0);
|
||||
|
||||
document.body.removeChild(textarea);
|
||||
});
|
||||
|
||||
it("ignores keypresses when a contenteditable element is focused", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const div = document.createElement("div");
|
||||
div.setAttribute("contenteditable", "true");
|
||||
document.body.appendChild(div);
|
||||
div.focus();
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
act(() => {
|
||||
result.current(makeCopyEvent());
|
||||
});
|
||||
|
||||
expect(mockWriteText).not.toHaveBeenCalled();
|
||||
|
||||
document.body.removeChild(div);
|
||||
});
|
||||
});
|
||||
|
||||
describe("meta key support (macOS)", () => {
|
||||
it("handles Cmd+C (metaKey) the same as Ctrl+C", async () => {
|
||||
const node = createTestNode("1", { selected: true });
|
||||
useNodeStore.setState({ nodes: [node] });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
const metaCopyEvent = new KeyboardEvent("keydown", {
|
||||
key: "c",
|
||||
metaKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
|
||||
act(() => {
|
||||
result.current(metaCopyEvent);
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockWriteText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
it("handles Cmd+V (metaKey) the same as Ctrl+V", async () => {
|
||||
const node = createTestNode("orig", {
|
||||
selected: true,
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
mockReadText.mockResolvedValue(clipboardPayload([node], []));
|
||||
useNodeStore.setState({ nodes: [], nodeCounter: 0 });
|
||||
|
||||
const { result } = renderHook(() => useCopyPaste());
|
||||
|
||||
const metaPasteEvent = new KeyboardEvent("keydown", {
|
||||
key: "v",
|
||||
metaKey: true,
|
||||
bubbles: true,
|
||||
});
|
||||
|
||||
act(() => {
|
||||
result.current(metaPasteEvent);
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
expect(nodes).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,134 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { renderHook, act } from "@testing-library/react";
|
||||
|
||||
const mockScreenToFlowPosition = vi.fn((pos: { x: number; y: number }) => pos);
|
||||
const mockFitView = vi.fn();
|
||||
|
||||
vi.mock("@xyflow/react", async () => {
|
||||
const actual = await vi.importActual("@xyflow/react");
|
||||
return {
|
||||
...actual,
|
||||
useReactFlow: () => ({
|
||||
screenToFlowPosition: mockScreenToFlowPosition,
|
||||
fitView: mockFitView,
|
||||
}),
|
||||
};
|
||||
});
|
||||
|
||||
const mockSetQueryStates = vi.fn();
|
||||
let mockQueryStateValues: {
|
||||
flowID: string | null;
|
||||
flowVersion: number | null;
|
||||
flowExecutionID: string | null;
|
||||
} = {
|
||||
flowID: null,
|
||||
flowVersion: null,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
vi.mock("nuqs", () => ({
|
||||
parseAsString: {},
|
||||
parseAsInteger: {},
|
||||
useQueryStates: vi.fn(() => [mockQueryStateValues, mockSetQueryStates]),
|
||||
}));
|
||||
|
||||
let mockGraphLoading = false;
|
||||
let mockBlocksLoading = false;
|
||||
|
||||
vi.mock("@/app/api/__generated__/endpoints/graphs/graphs", () => ({
|
||||
useGetV1GetSpecificGraph: vi.fn(() => ({
|
||||
data: undefined,
|
||||
isLoading: mockGraphLoading,
|
||||
})),
|
||||
useGetV1GetExecutionDetails: vi.fn(() => ({
|
||||
data: undefined,
|
||||
})),
|
||||
useGetV1ListUserGraphs: vi.fn(() => ({
|
||||
data: undefined,
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@/app/api/__generated__/endpoints/default/default", () => ({
|
||||
useGetV2GetSpecificBlocks: vi.fn(() => ({
|
||||
data: undefined,
|
||||
isLoading: mockBlocksLoading,
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("@/app/api/helpers", () => ({
|
||||
okData: (res: { data: unknown }) => res?.data,
|
||||
}));
|
||||
|
||||
vi.mock("../components/helper", () => ({
|
||||
convertNodesPlusBlockInfoIntoCustomNodes: vi.fn(),
|
||||
}));
|
||||
|
||||
describe("useFlow", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.useFakeTimers({ shouldAdvanceTime: true });
|
||||
mockGraphLoading = false;
|
||||
mockBlocksLoading = false;
|
||||
mockQueryStateValues = {
|
||||
flowID: null,
|
||||
flowVersion: null,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe("loading states", () => {
|
||||
it("returns isFlowContentLoading true when graph is loading", async () => {
|
||||
mockGraphLoading = true;
|
||||
mockQueryStateValues = {
|
||||
flowID: "test-flow",
|
||||
flowVersion: 1,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(true);
|
||||
});
|
||||
|
||||
it("returns isFlowContentLoading true when blocks are loading", async () => {
|
||||
mockBlocksLoading = true;
|
||||
mockQueryStateValues = {
|
||||
flowID: "test-flow",
|
||||
flowVersion: 1,
|
||||
flowExecutionID: null,
|
||||
};
|
||||
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(true);
|
||||
});
|
||||
|
||||
it("returns isFlowContentLoading false when neither is loading", async () => {
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isFlowContentLoading).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("initial load completion", () => {
|
||||
it("marks initial load complete for new flows without flowID", async () => {
|
||||
const { useFlow } = await import("../components/FlowEditor/Flow/useFlow");
|
||||
const { result } = renderHook(() => useFlow());
|
||||
|
||||
expect(result.current.isInitialLoadComplete).toBe(false);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(300);
|
||||
});
|
||||
|
||||
expect(result.current.isInitialLoadComplete).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useCopilotUIStore } from "@/app/(platform)/copilot/store";
|
||||
import { ChangeEvent, FormEvent, useEffect, useState } from "react";
|
||||
|
||||
interface Args {
|
||||
@@ -16,6 +17,16 @@ export function useChatInput({
|
||||
}: Args) {
|
||||
const [value, setValue] = useState("");
|
||||
const [isSending, setIsSending] = useState(false);
|
||||
const { initialPrompt, setInitialPrompt } = useCopilotUIStore();
|
||||
|
||||
useEffect(
|
||||
function consumeInitialPrompt() {
|
||||
if (!initialPrompt) return;
|
||||
setValue((prev) => (prev.length === 0 ? initialPrompt : prev));
|
||||
setInitialPrompt(null);
|
||||
},
|
||||
[initialPrompt, setInitialPrompt],
|
||||
);
|
||||
|
||||
useEffect(
|
||||
function focusOnMount() {
|
||||
|
||||
@@ -7,6 +7,10 @@ export interface DeleteTarget {
|
||||
}
|
||||
|
||||
interface CopilotUIState {
|
||||
/** Prompt extracted from URL hash (e.g. /copilot#prompt=...) for input prefill. */
|
||||
initialPrompt: string | null;
|
||||
setInitialPrompt: (prompt: string | null) => void;
|
||||
|
||||
sessionToDelete: DeleteTarget | null;
|
||||
setSessionToDelete: (target: DeleteTarget | null) => void;
|
||||
|
||||
@@ -31,6 +35,9 @@ interface CopilotUIState {
|
||||
}
|
||||
|
||||
export const useCopilotUIStore = create<CopilotUIState>((set) => ({
|
||||
initialPrompt: null,
|
||||
setInitialPrompt: (prompt) => set({ initialPrompt: prompt }),
|
||||
|
||||
sessionToDelete: null,
|
||||
setSessionToDelete: (target) => set({ sessionToDelete: target }),
|
||||
|
||||
|
||||
@@ -19,6 +19,42 @@ import { useCopilotStream } from "./useCopilotStream";
|
||||
const TITLE_POLL_INTERVAL_MS = 2_000;
|
||||
const TITLE_POLL_MAX_ATTEMPTS = 5;
|
||||
|
||||
/**
|
||||
* Extract a prompt from the URL hash fragment.
|
||||
* Supports: /copilot#prompt=URL-encoded-text
|
||||
* Optionally auto-submits if ?autosubmit=true is in the query string.
|
||||
* Returns null if no prompt is present.
|
||||
*/
|
||||
function extractPromptFromUrl(): {
|
||||
prompt: string;
|
||||
autosubmit: boolean;
|
||||
} | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
|
||||
const hash = window.location.hash;
|
||||
if (!hash) return null;
|
||||
|
||||
const hashParams = new URLSearchParams(hash.slice(1));
|
||||
const prompt = hashParams.get("prompt");
|
||||
|
||||
if (!prompt || !prompt.trim()) return null;
|
||||
|
||||
const searchParams = new URLSearchParams(window.location.search);
|
||||
const autosubmit = searchParams.get("autosubmit") === "true";
|
||||
|
||||
// Clean up hash + autosubmit param only (preserve other query params)
|
||||
const cleanURL = new URL(window.location.href);
|
||||
cleanURL.hash = "";
|
||||
cleanURL.searchParams.delete("autosubmit");
|
||||
window.history.replaceState(
|
||||
null,
|
||||
"",
|
||||
`${cleanURL.pathname}${cleanURL.search}`,
|
||||
);
|
||||
|
||||
return { prompt: prompt.trim(), autosubmit };
|
||||
}
|
||||
|
||||
interface UploadedFile {
|
||||
file_id: string;
|
||||
name: string;
|
||||
@@ -127,6 +163,28 @@ export function useCopilotPage() {
|
||||
}
|
||||
}, [sessionId, pendingMessage, sendMessage]);
|
||||
|
||||
// --- Extract prompt from URL hash on mount (e.g. /copilot#prompt=Hello) ---
|
||||
const { setInitialPrompt } = useCopilotUIStore();
|
||||
const hasProcessedUrlPrompt = useRef(false);
|
||||
useEffect(() => {
|
||||
if (hasProcessedUrlPrompt.current) return;
|
||||
|
||||
const urlPrompt = extractPromptFromUrl();
|
||||
if (!urlPrompt) return;
|
||||
|
||||
hasProcessedUrlPrompt.current = true;
|
||||
|
||||
if (urlPrompt.autosubmit) {
|
||||
setPendingMessage(urlPrompt.prompt);
|
||||
void createSession().catch(() => {
|
||||
setPendingMessage(null);
|
||||
setInitialPrompt(urlPrompt.prompt);
|
||||
});
|
||||
} else {
|
||||
setInitialPrompt(urlPrompt.prompt);
|
||||
}
|
||||
}, [createSession, setInitialPrompt]);
|
||||
|
||||
async function uploadFiles(
|
||||
files: File[],
|
||||
sid: string,
|
||||
|
||||
@@ -418,6 +418,43 @@ Below is a comprehensive list of all available blocks, categorized by their prim
|
||||
|
||||
| Block Name | Description |
|
||||
|------------|-------------|
|
||||
| [Agent Mail Create Draft](block-integrations/agent_mail/drafts.md#agent-mail-create-draft) | Create a draft email for review or scheduled sending |
|
||||
| [Agent Mail Create Inbox](block-integrations/agent_mail/inbox.md#agent-mail-create-inbox) | Create a new email inbox for an AI agent via AgentMail |
|
||||
| [Agent Mail Create List Entry](block-integrations/agent_mail/lists.md#agent-mail-create-list-entry) | Add an email address or domain to an allow/block list |
|
||||
| [Agent Mail Create Pod](block-integrations/agent_mail/pods.md#agent-mail-create-pod) | Create a new pod for multi-tenant customer isolation |
|
||||
| [Agent Mail Create Pod Inbox](block-integrations/agent_mail/pods.md#agent-mail-create-pod-inbox) | Create a new email inbox within a pod |
|
||||
| [Agent Mail Delete Draft](block-integrations/agent_mail/drafts.md#agent-mail-delete-draft) | Delete a draft or cancel a scheduled email |
|
||||
| [Agent Mail Delete Inbox](block-integrations/agent_mail/inbox.md#agent-mail-delete-inbox) | Permanently delete an AgentMail inbox and all its messages, threads, and drafts |
|
||||
| [Agent Mail Delete Inbox Thread](block-integrations/agent_mail/threads.md#agent-mail-delete-inbox-thread) | Permanently delete a conversation thread and all its messages |
|
||||
| [Agent Mail Delete List Entry](block-integrations/agent_mail/lists.md#agent-mail-delete-list-entry) | Remove an email address or domain from an allow/block list to stop filtering it |
|
||||
| [Agent Mail Delete Pod](block-integrations/agent_mail/pods.md#agent-mail-delete-pod) | Permanently delete a pod |
|
||||
| [Agent Mail Forward Message](block-integrations/agent_mail/messages.md#agent-mail-forward-message) | Forward an email message to one or more recipients |
|
||||
| [Agent Mail Get Draft](block-integrations/agent_mail/drafts.md#agent-mail-get-draft) | Retrieve a draft email to review its contents, recipients, and scheduled send status |
|
||||
| [Agent Mail Get Inbox](block-integrations/agent_mail/inbox.md#agent-mail-get-inbox) | Retrieve details of an existing AgentMail inbox including its email address, display name, and configuration |
|
||||
| [Agent Mail Get Inbox Thread](block-integrations/agent_mail/threads.md#agent-mail-get-inbox-thread) | Retrieve a conversation thread with all its messages |
|
||||
| [Agent Mail Get List Entry](block-integrations/agent_mail/lists.md#agent-mail-get-list-entry) | Check if an email address or domain is in an allow/block list |
|
||||
| [Agent Mail Get Message](block-integrations/agent_mail/messages.md#agent-mail-get-message) | Retrieve a specific email message by ID |
|
||||
| [Agent Mail Get Message Attachment](block-integrations/agent_mail/attachments.md#agent-mail-get-message-attachment) | Download a file attachment from an email message |
|
||||
| [Agent Mail Get Org Thread](block-integrations/agent_mail/threads.md#agent-mail-get-org-thread) | Retrieve a conversation thread by ID from anywhere in the organization, without needing the inbox ID |
|
||||
| [Agent Mail Get Pod](block-integrations/agent_mail/pods.md#agent-mail-get-pod) | Retrieve details of an existing pod including its client_id mapping and metadata |
|
||||
| [Agent Mail Get Thread Attachment](block-integrations/agent_mail/attachments.md#agent-mail-get-thread-attachment) | Download a file attachment from a conversation thread |
|
||||
| [Agent Mail List Drafts](block-integrations/agent_mail/drafts.md#agent-mail-list-drafts) | List drafts in an AgentMail inbox |
|
||||
| [Agent Mail List Entries](block-integrations/agent_mail/lists.md#agent-mail-list-entries) | List all entries in an AgentMail allow/block list |
|
||||
| [Agent Mail List Inbox Threads](block-integrations/agent_mail/threads.md#agent-mail-list-inbox-threads) | List all conversation threads in an AgentMail inbox |
|
||||
| [Agent Mail List Inboxes](block-integrations/agent_mail/inbox.md#agent-mail-list-inboxes) | List all email inboxes in your AgentMail organization with pagination support |
|
||||
| [Agent Mail List Messages](block-integrations/agent_mail/messages.md#agent-mail-list-messages) | List messages in an AgentMail inbox |
|
||||
| [Agent Mail List Org Drafts](block-integrations/agent_mail/drafts.md#agent-mail-list-org-drafts) | List all drafts across every inbox in your organization |
|
||||
| [Agent Mail List Org Threads](block-integrations/agent_mail/threads.md#agent-mail-list-org-threads) | List threads across ALL inboxes in your organization |
|
||||
| [Agent Mail List Pod Drafts](block-integrations/agent_mail/pods.md#agent-mail-list-pod-drafts) | List all drafts across all inboxes within a pod |
|
||||
| [Agent Mail List Pod Inboxes](block-integrations/agent_mail/pods.md#agent-mail-list-pod-inboxes) | List all inboxes within a pod |
|
||||
| [Agent Mail List Pod Threads](block-integrations/agent_mail/pods.md#agent-mail-list-pod-threads) | List all conversation threads across all inboxes within a pod |
|
||||
| [Agent Mail List Pods](block-integrations/agent_mail/pods.md#agent-mail-list-pods) | List all tenant pods in your organization |
|
||||
| [Agent Mail Reply To Message](block-integrations/agent_mail/messages.md#agent-mail-reply-to-message) | Reply to an existing email in the same conversation thread |
|
||||
| [Agent Mail Send Draft](block-integrations/agent_mail/drafts.md#agent-mail-send-draft) | Send a draft immediately, converting it into a delivered message |
|
||||
| [Agent Mail Send Message](block-integrations/agent_mail/messages.md#agent-mail-send-message) | Send a new email from an AgentMail inbox |
|
||||
| [Agent Mail Update Draft](block-integrations/agent_mail/drafts.md#agent-mail-update-draft) | Update a draft's content, recipients, or scheduled send time |
|
||||
| [Agent Mail Update Inbox](block-integrations/agent_mail/inbox.md#agent-mail-update-inbox) | Update the display name of an AgentMail inbox |
|
||||
| [Agent Mail Update Message](block-integrations/agent_mail/messages.md#agent-mail-update-message) | Add or remove labels on an email message |
|
||||
| [Baas Bot Join Meeting](block-integrations/baas/bots.md#baas-bot-join-meeting) | Deploy a bot to join and record a meeting |
|
||||
| [Baas Bot Leave Meeting](block-integrations/baas/bots.md#baas-bot-leave-meeting) | Remove a bot from an ongoing meeting |
|
||||
| [Gmail Add Label](block-integrations/google/gmail.md#gmail-add-label) | A block that adds a label to a specific email message in Gmail, creating the label if it doesn't exist |
|
||||
|
||||
@@ -9,6 +9,13 @@
|
||||
|
||||
## Block Integrations
|
||||
|
||||
* [Agent Mail Attachments](block-integrations/agent_mail/attachments.md)
|
||||
* [Agent Mail Drafts](block-integrations/agent_mail/drafts.md)
|
||||
* [Agent Mail Inbox](block-integrations/agent_mail/inbox.md)
|
||||
* [Agent Mail Lists](block-integrations/agent_mail/lists.md)
|
||||
* [Agent Mail Messages](block-integrations/agent_mail/messages.md)
|
||||
* [Agent Mail Pods](block-integrations/agent_mail/pods.md)
|
||||
* [Agent Mail Threads](block-integrations/agent_mail/threads.md)
|
||||
* [Airtable Bases](block-integrations/airtable/bases.md)
|
||||
* [Airtable Records](block-integrations/airtable/records.md)
|
||||
* [Airtable Schema](block-integrations/airtable/schema.md)
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
# Agent Mail Attachments
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for downloading file attachments from AgentMail messages and threads. Attachments are files associated with messages (PDFs, CSVs, images, etc.) and are returned as base64-encoded content.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Get Message Attachment
|
||||
|
||||
### What it is
|
||||
Download a file attachment from an email message. Returns base64-encoded file content.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls the AgentMail API's `inboxes.messages.get_attachment` endpoint using the provided inbox ID, message ID, and attachment ID. The API returns the raw file content, which may arrive as `bytes` or `str` depending on the attachment type. The block base64-encodes the result: binary data is encoded directly, while string data is first UTF-8 encoded then base64-encoded. If the API returns an unexpected data type, the block raises a `TypeError`.
|
||||
|
||||
On any failure — invalid IDs, network errors, authentication issues, or unexpected response types — the block catches the exception and yields the error message on the `error` output instead of `content_base64`. No partial results are returned; the block either yields both `content_base64` and `attachment_id` on success, or only `error` on failure.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the message belongs to | str | Yes |
|
||||
| message_id | Message ID containing the attachment | str | Yes |
|
||||
| attachment_id | Attachment ID to download (from the message's attachments array) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| content_base64 | File content encoded as a base64 string. Decode with base64.b64decode() to get raw bytes. | str |
|
||||
| attachment_id | The attachment ID that was downloaded | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Invoice Processing Pipeline** — Download PDF invoices from incoming messages and feed them into a parsing block that extracts line items and totals.
|
||||
**Automated Attachment Archival** — Pull attachments from specific senders and store the base64 content in a database or cloud bucket for long-term retention.
|
||||
**Image Analysis Workflow** — Retrieve image attachments from support emails and pass them to a vision model block for classification or OCR.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Thread Attachment
|
||||
|
||||
### What it is
|
||||
Download a file attachment from a conversation thread. Returns base64-encoded file content.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls the AgentMail API's `inboxes.threads.get_attachment` endpoint using the provided inbox ID, thread ID, and attachment ID. This is functionally identical to the message attachment block but resolves the attachment via a thread rather than a specific message — useful when you have the thread context but not the individual message ID. The raw response is base64-encoded the same way: `bytes` are encoded directly, `str` is UTF-8 encoded first, and any other response type triggers a `TypeError`.
|
||||
|
||||
Error handling follows the same all-or-nothing pattern: on success the block yields `content_base64` and `attachment_id`; on any exception (bad IDs, auth failure, network error, unexpected type) it yields only `error` with the exception message.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the thread belongs to | str | Yes |
|
||||
| thread_id | Thread ID containing the attachment | str | Yes |
|
||||
| attachment_id | Attachment ID to download (from a message's attachments array within the thread) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| content_base64 | File content encoded as a base64 string. Decode with base64.b64decode() to get raw bytes. | str |
|
||||
| attachment_id | The attachment ID that was downloaded | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Conversation Attachment Collector** — Iterate over a support thread and download every attachment to build a complete case file for review.
|
||||
**Threaded Report Extraction** — Pull CSV or Excel attachments from recurring report threads and forward them to a data-processing block.
|
||||
**Compliance Document Retrieval** — Download signed-document attachments from legal threads and pass them to a verification or archival workflow.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
291
docs/integrations/block-integrations/agent_mail/drafts.md
Normal file
291
docs/integrations/block-integrations/agent_mail/drafts.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# Agent Mail Drafts
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for creating, reviewing, editing, sending, and deleting email drafts in AgentMail. Drafts enable human-in-the-loop review, scheduled sending, and multi-step email composition workflows.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Create Draft
|
||||
|
||||
### What it is
|
||||
Create a draft email for review or scheduled sending. Use send_at for automatic future delivery.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.create(inbox_id, **params)` to create a new draft in the specified inbox. You must provide at least the recipient list; subject, text body, HTML body, cc, bcc, and in_reply_to are all optional.
|
||||
|
||||
If you supply `send_at`, the draft is scheduled for automatic delivery at that time and the returned `send_status` will be `scheduled`. If `send_at` is omitted, the draft remains unsent until you explicitly send it with the Send Draft block. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to create the draft in | str | Yes |
|
||||
| to | Recipient email addresses (e.g. ['user@example.com']) | List[str] | Yes |
|
||||
| subject | Email subject line | str | No |
|
||||
| text | Plain text body of the draft | str | No |
|
||||
| html | Rich HTML body of the draft | str | No |
|
||||
| cc | CC recipient email addresses | List[str] | No |
|
||||
| bcc | BCC recipient email addresses | List[str] | No |
|
||||
| in_reply_to | Message ID this draft replies to, for threading follow-up drafts | str | No |
|
||||
| send_at | Schedule automatic sending at this ISO 8601 datetime (e.g. '2025-01-15T09:00:00Z'). Leave empty for manual send. | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| draft_id | Unique identifier of the created draft | str |
|
||||
| send_status | 'scheduled' if send_at was set, empty otherwise. Once scheduled, possible values are: scheduled, sending, failed. | str |
|
||||
| result | Complete draft object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Human-in-the-Loop Review** — Create a draft so a human can review and approve the email before it is sent.
|
||||
|
||||
**Scheduled Outreach** — Set `send_at` to queue a follow-up email that delivers automatically at a future date and time.
|
||||
|
||||
**Multi-Step Composition** — Create a draft with initial content, then use the Update Draft block to refine recipients or body text in later workflow steps.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Delete Draft
|
||||
|
||||
### What it is
|
||||
Delete a draft or cancel a scheduled email. Removes the draft permanently.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.delete(inbox_id, draft_id)` to permanently remove the specified draft. If the draft was scheduled for future delivery, deleting it also cancels that scheduled send.
|
||||
|
||||
On success the block yields `success=True`. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the draft belongs to | str | Yes |
|
||||
| draft_id | Draft ID to delete (also cancels scheduled sends) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| success | True if the draft was successfully deleted/cancelled | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Cancel Scheduled Send** — Delete a draft that was scheduled with `send_at` to prevent it from being delivered.
|
||||
|
||||
**Clean Up Rejected Drafts** — Remove drafts that a human reviewer has declined during an approval workflow.
|
||||
|
||||
**Abort Workflow** — Delete an in-progress draft when upstream conditions change and the email is no longer needed.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Draft
|
||||
|
||||
### What it is
|
||||
Retrieve a draft email to review its contents, recipients, and scheduled send status.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.get(inbox_id, draft_id)` to fetch a single draft by its ID. It returns the draft's subject, send status, scheduled send time, and the complete draft object.
|
||||
|
||||
Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the draft belongs to | str | Yes |
|
||||
| draft_id | Draft ID to retrieve | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| draft_id | Unique identifier of the draft | str |
|
||||
| subject | Draft subject line | str |
|
||||
| send_status | Scheduled send status: 'scheduled', 'sending', 'failed', or empty | str |
|
||||
| send_at | Scheduled send time (ISO 8601) if set | str |
|
||||
| result | Complete draft object with all fields | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Approval Gate** — Fetch a draft so a human reviewer can inspect its content and recipients before approving it for send.
|
||||
|
||||
**Schedule Monitoring** — Retrieve a scheduled draft to check its `send_status` and confirm it is still queued for delivery.
|
||||
|
||||
**Content Verification** — Read back a draft after creation or update to verify that the subject and body match expectations before proceeding.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Drafts
|
||||
|
||||
### What it is
|
||||
List drafts in an AgentMail inbox. Filter by labels=['scheduled'] to find pending sends.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.list(inbox_id, **params)` to retrieve drafts from a single inbox. You can control page size with `limit`, paginate with `page_token`, and filter by `labels` (for example, `['scheduled']` to find only pending sends).
|
||||
|
||||
The block returns the list of draft objects, a count of drafts in the current page, and a `next_page_token` for fetching subsequent pages. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to list drafts from | str | Yes |
|
||||
| limit | Maximum number of drafts to return per page (1-100) | int | No |
|
||||
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
| labels | Filter drafts by labels (e.g. ['scheduled'] for pending sends) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| drafts | List of draft objects with subject, recipients, send_status, etc. | List[Dict[str, Any]] |
|
||||
| count | Number of drafts returned | int |
|
||||
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Inbox Dashboard** — List all drafts in an inbox to display a queue of pending emails awaiting review or approval.
|
||||
|
||||
**Scheduled Send Audit** — Filter by `labels=['scheduled']` to surface every draft that is queued for future delivery and verify send times.
|
||||
|
||||
**Batch Processing** — Paginate through all drafts in an inbox to perform bulk updates or deletions in a cleanup workflow.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Org Drafts
|
||||
|
||||
### What it is
|
||||
List all drafts across every inbox in your organization. Use for central approval dashboards.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.drafts.list(**params)` at the organization level, so it returns drafts across every inbox without requiring an inbox ID. You can control page size with `limit` and paginate with `page_token`.
|
||||
|
||||
The block returns the list of draft objects, a count of drafts in the current page, and a `next_page_token` for fetching subsequent pages. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| limit | Maximum number of drafts to return per page (1-100) | int | No |
|
||||
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| drafts | List of draft objects from all inboxes in the organization | List[Dict[str, Any]] |
|
||||
| count | Number of drafts returned | int |
|
||||
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Central Approval Dashboard** — List every pending draft across all inboxes so a manager can review and approve outbound emails in one place.
|
||||
|
||||
**Organization-Wide Analytics** — Count and categorize drafts across inboxes to report on email pipeline volume and bottlenecks.
|
||||
|
||||
**Stale Draft Cleanup** — Paginate through all org drafts to identify and delete old or abandoned drafts that were never sent.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Send Draft
|
||||
|
||||
### What it is
|
||||
Send a draft immediately, converting it into a delivered message. The draft is deleted after sending.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.send(inbox_id, draft_id)` to deliver the draft immediately. Once the send completes, the draft is deleted from the inbox and a message object is returned in its place.
|
||||
|
||||
The block yields the `message_id` and `thread_id` of the newly sent email along with the complete message result. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the draft belongs to | str | Yes |
|
||||
| draft_id | Draft ID to send now | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | Message ID of the now-sent email (draft is deleted) | str |
|
||||
| thread_id | Thread ID the sent message belongs to | str |
|
||||
| result | Complete sent message object | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Post-Approval Dispatch** — Send a draft immediately after a human reviewer approves it in a review workflow.
|
||||
|
||||
**On-Demand Notifications** — Compose a draft earlier in the workflow and send it only when a triggering event occurs.
|
||||
|
||||
**Retry After Edit** — After updating a draft that failed validation, send it again without recreating it from scratch.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Update Draft
|
||||
|
||||
### What it is
|
||||
Update a draft's content, recipients, or scheduled send time. Use to reschedule or edit before sending.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls `client.inboxes.drafts.update(inbox_id, draft_id, **params)` to modify an existing draft. Only the fields you provide are changed; omitted fields are left untouched. Internally, `None` is used to distinguish between "omit this field" and "clear this field to empty," so you can selectively update recipients, subject, body, or scheduled send time without affecting other fields.
|
||||
|
||||
The block returns the updated `draft_id`, `send_status`, and the complete draft result. Any errors propagate to the block framework's global error handler, which yields them on the error output.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the draft belongs to | str | Yes |
|
||||
| draft_id | Draft ID to update | str | Yes |
|
||||
| to | Updated recipient email addresses (replaces existing list). Omit to keep current value. | List[str] | No |
|
||||
| subject | Updated subject line. Omit to keep current value. | str | No |
|
||||
| text | Updated plain text body. Omit to keep current value. | str | No |
|
||||
| html | Updated HTML body. Omit to keep current value. | str | No |
|
||||
| send_at | Reschedule: new ISO 8601 send time (e.g. '2025-01-20T14:00:00Z'). Omit to keep current value. | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| draft_id | The updated draft ID | str |
|
||||
| send_status | Updated scheduled send status: 'scheduled', 'sending', 'failed', or empty | str |
|
||||
| result | Complete updated draft object | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Reschedule Delivery** — Change the `send_at` time on a scheduled draft to delay or advance its delivery window.
|
||||
|
||||
**Reviewer Edits** — Allow a human reviewer to modify the subject or body of a draft before it is approved and sent.
|
||||
|
||||
**Dynamic Recipient Updates** — Update the recipient list based on data gathered in earlier workflow steps without recreating the draft.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
196
docs/integrations/block-integrations/agent_mail/inbox.md
Normal file
196
docs/integrations/block-integrations/agent_mail/inbox.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Agent Mail Inbox
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for creating, retrieving, listing, updating, and deleting AgentMail inboxes. An Inbox is a fully programmable email account for AI agents — each inbox gets a unique email address and can send, receive, and manage emails via the AgentMail API.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Create Inbox
|
||||
|
||||
### What it is
|
||||
Create a new email inbox for an AI agent via AgentMail. Each inbox gets a unique address and can send/receive emails.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to provision a new inbox. You can optionally specify a username (local part of the address), a custom domain, and a display name. Any parameters left empty use sensible defaults — the username is auto-generated and the domain defaults to `agentmail.to`.
|
||||
|
||||
The API returns the newly created inbox object, from which the block extracts the inbox ID and full email address as separate outputs. The complete inbox metadata is also available as a dictionary for downstream blocks that need additional fields. If the API call fails, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| username | Local part of the email address (e.g. 'support' for support@domain.com). Leave empty to auto-generate. | str | No |
|
||||
| domain | Email domain (e.g. 'mydomain.com'). Defaults to agentmail.to if empty. | str | No |
|
||||
| display_name | Friendly name shown in the 'From' field of sent emails (e.g. 'Support Agent') | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| inbox_id | Unique identifier for the created inbox (also the email address) | str |
|
||||
| email_address | Full email address of the inbox (e.g. support@agentmail.to) | str |
|
||||
| result | Complete inbox object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Per-Customer Support Agents** — Spin up a dedicated inbox for each customer so an AI agent can handle their support requests through a personalized email address.
|
||||
|
||||
**Campaign-Specific Outreach** — Create a branded inbox for each marketing campaign so replies are automatically routed to the agent managing that campaign.
|
||||
|
||||
**Ephemeral Verification Workflows** — Generate a temporary inbox on the fly to receive a sign-up confirmation code, then pass the address to a registration block.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Delete Inbox
|
||||
|
||||
### What it is
|
||||
Permanently delete an AgentMail inbox and all its messages, threads, and drafts. This action cannot be undone.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block sends a delete request to the AgentMail API using the provided inbox ID. The API permanently removes the inbox along with all associated messages, threads, and drafts. This action is irreversible.
|
||||
|
||||
On success the block outputs `success=True`. If the API returns an error (e.g., the inbox does not exist), the exception propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to permanently delete | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| success | True if the inbox was successfully deleted | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Cleanup After Task Completion** — Delete a temporary inbox once an agent finishes processing a one-off workflow like order confirmation or password reset.
|
||||
|
||||
**User Offboarding** — Remove an agent's inbox when a customer cancels their account to avoid accumulating unused resources.
|
||||
|
||||
**Test Environment Teardown** — Automatically delete inboxes created during integration tests so the organization stays clean between runs.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Inbox
|
||||
|
||||
### What it is
|
||||
Retrieve details of an existing AgentMail inbox including its email address, display name, and configuration.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to retrieve the details of a single inbox identified by its inbox ID (which is also its email address). The API returns the full inbox object including its email address, display name, and any other metadata.
|
||||
|
||||
The block extracts the inbox ID, email address, and display name as individual outputs for easy wiring to downstream blocks. The complete inbox object is also returned as a dictionary. If the inbox does not exist or the request fails, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to look up (e.g. 'support@agentmail.to') | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| inbox_id | Unique identifier of the inbox | str |
|
||||
| email_address | Full email address of the inbox | str |
|
||||
| display_name | Friendly name shown in the 'From' field | str |
|
||||
| result | Complete inbox object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Pre-Send Validation** — Fetch inbox details before sending an email to confirm the inbox still exists and verify its display name is correct.
|
||||
|
||||
**Dashboard Display** — Retrieve inbox metadata to show the agent's email address and display name in a monitoring dashboard.
|
||||
|
||||
**Conditional Routing** — Look up an inbox's properties to decide which downstream workflow should handle incoming messages for that address.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Inboxes
|
||||
|
||||
### What it is
|
||||
List all email inboxes in your AgentMail organization with pagination support.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to list all inboxes in the organization. You can control page size with the `limit` parameter (1-100) and paginate through results using the `page_token` returned from a previous call. Only non-empty parameters are sent in the request.
|
||||
|
||||
The block outputs the list of inbox objects, a count of inboxes returned, and a `next_page_token` for fetching additional pages. When there are no more results, `next_page_token` is empty. If the API call fails, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| limit | Maximum number of inboxes to return per page (1-100) | int | No |
|
||||
| page_token | Token from a previous response to fetch the next page of results | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| inboxes | List of inbox objects, each containing inbox_id, email_address, display_name, etc. | List[Dict[str, Any]] |
|
||||
| count | Number of inboxes returned in this page | int |
|
||||
| next_page_token | Token to pass as page_token to get the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Organization Audit** — Enumerate all inboxes to generate a report of active agent email accounts and their configurations.
|
||||
|
||||
**Bulk Operations** — Iterate through every inbox to perform batch updates, such as rotating API keys or updating display names across all agents.
|
||||
|
||||
**Stale Inbox Detection** — List all inboxes and cross-reference with recent activity to identify accounts that can be cleaned up.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Update Inbox
|
||||
|
||||
### What it is
|
||||
Update the display name of an AgentMail inbox. Changes the 'From' name shown when emails are sent.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to update an existing inbox's display name. It sends the inbox ID and the new display name to the update endpoint. The display name controls the "From" label recipients see when the agent sends emails.
|
||||
|
||||
The block outputs the inbox ID and the full updated inbox object as a dictionary. If the inbox does not exist or the request fails, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to update (e.g. 'support@agentmail.to') | str | Yes |
|
||||
| display_name | New display name for the inbox (e.g. 'Customer Support Bot') | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| inbox_id | The updated inbox ID | str |
|
||||
| result | Complete updated inbox object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Rebranding an Agent** — Change the display name when an agent is reassigned from one team to another so outgoing emails reflect the new identity.
|
||||
|
||||
**Personalized Sender Names** — Update the display name dynamically to include a customer's name or ticket number for a more personal touch in automated replies.
|
||||
|
||||
**A/B Testing Email Identity** — Swap the display name between variations to test which sender identity gets higher open rates.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
162
docs/integrations/block-integrations/agent_mail/lists.md
Normal file
162
docs/integrations/block-integrations/agent_mail/lists.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# Agent Mail Lists
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for managing allow/block lists in AgentMail. Lists let you control which email addresses and domains your agents can send to or receive from, based on direction (send/receive) and type (allow/block).
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Create List Entry
|
||||
|
||||
### What it is
|
||||
Add an email address or domain to an allow/block list. Block spam senders or whitelist trusted domains.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to add an email address or domain to a specified list. You select a direction ("send" or "receive") and a list type ("allow" or "block"), then provide the entry to add. For block lists, you can optionally include a reason such as "spam" or "competitor."
|
||||
|
||||
The API creates the entry and returns both the entry string and the complete entry object with metadata. Any errors propagate directly to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| direction | 'send' for outgoing email rules, 'receive' for incoming email rules | "send" \| "receive" | Yes |
|
||||
| list_type | 'allow' to whitelist, 'block' to blacklist | "allow" \| "block" | Yes |
|
||||
| entry | Email address (user@example.com) or domain (example.com) to add | str | Yes |
|
||||
| reason | Reason for blocking (only used with block lists, e.g. 'spam', 'competitor') | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| entry | The email address or domain that was added | str |
|
||||
| result | Complete entry object | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Spam Prevention** — Block a known spam domain so your agent never processes incoming messages from it.
|
||||
|
||||
**Trusted Partner Allowlisting** — Add a partner's domain to the receive allow list so their emails always reach your agent.
|
||||
|
||||
**Outbound Restriction** — Add a competitor's domain to the send block list to prevent your agent from accidentally emailing them.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Delete List Entry
|
||||
|
||||
### What it is
|
||||
Remove an email address or domain from an allow/block list to stop filtering it.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block calls the AgentMail API to remove an existing entry from an allow or block list. You specify the direction, list type, and the entry to delete.
|
||||
|
||||
On success the block returns `success=True`. If the entry does not exist or the API call fails, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| direction | 'send' for outgoing rules, 'receive' for incoming rules | "send" \| "receive" | Yes |
|
||||
| list_type | 'allow' for whitelist, 'block' for blacklist | "allow" \| "block" | Yes |
|
||||
| entry | Email address or domain to remove from the list | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| success | True if the entry was successfully removed | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Unblocking a Sender** — Remove a previously blocked domain after confirming it is no longer a spam source.
|
||||
|
||||
**Revoking Access** — Delete a domain from the receive allow list when a partnership ends and messages should no longer be accepted.
|
||||
|
||||
**Policy Correction** — Remove an entry that was added by mistake so normal email flow resumes immediately.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get List Entry
|
||||
|
||||
### What it is
|
||||
Check if an email address or domain is in an allow/block list. Verify filtering rules.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block queries the AgentMail API to check whether a specific email address or domain exists in an allow or block list. You provide the direction, list type, and the entry to look up.
|
||||
|
||||
If the entry is found, the block returns the entry string and the complete entry object with metadata (such as the reason it was added). If the entry does not exist, the error propagates to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| direction | 'send' for outgoing rules, 'receive' for incoming rules | "send" \| "receive" | Yes |
|
||||
| list_type | 'allow' for whitelist, 'block' for blacklist | "allow" \| "block" | Yes |
|
||||
| entry | Email address or domain to look up | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| entry | The email address or domain that was found | str |
|
||||
| result | Complete entry object with metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Pre-Send Validation** — Check whether a recipient domain is on the send block list before your agent drafts an outbound email.
|
||||
|
||||
**Audit Verification** — Look up a specific address to confirm it was added to the block list and review the recorded reason.
|
||||
|
||||
**Conditional Routing** — Check the receive allow list for a sender's domain to decide whether to process the message or skip it.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Entries
|
||||
|
||||
### What it is
|
||||
List all entries in an AgentMail allow/block list. Choose send/receive direction and allow/block type.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
This block retrieves all entries from a specified allow or block list by calling the AgentMail API. You select a direction and list type, and optionally set a page size with `limit` and continue paginating with `page_token`.
|
||||
|
||||
The API returns the list of entry objects, a count of entries in the current page, and a `next_page_token` for fetching additional results. Any errors propagate to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| direction | 'send' to filter outgoing emails, 'receive' to filter incoming emails | "send" \| "receive" | Yes |
|
||||
| list_type | 'allow' for whitelist (only permit these), 'block' for blacklist (reject these) | "allow" \| "block" | Yes |
|
||||
| limit | Maximum number of entries to return per page | int | No |
|
||||
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| entries | List of entries, each with an email address or domain | List[Dict[str, Any]] |
|
||||
| count | Number of entries returned | int |
|
||||
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
**Policy Dashboard** — Fetch all entries on the receive block list to display a management view where administrators can review and remove entries.
|
||||
|
||||
**Periodic Cleanup** — Page through the full send allow list to identify stale entries that no longer correspond to active partners.
|
||||
|
||||
**Compliance Export** — Retrieve every block list entry across both directions to generate a report for an internal compliance audit.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
247
docs/integrations/block-integrations/agent_mail/messages.md
Normal file
247
docs/integrations/block-integrations/agent_mail/messages.md
Normal file
@@ -0,0 +1,247 @@
|
||||
# Agent Mail Messages
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for sending, receiving, replying to, forwarding, and managing email messages via AgentMail. Messages are individual emails within conversation threads.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Forward Message
|
||||
|
||||
### What it is
|
||||
Forward an email message to one or more recipients. Supports CC/BCC and optional extra text or subject override.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block validates that the combined recipient count across to, cc, and bcc does not exceed 50, then calls the AgentMail API to forward a specific message from your inbox. You provide the inbox ID and message ID to identify the original email, along with the target email addresses. Optionally, you can override the subject line or prepend additional plain text or HTML content before the forwarded message body.
|
||||
|
||||
The API handles constructing the forwarded email with the original content included. Any errors from the API propagate directly to the global error handler without being caught by the block.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to forward from | str | Yes |
|
||||
| message_id | Message ID to forward | str | Yes |
|
||||
| to | Recipient email addresses to forward the message to (e.g. ['user@example.com']) | List[str] | Yes |
|
||||
| cc | CC recipient email addresses | List[str] | No |
|
||||
| bcc | BCC recipient email addresses (hidden from other recipients) | List[str] | No |
|
||||
| subject | Override the subject line (defaults to `'Fwd: <original subject>'`) | str | No |
|
||||
| text | Additional plain text to prepend before the forwarded content | str | No |
|
||||
| html | Additional HTML to prepend before the forwarded content | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | Unique identifier of the forwarded message | str |
|
||||
| thread_id | Thread ID of the forward | str |
|
||||
| result | Complete forwarded message object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Escalation Routing** — Forward messages that match certain keywords or priority levels to a human supervisor's email for review.
|
||||
- **Multi-Agent Collaboration** — Forward incoming requests to a specialized agent's inbox so the right agent handles each task.
|
||||
- **Digest Distribution** — Forward summarized daily reports from an aggregation inbox to a distribution list of stakeholders.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Message
|
||||
|
||||
### What it is
|
||||
Retrieve a specific email message by ID. Includes extracted_text for clean reply content without quoted history.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block fetches a single message from an AgentMail inbox by calling the API with the inbox ID and message ID. It returns the full message content including subject, plain text body, HTML body, and all metadata.
|
||||
|
||||
A key output is `extracted_text`, which contains only the new reply content with quoted history stripped out. This is especially useful when feeding message content into an LLM, since it avoids processing redundant quoted text from earlier in the conversation.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the message belongs to | str | Yes |
|
||||
| message_id | Message ID to retrieve (e.g. `'<abc123@agentmail.to>'`) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | Unique identifier of the message | str |
|
||||
| thread_id | Thread this message belongs to | str |
|
||||
| subject | Email subject line | str |
|
||||
| text | Full plain text body (may include quoted reply history) | str |
|
||||
| extracted_text | Just the new reply content with quoted history stripped. Best for AI processing. | str |
|
||||
| html | HTML body of the email | str |
|
||||
| result | Complete message object with all fields including sender, recipients, attachments, labels | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Intent Classification** — Retrieve a message and pass its extracted text to an LLM to classify the sender's intent before routing to the appropriate workflow.
|
||||
- **Conversation Context Loading** — Fetch a specific message to build context for generating a relevant reply in a multi-turn email conversation.
|
||||
- **Attachment Processing** — Retrieve a message's full metadata to extract attachment URLs for downstream processing like document parsing or image analysis.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Messages
|
||||
|
||||
### What it is
|
||||
List messages in an AgentMail inbox. Filter by labels to find unread, campaign-tagged, or categorized messages.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block queries the AgentMail API to retrieve a paginated list of messages from the specified inbox. You can control the page size with `limit` (1-100) and navigate through results using the `page_token` returned from a previous call. An optional `labels` filter returns only messages that have all of the specified labels.
|
||||
|
||||
The block outputs the list of message objects, a count of messages returned in the current page, and a `next_page_token` for fetching subsequent pages. When `next_page_token` is empty, there are no more results.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to list messages from | str | Yes |
|
||||
| limit | Maximum number of messages to return per page (1-100) | int | No |
|
||||
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
| labels | Only return messages with ALL of these labels (e.g. ['unread'] or ['q4-campaign', 'follow-up']) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| messages | List of message objects with subject, sender, text, html, labels, etc. | List[Dict[str, Any]] |
|
||||
| count | Number of messages returned | int |
|
||||
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Inbox Polling** — Periodically list messages labeled "unread" to trigger automated processing workflows for new incoming emails.
|
||||
- **Campaign Monitoring** — Filter messages by campaign-specific labels to track reply rates and engagement across an outreach sequence.
|
||||
- **Batch Processing** — Page through all messages in an inbox to perform bulk operations like summarization, archiving, or data extraction.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Reply To Message
|
||||
|
||||
### What it is
|
||||
Reply to an existing email in the same conversation thread. Use for multi-turn agent conversations.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block sends a reply to an existing message by calling the AgentMail API with the inbox ID, the message ID being replied to, and the reply body text. An optional HTML body can be provided for rich formatting. The API automatically threads the reply into the same conversation as the original message.
|
||||
|
||||
The block returns the new reply's message ID, the thread ID it was added to, and the complete message object. This makes it straightforward to build multi-turn email conversations where an agent responds to incoming messages within the same thread.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to send the reply from | str | Yes |
|
||||
| message_id | Message ID to reply to (e.g. `'<abc123@agentmail.to>'`) | str | Yes |
|
||||
| text | Plain text body of the reply | str | Yes |
|
||||
| html | Rich HTML body of the reply | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | Unique identifier of the reply message | str |
|
||||
| thread_id | Thread ID the reply was added to | str |
|
||||
| result | Complete reply message object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Customer Support Agent** — Automatically reply to incoming support emails with answers generated by an LLM based on the message content and a knowledge base.
|
||||
- **Interview Scheduling** — Reply to candidate emails with proposed interview times after checking calendar availability through another block.
|
||||
- **Conversational Workflow** — Maintain an ongoing back-and-forth conversation with a user, where each reply builds on the previous exchange to complete a multi-step task.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Send Message
|
||||
|
||||
### What it is
|
||||
Send a new email from an AgentMail inbox. Creates a new conversation thread. Supports HTML, CC/BCC, and labels.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block first validates that the combined count of `to`, `cc`, and `bcc` recipients does not exceed 50. It then calls the AgentMail API to send a new message from the specified inbox, creating a new conversation thread. You must provide at least a plain text body; an optional HTML body can be included for rich formatting.
|
||||
|
||||
The block supports CC and BCC recipients for human-in-the-loop oversight or silent monitoring, and labels for tagging outgoing messages for later filtering. The API returns the new message's ID, the thread ID for tracking future replies, and the complete message object.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address to send from (e.g. 'agent@agentmail.to') | str | Yes |
|
||||
| to | Recipient email addresses (e.g. ['user@example.com']) | List[str] | Yes |
|
||||
| subject | Email subject line | str | Yes |
|
||||
| text | Plain text body of the email. Always provide this as a fallback for email clients that don't render HTML. | str | Yes |
|
||||
| html | Rich HTML body of the email. Embed CSS in a `<style>` tag for best compatibility across email clients. | str | No |
|
||||
| cc | CC recipient email addresses for human-in-the-loop oversight | List[str] | No |
|
||||
| bcc | BCC recipient email addresses (hidden from other recipients) | List[str] | No |
|
||||
| labels | Labels to tag the message for filtering and state management (e.g. ['outreach', 'q4-campaign']) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | Unique identifier of the sent message | str |
|
||||
| thread_id | Thread ID grouping this message and any future replies | str |
|
||||
| result | Complete sent message object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Outreach Campaigns** — Send personalized cold emails to a list of prospects with campaign labels for tracking, using HTML templates for professional formatting.
|
||||
- **Alert Notifications** — Send automated alert emails when a monitored metric crosses a threshold, CC-ing a human operator for oversight.
|
||||
- **Report Delivery** — Generate and send periodic summary reports to stakeholders with BCC to an archive inbox for record-keeping.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Update Message
|
||||
|
||||
### What it is
|
||||
Add or remove labels on an email message. Use for read/unread tracking, campaign tagging, or state management.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls the AgentMail API to modify the labels on a specific message. You can add new labels, remove existing ones, or do both in a single call. Labels are arbitrary strings you define, making them flexible for tracking message state such as read/unread, processing status, or campaign membership.
|
||||
|
||||
The block returns the updated message ID and the complete message object reflecting the current label state. This is useful as a downstream step after processing a message, allowing you to mark it as handled so it is not picked up again by a polling workflow.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| inbox_id | Inbox ID or email address the message belongs to | str | Yes |
|
||||
| message_id | Message ID to update labels on | str | Yes |
|
||||
| add_labels | Labels to add (e.g. ['read', 'processed', 'high-priority']) | List[str] | No |
|
||||
| remove_labels | Labels to remove (e.g. ['unread', 'pending']) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| message_id | The updated message ID | str |
|
||||
| result | Complete updated message object with current labels | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Read/Unread Tracking** — Remove the "unread" label and add "read" after an agent processes a message, preventing duplicate processing on the next polling cycle.
|
||||
- **Pipeline State Management** — Add labels like "sentiment-analyzed" or "response-drafted" as a message moves through multi-step processing, so each stage knows which messages still need work.
|
||||
- **Priority Tagging** — Add a "high-priority" label to messages from VIP senders or containing urgent keywords, enabling downstream blocks to filter and handle them first.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
299
docs/integrations/block-integrations/agent_mail/pods.md
Normal file
299
docs/integrations/block-integrations/agent_mail/pods.md
Normal file
@@ -0,0 +1,299 @@
|
||||
# Agent Mail Pods
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for creating, managing, and querying pods in AgentMail. Pods provide multi-tenant isolation between customers — each pod acts as an isolated workspace containing its own inboxes, domains, threads, and drafts. Use pods when building SaaS platforms, agency tools, or AI agent fleets serving multiple customers.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Create Pod
|
||||
|
||||
### What it is
|
||||
Create a new pod for multi-tenant customer isolation. Use client_id to map to your internal tenant IDs.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to provision a new pod, passing an optional `client_id` parameter. When a `client_id` is provided, AgentMail maps it to the pod so you can later reference the pod using your own internal tenant identifier instead of the AgentMail-assigned `pod_id`.
|
||||
|
||||
The block returns the newly created `pod_id` along with the complete pod object containing all metadata. Any API errors propagate directly to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| client_id | Your internal tenant/customer ID for idempotent mapping. Lets you access the pod by your own ID instead of AgentMail's pod_id. | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| pod_id | Unique identifier of the created pod | str |
|
||||
| result | Complete pod object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **SaaS Customer Onboarding** — Automatically provision an isolated email workspace when a new customer signs up for your platform.
|
||||
- **AI Agent Fleet Management** — Create a dedicated pod for each AI agent so its email activity is fully isolated from other agents.
|
||||
- **White-Label Email Service** — Spin up tenant-scoped pods mapped to your internal customer IDs to power a branded email product.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Create Pod Inbox
|
||||
|
||||
### What it is
|
||||
Create a new email inbox within a pod. The inbox is scoped to the customer workspace.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to create a new inbox scoped to the specified pod. You can optionally provide a `username`, `domain`, and `display_name` to customize the email address and sender identity. If omitted, the username is auto-generated and the domain defaults to `agentmail.to`.
|
||||
|
||||
The block returns the `inbox_id`, the full `email_address`, and the complete inbox metadata object. The inbox is fully isolated within the pod, meaning it only appears in that pod's inbox listings and its threads stay separate from other pods.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| pod_id | Pod ID to create the inbox in | str | Yes |
|
||||
| username | Local part of the email address (e.g. 'support'). Leave empty to auto-generate. | str | No |
|
||||
| domain | Email domain (e.g. 'mydomain.com'). Defaults to agentmail.to if empty. | str | No |
|
||||
| display_name | Friendly name shown in the 'From' field (e.g. 'Customer Support') | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| inbox_id | Unique identifier of the created inbox | str |
|
||||
| email_address | Full email address of the inbox | str |
|
||||
| result | Complete inbox object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Per-Customer Support Addresses** — Create a `support@clientdomain.com` inbox inside each customer's pod so inbound support emails are automatically routed to the right tenant.
|
||||
- **Branded Outbound Campaigns** — Provision inboxes with custom display names and domains for each customer's marketing agent to send branded emails.
|
||||
- **Multi-Department Agent Setup** — Create separate inboxes (sales, billing, support) within a single customer pod so different AI agents handle different functions.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Delete Pod
|
||||
|
||||
### What it is
|
||||
Permanently delete a pod. All inboxes and domains must be removed first.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to permanently delete the specified pod. The API enforces a precondition: all inboxes and custom domains must be removed from the pod before deletion is allowed. If any remain, the API returns an error that propagates to the global error handler.
|
||||
|
||||
On success the block returns `success=True`. This operation is irreversible — the pod and its associated `client_id` mapping are permanently removed.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| pod_id | Pod ID to permanently delete (must have no inboxes or domains) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| success | True if the pod was successfully deleted | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Customer Offboarding** — Automatically delete a customer's pod after they cancel their subscription and all their inboxes have been cleaned up.
|
||||
- **Development Environment Cleanup** — Tear down temporary pods created during testing or staging so they do not accumulate over time.
|
||||
- **Compliance Data Removal** — Permanently remove a tenant's email workspace as part of a GDPR or data-deletion request workflow.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Pod
|
||||
|
||||
### What it is
|
||||
Retrieve details of an existing pod including its client_id mapping and metadata.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API with the given `pod_id` to fetch the full pod record. The returned object includes the pod's `client_id` mapping, creation timestamp, and any other metadata stored on the pod.
|
||||
|
||||
The block outputs both the `pod_id` and the complete result dictionary. If the pod does not exist, the API error propagates directly to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| pod_id | Pod ID to retrieve | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|
||||
|--------|-------------|------|
|
||||
| error | Error message if the operation failed | str |
|
||||
| pod_id | Unique identifier of the pod | str |
|
||||
| result | Complete pod object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Tenant Dashboard Display** — Fetch pod details to show a customer's workspace status, creation date, and associated client ID on an admin dashboard.
|
||||
- **Pre-Action Validation** — Retrieve pod metadata before performing operations like inbox creation to confirm the pod exists and is correctly mapped.
|
||||
- **Audit Logging** — Pull pod details as part of an automated audit trail that records which tenant workspace was accessed and when.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Pod Drafts
|
||||
|
||||
### What it is
|
||||
List all drafts across all inboxes within a pod. View pending emails for a customer.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to retrieve drafts across all inboxes within the specified pod. Optional `limit` and `page_token` parameters control pagination. Only non-empty parameters are sent to the API.
|
||||
|
||||
The block returns the list of draft objects, a `count` of drafts in the current page, and a `next_page_token` for fetching subsequent pages. This provides a pod-wide view of unsent emails without needing to query each inbox individually.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| pod_id | Pod ID to list drafts from | str | Yes |
| limit | Maximum number of drafts to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| drafts | List of draft objects from all inboxes in this pod | List[Dict[str, Any]] |
| count | Number of drafts returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Draft Review Queue** — Surface all pending drafts across a customer's inboxes so a human reviewer can approve or discard them before sending.
|
||||
- **Stuck Draft Detection** — Periodically list pod drafts to find emails that have been sitting unsent for too long and alert the responsible agent or operator.
|
||||
- **Customer Activity Summary** — Include draft counts in a tenant dashboard to show how many outbound emails are queued and awaiting dispatch.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Pod Inboxes
|
||||
|
||||
### What it is
|
||||
List all inboxes within a pod. View email accounts scoped to a specific customer.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to list all inboxes belonging to the specified pod. Optional `limit` and `page_token` parameters enable paginated retrieval. Only non-empty parameters are included in the API call.
|
||||
|
||||
The block returns the list of inbox objects, a `count` of inboxes in the current page, and a `next_page_token` for fetching additional pages. Each inbox object includes its ID, email address, display name, and other metadata.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| pod_id | Pod ID to list inboxes from | str | Yes |
| limit | Maximum number of inboxes to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| inboxes | List of inbox objects within this pod | List[Dict[str, Any]] |
| count | Number of inboxes returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Customer Inbox Inventory** — Display all email addresses belonging to a tenant on their settings page so they can manage or remove unused inboxes.
|
||||
- **Pre-Deletion Validation** — List a pod's inboxes before attempting to delete the pod, ensuring all inboxes have been removed as required by the API.
|
||||
- **Multi-Inbox Routing Overview** — Show operators which inboxes exist in a customer's pod so they can configure routing rules for each address.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Pod Threads
|
||||
|
||||
### What it is
|
||||
List all conversation threads across all inboxes within a pod. View all email activity for a customer.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to retrieve conversation threads across all inboxes in the specified pod. Supports optional `limit`, `page_token`, and `labels` parameters. When `labels` are provided, only threads matching all specified labels are returned.
|
||||
|
||||
The block returns the list of thread objects, a `count` for the current page, and a `next_page_token` for pagination. This gives a unified, cross-inbox view of all email conversations within a customer's workspace.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| pod_id | Pod ID to list threads from | str | Yes |
| limit | Maximum number of threads to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
| labels | Only return threads matching ALL of these labels | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| threads | List of thread objects from all inboxes in this pod | List[Dict[str, Any]] |
| count | Number of threads returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Unified Customer Inbox View** — Aggregate all email threads from every inbox in a customer's pod into a single activity feed for support agents or dashboards.
|
||||
- **Label-Based Ticket Triage** — Filter pod threads by labels like "urgent" or "billing" to route conversations to the appropriate AI agent or human team.
|
||||
- **Conversation Volume Monitoring** — Periodically list pod threads to track email volume per tenant and trigger alerts when activity spikes or drops.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Pods
|
||||
|
||||
### What it is
|
||||
List all tenant pods in your organization. See all customer workspaces at a glance.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
Calls the AgentMail API to list all pods in your organization. Optional `limit` and `page_token` parameters control pagination. Only non-empty parameters are included in the request.
|
||||
|
||||
The block returns a list of pod objects (each containing `pod_id`, `client_id`, creation time, and other metadata), a `count` for the current page, and a `next_page_token` for retrieving additional pages.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| limit | Maximum number of pods to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| pods | List of pod objects with pod_id, client_id, creation time, etc. | List[Dict[str, Any]] |
| count | Number of pods returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Admin Tenant Overview** — Display all customer pods on an internal admin dashboard so operators can monitor workspace count and health at a glance.
|
||||
- **Automated Tenant Reconciliation** — Periodically list all pods and compare against your internal customer database to detect orphaned or missing workspaces.
|
||||
- **Usage Reporting** — Enumerate all pods to generate per-tenant usage reports or billing summaries based on workspace activity.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
<!-- File: docs/integrations/block-integrations/agent_mail/threads.md — new file, 189 lines -->
|
||||
# Agent Mail Threads
|
||||
<!-- MANUAL: file_description -->
|
||||
Blocks for listing, retrieving, and deleting conversation threads in AgentMail. Threads group related messages into a single conversation and can be queried per-inbox or across the entire organization.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
## Agent Mail Delete Inbox Thread
|
||||
|
||||
### What it is
|
||||
Permanently delete a conversation thread and all its messages. This action cannot be undone.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block calls the AgentMail API to permanently delete a thread and all of its messages from the specified inbox. It requires both the inbox ID (or email address) and the thread ID.
|
||||
|
||||
On success the block outputs `success=True`. If the API returns an error (for example, the thread does not exist), the error propagates to the global error handler and the block outputs an error message instead.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| inbox_id | Inbox ID or email address the thread belongs to | str | Yes |
| thread_id | Thread ID to permanently delete | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| success | True if the thread was successfully deleted | bool |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **GDPR Data Removal** — Permanently delete conversation threads when a user requests erasure of their personal data.
|
||||
- **Spam Cleanup** — Automatically remove threads flagged as spam by an upstream classification block.
|
||||
- **Conversation Archival Pipeline** — Delete threads from the live inbox after they have been exported to long-term storage.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Inbox Thread
|
||||
|
||||
### What it is
|
||||
Retrieve a conversation thread with all its messages. Use for getting full conversation context before replying.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block fetches a single thread from a specific inbox by calling the AgentMail API with the inbox ID and thread ID. It returns the thread ID, the full list of messages in chronological order, and the complete thread object as a dictionary.
|
||||
|
||||
Any API error (such as an invalid thread ID or insufficient permissions) propagates to the global error handler, and the block outputs an error message.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| inbox_id | Inbox ID or email address the thread belongs to | str | Yes |
| thread_id | Thread ID to retrieve | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| thread_id | Unique identifier of the thread | str |
| messages | All messages in the thread, in chronological order | List[Dict[str, Any]] |
| result | Complete thread object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Context-Aware Replies** — Retrieve the full conversation history before generating an AI-drafted reply to ensure continuity.
|
||||
- **Conversation Summarization** — Pull all messages in a thread and pass them to a summarization block for a digest.
|
||||
- **Support Ticket Review** — Fetch a specific customer thread so a QA agent can evaluate response quality.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail Get Org Thread
|
||||
|
||||
### What it is
|
||||
Retrieve a conversation thread by ID from anywhere in the organization, without needing the inbox ID.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block performs an organization-wide thread lookup by calling the AgentMail API with only the thread ID. Unlike the inbox-scoped variant, no inbox ID is required because the API resolves the thread across all inboxes in the organization.
|
||||
|
||||
It returns the thread ID, all messages in chronological order, and the complete thread object. Errors propagate to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| thread_id | Thread ID to retrieve (works across all inboxes) | str | Yes |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| thread_id | Unique identifier of the thread | str |
| messages | All messages in the thread, in chronological order | List[Dict[str, Any]] |
| result | Complete thread object with all metadata | Dict[str, Any] |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Cross-Inbox Thread Tracking** — Look up a thread by ID when the originating inbox is unknown, such as from a webhook or external reference.
|
||||
- **Supervisor Agent Oversight** — Allow a manager agent to inspect any conversation across the organization without needing inbox-level routing.
|
||||
- **Audit and Compliance** — Retrieve a specific thread for compliance review when only the thread ID is available from a log or report.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Inbox Threads
|
||||
|
||||
### What it is
|
||||
List all conversation threads in an AgentMail inbox. Filter by labels for campaign tracking or status management.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block lists conversation threads within a single inbox by calling the AgentMail API with the inbox ID and optional pagination and filtering parameters. You can set a limit (1-100), pass a page token for pagination, and filter by labels so that only threads matching all specified labels are returned.
|
||||
|
||||
The block outputs the list of thread objects, the count of threads returned in this page, and a next-page token for retrieving additional results. Errors propagate to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| inbox_id | Inbox ID or email address to list threads from | str | Yes |
| limit | Maximum number of threads to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
| labels | Only return threads matching ALL of these labels (e.g. ['q4-campaign', 'follow-up']) | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| threads | List of thread objects with thread_id, subject, message count, labels, etc. | List[Dict[str, Any]] |
| count | Number of threads returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Inbox Dashboard** — List all threads in a support inbox to display an overview of active conversations.
|
||||
- **Campaign Monitoring** — Filter threads by a campaign label to track how many conversations a specific outreach effort has generated.
|
||||
- **Stale Thread Detection** — Paginate through all threads in an inbox to identify conversations that have not received a reply within a set time window.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
|
||||
## Agent Mail List Org Threads
|
||||
|
||||
### What it is
|
||||
List threads across ALL inboxes in your organization. Use for supervisor agents, dashboards, or cross-agent monitoring.
|
||||
|
||||
### How it works
|
||||
<!-- MANUAL: how_it_works -->
|
||||
The block lists threads across all inboxes in the organization by calling the AgentMail API without an inbox ID. It accepts optional limit, page-token, and label-filter parameters, which are forwarded directly to the API.
|
||||
|
||||
Results include threads from every inbox the organization owns. The block outputs the list of thread objects, the count for the current page, and a next-page token for pagination. Errors propagate to the global error handler.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
### Inputs
|
||||
|
||||
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| limit | Maximum number of threads to return per page (1-100) | int | No |
| page_token | Token from a previous response to fetch the next page | str | No |
| labels | Only return threads matching ALL of these labels | List[str] | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| threads | List of thread objects from all inboxes in the organization | List[Dict[str, Any]] |
| count | Number of threads returned | int |
| next_page_token | Token for the next page. Empty if no more results. | str |
|
||||
|
||||
### Possible use case
|
||||
<!-- MANUAL: use_case -->
|
||||
- **Organization-Wide Activity Feed** — Build a real-time dashboard showing the latest conversations across every agent inbox.
|
||||
- **Cross-Agent Analytics** — Aggregate thread counts and labels across all inboxes to measure overall communication volume and topic distribution.
|
||||
- **Escalation Routing** — Scan all org threads for a specific label (e.g., "urgent") and route matching threads to a dedicated escalation agent.
|
||||
<!-- END MANUAL -->
|
||||
|
||||
---
|
||||
Reference in New Issue
Block a user