Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 08:38:09 -05:00)

Compare commits: swiftyos/d ... ci/test-op (73 Commits)
| SHA1 |
|---|
| b33732fac1 |
| 62c2d1cdc7 |
| 93e5c40189 |
| 4ea2411fda |
| 95154f03e6 |
| f4b3358cb3 |
| c5e8b0b08f |
| 48f8c70e6f |
| 3e1a8c800c |
| bcf3a0cd9c |
| cd3e35df9e |
| 4e0ae67067 |
| 32acf066d0 |
| 8268d919f5 |
| c7063a46a6 |
| 3509db9ebd |
| 4c474417bc |
| 99e2261254 |
| cab498fa8c |
| 22078671df |
| 0082a72657 |
| 9a1d940677 |
| e640d36265 |
| cc9179178f |
| e8d37ab116 |
| 7f7ef6a271 |
| aefac541d9 |
| ff5c8f324b |
| f121a22544 |
| 71157bddd7 |
| 152e747ea6 |
| 4d4741d558 |
| bd37fe946d |
| 7ff282c908 |
| 117bb05438 |
| 979d7c3b74 |
| 95200b67f8 |
| f8afc6044e |
| 7edf01777e |
| c9681f5d44 |
| 1305325813 |
| 4f349281bd |
| 6c43b34dee |
| 79534efa68 |
| 69d0c05017 |
| c1e21d07e6 |
| aaa8dcc5a8 |
| a46976decd |
| c4eb7edb65 |
| 3f690ea7b8 |
| 521dbdc25f |
| 3b9abbcdbc |
| e0cd070e4d |
| 8be3c88711 |
| e4d0dbc283 |
| 8e476c3f8d |
| 2f63defb53 |
| 2934e9ea69 |
| c880db439d |
| 486099140d |
| 6d8906ced7 |
| b6b7b77ddd |
| fc5cf113a7 |
| 9a5a041102 |
| 1137cfde48 |
| f7a8e372dd |
| bf32a76f49 |
| 3ccc712463 |
| 2b9816cfa5 |
| 4e87f668e3 |
| 729400dbe1 |
| da8e7405b0 |
| f6608e99c8 |
.github/workflows/platform-backend-ci.yml (vendored, 50 changed lines)

@@ -32,7 +32,9 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.11", "3.12", "3.13"]
# Use Python 3.13 to match Docker image (see backend/Dockerfile)
# ClamAV tests moved to platform-backend-security-ci.yml (runs on merge to master)
python-version: ["3.13"]
runs-on: ubuntu-latest

services:

@@ -48,23 +50,6 @@ jobs:
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s

steps:
- name: Checkout repository

@@ -146,35 +131,6 @@ jobs:
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0

until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done

if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
echo "Checking ClamAV service logs..."
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
fi

echo "ClamAV is ready!"

# Verify ClamAV is responsive
echo "Testing ClamAV connection..."
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
echo "ClamAV is not responding to PING"
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
}

- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
.github/workflows/platform-backend-security-ci.yml (vendored, new file, 145 lines)

@@ -0,0 +1,145 @@
name: AutoGPT Platform - Backend Security CI

# This workflow runs ClamAV-dependent security tests.
# It only runs on merge to master to avoid the 3-5 minute ClamAV startup time on every PR.

on:
push:
branches: [master]
paths:
- "autogpt_platform/backend/**/file*.py"
- "autogpt_platform/backend/**/scan*.py"
- "autogpt_platform/backend/**/virus*.py"
- "autogpt_platform/backend/**/media*.py"
- ".github/workflows/platform-backend-security-ci.yml"

concurrency:
group: ${{ format('backend-security-ci-{0}', github.sha) }}
cancel-in-progress: false

defaults:
run:
shell: bash
working-directory: autogpt_platform/backend

jobs:
security-tests:
runs-on: ubuntu-latest
timeout-minutes: 15

services:
redis:
image: redis:latest
ports:
- 6379:6379
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s

steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"

- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: 1.178.1

- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

- name: Install Poetry
run: |
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Using Poetry version ${HEAD_POETRY_VERSION}"
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$HEAD_POETRY_VERSION python3 -

- name: Install Python dependencies
run: poetry install

- name: Generate Prisma Client
run: poetry run prisma generate

- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT

- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0

until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done

if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
exit 1
fi

echo "ClamAV is ready!"

- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}

- name: Run security-related tests
run: |
poetry run pytest -v \
backend/util/virus_scanner_test.py \
backend/util/file_test.py \
backend/server/v2/store/media_test.py \
-x
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
JWT_VERIFY_KEY: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw="
CLAMAV_SERVICE_HOST: "localhost"
CLAMAV_SERVICE_PORT: "3310"
CLAMAV_SERVICE_ENABLED: "true"

env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080
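Both this workflow and the backend CI step it replaces wait for ClamAV by polling TCP port 3310 with nc before running tests. For local debugging, a rough equivalent of that probe is sketched below; the helper is hypothetical (not part of the repository) and assumes a clamd daemon on localhost:3310 that answers the standard PING command with PONG.

```python
import socket
import time

def wait_for_clamav(host: str = "localhost", port: int = 3310,
                    max_attempts: int = 60, delay: float = 5.0) -> None:
    """Poll clamd until the TCP port accepts connections, then verify with PING."""
    for attempt in range(max_attempts):
        try:
            with socket.create_connection((host, port), timeout=5) as sock:
                sock.sendall(b"PING\n")  # a healthy clamd replies "PONG"
                if b"PONG" in sock.recv(64):
                    print("ClamAV is ready!")
                    return
        except OSError:
            pass  # daemon not up yet (signature download can take several minutes)
        print(f"ClamAV is unavailable - sleeping (attempt {attempt + 1}/{max_attempts})")
        time.sleep(delay)
    raise RuntimeError(f"ClamAV failed to start after {max_attempts * delay:.0f} seconds")

if __name__ == "__main__":
    wait_for_clamav()
```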
93
.github/workflows/platform-frontend-ci.yml
vendored
93
.github/workflows/platform-frontend-ci.yml
vendored
@@ -154,35 +154,78 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
# Docker image tar caching - loads images from cache in parallel for faster startup
|
||||
- name: Set up Docker image cache
|
||||
id: docker-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
|
||||
path: ~/docker-cache
|
||||
key: docker-images-frontend-${{ runner.os }}-${{ hashFiles('autogpt_platform/docker-compose.yml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-frontend-test-
|
||||
docker-images-frontend-${{ runner.os }}-
|
||||
|
||||
- name: Load or pull Docker images
|
||||
working-directory: autogpt_platform
|
||||
run: |
|
||||
mkdir -p ~/docker-cache
|
||||
|
||||
# Define image list for easy maintenance
|
||||
IMAGES=(
|
||||
"redis:latest"
|
||||
"rabbitmq:management"
|
||||
"kong:2.8.1"
|
||||
"supabase/gotrue:v2.170.0"
|
||||
"supabase/postgres:15.8.1.049"
|
||||
)
|
||||
|
||||
# Check if any cached tar files exist
|
||||
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
|
||||
echo "Docker cache found, loading images in parallel..."
|
||||
for image in "${IMAGES[@]}"; do
|
||||
filename=$(echo "$image" | tr ':/' '--')
|
||||
if [ -f ~/docker-cache/${filename}.tar ]; then
|
||||
echo "Loading $image..."
|
||||
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
|
||||
fi
|
||||
done
|
||||
wait
|
||||
echo "All cached images loaded"
|
||||
else
|
||||
echo "No Docker cache found, pulling images in parallel..."
|
||||
for image in "${IMAGES[@]}"; do
|
||||
docker pull "$image" &
|
||||
done
|
||||
wait
|
||||
|
||||
# Only save cache on main branches (not PRs) to avoid cache pollution
|
||||
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
|
||||
echo "Saving Docker images to cache in parallel..."
|
||||
for image in "${IMAGES[@]}"; do
|
||||
filename=$(echo "$image" | tr ':/' '--')
|
||||
echo "Saving $image..."
|
||||
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
|
||||
done
|
||||
wait
|
||||
echo "Docker image cache saved"
|
||||
else
|
||||
echo "Skipping cache save for PR/feature branch"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Docker images ready for use"
|
||||
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
|
||||
env:
|
||||
DOCKER_BUILDKIT: 1
|
||||
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
|
||||
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||
|
||||
- name: Move cache
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
if [ -d "/tmp/.buildx-cache-new" ]; then
|
||||
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||
fi
|
||||
|
||||
- name: Wait for services to be ready
|
||||
run: |
|
||||
echo "Waiting for rest_server to be ready..."
|
||||
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
timeout 30 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
|
||||
echo "Waiting for database to be ready..."
|
||||
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
|
||||
timeout 30 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
|
||||
|
||||
- name: Create E2E test data
|
||||
run: |
|
||||
@@ -221,9 +264,27 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Install Browser 'chromium'
|
||||
# Playwright browser caching - saves 30-60s when cache hits
|
||||
- name: Get Playwright version
|
||||
id: playwright-version
|
||||
run: |
|
||||
echo "version=$(pnpm list @playwright/test --json | jq -r '.[0].dependencies["@playwright/test"].version')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache Playwright browsers
|
||||
uses: actions/cache@v4
|
||||
id: playwright-cache
|
||||
with:
|
||||
path: ~/.cache/ms-playwright
|
||||
key: playwright-${{ runner.os }}-${{ steps.playwright-version.outputs.version }}
|
||||
|
||||
- name: Install Playwright browsers
|
||||
if: steps.playwright-cache.outputs.cache-hit != 'true'
|
||||
run: pnpm playwright install --with-deps chromium
|
||||
|
||||
- name: Install Playwright deps only (when cache hit)
|
||||
if: steps.playwright-cache.outputs.cache-hit == 'true'
|
||||
run: pnpm playwright install-deps chromium
|
||||
|
||||
- name: Run Playwright tests
|
||||
run: pnpm test:no-build
|
||||
|
||||
|
||||
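The "Load or pull Docker images" step above (repeated verbatim in the fullstack workflow below) is plain shell. As a reading aid, here is a minimal Python sketch of the same decision flow under the same assumptions: tar files live in ~/docker-cache, the tar name is the image name with ':' and '/' replaced by '-', and tars are only saved on master/dev. The helper names are hypothetical, it shells out to the same docker load/pull/save commands, and it omits the parallel `&`/`wait` handling of the real step.

```python
import subprocess
from pathlib import Path

CACHE_DIR = Path.home() / "docker-cache"
IMAGES = ["redis:latest", "rabbitmq:management", "kong:2.8.1",
          "supabase/gotrue:v2.170.0", "supabase/postgres:15.8.1.049"]

def tar_path(image: str) -> Path:
    # Mirrors `tr ':/' '--'`: both ':' and '/' become '-' in the cached tar's name.
    return CACHE_DIR / (image.replace(":", "-").replace("/", "-") + ".tar")

def load_or_pull(on_main_branch: bool) -> None:
    CACHE_DIR.mkdir(exist_ok=True)
    if any(CACHE_DIR.glob("*.tar")):
        # Cache hit: load every image whose tar exists.
        for image in IMAGES:
            if tar_path(image).exists():
                subprocess.run(["docker", "load", "-i", str(tar_path(image))], check=False)
    else:
        # Cache miss: pull fresh; only persist tars on master/dev to avoid cache pollution.
        for image in IMAGES:
            subprocess.run(["docker", "pull", image], check=False)
        if on_main_branch:
            for image in IMAGES:
                subprocess.run(["docker", "save", "-o", str(tar_path(image)), image], check=False)
```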
.github/workflows/platform-fullstack-ci.yml (vendored, 64 changed lines)

@@ -83,6 +83,66 @@ jobs:
run: |
cp ../backend/.env.default ../backend/.env

# Docker image tar caching - loads images from cache in parallel for faster startup
- name: Set up Docker image cache
id: docker-cache
uses: actions/cache@v4
with:
path: ~/docker-cache
key: docker-images-fullstack-${{ runner.os }}-${{ hashFiles('autogpt_platform/docker-compose.yml') }}
restore-keys: |
docker-images-fullstack-${{ runner.os }}-

- name: Load or pull Docker images
working-directory: autogpt_platform
run: |
mkdir -p ~/docker-cache

# Define image list for easy maintenance
IMAGES=(
"redis:latest"
"rabbitmq:management"
"kong:2.8.1"
"supabase/gotrue:v2.170.0"
"supabase/postgres:15.8.1.049"
)

# Check if any cached tar files exist
if ls ~/docker-cache/*.tar 1> /dev/null 2>&1; then
echo "Docker cache found, loading images in parallel..."
for image in "${IMAGES[@]}"; do
filename=$(echo "$image" | tr ':/' '--')
if [ -f ~/docker-cache/${filename}.tar ]; then
echo "Loading $image..."
docker load -i ~/docker-cache/${filename}.tar || echo "Warning: Failed to load $image from cache" &
fi
done
wait
echo "All cached images loaded"
else
echo "No Docker cache found, pulling images in parallel..."
for image in "${IMAGES[@]}"; do
docker pull "$image" &
done
wait

# Only save cache on main branches (not PRs) to avoid cache pollution
if [[ "${{ github.ref }}" == "refs/heads/master" ]] || [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
echo "Saving Docker images to cache in parallel..."
for image in "${IMAGES[@]}"; do
filename=$(echo "$image" | tr ':/' '--')
echo "Saving $image..."
docker save -o ~/docker-cache/${filename}.tar "$image" || echo "Warning: Failed to save $image" &
done
wait
echo "Docker image cache saved"
else
echo "Skipping cache save for PR/feature branch"
fi
fi

echo "Docker images ready for use"

- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d

@@ -104,9 +164,9 @@ jobs:
- name: Wait for services to be ready
run: |
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
timeout 30 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."
timeout 30 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

- name: Generate API queries
run: pnpm generate:api:force
Stale-issue workflow (file name not shown in this view):

@@ -11,7 +11,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
- uses: actions/stale@v10
with:
# operations-per-run: 5000
stale-issue-message: >
.github/workflows/repo-pr-label.yml (vendored, 2 changed lines)

@@ -61,6 +61,6 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
- uses: actions/labeler@v6
with:
sync-labels: true
Makefile (path not shown in this view):

@@ -44,7 +44,7 @@ test-data:
cd backend && poetry run python test/test_data_creator.py

load-store-agents:
cd backend && poetry run python test/load_store_agents.py
cd backend && poetry run load-store-agents

help:
@echo "Usage: make <target>"
Airtable block (file path not shown in this view):

@@ -1371,7 +1371,7 @@ async def create_base(
if tables:
params["tables"] = tables

print(params)
logger.debug(f"Creating Airtable base with params: {params}")

response = await Requests().post(
"https://api.airtable.com/v0/meta/bases",
@@ -0,0 +1,108 @@
|
||||
{
|
||||
"action": "created",
|
||||
"discussion": {
|
||||
"repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"category": {
|
||||
"id": 12345678,
|
||||
"node_id": "DIC_kwDOJKSTjM4CXXXX",
|
||||
"repository_id": 614765452,
|
||||
"emoji": ":pray:",
|
||||
"name": "Q&A",
|
||||
"description": "Ask the community for help",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2023-03-16T09:21:07Z",
|
||||
"slug": "q-a",
|
||||
"is_answerable": true
|
||||
},
|
||||
"answer_html_url": null,
|
||||
"answer_chosen_at": null,
|
||||
"answer_chosen_by": null,
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT/discussions/9999",
|
||||
"id": 5000000001,
|
||||
"node_id": "D_kwDOJKSTjM4AYYYY",
|
||||
"number": 9999,
|
||||
"title": "How do I configure custom blocks?",
|
||||
"user": {
|
||||
"login": "curious-user",
|
||||
"id": 22222222,
|
||||
"node_id": "MDQ6VXNlcjIyMjIyMjIy",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
|
||||
"url": "https://api.github.com/users/curious-user",
|
||||
"html_url": "https://github.com/curious-user",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
},
|
||||
"state": "open",
|
||||
"state_reason": null,
|
||||
"locked": false,
|
||||
"comments": 0,
|
||||
"created_at": "2024-12-01T17:00:00Z",
|
||||
"updated_at": "2024-12-01T17:00:00Z",
|
||||
"author_association": "NONE",
|
||||
"active_lock_reason": null,
|
||||
"body": "## Question\n\nI'm trying to create a custom block for my specific use case. I've read the documentation but I'm not sure how to:\n\n1. Define the input/output schema\n2. Handle authentication\n3. Test my block locally\n\nCan someone point me to examples or provide guidance?\n\n## Environment\n\n- AutoGPT Platform version: latest\n- Python: 3.11",
|
||||
"reactions": {
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/reactions",
|
||||
"total_count": 0,
|
||||
"+1": 0,
|
||||
"-1": 0,
|
||||
"laugh": 0,
|
||||
"hooray": 0,
|
||||
"confused": 0,
|
||||
"heart": 0,
|
||||
"rocket": 0,
|
||||
"eyes": 0
|
||||
},
|
||||
"timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/timeline"
|
||||
},
|
||||
"repository": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"type": "Organization",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-12-01T17:00:00Z",
|
||||
"pushed_at": "2024-12-01T12:00:00Z",
|
||||
"stargazers_count": 170000,
|
||||
"watchers_count": 170000,
|
||||
"language": "Python",
|
||||
"has_discussions": true,
|
||||
"forks_count": 45000,
|
||||
"visibility": "public",
|
||||
"default_branch": "master"
|
||||
},
|
||||
"organization": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"url": "https://api.github.com/orgs/Significant-Gravitas",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"description": ""
|
||||
},
|
||||
"sender": {
|
||||
"login": "curious-user",
|
||||
"id": 22222222,
|
||||
"node_id": "MDQ6VXNlcjIyMjIyMjIy",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/curious-user",
|
||||
"html_url": "https://github.com/curious-user",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,112 @@
|
||||
{
|
||||
"action": "opened",
|
||||
"issue": {
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345",
|
||||
"repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/labels{/name}",
|
||||
"comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/comments",
|
||||
"events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/events",
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT/issues/12345",
|
||||
"id": 2000000001,
|
||||
"node_id": "I_kwDOJKSTjM5wXXXX",
|
||||
"number": 12345,
|
||||
"title": "Bug: Application crashes when processing large files",
|
||||
"user": {
|
||||
"login": "bug-reporter",
|
||||
"id": 11111111,
|
||||
"node_id": "MDQ6VXNlcjExMTExMTEx",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
|
||||
"url": "https://api.github.com/users/bug-reporter",
|
||||
"html_url": "https://github.com/bug-reporter",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
},
|
||||
"labels": [
|
||||
{
|
||||
"id": 5272676214,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABOkandg",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/bug",
|
||||
"name": "bug",
|
||||
"color": "d73a4a",
|
||||
"default": true,
|
||||
"description": "Something isn't working"
|
||||
}
|
||||
],
|
||||
"state": "open",
|
||||
"locked": false,
|
||||
"assignee": null,
|
||||
"assignees": [],
|
||||
"milestone": null,
|
||||
"comments": 0,
|
||||
"created_at": "2024-12-01T16:00:00Z",
|
||||
"updated_at": "2024-12-01T16:00:00Z",
|
||||
"closed_at": null,
|
||||
"author_association": "NONE",
|
||||
"active_lock_reason": null,
|
||||
"body": "## Description\n\nWhen I try to process a file larger than 100MB, the application crashes with an out of memory error.\n\n## Steps to Reproduce\n\n1. Open the application\n2. Select a file larger than 100MB\n3. Click 'Process'\n4. Application crashes\n\n## Expected Behavior\n\nThe application should handle large files gracefully.\n\n## Environment\n\n- OS: Ubuntu 22.04\n- Python: 3.11\n- AutoGPT Version: 1.0.0",
|
||||
"reactions": {
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/reactions",
|
||||
"total_count": 0,
|
||||
"+1": 0,
|
||||
"-1": 0,
|
||||
"laugh": 0,
|
||||
"hooray": 0,
|
||||
"confused": 0,
|
||||
"heart": 0,
|
||||
"rocket": 0,
|
||||
"eyes": 0
|
||||
},
|
||||
"timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/timeline",
|
||||
"state_reason": null
|
||||
},
|
||||
"repository": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"type": "Organization",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-12-01T16:00:00Z",
|
||||
"pushed_at": "2024-12-01T12:00:00Z",
|
||||
"stargazers_count": 170000,
|
||||
"watchers_count": 170000,
|
||||
"language": "Python",
|
||||
"forks_count": 45000,
|
||||
"open_issues_count": 190,
|
||||
"visibility": "public",
|
||||
"default_branch": "master"
|
||||
},
|
||||
"organization": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"url": "https://api.github.com/orgs/Significant-Gravitas",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"description": ""
|
||||
},
|
||||
"sender": {
|
||||
"login": "bug-reporter",
|
||||
"id": 11111111,
|
||||
"node_id": "MDQ6VXNlcjExMTExMTEx",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/bug-reporter",
|
||||
"html_url": "https://github.com/bug-reporter",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
{
|
||||
"action": "published",
|
||||
"release": {
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789",
|
||||
"assets_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets",
|
||||
"upload_url": "https://uploads.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets{?name,label}",
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/tag/v1.0.0",
|
||||
"id": 123456789,
|
||||
"author": {
|
||||
"login": "ntindle",
|
||||
"id": 12345678,
|
||||
"node_id": "MDQ6VXNlcjEyMzQ1Njc4",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/ntindle",
|
||||
"html_url": "https://github.com/ntindle",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
},
|
||||
"node_id": "RE_kwDOJKSTjM4HWwAA",
|
||||
"tag_name": "v1.0.0",
|
||||
"target_commitish": "master",
|
||||
"name": "AutoGPT Platform v1.0.0",
|
||||
"draft": false,
|
||||
"prerelease": false,
|
||||
"created_at": "2024-12-01T10:00:00Z",
|
||||
"published_at": "2024-12-01T12:00:00Z",
|
||||
"assets": [
|
||||
{
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/assets/987654321",
|
||||
"id": 987654321,
|
||||
"node_id": "RA_kwDOJKSTjM4HWwBB",
|
||||
"name": "autogpt-v1.0.0.zip",
|
||||
"label": "Release Package",
|
||||
"content_type": "application/zip",
|
||||
"state": "uploaded",
|
||||
"size": 52428800,
|
||||
"download_count": 0,
|
||||
"created_at": "2024-12-01T11:30:00Z",
|
||||
"updated_at": "2024-12-01T11:35:00Z",
|
||||
"browser_download_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/download/v1.0.0/autogpt-v1.0.0.zip"
|
||||
}
|
||||
],
|
||||
"tarball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tarball/v1.0.0",
|
||||
"zipball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/zipball/v1.0.0",
|
||||
"body": "## What's New\n\n- Feature 1: Amazing new capability\n- Feature 2: Performance improvements\n- Bug fixes and stability improvements\n\n## Breaking Changes\n\nNone\n\n## Contributors\n\nThanks to all our contributors!"
|
||||
},
|
||||
"repository": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"type": "Organization",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-12-01T12:00:00Z",
|
||||
"pushed_at": "2024-12-01T12:00:00Z",
|
||||
"stargazers_count": 170000,
|
||||
"watchers_count": 170000,
|
||||
"language": "Python",
|
||||
"forks_count": 45000,
|
||||
"visibility": "public",
|
||||
"default_branch": "master"
|
||||
},
|
||||
"organization": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"url": "https://api.github.com/orgs/Significant-Gravitas",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"description": ""
|
||||
},
|
||||
"sender": {
|
||||
"login": "ntindle",
|
||||
"id": 12345678,
|
||||
"node_id": "MDQ6VXNlcjEyMzQ1Njc4",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/ntindle",
|
||||
"html_url": "https://github.com/ntindle",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
{
|
||||
"action": "created",
|
||||
"starred_at": "2024-12-01T15:30:00Z",
|
||||
"repository": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"type": "Organization",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-12-01T15:30:00Z",
|
||||
"pushed_at": "2024-12-01T12:00:00Z",
|
||||
"stargazers_count": 170001,
|
||||
"watchers_count": 170001,
|
||||
"language": "Python",
|
||||
"forks_count": 45000,
|
||||
"visibility": "public",
|
||||
"default_branch": "master"
|
||||
},
|
||||
"organization": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"url": "https://api.github.com/orgs/Significant-Gravitas",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"description": ""
|
||||
},
|
||||
"sender": {
|
||||
"login": "awesome-contributor",
|
||||
"id": 98765432,
|
||||
"node_id": "MDQ6VXNlcjk4NzY1NDMy",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/98765432?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/awesome-contributor",
|
||||
"html_url": "https://github.com/awesome-contributor",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -159,3 +159,391 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
|
||||
|
||||
|
||||
# --8<-- [end:GithubTriggerExample]
|
||||
|
||||
|
||||
class GithubStarTriggerBlock(GitHubTriggerBase, Block):
|
||||
"""Trigger block for GitHub star events - useful for milestone celebrations."""
|
||||
|
||||
EXAMPLE_PAYLOAD_FILE = (
|
||||
Path(__file__).parent / "example_payloads" / "star.created.json"
|
||||
)
|
||||
|
||||
class Input(GitHubTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""
|
||||
https://docs.github.com/en/webhooks/webhook-events-and-payloads#star
|
||||
"""
|
||||
|
||||
created: bool = False
|
||||
deleted: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Events", description="The star events to subscribe to"
|
||||
)
|
||||
|
||||
class Output(GitHubTriggerBase.Output):
|
||||
event: str = SchemaField(
|
||||
description="The star event that triggered the webhook ('created' or 'deleted')"
|
||||
)
|
||||
starred_at: str = SchemaField(
|
||||
description="ISO timestamp when the repo was starred (empty if deleted)"
|
||||
)
|
||||
stargazers_count: int = SchemaField(
|
||||
description="Current number of stars on the repository"
|
||||
)
|
||||
repository_name: str = SchemaField(
|
||||
description="Full name of the repository (owner/repo)"
|
||||
)
|
||||
repository_url: str = SchemaField(description="URL to the repository")
|
||||
|
||||
def __init__(self):
|
||||
from backend.integrations.webhooks.github import GithubWebhookType
|
||||
|
||||
example_payload = json.loads(
|
||||
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
id="551e0a35-100b-49b7-89b8-3031322239b6",
|
||||
description="This block triggers on GitHub star events. "
|
||||
"Useful for celebrating milestones (e.g., 1k, 10k stars) or tracking engagement.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
|
||||
input_schema=GithubStarTriggerBlock.Input,
|
||||
output_schema=GithubStarTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.GITHUB,
|
||||
webhook_type=GithubWebhookType.REPO,
|
||||
resource_format="{repo}",
|
||||
event_filter_input="events",
|
||||
event_format="star.{event}",
|
||||
),
|
||||
test_input={
|
||||
"repo": "Significant-Gravitas/AutoGPT",
|
||||
"events": {"created": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": example_payload,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", example_payload),
|
||||
("triggered_by_user", example_payload["sender"]),
|
||||
("event", example_payload["action"]),
|
||||
("starred_at", example_payload.get("starred_at", "")),
|
||||
("stargazers_count", example_payload["repository"]["stargazers_count"]),
|
||||
("repository_name", example_payload["repository"]["full_name"]),
|
||||
("repository_url", example_payload["repository"]["html_url"]),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
|
||||
async for name, value in super().run(input_data, **kwargs):
|
||||
yield name, value
|
||||
yield "event", input_data.payload["action"]
|
||||
yield "starred_at", input_data.payload.get("starred_at", "")
|
||||
yield "stargazers_count", input_data.payload["repository"]["stargazers_count"]
|
||||
yield "repository_name", input_data.payload["repository"]["full_name"]
|
||||
yield "repository_url", input_data.payload["repository"]["html_url"]
|
||||
|
||||
|
||||
class GithubReleaseTriggerBlock(GitHubTriggerBase, Block):
|
||||
"""Trigger block for GitHub release events - ideal for announcing new versions."""
|
||||
|
||||
EXAMPLE_PAYLOAD_FILE = (
|
||||
Path(__file__).parent / "example_payloads" / "release.published.json"
|
||||
)
|
||||
|
||||
class Input(GitHubTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""
|
||||
https://docs.github.com/en/webhooks/webhook-events-and-payloads#release
|
||||
"""
|
||||
|
||||
published: bool = False
|
||||
unpublished: bool = False
|
||||
created: bool = False
|
||||
edited: bool = False
|
||||
deleted: bool = False
|
||||
prereleased: bool = False
|
||||
released: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Events", description="The release events to subscribe to"
|
||||
)
|
||||
|
||||
class Output(GitHubTriggerBase.Output):
|
||||
event: str = SchemaField(
|
||||
description="The release event that triggered the webhook (e.g., 'published')"
|
||||
)
|
||||
release: dict = SchemaField(description="The full release object")
|
||||
release_url: str = SchemaField(description="URL to the release page")
|
||||
tag_name: str = SchemaField(description="The release tag name (e.g., 'v1.0.0')")
|
||||
release_name: str = SchemaField(description="Human-readable release name")
|
||||
body: str = SchemaField(description="Release notes/description")
|
||||
prerelease: bool = SchemaField(description="Whether this is a prerelease")
|
||||
draft: bool = SchemaField(description="Whether this is a draft release")
|
||||
assets: list = SchemaField(description="List of release assets/files")
|
||||
|
||||
def __init__(self):
|
||||
from backend.integrations.webhooks.github import GithubWebhookType
|
||||
|
||||
example_payload = json.loads(
|
||||
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
id="2052dd1b-74e1-46ac-9c87-c7a0e057b60b",
|
||||
description="This block triggers on GitHub release events. "
|
||||
"Perfect for automating announcements to Discord, Twitter, or other platforms.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
|
||||
input_schema=GithubReleaseTriggerBlock.Input,
|
||||
output_schema=GithubReleaseTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.GITHUB,
|
||||
webhook_type=GithubWebhookType.REPO,
|
||||
resource_format="{repo}",
|
||||
event_filter_input="events",
|
||||
event_format="release.{event}",
|
||||
),
|
||||
test_input={
|
||||
"repo": "Significant-Gravitas/AutoGPT",
|
||||
"events": {"published": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": example_payload,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", example_payload),
|
||||
("triggered_by_user", example_payload["sender"]),
|
||||
("event", example_payload["action"]),
|
||||
("release", example_payload["release"]),
|
||||
("release_url", example_payload["release"]["html_url"]),
|
||||
("tag_name", example_payload["release"]["tag_name"]),
|
||||
("release_name", example_payload["release"]["name"]),
|
||||
("body", example_payload["release"]["body"]),
|
||||
("prerelease", example_payload["release"]["prerelease"]),
|
||||
("draft", example_payload["release"]["draft"]),
|
||||
("assets", example_payload["release"]["assets"]),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
|
||||
async for name, value in super().run(input_data, **kwargs):
|
||||
yield name, value
|
||||
release = input_data.payload["release"]
|
||||
yield "event", input_data.payload["action"]
|
||||
yield "release", release
|
||||
yield "release_url", release["html_url"]
|
||||
yield "tag_name", release["tag_name"]
|
||||
yield "release_name", release.get("name", "")
|
||||
yield "body", release.get("body", "")
|
||||
yield "prerelease", release["prerelease"]
|
||||
yield "draft", release["draft"]
|
||||
yield "assets", release["assets"]
|
||||
|
||||
|
||||
class GithubIssuesTriggerBlock(GitHubTriggerBase, Block):
|
||||
"""Trigger block for GitHub issues events - great for triage and notifications."""
|
||||
|
||||
EXAMPLE_PAYLOAD_FILE = (
|
||||
Path(__file__).parent / "example_payloads" / "issues.opened.json"
|
||||
)
|
||||
|
||||
class Input(GitHubTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""
|
||||
https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues
|
||||
"""
|
||||
|
||||
opened: bool = False
|
||||
edited: bool = False
|
||||
deleted: bool = False
|
||||
closed: bool = False
|
||||
reopened: bool = False
|
||||
assigned: bool = False
|
||||
unassigned: bool = False
|
||||
labeled: bool = False
|
||||
unlabeled: bool = False
|
||||
locked: bool = False
|
||||
unlocked: bool = False
|
||||
transferred: bool = False
|
||||
milestoned: bool = False
|
||||
demilestoned: bool = False
|
||||
pinned: bool = False
|
||||
unpinned: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Events", description="The issue events to subscribe to"
|
||||
)
|
||||
|
||||
class Output(GitHubTriggerBase.Output):
|
||||
event: str = SchemaField(
|
||||
description="The issue event that triggered the webhook (e.g., 'opened')"
|
||||
)
|
||||
number: int = SchemaField(description="The issue number")
|
||||
issue: dict = SchemaField(description="The full issue object")
|
||||
issue_url: str = SchemaField(description="URL to the issue")
|
||||
issue_title: str = SchemaField(description="The issue title")
|
||||
issue_body: str = SchemaField(description="The issue body/description")
|
||||
labels: list = SchemaField(description="List of labels on the issue")
|
||||
assignees: list = SchemaField(description="List of assignees")
|
||||
state: str = SchemaField(description="Issue state ('open' or 'closed')")
|
||||
|
||||
def __init__(self):
|
||||
from backend.integrations.webhooks.github import GithubWebhookType
|
||||
|
||||
example_payload = json.loads(
|
||||
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
id="b2605464-e486-4bf4-aad3-d8a213c8a48a",
|
||||
description="This block triggers on GitHub issues events. "
|
||||
"Useful for automated triage, notifications, and welcoming first-time contributors.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
|
||||
input_schema=GithubIssuesTriggerBlock.Input,
|
||||
output_schema=GithubIssuesTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.GITHUB,
|
||||
webhook_type=GithubWebhookType.REPO,
|
||||
resource_format="{repo}",
|
||||
event_filter_input="events",
|
||||
event_format="issues.{event}",
|
||||
),
|
||||
test_input={
|
||||
"repo": "Significant-Gravitas/AutoGPT",
|
||||
"events": {"opened": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": example_payload,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", example_payload),
|
||||
("triggered_by_user", example_payload["sender"]),
|
||||
("event", example_payload["action"]),
|
||||
("number", example_payload["issue"]["number"]),
|
||||
("issue", example_payload["issue"]),
|
||||
("issue_url", example_payload["issue"]["html_url"]),
|
||||
("issue_title", example_payload["issue"]["title"]),
|
||||
("issue_body", example_payload["issue"]["body"]),
|
||||
("labels", example_payload["issue"]["labels"]),
|
||||
("assignees", example_payload["issue"]["assignees"]),
|
||||
("state", example_payload["issue"]["state"]),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
|
||||
async for name, value in super().run(input_data, **kwargs):
|
||||
yield name, value
|
||||
issue = input_data.payload["issue"]
|
||||
yield "event", input_data.payload["action"]
|
||||
yield "number", issue["number"]
|
||||
yield "issue", issue
|
||||
yield "issue_url", issue["html_url"]
|
||||
yield "issue_title", issue["title"]
|
||||
yield "issue_body", issue.get("body") or ""
|
||||
yield "labels", issue["labels"]
|
||||
yield "assignees", issue["assignees"]
|
||||
yield "state", issue["state"]
|
||||
|
||||
|
||||
class GithubDiscussionTriggerBlock(GitHubTriggerBase, Block):
|
||||
"""Trigger block for GitHub discussion events - perfect for community Q&A sync."""
|
||||
|
||||
EXAMPLE_PAYLOAD_FILE = (
|
||||
Path(__file__).parent / "example_payloads" / "discussion.created.json"
|
||||
)
|
||||
|
||||
class Input(GitHubTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""
|
||||
https://docs.github.com/en/webhooks/webhook-events-and-payloads#discussion
|
||||
"""
|
||||
|
||||
created: bool = False
|
||||
edited: bool = False
|
||||
deleted: bool = False
|
||||
answered: bool = False
|
||||
unanswered: bool = False
|
||||
labeled: bool = False
|
||||
unlabeled: bool = False
|
||||
locked: bool = False
|
||||
unlocked: bool = False
|
||||
category_changed: bool = False
|
||||
transferred: bool = False
|
||||
pinned: bool = False
|
||||
unpinned: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Events", description="The discussion events to subscribe to"
|
||||
)
|
||||
|
||||
class Output(GitHubTriggerBase.Output):
|
||||
event: str = SchemaField(
|
||||
description="The discussion event that triggered the webhook"
|
||||
)
|
||||
number: int = SchemaField(description="The discussion number")
|
||||
discussion: dict = SchemaField(description="The full discussion object")
|
||||
discussion_url: str = SchemaField(description="URL to the discussion")
|
||||
title: str = SchemaField(description="The discussion title")
|
||||
body: str = SchemaField(description="The discussion body")
|
||||
category: dict = SchemaField(description="The discussion category object")
|
||||
category_name: str = SchemaField(description="Name of the category")
|
||||
state: str = SchemaField(description="Discussion state")
|
||||
|
||||
def __init__(self):
|
||||
from backend.integrations.webhooks.github import GithubWebhookType
|
||||
|
||||
example_payload = json.loads(
|
||||
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
id="87f847b3-d81a-424e-8e89-acadb5c9d52b",
|
||||
description="This block triggers on GitHub Discussions events. "
|
||||
"Great for syncing Q&A to Discord or auto-responding to common questions. "
|
||||
"Note: Discussions must be enabled on the repository.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
|
||||
input_schema=GithubDiscussionTriggerBlock.Input,
|
||||
output_schema=GithubDiscussionTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.GITHUB,
|
||||
webhook_type=GithubWebhookType.REPO,
|
||||
resource_format="{repo}",
|
||||
event_filter_input="events",
|
||||
event_format="discussion.{event}",
|
||||
),
|
||||
test_input={
|
||||
"repo": "Significant-Gravitas/AutoGPT",
|
||||
"events": {"created": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": example_payload,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", example_payload),
|
||||
("triggered_by_user", example_payload["sender"]),
|
||||
("event", example_payload["action"]),
|
||||
("number", example_payload["discussion"]["number"]),
|
||||
("discussion", example_payload["discussion"]),
|
||||
("discussion_url", example_payload["discussion"]["html_url"]),
|
||||
("title", example_payload["discussion"]["title"]),
|
||||
("body", example_payload["discussion"]["body"]),
|
||||
("category", example_payload["discussion"]["category"]),
|
||||
("category_name", example_payload["discussion"]["category"]["name"]),
|
||||
("state", example_payload["discussion"]["state"]),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
|
||||
async for name, value in super().run(input_data, **kwargs):
|
||||
yield name, value
|
||||
discussion = input_data.payload["discussion"]
|
||||
yield "event", input_data.payload["action"]
|
||||
yield "number", discussion["number"]
|
||||
yield "discussion", discussion
|
||||
yield "discussion_url", discussion["html_url"]
|
||||
yield "title", discussion["title"]
|
||||
yield "body", discussion.get("body") or ""
|
||||
yield "category", discussion["category"]
|
||||
yield "category_name", discussion["category"]["name"]
|
||||
yield "state", discussion["state"]
|
||||
|
||||
File diff suppressed because it is too large
Human-in-the-loop block (file path not shown in this view):

@@ -1,5 +1,5 @@
import logging
from typing import Any, Literal
from typing import Any

from prisma.enums import ReviewStatus

@@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block):
)

class Output(BlockSchemaOutput):
reviewed_data: Any = SchemaField(
description="The data after human review (may be modified)"
approved_data: Any = SchemaField(
description="The data when approved (may be modified by reviewer)"
)
status: Literal["approved", "rejected"] = SchemaField(
description="Status of the review: 'approved' or 'rejected'"
rejected_data: Any = SchemaField(
description="The data when rejected (may be modified by reviewer)"
)
review_message: str = SchemaField(
description="Any message provided by the reviewer", default=""

@@ -69,8 +69,7 @@ class HumanInTheLoopBlock(Block):
"editable": True,
},
test_output=[
("status", "approved"),
("reviewed_data", {"name": "John Doe", "age": 30}),
("approved_data", {"name": "John Doe", "age": 30}),
],
test_mock={
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(

@@ -116,8 +115,7 @@ class HumanInTheLoopBlock(Block):
logger.info(
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
)
yield "status", "approved"
yield "reviewed_data", input_data.data
yield "approved_data", input_data.data
yield "review_message", "Auto-approved (safe mode disabled)"
return

@@ -158,12 +156,11 @@ class HumanInTheLoopBlock(Block):
)

if result.status == ReviewStatus.APPROVED:
yield "status", "approved"
yield "reviewed_data", result.data
yield "approved_data", result.data
if result.message:
yield "review_message", result.message

elif result.status == ReviewStatus.REJECTED:
yield "status", "rejected"
yield "rejected_data", result.data
if result.message:
yield "review_message", result.message
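For readers tracking the interface change above: the old Output exposed a `status` literal plus a single `reviewed_data` field, while the new Output emits separate `approved_data` and `rejected_data` outputs (with an optional `review_message`), so downstream nodes can branch simply by which pin fires. A hedged sketch of how a consumer of these outputs might branch is shown below; the function and the dict-of-outputs shape are hypothetical illustrations, not the platform's actual API.

```python
from typing import Any

def route_review_outputs(outputs: dict[str, Any]) -> str:
    """Illustration only: branch on which HITL output was emitted.

    With the new schema, an approval emits "approved_data" and a rejection
    emits "rejected_data"; "review_message" may accompany either one.
    """
    message = outputs.get("review_message", "")
    if "approved_data" in outputs:
        return f"approved: {outputs['approved_data']} ({message})"
    if "rejected_data" in outputs:
        return f"rejected: {outputs['rejected_data']} ({message})"
    return "no review output yet"

# Example, mirroring the block's test data:
print(route_review_outputs({"approved_data": {"name": "John Doe", "age": 30}}))
```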
Reddit block (file path not shown in this view):

@@ -1,3 +1,4 @@
import logging
from datetime import datetime, timezone
from typing import Iterator, Literal

@@ -64,6 +65,7 @@ class RedditComment(BaseModel):


settings = Settings()
logger = logging.getLogger(__name__)


def get_praw(creds: RedditCredentials) -> praw.Reddit:

@@ -77,7 +79,7 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:
me = client.user.me()
if not me:
raise ValueError("Invalid Reddit credentials.")
print(f"Logged in as Reddit user: {me.name}")
logger.info(f"Logged in as Reddit user: {me.name}")
return client
@@ -1,8 +1,11 @@
|
||||
import logging
|
||||
import re
|
||||
from collections import Counter
|
||||
from concurrent.futures import Future
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
import backend.blocks.llm as llm
|
||||
from backend.blocks.agent import AgentExecutorBlock
|
||||
from backend.data.block import (
|
||||
@@ -20,16 +23,41 @@ from backend.data.dynamic_fields import (
|
||||
is_dynamic_field,
|
||||
is_tool_pin,
|
||||
)
|
||||
from backend.data.execution import ExecutionContext
|
||||
from backend.data.model import NodeExecutionStats, SchemaField
|
||||
from backend.util import json
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
from backend.util.prompt import MAIN_OBJECTIVE_PREFIX
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.data.graph import Link, Node
|
||||
from backend.executor.manager import ExecutionProcessor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolInfo(BaseModel):
|
||||
"""Processed tool call information."""
|
||||
|
||||
tool_call: Any # The original tool call object from LLM response
|
||||
tool_name: str # The function name
|
||||
tool_def: dict[str, Any] # The tool definition from tool_functions
|
||||
input_data: dict[str, Any] # Processed input data ready for tool execution
|
||||
field_mapping: dict[str, str] # Field name mapping for the tool
|
||||
|
||||
|
||||
class ExecutionParams(BaseModel):
|
||||
"""Tool execution parameters."""
|
||||
|
||||
user_id: str
|
||||
graph_id: str
|
||||
node_id: str
|
||||
graph_version: int
|
||||
graph_exec_id: str
|
||||
node_exec_id: str
|
||||
execution_context: "ExecutionContext"
|
||||
|
||||
|
||||
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
|
||||
"""
|
||||
Return a list of tool_call_ids if the entry is a tool request.
|
||||
@@ -105,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
|
||||
return {"role": "tool", "tool_call_id": call_id, "content": content}
|
||||
|
||||
|
||||
def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Combine multiple Anthropic tool responses into a single user message.
|
||||
For non-Anthropic formats, returns the original list unchanged.
|
||||
"""
|
||||
if len(tool_outputs) <= 1:
|
||||
return tool_outputs
|
||||
|
||||
# Anthropic responses have role="user", type="message", and content is a list with tool_result items
|
||||
anthropic_responses = [
|
||||
output
|
||||
for output in tool_outputs
|
||||
if (
|
||||
output.get("role") == "user"
|
||||
and output.get("type") == "message"
|
||||
and isinstance(output.get("content"), list)
|
||||
and any(
|
||||
item.get("type") == "tool_result"
|
||||
for item in output.get("content", [])
|
||||
if isinstance(item, dict)
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
if len(anthropic_responses) > 1:
|
||||
combined_content = [
|
||||
item for response in anthropic_responses for item in response["content"]
|
||||
]
|
||||
|
||||
combined_response = {
|
||||
"role": "user",
|
||||
"type": "message",
|
||||
"content": combined_content,
|
||||
}
|
||||
|
||||
non_anthropic_responses = [
|
||||
output for output in tool_outputs if output not in anthropic_responses
|
||||
]
|
||||
|
||||
return [combined_response] + non_anthropic_responses
|
||||
|
||||
return tool_outputs
|
||||
|
||||
|
||||
def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
|
||||
"""
|
||||
Safely convert raw_response to dictionary format for conversation history.
|
||||
@@ -204,6 +276,17 @@ class SmartDecisionMakerBlock(Block):
|
||||
default="localhost:11434",
|
||||
description="Ollama host for local models",
|
||||
)
|
||||
agent_mode_max_iterations: int = SchemaField(
|
||||
title="Agent Mode Max Iterations",
|
||||
description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.",
|
||||
advanced=True,
|
||||
default=0,
|
||||
)
|
||||
conversation_compaction: bool = SchemaField(
|
||||
default=True,
|
||||
title="Context window auto-compaction",
|
||||
description="Automatically compact the context window once it hits the limit",
|
||||
)
|
||||
|
||||
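    # Sketch of how run() dispatches on agent_mode_max_iterations further below
    # (illustrative only; it mirrors the loop in _execute_tools_agent_mode):
    #
    #     if input_data.agent_mode_max_iterations == 0:
    #         ...  # traditional mode: yield tool calls for external execution
    #     else:
    #         # -1 = no cap, N > 0 = cap the LLM/tool loop at N iterations
    #         while max_iterations < 0 or iteration < max_iterations:
    #             ...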
@classmethod
|
||||
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
|
||||
@@ -506,6 +589,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
Returns the response if successful, raises ValueError if validation fails.
|
||||
"""
|
||||
resp = await llm.llm_call(
|
||||
compress_prompt_to_fit=input_data.conversation_compaction,
|
||||
credentials=credentials,
|
||||
llm_model=input_data.model,
|
||||
prompt=current_prompt,
|
||||
@@ -593,6 +677,291 @@ class SmartDecisionMakerBlock(Block):
|
||||
|
||||
return resp
|
||||
|
||||
def _process_tool_calls(
|
||||
self, response, tool_functions: list[dict[str, Any]]
|
||||
) -> list[ToolInfo]:
|
||||
"""Process tool calls and extract tool definitions, arguments, and input data.
|
||||
|
||||
Returns a list of tool info dicts with:
|
||||
- tool_call: The original tool call object
|
||||
- tool_name: The function name
|
||||
- tool_def: The tool definition from tool_functions
|
||||
- input_data: Processed input data dict (includes None values)
|
||||
- field_mapping: Field name mapping for the tool
|
||||
"""
|
||||
if not response.tool_calls:
|
||||
return []
|
||||
|
||||
processed_tools = []
|
||||
for tool_call in response.tool_calls:
|
||||
tool_name = tool_call.function.name
|
||||
tool_args = json.loads(tool_call.function.arguments)
|
||||
|
||||
tool_def = next(
|
||||
(
|
||||
tool
|
||||
for tool in tool_functions
|
||||
if tool["function"]["name"] == tool_name
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not tool_def:
|
||||
if len(tool_functions) == 1:
|
||||
tool_def = tool_functions[0]
|
||||
else:
|
||||
continue
|
||||
|
||||
# Build input data for the tool
|
||||
input_data = {}
|
||||
field_mapping = tool_def["function"].get("_field_mapping", {})
|
||||
if "function" in tool_def and "parameters" in tool_def["function"]:
|
||||
expected_args = tool_def["function"]["parameters"].get("properties", {})
|
||||
for clean_arg_name in expected_args:
|
||||
original_field_name = field_mapping.get(
|
||||
clean_arg_name, clean_arg_name
|
||||
)
|
||||
arg_value = tool_args.get(clean_arg_name)
|
||||
# Include all expected parameters, even if None (for backward compatibility with tests)
|
||||
input_data[original_field_name] = arg_value
|
||||
|
||||
processed_tools.append(
|
||||
ToolInfo(
|
||||
tool_call=tool_call,
|
||||
tool_name=tool_name,
|
||||
tool_def=tool_def,
|
||||
input_data=input_data,
|
||||
field_mapping=field_mapping,
|
||||
)
|
||||
)
|
||||
|
||||
return processed_tools
|
||||
|
||||
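# Illustrative example of the field-mapping step in _process_tool_calls above,
# with made-up names: the LLM sees a cleaned argument name, while the sink node
# expects the original (possibly dynamic) field name.
#
#     field_mapping = {"name": "values_#_name"}
#     tool_args = {"name": "Alice"}
#     # -> input_data == {"values_#_name": "Alice"}, ready to be passed to
#     #    upsert_execution_input for the sink node
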
def _update_conversation(
|
||||
self, prompt: list[dict], response, tool_outputs: list | None = None
|
||||
):
|
||||
"""Update conversation history with response and tool outputs."""
|
||||
# Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing)
|
||||
assistant_message = _convert_raw_response_to_dict(response.raw_response)
|
||||
has_tool_calls = isinstance(assistant_message.get("content"), list) and any(
|
||||
item.get("type") == "tool_use"
|
||||
for item in assistant_message.get("content", [])
|
||||
)
|
||||
|
||||
if response.reasoning and not has_tool_calls:
|
||||
prompt.append(
|
||||
{"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
|
||||
)
|
||||
|
||||
prompt.append(assistant_message)
|
||||
|
||||
if tool_outputs:
|
||||
prompt.extend(tool_outputs)
|
||||
|
||||
async def _execute_single_tool_with_manager(
|
||||
self,
|
||||
tool_info: ToolInfo,
|
||||
execution_params: ExecutionParams,
|
||||
execution_processor: "ExecutionProcessor",
|
||||
) -> dict:
|
||||
"""Execute a single tool using the execution manager for proper integration."""
|
||||
# Lazy imports to avoid circular dependencies
|
||||
from backend.data.execution import NodeExecutionEntry
|
||||
|
||||
tool_call = tool_info.tool_call
|
||||
tool_def = tool_info.tool_def
|
||||
raw_input_data = tool_info.input_data
|
||||
|
||||
# Get sink node and field mapping
|
||||
sink_node_id = tool_def["function"]["_sink_node_id"]
|
||||
|
||||
# Use proper database operations for tool execution
|
||||
db_client = get_database_manager_async_client()
|
||||
|
||||
# Get target node
|
||||
target_node = await db_client.get_node(sink_node_id)
|
||||
if not target_node:
|
||||
raise ValueError(f"Target node {sink_node_id} not found")
|
||||
|
||||
# Create proper node execution using upsert_execution_input
|
||||
node_exec_result = None
|
||||
final_input_data = None
|
||||
|
||||
# Add all inputs to the execution
|
||||
if not raw_input_data:
|
||||
raise ValueError(f"Tool call has no input data: {tool_call}")
|
||||
|
||||
for input_name, input_value in raw_input_data.items():
|
||||
node_exec_result, final_input_data = await db_client.upsert_execution_input(
|
||||
node_id=sink_node_id,
|
||||
graph_exec_id=execution_params.graph_exec_id,
|
||||
input_name=input_name,
|
||||
input_data=input_value,
|
||||
)
|
||||
|
||||
assert node_exec_result is not None, "node_exec_result should not be None"
|
||||
|
||||
# Create NodeExecutionEntry for execution manager
|
||||
node_exec_entry = NodeExecutionEntry(
|
||||
user_id=execution_params.user_id,
|
||||
graph_exec_id=execution_params.graph_exec_id,
|
||||
graph_id=execution_params.graph_id,
|
||||
graph_version=execution_params.graph_version,
|
||||
node_exec_id=node_exec_result.node_exec_id,
|
||||
node_id=sink_node_id,
|
||||
block_id=target_node.block_id,
|
||||
inputs=final_input_data or {},
|
||||
execution_context=execution_params.execution_context,
|
||||
)
|
||||
|
||||
# Use the execution manager to execute the tool node
|
||||
try:
|
||||
# Get NodeExecutionProgress from the execution manager's running nodes
|
||||
node_exec_progress = execution_processor.running_node_execution[
|
||||
sink_node_id
|
||||
]
|
||||
|
||||
# Use the execution manager's own graph stats
|
||||
graph_stats_pair = (
|
||||
execution_processor.execution_stats,
|
||||
execution_processor.execution_stats_lock,
|
||||
)
|
||||
|
||||
# Create a completed future for the task tracking system
|
||||
node_exec_future = Future()
|
||||
node_exec_progress.add_task(
|
||||
node_exec_id=node_exec_result.node_exec_id,
|
||||
task=node_exec_future,
|
||||
)
|
||||
|
||||
# Execute the node directly since we're in the SmartDecisionMaker context
|
||||
node_exec_future.set_result(
|
||||
await execution_processor.on_node_execution(
|
||||
node_exec=node_exec_entry,
|
||||
node_exec_progress=node_exec_progress,
|
||||
nodes_input_masks=None,
|
||||
graph_stats_pair=graph_stats_pair,
|
||||
)
|
||||
)
|
||||
|
||||
# Get outputs from database after execution completes using database manager client
|
||||
node_outputs = await db_client.get_execution_outputs_by_node_exec_id(
|
||||
node_exec_result.node_exec_id
|
||||
)
|
||||
|
||||
# Create tool response
|
||||
tool_response_content = (
|
||||
json.dumps(node_outputs)
|
||||
if node_outputs
|
||||
else "Tool executed successfully"
|
||||
)
|
||||
return _create_tool_response(tool_call.id, tool_response_content)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Tool execution with manager failed: {e}")
|
||||
# Return error response
|
||||
return _create_tool_response(
|
||||
tool_call.id, f"Tool execution failed: {str(e)}"
|
||||
)
|
||||
|
||||
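# Shape of the tool message returned by the helper above, following the
# _create_tool_response return statement shown earlier in this module
# ("call_1" would be the placeholder call id):
#   success -> content is json.dumps(node_outputs), or the fallback string
#              "Tool executed successfully" when the node produced no outputs
#   failure -> content is "Tool execution failed: <exception message>"
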
async def _execute_tools_agent_mode(
|
||||
self,
|
||||
input_data,
|
||||
credentials,
|
||||
tool_functions: list[dict[str, Any]],
|
||||
prompt: list[dict],
|
||||
graph_exec_id: str,
|
||||
node_id: str,
|
||||
node_exec_id: str,
|
||||
user_id: str,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
execution_context: ExecutionContext,
|
||||
execution_processor: "ExecutionProcessor",
|
||||
):
|
||||
"""Execute tools in agent mode with a loop until finished."""
|
||||
max_iterations = input_data.agent_mode_max_iterations
|
||||
iteration = 0
|
||||
|
||||
# Execution parameters for tool execution
|
||||
execution_params = ExecutionParams(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
node_id=node_id,
|
||||
graph_version=graph_version,
|
||||
graph_exec_id=graph_exec_id,
|
||||
node_exec_id=node_exec_id,
|
||||
execution_context=execution_context,
|
||||
)
|
||||
|
||||
current_prompt = list(prompt)
|
||||
|
||||
while max_iterations < 0 or iteration < max_iterations:
|
||||
iteration += 1
|
||||
logger.debug(f"Agent mode iteration {iteration}")
|
||||
|
||||
# Prepare prompt for this iteration
|
||||
iteration_prompt = list(current_prompt)
|
||||
|
||||
# On the last iteration, add a special system message to encourage completion
|
||||
if max_iterations > 0 and iteration == max_iterations:
|
||||
last_iteration_message = {
|
||||
"role": "system",
|
||||
"content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). "
|
||||
"Try to complete the task with the information you have. If you cannot fully complete it, "
|
||||
"provide a summary of what you've accomplished and what remains to be done. "
|
||||
"Prefer finishing with a clear response rather than making additional tool calls.",
|
||||
}
|
||||
iteration_prompt.append(last_iteration_message)
|
||||
|
||||
# Get LLM response
|
||||
try:
|
||||
response = await self._attempt_llm_call_with_validation(
|
||||
credentials, input_data, iteration_prompt, tool_functions
|
||||
)
|
||||
except Exception as e:
|
||||
yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}"
|
||||
return
|
||||
|
||||
# Process tool calls
|
||||
processed_tools = self._process_tool_calls(response, tool_functions)
|
||||
|
||||
# If no tool calls, we're done
|
||||
if not processed_tools:
|
||||
yield "finished", response.response
|
||||
self._update_conversation(current_prompt, response)
|
||||
yield "conversations", current_prompt
|
||||
return
|
||||
|
||||
# Execute tools and collect responses
|
||||
tool_outputs = []
|
||||
for tool_info in processed_tools:
|
||||
try:
|
||||
tool_response = await self._execute_single_tool_with_manager(
|
||||
tool_info, execution_params, execution_processor
|
||||
)
|
||||
tool_outputs.append(tool_response)
|
||||
except Exception as e:
|
||||
logger.error(f"Tool execution failed: {e}")
|
||||
# Create error response for the tool
|
||||
error_response = _create_tool_response(
|
||||
tool_info.tool_call.id, f"Error: {str(e)}"
|
||||
)
|
||||
tool_outputs.append(error_response)
|
||||
|
||||
tool_outputs = _combine_tool_responses(tool_outputs)
|
||||
|
||||
self._update_conversation(current_prompt, response, tool_outputs)
|
||||
|
||||
# Yield intermediate conversation state
|
||||
yield "conversations", current_prompt
|
||||
|
||||
# If we reach max iterations, yield the current state
|
||||
if max_iterations < 0:
|
||||
yield "finished", f"Agent mode completed after {iteration} iterations"
|
||||
else:
|
||||
yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)"
|
||||
yield "conversations", current_prompt
|
||||
|
||||
async def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
@@ -603,8 +972,12 @@ class SmartDecisionMakerBlock(Block):
|
||||
graph_exec_id: str,
|
||||
node_exec_id: str,
|
||||
user_id: str,
|
||||
graph_version: int,
|
||||
execution_context: ExecutionContext,
|
||||
execution_processor: "ExecutionProcessor",
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
|
||||
tool_functions = await self._create_tool_node_signatures(node_id)
|
||||
yield "tool_functions", json.dumps(tool_functions)
|
||||
|
||||
@@ -648,24 +1021,52 @@ class SmartDecisionMakerBlock(Block):
|
||||
input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
|
||||
input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)
|
||||
|
||||
prefix = "[Main Objective Prompt]: "
|
||||
|
||||
if input_data.sys_prompt and not any(
|
||||
p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
|
||||
p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
|
||||
for p in prompt
|
||||
):
|
||||
prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})
|
||||
prompt.append(
|
||||
{
|
||||
"role": "system",
|
||||
"content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt,
|
||||
}
|
||||
)
|
||||
|
||||
if input_data.prompt and not any(
|
||||
p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
|
||||
p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX)
|
||||
for p in prompt
|
||||
):
|
||||
prompt.append({"role": "user", "content": prefix + input_data.prompt})
|
||||
prompt.append(
|
||||
{"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt}
|
||||
)
|
||||
|
||||
# Execute tools based on the selected mode
|
||||
if input_data.agent_mode_max_iterations != 0:
|
||||
# In agent mode, execute tools directly in a loop until finished
|
||||
async for result in self._execute_tools_agent_mode(
|
||||
input_data=input_data,
|
||||
credentials=credentials,
|
||||
tool_functions=tool_functions,
|
||||
prompt=prompt,
|
||||
graph_exec_id=graph_exec_id,
|
||||
node_id=node_id,
|
||||
node_exec_id=node_exec_id,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
execution_context=execution_context,
|
||||
execution_processor=execution_processor,
|
||||
):
|
||||
yield result
|
||||
return
|
||||
|
||||
# One-off mode: single LLM call and yield tool calls for external execution
|
||||
current_prompt = list(prompt)
|
||||
max_attempts = max(1, int(input_data.retry))
|
||||
response = None
|
||||
|
||||
last_error = None
|
||||
for attempt in range(max_attempts):
|
||||
for _ in range(max_attempts):
|
||||
try:
|
||||
response = await self._attempt_llm_call_with_validation(
|
||||
credentials, input_data, current_prompt, tool_functions
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
import logging
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.data.execution import ExecutionContext
|
||||
from backend.data.model import ProviderName, User
|
||||
from backend.server.model import CreateGraph
|
||||
from backend.server.rest_api import AgentServer
|
||||
@@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User):
|
||||
|
||||
|
||||
async def create_credentials(s: SpinTestServer, u: User):
|
||||
import backend.blocks.llm as llm
|
||||
import backend.blocks.llm as llm_module
|
||||
|
||||
provider = ProviderName.OPENAI
|
||||
credentials = llm.TEST_CREDENTIALS
|
||||
credentials = llm_module.TEST_CREDENTIALS
|
||||
return await s.agent_server.test_create_credentials(u.id, provider, credentials)
|
||||
|
||||
|
||||
@@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer):
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_tracks_llm_stats():
|
||||
"""Test that SmartDecisionMakerBlock correctly tracks LLM usage stats."""
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import backend.blocks.llm as llm_module
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
|
||||
@@ -216,7 +218,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
}
|
||||
|
||||
# Mock the _create_tool_node_signatures method to avoid database calls
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
@@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
prompt="Should I continue with this task?",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Execute the block
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
@@ -246,6 +256,9 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -263,8 +276,6 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_parameter_validation():
|
||||
"""Test that SmartDecisionMakerBlock correctly validates tool call parameters."""
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import backend.blocks.llm as llm_module
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
|
||||
@@ -311,8 +322,6 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
mock_response_with_typo.reasoning = None
|
||||
mock_response_with_typo.raw_response = {"role": "assistant", "content": None}
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -329,8 +338,17 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
retry=2, # Set retry to 2 for testing
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
# Should raise ValueError after retries due to typo'd parameter name
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
outputs = {}
|
||||
@@ -342,6 +360,9 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -368,8 +389,6 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
mock_response_missing_required.reasoning = None
|
||||
mock_response_missing_required.raw_response = {"role": "assistant", "content": None}
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -385,8 +404,17 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
# Should raise ValueError due to missing required parameter
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
outputs = {}
|
||||
@@ -398,6 +426,9 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -418,8 +449,6 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
mock_response_valid.reasoning = None
|
||||
mock_response_valid.raw_response = {"role": "assistant", "content": None}
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -435,10 +464,19 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Should succeed - optional parameter missing is OK
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
@@ -447,6 +485,9 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -472,8 +513,6 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
mock_response_all_params.reasoning = None
|
||||
mock_response_all_params.raw_response = {"role": "assistant", "content": None}
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -489,10 +528,19 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Should succeed with all parameters
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
@@ -501,6 +549,9 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -513,8 +564,6 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_raw_response_conversion():
|
||||
"""Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism."""
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import backend.blocks.llm as llm_module
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
|
||||
@@ -584,7 +633,6 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
)
|
||||
|
||||
# Mock llm_call to return different responses on different calls
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call", new_callable=AsyncMock
|
||||
@@ -603,10 +651,19 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
retry=2,
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
# Should succeed after retry, demonstrating our helper function works
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
@@ -615,6 +672,9 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -650,8 +710,6 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
"I'll help you with that." # Ollama returns string
|
||||
)
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -666,9 +724,18 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
prompt="Simple prompt",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
@@ -677,6 +744,9 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
@@ -696,8 +766,6 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
"content": "Test response",
|
||||
} # Dict format
|
||||
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
@@ -712,6 +780,160 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
prompt="Another test",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
|
||||
outputs = {}
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
graph_id="test-graph-id",
|
||||
node_id="test-node-id",
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
assert "finished" in outputs
|
||||
assert outputs["finished"] == "Test response"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_agent_mode():
|
||||
"""Test that agent mode executes tools directly and loops until finished."""
|
||||
import backend.blocks.llm as llm_module
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
|
||||
block = SmartDecisionMakerBlock()
|
||||
|
||||
# Mock tool call that requires multiple iterations
|
||||
mock_tool_call_1 = MagicMock()
|
||||
mock_tool_call_1.id = "call_1"
|
||||
mock_tool_call_1.function.name = "search_keywords"
|
||||
mock_tool_call_1.function.arguments = (
|
||||
'{"query": "test", "max_keyword_difficulty": 50}'
|
||||
)
|
||||
|
||||
mock_response_1 = MagicMock()
|
||||
mock_response_1.response = None
|
||||
mock_response_1.tool_calls = [mock_tool_call_1]
|
||||
mock_response_1.prompt_tokens = 50
|
||||
mock_response_1.completion_tokens = 25
|
||||
mock_response_1.reasoning = "Using search tool"
|
||||
mock_response_1.raw_response = {
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [{"id": "call_1", "type": "function"}],
|
||||
}
|
||||
|
||||
# Final response with no tool calls (finished)
|
||||
mock_response_2 = MagicMock()
|
||||
mock_response_2.response = "Task completed successfully"
|
||||
mock_response_2.tool_calls = []
|
||||
mock_response_2.prompt_tokens = 30
|
||||
mock_response_2.completion_tokens = 15
|
||||
mock_response_2.reasoning = None
|
||||
mock_response_2.raw_response = {
|
||||
"role": "assistant",
|
||||
"content": "Task completed successfully",
|
||||
}
|
||||
|
||||
# Mock the LLM call to return different responses on each iteration
|
||||
llm_call_mock = AsyncMock()
|
||||
llm_call_mock.side_effect = [mock_response_1, mock_response_2]
|
||||
|
||||
# Mock tool node signatures
|
||||
mock_tool_signatures = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_keywords",
|
||||
"_sink_node_id": "test-sink-node-id",
|
||||
"_field_mapping": {},
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"query": {"type": "string"},
|
||||
"max_keyword_difficulty": {"type": "integer"},
|
||||
},
|
||||
"required": ["query", "max_keyword_difficulty"],
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
# Mock database and execution components
|
||||
mock_db_client = AsyncMock()
|
||||
mock_node = MagicMock()
|
||||
mock_node.block_id = "test-block-id"
|
||||
mock_db_client.get_node.return_value = mock_node
|
||||
|
||||
# Mock upsert_execution_input to return proper NodeExecutionResult and input data
|
||||
mock_node_exec_result = MagicMock()
|
||||
mock_node_exec_result.node_exec_id = "test-tool-exec-id"
|
||||
mock_input_data = {"query": "test", "max_keyword_difficulty": 50}
|
||||
mock_db_client.upsert_execution_input.return_value = (
|
||||
mock_node_exec_result,
|
||||
mock_input_data,
|
||||
)
|
||||
|
||||
# No longer need mock_execute_node since we use execution_processor.on_node_execution
|
||||
|
||||
with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object(
|
||||
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
|
||||
), patch(
|
||||
"backend.blocks.smart_decision_maker.get_database_manager_async_client",
|
||||
return_value=mock_db_client,
|
||||
), patch(
|
||||
"backend.executor.manager.async_update_node_execution_status",
|
||||
new_callable=AsyncMock,
|
||||
), patch(
|
||||
"backend.integrations.creds_manager.IntegrationCredentialsManager"
|
||||
):
|
||||
|
||||
# Create a mock execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(
|
||||
safe_mode=False,
|
||||
)
|
||||
|
||||
# Create a mock execution processor for agent mode tests
|
||||
|
||||
mock_execution_processor = AsyncMock()
|
||||
# Configure the execution processor mock with required attributes
|
||||
mock_execution_processor.running_node_execution = defaultdict(MagicMock)
|
||||
mock_execution_processor.execution_stats = MagicMock()
|
||||
mock_execution_processor.execution_stats_lock = threading.Lock()
|
||||
|
||||
# Mock the on_node_execution method to return successful stats
|
||||
mock_node_stats = MagicMock()
|
||||
mock_node_stats.error = None # No error
|
||||
mock_execution_processor.on_node_execution = AsyncMock(
|
||||
return_value=mock_node_stats
|
||||
)
|
||||
|
||||
# Mock the get_execution_outputs_by_node_exec_id method
|
||||
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = {
|
||||
"result": {"status": "success", "data": "search completed"}
|
||||
}
|
||||
|
||||
# Test agent mode with max_iterations = 3
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Complete this task using tools",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations
|
||||
)
|
||||
|
||||
outputs = {}
|
||||
@@ -723,8 +945,115 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
# Verify agent mode behavior
|
||||
assert "tool_functions" in outputs # tool_functions is yielded in both modes
|
||||
assert "finished" in outputs
|
||||
assert outputs["finished"] == "Test response"
|
||||
assert outputs["finished"] == "Task completed successfully"
|
||||
assert "conversations" in outputs
|
||||
|
||||
# Verify the conversation includes tool responses
|
||||
conversations = outputs["conversations"]
|
||||
assert len(conversations) > 2 # Should have multiple conversation entries
|
||||
|
||||
# Verify LLM was called twice (once for tool call, once for finish)
|
||||
assert llm_call_mock.call_count == 2
|
||||
|
||||
# Verify tool was executed via execution processor
|
||||
assert mock_execution_processor.on_node_execution.call_count == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_smart_decision_maker_traditional_mode_default():
|
||||
"""Test that default behavior (agent_mode_max_iterations=0) works as traditional mode."""
|
||||
import backend.blocks.llm as llm_module
|
||||
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
|
||||
block = SmartDecisionMakerBlock()
|
||||
|
||||
# Mock tool call
|
||||
mock_tool_call = MagicMock()
|
||||
mock_tool_call.function.name = "search_keywords"
|
||||
mock_tool_call.function.arguments = (
|
||||
'{"query": "test", "max_keyword_difficulty": 50}'
|
||||
)
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.response = None
|
||||
mock_response.tool_calls = [mock_tool_call]
|
||||
mock_response.prompt_tokens = 50
|
||||
mock_response.completion_tokens = 25
|
||||
mock_response.reasoning = None
|
||||
mock_response.raw_response = {"role": "assistant", "content": None}
|
||||
|
||||
mock_tool_signatures = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_keywords",
|
||||
"_sink_node_id": "test-sink-node-id",
|
||||
"_field_mapping": {},
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"query": {"type": "string"},
|
||||
"max_keyword_difficulty": {"type": "integer"},
|
||||
},
|
||||
"required": ["query", "max_keyword_difficulty"],
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
with patch(
|
||||
"backend.blocks.llm.llm_call",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_response,
|
||||
), patch.object(
|
||||
block, "_create_tool_node_signatures", return_value=mock_tool_signatures
|
||||
):
|
||||
|
||||
# Test default behavior (traditional mode)
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Test prompt",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0, # Traditional mode
|
||||
)
|
||||
|
||||
# Create execution context
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a mock execution processor for tests
|
||||
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
outputs = {}
|
||||
async for output_name, output_data in block.run(
|
||||
input_data,
|
||||
credentials=llm_module.TEST_CREDENTIALS,
|
||||
graph_id="test-graph-id",
|
||||
node_id="test-node-id",
|
||||
graph_exec_id="test-exec-id",
|
||||
node_exec_id="test-node-exec-id",
|
||||
user_id="test-user-id",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_data
|
||||
|
||||
# Verify traditional mode behavior
|
||||
assert (
|
||||
"tool_functions" in outputs
|
||||
) # Should yield tool_functions in traditional mode
|
||||
assert (
|
||||
"tools_^_test-sink-node-id_~_query" in outputs
|
||||
) # Should yield individual tool parameters
|
||||
assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs
|
||||
assert "conversations" in outputs
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"""Comprehensive tests for SmartDecisionMakerBlock dynamic field handling."""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, Mock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields():
|
||||
) as mock_llm:
|
||||
mock_llm.return_value = mock_response
|
||||
|
||||
# Mock the function signature creation
|
||||
with patch.object(
|
||||
# Mock the database manager to avoid HTTP calls during tool execution
|
||||
with patch(
|
||||
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
|
||||
) as mock_db_manager, patch.object(
|
||||
block, "_create_tool_node_signatures", new_callable=AsyncMock
|
||||
) as mock_sig:
|
||||
# Set up the mock database manager
|
||||
mock_db_client = AsyncMock()
|
||||
mock_db_manager.return_value = mock_db_client
|
||||
|
||||
# Mock the node retrieval
|
||||
mock_target_node = Mock()
|
||||
mock_target_node.id = "test-sink-node-id"
|
||||
mock_target_node.block_id = "CreateDictionaryBlock"
|
||||
mock_target_node.block = Mock()
|
||||
mock_target_node.block.name = "Create Dictionary"
|
||||
mock_db_client.get_node.return_value = mock_target_node
|
||||
|
||||
# Mock the execution result creation
|
||||
mock_node_exec_result = Mock()
|
||||
mock_node_exec_result.node_exec_id = "mock-node-exec-id"
|
||||
mock_final_input_data = {
|
||||
"values_#_name": "Alice",
|
||||
"values_#_age": 30,
|
||||
"values_#_email": "alice@example.com",
|
||||
}
|
||||
mock_db_client.upsert_execution_input.return_value = (
|
||||
mock_node_exec_result,
|
||||
mock_final_input_data,
|
||||
)
|
||||
|
||||
# Mock the output retrieval
|
||||
mock_outputs = {
|
||||
"values_#_name": "Alice",
|
||||
"values_#_age": 30,
|
||||
"values_#_email": "alice@example.com",
|
||||
}
|
||||
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
|
||||
mock_outputs
|
||||
)
|
||||
|
||||
mock_sig.return_value = [
|
||||
{
|
||||
"type": "function",
|
||||
@@ -337,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields():
|
||||
prompt="Create a user dictionary",
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
agent_mode_max_iterations=0, # Use traditional mode to test output yielding
|
||||
)
|
||||
|
||||
# Run the block
|
||||
outputs = {}
|
||||
from backend.data.execution import ExecutionContext
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
mock_execution_processor = MagicMock()
|
||||
|
||||
async for output_name, output_value in block.run(
|
||||
input_data,
|
||||
credentials=llm.TEST_CREDENTIALS,
|
||||
@@ -349,6 +392,9 @@ async def test_output_yielding_with_dynamic_fields():
|
||||
graph_exec_id="test_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
user_id="test_user",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_value
|
||||
|
||||
@@ -511,45 +557,108 @@ async def test_validation_errors_dont_pollute_conversation():
|
||||
}
|
||||
]
|
||||
|
||||
# Create input data
|
||||
from backend.blocks import llm
|
||||
# Mock the database manager to avoid HTTP calls during tool execution
|
||||
with patch(
|
||||
"backend.blocks.smart_decision_maker.get_database_manager_async_client"
|
||||
) as mock_db_manager:
|
||||
# Set up the mock database manager for agent mode
|
||||
mock_db_client = AsyncMock()
|
||||
mock_db_manager.return_value = mock_db_client
|
||||
|
||||
input_data = block.input_schema(
|
||||
prompt="Test prompt",
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
retry=3, # Allow retries
|
||||
)
|
||||
# Mock the node retrieval
|
||||
mock_target_node = Mock()
|
||||
mock_target_node.id = "test-sink-node-id"
|
||||
mock_target_node.block_id = "TestBlock"
|
||||
mock_target_node.block = Mock()
|
||||
mock_target_node.block.name = "Test Block"
|
||||
mock_db_client.get_node.return_value = mock_target_node
|
||||
|
||||
# Run the block
|
||||
outputs = {}
|
||||
async for output_name, output_value in block.run(
|
||||
input_data,
|
||||
credentials=llm.TEST_CREDENTIALS,
|
||||
graph_id="test_graph",
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
user_id="test_user",
|
||||
):
|
||||
outputs[output_name] = output_value
|
||||
# Mock the execution result creation
|
||||
mock_node_exec_result = Mock()
|
||||
mock_node_exec_result.node_exec_id = "mock-node-exec-id"
|
||||
mock_final_input_data = {"correct_param": "value"}
|
||||
mock_db_client.upsert_execution_input.return_value = (
|
||||
mock_node_exec_result,
|
||||
mock_final_input_data,
|
||||
)
|
||||
|
||||
# Verify we had 2 LLM calls (initial + retry)
|
||||
assert call_count == 2
|
||||
# Mock the output retrieval
|
||||
mock_outputs = {"correct_param": "value"}
|
||||
mock_db_client.get_execution_outputs_by_node_exec_id.return_value = (
|
||||
mock_outputs
|
||||
)
|
||||
|
||||
# Check the final conversation output
|
||||
final_conversation = outputs.get("conversations", [])
|
||||
# Create input data
|
||||
from backend.blocks import llm
|
||||
|
||||
# The final conversation should NOT contain the validation error message
|
||||
error_messages = [
|
||||
msg
|
||||
for msg in final_conversation
|
||||
if msg.get("role") == "user"
|
||||
and "parameter errors" in msg.get("content", "")
|
||||
]
|
||||
assert (
|
||||
len(error_messages) == 0
|
||||
), "Validation error leaked into final conversation"
|
||||
input_data = block.input_schema(
|
||||
prompt="Test prompt",
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
retry=3, # Allow retries
|
||||
agent_mode_max_iterations=1,
|
||||
)
|
||||
|
||||
# The final conversation should only have the successful response
|
||||
assert final_conversation[-1]["content"] == "valid"
|
||||
# Run the block
|
||||
outputs = {}
|
||||
from backend.data.execution import ExecutionContext
|
||||
|
||||
mock_execution_context = ExecutionContext(safe_mode=False)
|
||||
|
||||
# Create a proper mock execution processor for agent mode
|
||||
from collections import defaultdict
|
||||
|
||||
mock_execution_processor = AsyncMock()
|
||||
mock_execution_processor.execution_stats = MagicMock()
|
||||
mock_execution_processor.execution_stats_lock = MagicMock()
|
||||
|
||||
# Create a mock NodeExecutionProgress for the sink node
|
||||
mock_node_exec_progress = MagicMock()
|
||||
mock_node_exec_progress.add_task = MagicMock()
|
||||
mock_node_exec_progress.pop_output = MagicMock(
|
||||
return_value=None
|
||||
) # No outputs to process
|
||||
|
||||
# Set up running_node_execution as a defaultdict that returns our mock for any key
|
||||
mock_execution_processor.running_node_execution = defaultdict(
|
||||
lambda: mock_node_exec_progress
|
||||
)
|
||||
|
||||
# Mock the on_node_execution method that gets called during tool execution
|
||||
mock_node_stats = MagicMock()
|
||||
mock_node_stats.error = None
|
||||
mock_execution_processor.on_node_execution.return_value = (
|
||||
mock_node_stats
|
||||
)
|
||||
|
||||
async for output_name, output_value in block.run(
|
||||
input_data,
|
||||
credentials=llm.TEST_CREDENTIALS,
|
||||
graph_id="test_graph",
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
user_id="test_user",
|
||||
graph_version=1,
|
||||
execution_context=mock_execution_context,
|
||||
execution_processor=mock_execution_processor,
|
||||
):
|
||||
outputs[output_name] = output_value
|
||||
|
||||
# Verify we had at least 1 LLM call
|
||||
assert call_count >= 1
|
||||
|
||||
# Check the final conversation output
|
||||
final_conversation = outputs.get("conversations", [])
|
||||
|
||||
# The final conversation should NOT contain validation error messages
|
||||
# Even if retries don't happen in agent mode, we should not leak errors
|
||||
error_messages = [
|
||||
msg
|
||||
for msg in final_conversation
|
||||
if msg.get("role") == "user"
|
||||
and "parameter errors" in msg.get("content", "")
|
||||
]
|
||||
assert (
|
||||
len(error_messages) == 0
|
||||
), "Validation error leaked into final conversation"
|
||||
|
||||
@@ -1,12 +1,45 @@
import logging
from datetime import datetime, timedelta, timezone
from typing import Optional

import prisma.types
from pydantic import BaseModel

from backend.data.db import query_raw_with_schema
from backend.util.json import SafeJson

logger = logging.getLogger(__name__)


class AccuracyAlertData(BaseModel):
    """Alert data when accuracy drops significantly."""

    graph_id: str
    user_id: Optional[str]
    drop_percent: float
    three_day_avg: float
    seven_day_avg: float
    detected_at: datetime


class AccuracyLatestData(BaseModel):
    """Latest execution accuracy data point."""

    date: datetime
    daily_score: Optional[float]
    three_day_avg: Optional[float]
    seven_day_avg: Optional[float]
    fourteen_day_avg: Optional[float]


class AccuracyTrendsResponse(BaseModel):
    """Response model for accuracy trends and alerts."""

    latest_data: AccuracyLatestData
    alert: Optional[AccuracyAlertData]
    historical_data: Optional[list[AccuracyLatestData]] = None


async def log_raw_analytics(
|
||||
user_id: str,
|
||||
type: str,
|
||||
@@ -43,3 +76,217 @@ async def log_raw_metric(
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
async def get_accuracy_trends_and_alerts(
|
||||
graph_id: str,
|
||||
days_back: int = 30,
|
||||
user_id: Optional[str] = None,
|
||||
drop_threshold: float = 10.0,
|
||||
include_historical: bool = False,
|
||||
) -> AccuracyTrendsResponse:
|
||||
"""Get accuracy trends and detect alerts for a specific graph."""
|
||||
query_template = """
|
||||
WITH daily_scores AS (
|
||||
SELECT
|
||||
DATE(e."createdAt") as execution_date,
|
||||
AVG(CASE
|
||||
WHEN e.stats IS NOT NULL
|
||||
AND e.stats::json->>'correctness_score' IS NOT NULL
|
||||
AND e.stats::json->>'correctness_score' != 'null'
|
||||
THEN (e.stats::json->>'correctness_score')::float * 100
|
||||
ELSE NULL
|
||||
END) as daily_score
|
||||
FROM {schema_prefix}"AgentGraphExecution" e
|
||||
WHERE e."agentGraphId" = $1::text
|
||||
AND e."isDeleted" = false
|
||||
AND e."createdAt" >= $2::timestamp
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
{user_filter}
|
||||
GROUP BY DATE(e."createdAt")
|
||||
HAVING COUNT(*) >= 3 -- Need at least 3 executions per day
|
||||
),
|
||||
trends AS (
|
||||
SELECT
|
||||
execution_date,
|
||||
daily_score,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
|
||||
) as three_day_avg,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
|
||||
) as seven_day_avg,
|
||||
AVG(daily_score) OVER (
|
||||
ORDER BY execution_date
|
||||
ROWS BETWEEN 13 PRECEDING AND CURRENT ROW
|
||||
) as fourteen_day_avg
|
||||
FROM daily_scores
|
||||
)
|
||||
SELECT *,
|
||||
CASE
|
||||
WHEN three_day_avg IS NOT NULL AND seven_day_avg IS NOT NULL AND seven_day_avg > 0
|
||||
THEN ((seven_day_avg - three_day_avg) / seven_day_avg * 100)
|
||||
ELSE NULL
|
||||
END as drop_percent
|
||||
FROM trends
|
||||
ORDER BY execution_date DESC
|
||||
{limit_clause}
|
||||
"""
|
||||
|
||||
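    # Worked example of the drop_percent expression above (illustrative
    # numbers): with seven_day_avg = 80.0 and three_day_avg = 68.0,
    # ((80.0 - 68.0) / 80.0) * 100 = 15.0, which exceeds the default
    # drop_threshold of 10.0 and will produce an alert below.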
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
params = [graph_id, start_date]
|
||||
user_filter = ""
|
||||
if user_id:
|
||||
user_filter = 'AND e."userId" = $3::text'
|
||||
params.append(user_id)
|
||||
|
||||
# Determine limit clause
|
||||
limit_clause = "" if include_historical else "LIMIT 1"
|
||||
|
||||
final_query = query_template.format(
|
||||
schema_prefix="{schema_prefix}",
|
||||
user_filter=user_filter,
|
||||
limit_clause=limit_clause,
|
||||
)
|
||||
|
||||
result = await query_raw_with_schema(final_query, *params)
|
||||
|
||||
if not result:
|
||||
return AccuracyTrendsResponse(
|
||||
latest_data=AccuracyLatestData(
|
||||
date=datetime.now(timezone.utc),
|
||||
daily_score=None,
|
||||
three_day_avg=None,
|
||||
seven_day_avg=None,
|
||||
fourteen_day_avg=None,
|
||||
),
|
||||
alert=None,
|
||||
)
|
||||
|
||||
latest = result[0]
|
||||
|
||||
alert = None
|
||||
if (
|
||||
latest["drop_percent"] is not None
|
||||
and latest["drop_percent"] >= drop_threshold
|
||||
and latest["three_day_avg"] is not None
|
||||
and latest["seven_day_avg"] is not None
|
||||
):
|
||||
alert = AccuracyAlertData(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
drop_percent=float(latest["drop_percent"]),
|
||||
three_day_avg=float(latest["three_day_avg"]),
|
||||
seven_day_avg=float(latest["seven_day_avg"]),
|
||||
detected_at=datetime.now(timezone.utc),
|
||||
)
|
||||
|
||||
# Prepare historical data if requested
|
||||
historical_data = None
|
||||
if include_historical:
|
||||
historical_data = []
|
||||
for row in result:
|
||||
historical_data.append(
|
||||
AccuracyLatestData(
|
||||
date=row["execution_date"],
|
||||
daily_score=(
|
||||
float(row["daily_score"])
|
||||
if row["daily_score"] is not None
|
||||
else None
|
||||
),
|
||||
three_day_avg=(
|
||||
float(row["three_day_avg"])
|
||||
if row["three_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
seven_day_avg=(
|
||||
float(row["seven_day_avg"])
|
||||
if row["seven_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
fourteen_day_avg=(
|
||||
float(row["fourteen_day_avg"])
|
||||
if row["fourteen_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
return AccuracyTrendsResponse(
|
||||
latest_data=AccuracyLatestData(
|
||||
date=latest["execution_date"],
|
||||
daily_score=(
|
||||
float(latest["daily_score"])
|
||||
if latest["daily_score"] is not None
|
||||
else None
|
||||
),
|
||||
three_day_avg=(
|
||||
float(latest["three_day_avg"])
|
||||
if latest["three_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
seven_day_avg=(
|
||||
float(latest["seven_day_avg"])
|
||||
if latest["seven_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
fourteen_day_avg=(
|
||||
float(latest["fourteen_day_avg"])
|
||||
if latest["fourteen_day_avg"] is not None
|
||||
else None
|
||||
),
|
||||
),
|
||||
alert=alert,
|
||||
historical_data=historical_data,
|
||||
)
|
||||
|
||||
|
||||
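# Hedged usage sketch for the helper above; the caller and log message are
# illustrative, not part of this change.
async def _example_check_accuracy_drop(graph_id: str) -> None:
    trends = await get_accuracy_trends_and_alerts(graph_id=graph_id, days_back=30)
    if trends.alert is not None:
        logger.warning(
            "Accuracy dropped %.1f%% (3-day avg %.1f vs 7-day avg %.1f)",
            trends.alert.drop_percent,
            trends.alert.three_day_avg,
            trends.alert.seven_day_avg,
        )
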
class MarketplaceGraphData(BaseModel):
|
||||
"""Data structure for marketplace graph monitoring."""
|
||||
|
||||
graph_id: str
|
||||
user_id: Optional[str]
|
||||
execution_count: int
|
||||
|
||||
|
||||
async def get_marketplace_graphs_for_monitoring(
|
||||
days_back: int = 30,
|
||||
min_executions: int = 10,
|
||||
) -> list[MarketplaceGraphData]:
|
||||
"""Get published marketplace graphs with recent executions for monitoring."""
|
||||
query_template = """
|
||||
WITH marketplace_graphs AS (
|
||||
SELECT DISTINCT
|
||||
slv."agentGraphId" as graph_id,
|
||||
slv."agentGraphVersion" as graph_version
|
||||
FROM {schema_prefix}"StoreListing" sl
|
||||
JOIN {schema_prefix}"StoreListingVersion" slv ON sl."activeVersionId" = slv."id"
|
||||
WHERE sl."hasApprovedVersion" = true
|
||||
AND sl."isDeleted" = false
|
||||
)
|
||||
SELECT DISTINCT
|
||||
mg.graph_id,
|
||||
NULL as user_id, -- Marketplace graphs don't have a specific user_id for monitoring
|
||||
COUNT(*) as execution_count
|
||||
FROM marketplace_graphs mg
|
||||
JOIN {schema_prefix}"AgentGraphExecution" e ON e."agentGraphId" = mg.graph_id
|
||||
WHERE e."createdAt" >= $1::timestamp
|
||||
AND e."isDeleted" = false
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
GROUP BY mg.graph_id
|
||||
HAVING COUNT(*) >= $2
|
||||
ORDER BY execution_count DESC
|
||||
"""
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
result = await query_raw_with_schema(query_template, start_date, min_executions)
|
||||
|
||||
return [
|
||||
MarketplaceGraphData(
|
||||
graph_id=row["graph_id"],
|
||||
user_id=row["user_id"],
|
||||
execution_count=int(row["execution_count"]),
|
||||
)
|
||||
for row in result
|
||||
]
|
||||
|
||||
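# Hedged sketch combining the two helpers above: scan recently active
# marketplace graphs and collect any accuracy alerts. The function name and
# thresholds are illustrative, not part of this change.
async def _example_scan_marketplace_accuracy() -> list[AccuracyAlertData]:
    alerts: list[AccuracyAlertData] = []
    for graph in await get_marketplace_graphs_for_monitoring(
        days_back=30, min_executions=10
    ):
        trends = await get_accuracy_trends_and_alerts(
            graph_id=graph.graph_id, user_id=graph.user_id
        )
        if trends.alert is not None:
            alerts.append(trends.alert)
    return alerts
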
@@ -601,14 +601,18 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
|
||||
async for output_name, output_data in self._execute(input_data, **kwargs):
|
||||
yield output_name, output_data
|
||||
except Exception as ex:
|
||||
if not isinstance(ex, BlockError):
|
||||
raise BlockUnknownError(
|
||||
if isinstance(ex, BlockError):
|
||||
raise ex
|
||||
else:
|
||||
raise (
|
||||
BlockExecutionError
|
||||
if isinstance(ex, ValueError)
|
||||
else BlockUnknownError
|
||||
)(
|
||||
message=str(ex),
|
||||
block_name=self.name,
|
||||
block_id=self.id,
|
||||
) from ex
|
||||
else:
|
||||
raise ex
|
||||
|
||||
async def _execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
|
||||
if error := self.input_schema.validate_data(input_data):
|
||||
|
||||
@@ -5,6 +5,7 @@ from enum import Enum
|
||||
from multiprocessing import Manager
|
||||
from queue import Empty
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
AsyncGenerator,
|
||||
@@ -65,6 +66,9 @@ from .includes import (
|
||||
)
|
||||
from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -836,6 +840,30 @@ async def upsert_execution_output(
|
||||
await AgentNodeExecutionInputOutput.prisma().create(data=data)
|
||||
|
||||
|
||||
async def get_execution_outputs_by_node_exec_id(
    node_exec_id: str,
) -> dict[str, Any]:
    """
    Get all execution outputs for a specific node execution ID.

    Args:
        node_exec_id: The node execution ID to get outputs for

    Returns:
        Dictionary mapping output names to their data values
    """
    outputs = await AgentNodeExecutionInputOutput.prisma().find_many(
        where={"referencedByOutputExecId": node_exec_id}
    )

    result = {}
    for output in outputs:
        if output.data is not None:
            result[output.name] = type_utils.convert(output.data, JsonValue)

    return result


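# Minimal usage sketch for the helper above; the function name and the way the
# node execution id is obtained are placeholders.
async def _example_log_node_outputs(node_exec_id: str) -> None:
    outputs = await get_execution_outputs_by_node_exec_id(node_exec_id)
    for name, value in outputs.items():
        logger.debug("node output %s = %r", name, value)
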
async def update_graph_execution_start_time(
|
||||
graph_exec_id: str,
|
||||
) -> GraphExecution | None:
|
||||
@@ -1465,3 +1493,35 @@ async def get_graph_execution_by_share_token(
|
||||
created_at=execution.createdAt,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
|
||||
async def get_frequently_executed_graphs(
|
||||
days_back: int = 30,
|
||||
min_executions: int = 10,
|
||||
) -> list[dict]:
|
||||
"""Get graphs that have been frequently executed for monitoring."""
|
||||
query_template = """
|
||||
SELECT DISTINCT
|
||||
e."agentGraphId" as graph_id,
|
||||
e."userId" as user_id,
|
||||
COUNT(*) as execution_count
|
||||
FROM {schema_prefix}"AgentGraphExecution" e
|
||||
WHERE e."createdAt" >= $1::timestamp
|
||||
AND e."isDeleted" = false
|
||||
AND e."executionStatus" IN ('COMPLETED', 'FAILED', 'TERMINATED')
|
||||
GROUP BY e."agentGraphId", e."userId"
|
||||
HAVING COUNT(*) >= $2
|
||||
ORDER BY execution_count DESC
|
||||
"""
|
||||
|
||||
start_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
result = await query_raw_with_schema(query_template, start_date, min_executions)
|
||||
|
||||
return [
|
||||
{
|
||||
"graph_id": row["graph_id"],
|
||||
"user_id": row["user_id"],
|
||||
"execution_count": int(row["execution_count"]),
|
||||
}
|
||||
for row in result
|
||||
]
|
||||
|
||||
@@ -100,7 +100,7 @@ async def get_or_create_human_review(
return None
else:
return ReviewResult(
- data=review.payload if review.status == ReviewStatus.APPROVED else None,
+ data=review.payload,
status=review.status,
message=review.reviewMessage or "",
processed=review.processed,
|
||||
|
||||
@@ -22,7 +22,7 @@ from typing import (
|
||||
from urllib.parse import urlparse
|
||||
from uuid import uuid4
|
||||
|
||||
from prisma.enums import CreditTransactionType
|
||||
from prisma.enums import CreditTransactionType, OnboardingStep
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
@@ -868,3 +868,20 @@ class UserExecutionSummaryStats(BaseModel):
|
||||
total_execution_time: float = Field(default=0)
|
||||
average_execution_time: float = Field(default=0)
|
||||
cost_breakdown: dict[str, float] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class UserOnboarding(BaseModel):
|
||||
userId: str
|
||||
completedSteps: list[OnboardingStep]
|
||||
walletShown: bool
|
||||
notified: list[OnboardingStep]
|
||||
rewardedFor: list[OnboardingStep]
|
||||
usageReason: Optional[str]
|
||||
integrations: list[str]
|
||||
otherIntegrations: Optional[str]
|
||||
selectedStoreListingVersionId: Optional[str]
|
||||
agentInput: Optional[dict[str, Any]]
|
||||
onboardingAgentExecutionId: Optional[str]
|
||||
agentRuns: int
|
||||
lastRunAt: Optional[datetime]
|
||||
consecutiveRunDays: int
|
||||
|
||||
@@ -2,7 +2,7 @@ from __future__ import annotations

from typing import AsyncGenerator

- from pydantic import BaseModel
+ from pydantic import BaseModel, field_serializer

from backend.data.event_bus import AsyncRedisEventBus
from backend.server.model import NotificationPayload
@@ -15,6 +15,11 @@ class NotificationEvent(BaseModel):
user_id: str
payload: NotificationPayload

@field_serializer("payload")
def serialize_payload(self, payload: NotificationPayload):
"""Ensure extra fields survive Redis serialization."""
return payload.model_dump()


class AsyncRedisNotificationEventBus(AsyncRedisEventBus[NotificationEvent]):
Model = NotificationEvent  # type: ignore

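A short, hedged illustration of what the serializer buys: NotificationPayload is declared with extra="allow" later in this diff, so subclass-only fields such as OnboardingNotificationPayload.step are kept when the payload is dumped explicitly. The import location and user id below are assumptions of the sketch.

# Hedged sketch (OnboardingNotificationPayload lives in backend.server.model):
payload = OnboardingNotificationPayload(type="onboarding", event="step_completed", step=None)
event = NotificationEvent(user_id="user-123", payload=payload)  # placeholder user id
data = event.model_dump()  # "payload" comes from serialize_payload(), so "step" is preserved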
@@ -1,6 +1,7 @@
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Any, Literal, Optional
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
import prisma
|
||||
import pydantic
|
||||
@@ -8,17 +9,18 @@ from prisma.enums import OnboardingStep
|
||||
from prisma.models import UserOnboarding
|
||||
from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput
|
||||
|
||||
from backend.data.block import get_blocks
|
||||
from backend.data import execution as execution_db
|
||||
from backend.data.credit import get_user_credit_model
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.notification_bus import (
|
||||
AsyncRedisNotificationEventBus,
|
||||
NotificationEvent,
|
||||
)
|
||||
from backend.data.user import get_user_by_id
|
||||
from backend.server.model import OnboardingNotificationPayload
|
||||
from backend.server.v2.store.model import StoreAgentDetails
|
||||
from backend.util.cache import cached
|
||||
from backend.util.json import SafeJson
|
||||
from backend.util.timezone_utils import get_user_timezone_or_utc
|
||||
|
||||
# Mapping from user reason id to categories to search for when choosing agent to show
|
||||
REASON_MAPPING: dict[str, list[str]] = {
|
||||
@@ -31,9 +33,20 @@ REASON_MAPPING: dict[str, list[str]] = {
|
||||
POINTS_AGENT_COUNT = 50 # Number of agents to calculate points for
|
||||
MIN_AGENT_COUNT = 2 # Minimum number of marketplace agents to enable onboarding
|
||||
|
||||
FrontendOnboardingStep = Literal[
|
||||
OnboardingStep.WELCOME,
|
||||
OnboardingStep.USAGE_REASON,
|
||||
OnboardingStep.INTEGRATIONS,
|
||||
OnboardingStep.AGENT_CHOICE,
|
||||
OnboardingStep.AGENT_NEW_RUN,
|
||||
OnboardingStep.AGENT_INPUT,
|
||||
OnboardingStep.CONGRATS,
|
||||
OnboardingStep.MARKETPLACE_VISIT,
|
||||
OnboardingStep.BUILDER_OPEN,
|
||||
]
|
||||
|
||||
|
||||
class UserOnboardingUpdate(pydantic.BaseModel):
|
||||
completedSteps: Optional[list[OnboardingStep]] = None
|
||||
walletShown: Optional[bool] = None
|
||||
notified: Optional[list[OnboardingStep]] = None
|
||||
usageReason: Optional[str] = None
|
||||
@@ -42,9 +55,6 @@ class UserOnboardingUpdate(pydantic.BaseModel):
|
||||
selectedStoreListingVersionId: Optional[str] = None
|
||||
agentInput: Optional[dict[str, Any]] = None
|
||||
onboardingAgentExecutionId: Optional[str] = None
|
||||
agentRuns: Optional[int] = None
|
||||
lastRunAt: Optional[datetime] = None
|
||||
consecutiveRunDays: Optional[int] = None
|
||||
|
||||
|
||||
async def get_user_onboarding(user_id: str):
|
||||
@@ -83,14 +93,6 @@ async def reset_user_onboarding(user_id: str):
|
||||
async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
|
||||
update: UserOnboardingUpdateInput = {}
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
if data.completedSteps is not None:
|
||||
update["completedSteps"] = list(
|
||||
set(data.completedSteps + onboarding.completedSteps)
|
||||
)
|
||||
for step in data.completedSteps:
|
||||
if step not in onboarding.completedSteps:
|
||||
await _reward_user(user_id, onboarding, step)
|
||||
await _send_onboarding_notification(user_id, step)
|
||||
if data.walletShown:
|
||||
update["walletShown"] = data.walletShown
|
||||
if data.notified is not None:
|
||||
@@ -107,12 +109,6 @@ async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
|
||||
update["agentInput"] = SafeJson(data.agentInput)
|
||||
if data.onboardingAgentExecutionId is not None:
|
||||
update["onboardingAgentExecutionId"] = data.onboardingAgentExecutionId
|
||||
if data.agentRuns is not None and data.agentRuns > onboarding.agentRuns:
|
||||
update["agentRuns"] = data.agentRuns
|
||||
if data.lastRunAt is not None:
|
||||
update["lastRunAt"] = data.lastRunAt
|
||||
if data.consecutiveRunDays is not None:
|
||||
update["consecutiveRunDays"] = data.consecutiveRunDays
|
||||
|
||||
return await UserOnboarding.prisma().upsert(
|
||||
where={"userId": user_id},
|
||||
@@ -161,14 +157,12 @@ async def _reward_user(user_id: str, onboarding: UserOnboarding, step: Onboardin
|
||||
if step in onboarding.rewardedFor:
|
||||
return
|
||||
|
||||
onboarding.rewardedFor.append(step)
|
||||
user_credit_model = await get_user_credit_model(user_id)
|
||||
await user_credit_model.onboarding_reward(user_id, reward, step)
|
||||
await UserOnboarding.prisma().update(
|
||||
where={"userId": user_id},
|
||||
data={
|
||||
"completedSteps": list(set(onboarding.completedSteps + [step])),
|
||||
"rewardedFor": onboarding.rewardedFor,
|
||||
"rewardedFor": list(set(onboarding.rewardedFor + [step])),
|
||||
},
|
||||
)
|
||||
|
||||
@@ -177,31 +171,52 @@ async def complete_onboarding_step(user_id: str, step: OnboardingStep):
"""
Completes the specified onboarding step for the user if not already completed.
"""

onboarding = await get_user_onboarding(user_id)
if step not in onboarding.completedSteps:
- await update_user_onboarding(
- user_id,
- UserOnboardingUpdate(completedSteps=onboarding.completedSteps + [step]),
+ await UserOnboarding.prisma().update(
+ where={"userId": user_id},
+ data={
+ "completedSteps": list(set(onboarding.completedSteps + [step])),
+ },
)
await _reward_user(user_id, onboarding, step)
await _send_onboarding_notification(user_id, step)


- async def _send_onboarding_notification(user_id: str, step: OnboardingStep):
+ async def _send_onboarding_notification(
+ user_id: str, step: OnboardingStep | None, event: str = "step_completed"
+ ):
"""
- Sends an onboarding notification to the user for the specified step.
+ Sends an onboarding notification to the user.
"""
payload = OnboardingNotificationPayload(
type="onboarding",
- event="step_completed",
- step=step.value,
+ event=event,
+ step=step,
)
await AsyncRedisNotificationEventBus().publish(
NotificationEvent(user_id=user_id, payload=payload)
)
|
||||
|
||||
|
||||
def clean_and_split(text: str) -> list[str]:
|
||||
async def complete_re_run_agent(user_id: str, graph_id: str) -> None:
|
||||
"""
|
||||
Complete RE_RUN_AGENT step when a user runs a graph they've run before.
|
||||
Keeps overhead low by only counting executions if the step is still pending.
|
||||
"""
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
if OnboardingStep.RE_RUN_AGENT in onboarding.completedSteps:
|
||||
return
|
||||
|
||||
# Includes current execution, so count > 1 means there was at least one prior run.
|
||||
previous_exec_count = await execution_db.get_graph_executions_count(
|
||||
user_id=user_id, graph_id=graph_id
|
||||
)
|
||||
if previous_exec_count > 1:
|
||||
await complete_onboarding_step(user_id, OnboardingStep.RE_RUN_AGENT)
|
||||
|
||||
|
||||
def _clean_and_split(text: str) -> list[str]:
|
||||
"""
|
||||
Removes all special characters from a string, truncates it to 100 characters,
|
||||
and splits it by whitespace and commas.
|
||||
@@ -224,7 +239,7 @@ def clean_and_split(text: str) -> list[str]:
|
||||
return words
|
||||
|
||||
|
||||
def calculate_points(
|
||||
def _calculate_points(
|
||||
agent, categories: list[str], custom: list[str], integrations: list[str]
|
||||
) -> int:
|
||||
"""
|
||||
@@ -268,18 +283,85 @@ def calculate_points(
|
||||
return int(points)
|
||||
|
||||
|
||||
def get_credentials_blocks() -> dict[str, str]:
|
||||
# Returns a dictionary of block id to credentials field name
|
||||
creds: dict[str, str] = {}
|
||||
blocks = get_blocks()
|
||||
for id, block in blocks.items():
|
||||
for field_name, field_info in block().input_schema.model_fields.items():
|
||||
if field_info.annotation == CredentialsMetaInput:
|
||||
creds[id] = field_name
|
||||
return creds
|
||||
def _normalize_datetime(value: datetime | None) -> datetime | None:
|
||||
if value is None:
|
||||
return None
|
||||
if value.tzinfo is None:
|
||||
return value.replace(tzinfo=timezone.utc)
|
||||
return value.astimezone(timezone.utc)
|
||||
|
||||
|
||||
CREDENTIALS_FIELDS: dict[str, str] = get_credentials_blocks()
|
||||
def _calculate_consecutive_run_days(
|
||||
last_run_at: datetime | None, current_consecutive_days: int, user_timezone: str
|
||||
) -> tuple[datetime, int]:
|
||||
tz = ZoneInfo(user_timezone)
|
||||
local_now = datetime.now(tz)
|
||||
normalized_last_run = _normalize_datetime(last_run_at)
|
||||
|
||||
if normalized_last_run is None:
|
||||
return local_now.astimezone(timezone.utc), 1
|
||||
|
||||
last_run_local = normalized_last_run.astimezone(tz)
|
||||
last_run_date = last_run_local.date()
|
||||
today = local_now.date()
|
||||
|
||||
if last_run_date == today:
|
||||
return local_now.astimezone(timezone.utc), current_consecutive_days
|
||||
|
||||
if last_run_date == today - timedelta(days=1):
|
||||
return local_now.astimezone(timezone.utc), current_consecutive_days + 1
|
||||
|
||||
return local_now.astimezone(timezone.utc), 1
|
||||
|
||||
|
||||
def _get_run_milestone_steps(
|
||||
new_run_count: int, consecutive_days: int
|
||||
) -> list[OnboardingStep]:
|
||||
milestones: list[OnboardingStep] = []
|
||||
if new_run_count >= 10:
|
||||
milestones.append(OnboardingStep.RUN_AGENTS)
|
||||
if new_run_count >= 100:
|
||||
milestones.append(OnboardingStep.RUN_AGENTS_100)
|
||||
if consecutive_days >= 3:
|
||||
milestones.append(OnboardingStep.RUN_3_DAYS)
|
||||
if consecutive_days >= 14:
|
||||
milestones.append(OnboardingStep.RUN_14_DAYS)
|
||||
return milestones
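A worked, hedged example of the streak and milestone helpers above; the dates and counts are invented for illustration.

# If the previous run was yesterday in the user's timezone, the streak increments:
last_run, streak = _calculate_consecutive_run_days(
    last_run_at=datetime(2024, 1, 1, 23, 0, tzinfo=timezone.utc),
    current_consecutive_days=2,
    user_timezone="UTC",
)
# Assuming "now" is 2024-01-02 UTC, streak == 3, and
# _get_run_milestone_steps(new_run_count=12, consecutive_days=3)
# returns [OnboardingStep.RUN_AGENTS, OnboardingStep.RUN_3_DAYS].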
|
||||
|
||||
|
||||
async def _get_user_timezone(user_id: str) -> str:
|
||||
user = await get_user_by_id(user_id)
|
||||
return get_user_timezone_or_utc(user.timezone if user else None)
|
||||
|
||||
|
||||
async def increment_runs(user_id: str):
|
||||
"""
|
||||
Increment a user's run counters and trigger any onboarding milestones.
|
||||
"""
|
||||
user_timezone = await _get_user_timezone(user_id)
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
new_run_count = onboarding.agentRuns + 1
|
||||
last_run_at, consecutive_run_days = _calculate_consecutive_run_days(
|
||||
onboarding.lastRunAt, onboarding.consecutiveRunDays, user_timezone
|
||||
)
|
||||
|
||||
await UserOnboarding.prisma().update(
|
||||
where={"userId": user_id},
|
||||
data={
|
||||
"agentRuns": {"increment": 1},
|
||||
"lastRunAt": last_run_at,
|
||||
"consecutiveRunDays": consecutive_run_days,
|
||||
},
|
||||
)
|
||||
|
||||
milestones = _get_run_milestone_steps(new_run_count, consecutive_run_days)
|
||||
new_steps = [step for step in milestones if step not in onboarding.completedSteps]
|
||||
|
||||
for step in new_steps:
|
||||
await complete_onboarding_step(user_id, step)
|
||||
# Send progress notification if no steps were completed, so client refetches onboarding state
|
||||
if not new_steps:
|
||||
await _send_onboarding_notification(user_id, None, event="increment_runs")
|
||||
|
||||
|
||||
async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
@@ -288,7 +370,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
|
||||
where_clause: dict[str, Any] = {}
|
||||
|
||||
custom = clean_and_split((user_onboarding.usageReason or "").lower())
|
||||
custom = _clean_and_split((user_onboarding.usageReason or "").lower())
|
||||
|
||||
if categories:
|
||||
where_clause["OR"] = [
|
||||
@@ -336,7 +418,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
# Calculate points for the first X agents and choose the top 2
|
||||
agent_points = []
|
||||
for agent in storeAgents[:POINTS_AGENT_COUNT]:
|
||||
points = calculate_points(
|
||||
points = _calculate_points(
|
||||
agent, categories, custom, user_onboarding.integrations
|
||||
)
|
||||
agent_points.append((agent, points))
|
||||
@@ -350,6 +432,7 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
slug=agent.slug,
|
||||
agent_name=agent.agent_name,
|
||||
agent_video=agent.agent_video or "",
|
||||
agent_output_demo=agent.agent_output_demo or "",
|
||||
agent_image=agent.agent_image,
|
||||
creator=agent.creator_username,
|
||||
creator_avatar=agent.creator_avatar,
|
||||
|
||||
@@ -3,12 +3,18 @@ from contextlib import asynccontextmanager
|
||||
from typing import TYPE_CHECKING, Callable, Concatenate, ParamSpec, TypeVar, cast
|
||||
|
||||
from backend.data import db
|
||||
from backend.data.analytics import (
|
||||
get_accuracy_trends_and_alerts,
|
||||
get_marketplace_graphs_for_monitoring,
|
||||
)
|
||||
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
|
||||
from backend.data.execution import (
|
||||
create_graph_execution,
|
||||
get_block_error_stats,
|
||||
get_child_graph_executions,
|
||||
get_execution_kv_data,
|
||||
get_execution_outputs_by_node_exec_id,
|
||||
get_frequently_executed_graphs,
|
||||
get_graph_execution_meta,
|
||||
get_graph_executions,
|
||||
get_graph_executions_count,
|
||||
@@ -142,9 +148,13 @@ class DatabaseManager(AppService):
|
||||
update_graph_execution_stats = _(update_graph_execution_stats)
|
||||
upsert_execution_input = _(upsert_execution_input)
|
||||
upsert_execution_output = _(upsert_execution_output)
|
||||
get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id)
|
||||
get_execution_kv_data = _(get_execution_kv_data)
|
||||
set_execution_kv_data = _(set_execution_kv_data)
|
||||
get_block_error_stats = _(get_block_error_stats)
|
||||
get_accuracy_trends_and_alerts = _(get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Graphs
|
||||
get_node = _(get_node)
|
||||
@@ -226,6 +236,10 @@ class DatabaseManagerClient(AppServiceClient):
|
||||
|
||||
# Block error monitoring
|
||||
get_block_error_stats = _(d.get_block_error_stats)
|
||||
# Execution accuracy monitoring
|
||||
get_accuracy_trends_and_alerts = _(d.get_accuracy_trends_and_alerts)
|
||||
get_frequently_executed_graphs = _(d.get_frequently_executed_graphs)
|
||||
get_marketplace_graphs_for_monitoring = _(d.get_marketplace_graphs_for_monitoring)
|
||||
|
||||
# Human In The Loop
|
||||
has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
|
||||
@@ -265,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_user_integrations = d.get_user_integrations
|
||||
upsert_execution_input = d.upsert_execution_input
|
||||
upsert_execution_output = d.upsert_execution_output
|
||||
get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id
|
||||
update_graph_execution_stats = d.update_graph_execution_stats
|
||||
update_node_execution_status = d.update_node_execution_status
|
||||
update_node_execution_status_batch = d.update_node_execution_status_batch
|
||||
|
||||
@@ -133,9 +133,8 @@ def execute_graph(
|
||||
cluster_lock: ClusterLock,
|
||||
):
|
||||
"""Execute graph using thread-local ExecutionProcessor instance"""
|
||||
return _tls.processor.on_graph_execution(
|
||||
graph_exec_entry, cancel_event, cluster_lock
|
||||
)
|
||||
processor: ExecutionProcessor = _tls.processor
|
||||
return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock)
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
@@ -143,8 +142,8 @@ T = TypeVar("T")
|
||||
|
||||
async def execute_node(
|
||||
node: Node,
|
||||
creds_manager: IntegrationCredentialsManager,
|
||||
data: NodeExecutionEntry,
|
||||
execution_processor: "ExecutionProcessor",
|
||||
execution_stats: NodeExecutionStats | None = None,
|
||||
nodes_input_masks: Optional[NodesInputMasks] = None,
|
||||
) -> BlockOutput:
|
||||
@@ -169,6 +168,7 @@ async def execute_node(
|
||||
node_id = data.node_id
|
||||
node_block = node.block
|
||||
execution_context = data.execution_context
|
||||
creds_manager = execution_processor.creds_manager
|
||||
|
||||
log_metadata = LogMetadata(
|
||||
logger=_logger,
|
||||
@@ -212,6 +212,7 @@ async def execute_node(
|
||||
"node_exec_id": node_exec_id,
|
||||
"user_id": user_id,
|
||||
"execution_context": execution_context,
|
||||
"execution_processor": execution_processor,
|
||||
}
|
||||
|
||||
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
|
||||
@@ -608,8 +609,8 @@ class ExecutionProcessor:
|
||||
|
||||
async for output_name, output_data in execute_node(
|
||||
node=node,
|
||||
creds_manager=self.creds_manager,
|
||||
data=node_exec,
|
||||
execution_processor=self,
|
||||
execution_stats=stats,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
):
|
||||
@@ -860,12 +861,17 @@ class ExecutionProcessor:
|
||||
execution_stats_lock = threading.Lock()
|
||||
|
||||
# State holders ----------------------------------------------------
|
||||
running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict(
|
||||
NodeExecutionProgress
|
||||
)
|
||||
running_node_evaluation: dict[str, Future] = {}
|
||||
self.running_node_evaluation: dict[str, Future] = {}
|
||||
self.execution_stats = execution_stats
|
||||
self.execution_stats_lock = execution_stats_lock
|
||||
execution_queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
|
||||
running_node_execution = self.running_node_execution
|
||||
running_node_evaluation = self.running_node_evaluation
|
||||
|
||||
try:
|
||||
if db_client.get_credits(graph_exec.user_id) <= 0:
|
||||
raise InsufficientBalanceError(
|
||||
|
||||
@@ -26,12 +26,14 @@ from sqlalchemy import MetaData, create_engine
|
||||
from backend.data.block import BlockInput
|
||||
from backend.data.execution import GraphExecutionWithNodes
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.onboarding import increment_runs
|
||||
from backend.executor import utils as execution_utils
|
||||
from backend.monitoring import (
|
||||
NotificationJobArgs,
|
||||
process_existing_batches,
|
||||
process_weekly_summary,
|
||||
report_block_error_rates,
|
||||
report_execution_accuracy_alerts,
|
||||
report_late_executions,
|
||||
)
|
||||
from backend.util.clients import get_scheduler_client
|
||||
@@ -153,6 +155,7 @@ async def _execute_graph(**kwargs):
|
||||
inputs=args.input_data,
|
||||
graph_credentials_inputs=args.input_credentials,
|
||||
)
|
||||
await increment_runs(args.user_id)
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
logger.info(
|
||||
f"Graph execution started with ID {graph_exec.id} for graph {args.graph_id} "
|
||||
@@ -239,6 +242,11 @@ def cleanup_expired_files():
|
||||
run_async(cleanup_expired_files_async())
|
||||
|
||||
|
||||
def execution_accuracy_alerts():
|
||||
"""Check execution accuracy and send alerts if drops are detected."""
|
||||
return report_execution_accuracy_alerts()
|
||||
|
||||
|
||||
# Monitoring functions are now imported from monitoring module
|
||||
|
||||
|
||||
@@ -438,6 +446,17 @@ class Scheduler(AppService):
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
# Execution Accuracy Monitoring - configurable interval
|
||||
self.scheduler.add_job(
|
||||
execution_accuracy_alerts,
|
||||
id="report_execution_accuracy_alerts",
|
||||
trigger="interval",
|
||||
replace_existing=True,
|
||||
seconds=config.execution_accuracy_check_interval_hours
|
||||
* 3600, # Convert hours to seconds
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
|
||||
self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
|
||||
self.scheduler.add_listener(job_missed_listener, EVENT_JOB_MISSED)
|
||||
self.scheduler.add_listener(job_max_instances_listener, EVENT_JOB_MAX_INSTANCES)
|
||||
@@ -585,6 +604,11 @@ class Scheduler(AppService):
|
||||
"""Manually trigger cleanup of expired cloud storage files."""
|
||||
return cleanup_expired_files()
|
||||
|
||||
@expose
|
||||
def execute_report_execution_accuracy_alerts(self):
|
||||
"""Manually trigger execution accuracy alert checking."""
|
||||
return execution_accuracy_alerts()
|
||||
|
||||
|
||||
class SchedulerClient(AppServiceClient):
|
||||
@classmethod
|
||||
|
||||
@@ -4,6 +4,7 @@ Embedding service for generating text embeddings using OpenAI.
|
||||
Used for vector-based semantic search in the store.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
@@ -27,7 +28,12 @@ MAX_BATCH_SIZE = 100 # maximum texts per batch request
|
||||
|
||||
|
||||
class EmbeddingService:
- """Service for generating text embeddings using OpenAI."""
+ """Service for generating text embeddings using OpenAI.
+
+ The service can be created without an API key - the key is validated
+ only when the client property is first accessed. This allows the service
+ to be instantiated at module load time without requiring configuration.
+ """

def __init__(self, api_key: Optional[str] = None):
settings = Settings()
@@ -36,12 +42,16 @@ class EmbeddingService:
or settings.secrets.openai_internal_api_key
or settings.secrets.openai_api_key
)

+ @functools.cached_property
+ def client(self) -> openai.AsyncOpenAI:
+ """Lazily create the OpenAI client, raising if no API key is configured."""
if not self.api_key:
raise ValueError(
"OpenAI API key not configured. "
"Set OPENAI_API_KEY or OPENAI_INTERNAL_API_KEY environment variable."
)
- self.client = openai.AsyncOpenAI(api_key=self.api_key)
+ return openai.AsyncOpenAI(api_key=self.api_key)

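A hedged sketch of the behaviour the docstring describes: construction never raises, and the key check only runs on first client access. It assumes no key is passed in and none is present in settings.

service = EmbeddingService(api_key=None)  # safe at import time in this sketch
try:
    _ = service.client  # first access triggers the API-key validation
except ValueError:
    ...  # expected when neither the argument nor settings provide a key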
async def generate_embedding(self, text: str) -> list[float]:
|
||||
"""
|
||||
@@ -65,16 +75,14 @@ class EmbeddingService:
|
||||
f"Text exceeds maximum length of {MAX_TEXT_LENGTH} characters"
|
||||
)
|
||||
|
||||
try:
|
||||
response = await self.client.embeddings.create(
|
||||
model=EMBEDDING_MODEL,
|
||||
input=text,
|
||||
dimensions=EMBEDDING_DIMENSIONS,
|
||||
)
|
||||
return response.data[0].embedding
|
||||
except openai.APIError as e:
|
||||
logger.error(f"OpenAI API error generating embedding: {e}")
|
||||
raise
|
||||
response = await self.client.embeddings.create(
|
||||
model=EMBEDDING_MODEL,
|
||||
input=text,
|
||||
dimensions=EMBEDDING_DIMENSIONS,
|
||||
)
|
||||
if not response.data:
|
||||
raise ValueError("OpenAI API returned empty embedding data")
|
||||
return response.data[0].embedding
|
||||
|
||||
async def generate_embeddings(self, texts: list[str]) -> list[list[float]]:
|
||||
"""
|
||||
@@ -103,18 +111,14 @@ class EmbeddingService:
|
||||
f"Text at index {i} exceeds maximum length of {MAX_TEXT_LENGTH} characters"
|
||||
)
|
||||
|
||||
try:
|
||||
response = await self.client.embeddings.create(
|
||||
model=EMBEDDING_MODEL,
|
||||
input=texts,
|
||||
dimensions=EMBEDDING_DIMENSIONS,
|
||||
)
|
||||
# Sort by index to ensure correct ordering
|
||||
sorted_data = sorted(response.data, key=lambda x: x.index)
|
||||
return [item.embedding for item in sorted_data]
|
||||
except openai.APIError as e:
|
||||
logger.error(f"OpenAI API error generating embeddings: {e}")
|
||||
raise
|
||||
response = await self.client.embeddings.create(
|
||||
model=EMBEDDING_MODEL,
|
||||
input=texts,
|
||||
dimensions=EMBEDDING_DIMENSIONS,
|
||||
)
|
||||
# Sort by index to ensure correct ordering
|
||||
sorted_data = sorted(response.data, key=lambda x: x.index)
|
||||
return [item.embedding for item in sorted_data]
|
||||
|
||||
|
||||
def create_search_text(name: str, sub_heading: str, description: str) -> str:
|
||||
@@ -133,24 +137,21 @@ def create_search_text(name: str, sub_heading: str, description: str) -> str:
|
||||
A single string combining all non-empty fields.
|
||||
"""
|
||||
parts = [name or "", sub_heading or "", description or ""]
|
||||
# filter(None, parts) removes empty strings since empty string is falsy
|
||||
return " ".join(filter(None, parts)).strip()
|
||||
|
||||
|
||||
# Singleton instance
- _embedding_service: Optional[EmbeddingService] = None


- async def get_embedding_service() -> EmbeddingService:
+ @functools.cache
+ def get_embedding_service() -> EmbeddingService:
"""
Get or create the embedding service singleton.

+ Uses functools.cache for thread-safe lazy initialization.

Returns:
The shared EmbeddingService instance.

Raises:
- ValueError: If OpenAI API key is not configured.
+ ValueError: If OpenAI API key is not configured (when generating embeddings).
"""
- global _embedding_service
- if _embedding_service is None:
- _embedding_service = EmbeddingService()
- return _embedding_service
+ return EmbeddingService()

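For clarity, a minimal check of the new memoized factory (illustrative only): functools.cache on a zero-argument function yields a process-wide shared instance.

a = get_embedding_service()
b = get_embedding_service()
assert a is b  # every caller shares the same EmbeddingService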
@@ -76,19 +76,25 @@ class TestEmbeddingServiceValidation:
|
||||
@pytest.fixture
|
||||
def service(self, mock_settings):
|
||||
"""Create an EmbeddingService instance with mocked settings."""
|
||||
with patch("backend.integrations.embeddings.openai.AsyncOpenAI"):
|
||||
return EmbeddingService()
|
||||
service = EmbeddingService()
|
||||
# Inject a mock client by setting the cached_property directly
|
||||
service.__dict__["client"] = MagicMock()
|
||||
return service
|
||||
|
||||
def test_init_requires_api_key(self):
|
||||
"""Test that initialization fails without an API key."""
|
||||
def test_client_access_requires_api_key(self):
|
||||
"""Test that accessing client fails without an API key."""
|
||||
with patch("backend.integrations.embeddings.Settings") as mock:
|
||||
mock_instance = MagicMock()
|
||||
mock_instance.secrets.openai_internal_api_key = ""
|
||||
mock_instance.secrets.openai_api_key = ""
|
||||
mock.return_value = mock_instance
|
||||
|
||||
# Service creation should succeed
|
||||
service = EmbeddingService()
|
||||
|
||||
# But accessing client should fail
|
||||
with pytest.raises(ValueError, match="OpenAI API key not configured"):
|
||||
EmbeddingService()
|
||||
_ = service.client
|
||||
|
||||
def test_init_accepts_explicit_api_key(self):
|
||||
"""Test that explicit API key overrides settings."""
|
||||
@@ -167,12 +173,10 @@ class TestEmbeddingServiceAPI:
|
||||
mock_instance.secrets.openai_api_key = ""
|
||||
mock_settings.return_value = mock_instance
|
||||
|
||||
with patch(
|
||||
"backend.integrations.embeddings.openai.AsyncOpenAI"
|
||||
) as mock_openai:
|
||||
mock_openai.return_value = mock_openai_client
|
||||
service = EmbeddingService()
|
||||
return service, mock_openai_client
|
||||
service = EmbeddingService()
|
||||
# Inject mock client by setting the cached_property directly
|
||||
service.__dict__["client"] = mock_openai_client
|
||||
return service, mock_openai_client
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_embedding_success(self, service_with_mock_client):
|
||||
|
||||
@@ -18,7 +18,9 @@ class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
- print(ingress_url)  # FIXME: pass URL to user in front end
+ # TODO: pass ingress_url to user in frontend
+ # See: https://github.com/Significant-Gravitas/AutoGPT/issues/8537
+ logger.debug(f"Manual webhook registered with ingress URL: {ingress_url}")

return "", {}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Monitoring module for platform health and alerting."""
|
||||
|
||||
from .accuracy_monitor import AccuracyMonitor, report_execution_accuracy_alerts
|
||||
from .block_error_monitor import BlockErrorMonitor, report_block_error_rates
|
||||
from .late_execution_monitor import (
|
||||
LateExecutionException,
|
||||
@@ -13,10 +14,12 @@ from .notification_monitor import (
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AccuracyMonitor",
|
||||
"BlockErrorMonitor",
|
||||
"LateExecutionMonitor",
|
||||
"LateExecutionException",
|
||||
"NotificationJobArgs",
|
||||
"report_execution_accuracy_alerts",
|
||||
"report_block_error_rates",
|
||||
"report_late_executions",
|
||||
"process_existing_batches",
|
||||
|
||||
autogpt_platform/backend/backend/monitoring/accuracy_monitor.py (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
"""Execution accuracy monitoring module."""
|
||||
|
||||
import logging
|
||||
|
||||
from backend.util.clients import (
|
||||
get_database_manager_client,
|
||||
get_notification_manager_client,
|
||||
)
|
||||
from backend.util.metrics import DiscordChannel, sentry_capture_error
|
||||
from backend.util.settings import Config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = Config()
|
||||
|
||||
|
||||
class AccuracyMonitor:
|
||||
"""Monitor execution accuracy trends and send alerts for drops."""
|
||||
|
||||
def __init__(self, drop_threshold: float = 10.0):
|
||||
self.config = config
|
||||
self.notification_client = get_notification_manager_client()
|
||||
self.database_client = get_database_manager_client()
|
||||
self.drop_threshold = drop_threshold
|
||||
|
||||
def check_execution_accuracy_alerts(self) -> str:
|
||||
"""Check marketplace agents for accuracy drops and send alerts."""
|
||||
try:
|
||||
logger.info("Checking execution accuracy for marketplace agents")
|
||||
|
||||
# Get marketplace graphs using database client
|
||||
graphs = self.database_client.get_marketplace_graphs_for_monitoring(
|
||||
days_back=30, min_executions=10
|
||||
)
|
||||
|
||||
alerts_found = 0
|
||||
|
||||
for graph_data in graphs:
|
||||
result = self.database_client.get_accuracy_trends_and_alerts(
|
||||
graph_id=graph_data.graph_id,
|
||||
user_id=graph_data.user_id,
|
||||
days_back=21, # 3 weeks
|
||||
drop_threshold=self.drop_threshold,
|
||||
)
|
||||
|
||||
if result.alert:
|
||||
alert = result.alert
|
||||
|
||||
# Get graph details for better alert info
|
||||
try:
|
||||
graph_info = self.database_client.get_graph_metadata(
|
||||
graph_id=alert.graph_id
|
||||
)
|
||||
graph_name = graph_info.name if graph_info else "Unknown Agent"
|
||||
except Exception:
|
||||
graph_name = "Unknown Agent"
|
||||
|
||||
# Create detailed alert message
|
||||
alert_msg = (
|
||||
f"🚨 **AGENT ACCURACY DROP DETECTED**\n\n"
|
||||
f"**Agent:** {graph_name}\n"
|
||||
f"**Graph ID:** `{alert.graph_id}`\n"
|
||||
f"**Accuracy Drop:** {alert.drop_percent:.1f}%\n"
|
||||
f"**Recent Performance:**\n"
|
||||
f" • 3-day average: {alert.three_day_avg:.1f}%\n"
|
||||
f" • 7-day average: {alert.seven_day_avg:.1f}%\n"
|
||||
)
|
||||
|
||||
if alert.user_id:
|
||||
alert_msg += f"**Owner:** {alert.user_id}\n"
|
||||
|
||||
# Send individual alert for each agent (not batched)
|
||||
self.notification_client.discord_system_alert(
|
||||
alert_msg, DiscordChannel.PRODUCT
|
||||
)
|
||||
alerts_found += 1
|
||||
logger.warning(
|
||||
f"Sent accuracy alert for agent: {graph_name} ({alert.graph_id})"
|
||||
)
|
||||
|
||||
if alerts_found > 0:
|
||||
return f"Alert sent for {alerts_found} agents with accuracy drops"
|
||||
|
||||
logger.info("No execution accuracy alerts detected")
|
||||
return "No accuracy alerts detected"
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(f"Error checking execution accuracy alerts: {e}")
|
||||
|
||||
error = Exception(f"Error checking execution accuracy alerts: {e}")
|
||||
msg = str(error)
|
||||
sentry_capture_error(error)
|
||||
self.notification_client.discord_system_alert(msg, DiscordChannel.PRODUCT)
|
||||
return msg
|
||||
|
||||
|
||||
def report_execution_accuracy_alerts(drop_threshold: float = 10.0) -> str:
|
||||
"""
|
||||
Check execution accuracy and send alerts if drops are detected.
|
||||
|
||||
Args:
|
||||
drop_threshold: Percentage drop threshold to trigger alerts (default 10.0%)
|
||||
|
||||
Returns:
|
||||
Status message indicating results of the check
|
||||
"""
|
||||
monitor = AccuracyMonitor(drop_threshold=drop_threshold)
|
||||
return monitor.check_execution_accuracy_alerts()
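A hedged sketch of how the scheduler entry point shown earlier in this diff can drive the monitor; the threshold override is illustrative.

status = report_execution_accuracy_alerts(drop_threshold=15.0)
logger.info(status)  # e.g. "No accuracy alerts detected"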
|
||||
@@ -33,7 +33,11 @@ from backend.data.model import (
|
||||
OAuth2Credentials,
|
||||
UserIntegrations,
|
||||
)
|
||||
from backend.data.onboarding import OnboardingStep, complete_onboarding_step
|
||||
from backend.data.onboarding import (
|
||||
OnboardingStep,
|
||||
complete_onboarding_step,
|
||||
increment_runs,
|
||||
)
|
||||
from backend.data.user import get_user_integrations
|
||||
from backend.executor.utils import add_graph_execution
|
||||
from backend.integrations.ayrshare import AyrshareClient, SocialPlatform
|
||||
@@ -377,6 +381,7 @@ async def webhook_ingress_generic(
|
||||
return
|
||||
|
||||
await complete_onboarding_step(user_id, OnboardingStep.TRIGGER_WEBHOOK)
|
||||
await increment_runs(user_id)
|
||||
|
||||
# Execute all triggers concurrently for better performance
|
||||
tasks = []
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import enum
|
||||
from typing import Any, Optional
|
||||
from typing import Any, Literal, Optional
|
||||
|
||||
import pydantic
|
||||
from prisma.enums import OnboardingStep
|
||||
|
||||
from backend.data.api_key import APIKeyInfo, APIKeyPermission
|
||||
from backend.data.graph import Graph
|
||||
@@ -35,8 +36,13 @@ class WSSubscribeGraphExecutionsRequest(pydantic.BaseModel):
|
||||
graph_id: str
|
||||
|
||||
|
||||
GraphCreationSource = Literal["builder", "upload"]
|
||||
GraphExecutionSource = Literal["builder", "library", "onboarding"]
|
||||
|
||||
|
||||
class CreateGraph(pydantic.BaseModel):
|
||||
graph: Graph
|
||||
source: GraphCreationSource | None = None
|
||||
|
||||
|
||||
class CreateAPIKeyRequest(pydantic.BaseModel):
|
||||
@@ -83,6 +89,8 @@ class NotificationPayload(pydantic.BaseModel):
|
||||
type: str
|
||||
event: str
|
||||
|
||||
model_config = pydantic.ConfigDict(extra="allow")
|
||||
|
||||
|
||||
class OnboardingNotificationPayload(NotificationPayload):
|
||||
step: str
|
||||
step: OnboardingStep | None
|
||||
|
||||
@@ -5,7 +5,7 @@ import time
|
||||
import uuid
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from typing import Annotated, Any, Sequence
|
||||
from typing import Annotated, Any, Sequence, get_args
|
||||
|
||||
import pydantic
|
||||
import stripe
|
||||
@@ -45,12 +45,17 @@ from backend.data.credit import (
|
||||
set_auto_top_up,
|
||||
)
|
||||
from backend.data.graph import GraphSettings
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.model import CredentialsMetaInput, UserOnboarding
|
||||
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
|
||||
from backend.data.onboarding import (
|
||||
FrontendOnboardingStep,
|
||||
OnboardingStep,
|
||||
UserOnboardingUpdate,
|
||||
complete_onboarding_step,
|
||||
complete_re_run_agent,
|
||||
get_recommended_agents,
|
||||
get_user_onboarding,
|
||||
increment_runs,
|
||||
onboarding_enabled,
|
||||
reset_user_onboarding,
|
||||
update_user_onboarding,
|
||||
@@ -78,6 +83,7 @@ from backend.server.model import (
|
||||
CreateAPIKeyRequest,
|
||||
CreateAPIKeyResponse,
|
||||
CreateGraph,
|
||||
GraphExecutionSource,
|
||||
RequestTopUp,
|
||||
SetGraphActiveVersion,
|
||||
TimezoneResponse,
|
||||
@@ -85,6 +91,7 @@ from backend.server.model import (
|
||||
UpdateTimezoneRequest,
|
||||
UploadFileResponse,
|
||||
)
|
||||
from backend.server.v2.store.model import StoreAgentDetails
|
||||
from backend.util.cache import cached
|
||||
from backend.util.clients import get_scheduler_client
|
||||
from backend.util.cloud_storage import get_cloud_storage_handler
|
||||
@@ -274,9 +281,10 @@ async def update_preferences(
|
||||
|
||||
@v1_router.get(
|
||||
"/onboarding",
|
||||
summary="Get onboarding status",
|
||||
summary="Onboarding state",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Security(requires_user)],
|
||||
response_model=UserOnboarding,
|
||||
)
|
||||
async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):
|
||||
return await get_user_onboarding(user_id)
|
||||
@@ -284,9 +292,10 @@ async def get_onboarding(user_id: Annotated[str, Security(get_user_id)]):
|
||||
|
||||
@v1_router.patch(
|
||||
"/onboarding",
|
||||
summary="Update onboarding progress",
|
||||
summary="Update onboarding state",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Security(requires_user)],
|
||||
response_model=UserOnboarding,
|
||||
)
|
||||
async def update_onboarding(
|
||||
user_id: Annotated[str, Security(get_user_id)], data: UserOnboardingUpdate
|
||||
@@ -294,25 +303,39 @@ async def update_onboarding(
|
||||
return await update_user_onboarding(user_id, data)
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
"/onboarding/step",
|
||||
summary="Complete onboarding step",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Security(requires_user)],
|
||||
)
|
||||
async def onboarding_complete_step(
|
||||
user_id: Annotated[str, Security(get_user_id)], step: FrontendOnboardingStep
|
||||
):
|
||||
if step not in get_args(FrontendOnboardingStep):
|
||||
raise HTTPException(status_code=400, detail="Invalid onboarding step")
|
||||
return await complete_onboarding_step(user_id, step)
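The guard above works because typing.get_args flattens the Literal alias into a runtime tuple of OnboardingStep members (get_args is already imported in this file per the hunk above); a minimal illustration:

assert OnboardingStep.WELCOME in get_args(FrontendOnboardingStep)
assert OnboardingStep.RE_RUN_AGENT not in get_args(FrontendOnboardingStep)  # backend-only step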
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
"/onboarding/agents",
|
||||
summary="Get recommended agents",
|
||||
summary="Recommended onboarding agents",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Security(requires_user)],
|
||||
)
|
||||
async def get_onboarding_agents(
|
||||
user_id: Annotated[str, Security(get_user_id)],
|
||||
):
|
||||
) -> list[StoreAgentDetails]:
|
||||
return await get_recommended_agents(user_id)
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
"/onboarding/enabled",
|
||||
summary="Check onboarding enabled",
|
||||
summary="Is onboarding enabled",
|
||||
tags=["onboarding", "public"],
|
||||
dependencies=[Security(requires_user)],
|
||||
)
|
||||
async def is_onboarding_enabled():
|
||||
async def is_onboarding_enabled() -> bool:
|
||||
return await onboarding_enabled()
|
||||
|
||||
|
||||
@@ -321,6 +344,7 @@ async def is_onboarding_enabled():
|
||||
summary="Reset onboarding progress",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Security(requires_user)],
|
||||
response_model=UserOnboarding,
|
||||
)
|
||||
async def reset_onboarding(user_id: Annotated[str, Security(get_user_id)]):
|
||||
return await reset_user_onboarding(user_id)
|
||||
@@ -809,7 +833,12 @@ async def create_new_graph(
|
||||
# as the graph already valid and no sub-graphs are returned back.
|
||||
await graph_db.create_graph(graph, user_id=user_id)
|
||||
await library_db.create_library_agent(graph, user_id=user_id)
|
||||
return await on_graph_activate(graph, user_id=user_id)
|
||||
activated_graph = await on_graph_activate(graph, user_id=user_id)
|
||||
|
||||
if create_graph.source == "builder":
|
||||
await complete_onboarding_step(user_id, OnboardingStep.BUILDER_SAVE_AGENT)
|
||||
|
||||
return activated_graph
|
||||
|
||||
|
||||
@v1_router.delete(
|
||||
@@ -967,6 +996,7 @@ async def execute_graph(
|
||||
credentials_inputs: Annotated[
|
||||
dict[str, CredentialsMetaInput], Body(..., embed=True, default_factory=dict)
|
||||
],
|
||||
source: Annotated[GraphExecutionSource | None, Body(embed=True)] = None,
|
||||
graph_version: Optional[int] = None,
|
||||
preset_id: Optional[str] = None,
|
||||
) -> execution_db.GraphExecutionMeta:
|
||||
@@ -990,6 +1020,14 @@ async def execute_graph(
|
||||
# Record successful graph execution
|
||||
record_graph_execution(graph_id=graph_id, status="success", user_id=user_id)
|
||||
record_graph_operation(operation="execute", status="success")
|
||||
await increment_runs(user_id)
|
||||
await complete_re_run_agent(user_id, graph_id)
|
||||
if source == "library":
|
||||
await complete_onboarding_step(
|
||||
user_id, OnboardingStep.MARKETPLACE_RUN_AGENT
|
||||
)
|
||||
elif source == "builder":
|
||||
await complete_onboarding_step(user_id, OnboardingStep.BUILDER_RUN_AGENT)
|
||||
return result
|
||||
except GraphValidationError as e:
|
||||
# Record failed graph execution
|
||||
@@ -1103,6 +1141,15 @@ async def list_graph_executions(
|
||||
filtered_executions = await hide_activity_summaries_if_disabled(
|
||||
paginated_result.executions, user_id
|
||||
)
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
if (
|
||||
onboarding.onboardingAgentExecutionId
|
||||
and onboarding.onboardingAgentExecutionId
|
||||
in [exec.id for exec in filtered_executions]
|
||||
and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
|
||||
):
|
||||
await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)
|
||||
|
||||
return execution_db.GraphExecutionsPaginated(
|
||||
executions=filtered_executions, pagination=paginated_result.pagination
|
||||
)
|
||||
@@ -1140,6 +1187,12 @@ async def get_graph_execution(
|
||||
|
||||
# Apply feature flags to filter out disabled features
|
||||
result = await hide_activity_summary_if_disabled(result, user_id)
|
||||
onboarding = await get_user_onboarding(user_id)
|
||||
if (
|
||||
onboarding.onboardingAgentExecutionId == graph_exec_id
|
||||
and OnboardingStep.GET_RESULTS not in onboarding.completedSteps
|
||||
):
|
||||
await complete_onboarding_step(user_id, OnboardingStep.GET_RESULTS)
|
||||
|
||||
return result
|
||||
|
||||
@@ -1316,6 +1369,8 @@ async def create_graph_execution_schedule(
|
||||
result.next_run_time, user_timezone
|
||||
)
|
||||
|
||||
await complete_onboarding_step(user_id, OnboardingStep.SCHEDULE_AGENT)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
|
||||
@@ -8,6 +8,10 @@ from fastapi import APIRouter, HTTPException, Security
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.analytics import (
|
||||
AccuracyTrendsResponse,
|
||||
get_accuracy_trends_and_alerts,
|
||||
)
|
||||
from backend.data.execution import (
|
||||
ExecutionStatus,
|
||||
GraphExecutionMeta,
|
||||
@@ -83,6 +87,18 @@ class ExecutionAnalyticsConfig(BaseModel):
|
||||
recommended_model: str
|
||||
|
||||
|
||||
class AccuracyTrendsRequest(BaseModel):
|
||||
graph_id: str = Field(..., description="Graph ID to analyze", min_length=1)
|
||||
user_id: Optional[str] = Field(None, description="Optional user ID filter")
|
||||
days_back: int = Field(30, description="Number of days to look back", ge=7, le=90)
|
||||
drop_threshold: float = Field(
|
||||
10.0, description="Alert threshold percentage", ge=1.0, le=50.0
|
||||
)
|
||||
include_historical: bool = Field(
|
||||
False, description="Include historical data for charts"
|
||||
)
|
||||
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/admin",
|
||||
tags=["admin", "execution_analytics"],
|
||||
@@ -419,3 +435,40 @@ async def _process_batch(
|
||||
return await asyncio.gather(
|
||||
*[process_single_execution(execution) for execution in executions]
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/execution_accuracy_trends",
|
||||
response_model=AccuracyTrendsResponse,
|
||||
summary="Get Execution Accuracy Trends and Alerts",
|
||||
)
|
||||
async def get_execution_accuracy_trends(
|
||||
graph_id: str,
|
||||
user_id: Optional[str] = None,
|
||||
days_back: int = 30,
|
||||
drop_threshold: float = 10.0,
|
||||
include_historical: bool = False,
|
||||
admin_user_id: str = Security(get_user_id),
|
||||
) -> AccuracyTrendsResponse:
|
||||
"""
|
||||
Get execution accuracy trends with moving averages and alert detection.
|
||||
Simple single-query approach.
|
||||
"""
|
||||
logger.info(
|
||||
f"Admin user {admin_user_id} requesting accuracy trends for graph {graph_id}"
|
||||
)
|
||||
|
||||
try:
|
||||
result = await get_accuracy_trends_and_alerts(
|
||||
graph_id=graph_id,
|
||||
days_back=days_back,
|
||||
user_id=user_id,
|
||||
drop_threshold=drop_threshold,
|
||||
include_historical=include_historical,
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(f"Error getting accuracy trends for graph {graph_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from difflib import SequenceMatcher
|
||||
from typing import Sequence
|
||||
|
||||
import prisma
|
||||
|
||||
import backend.data.block
|
||||
import backend.server.v2.library.db as library_db
|
||||
import backend.server.v2.library.model as library_model
|
||||
import backend.server.v2.store.db as store_db
|
||||
import backend.server.v2.store.model as store_model
|
||||
from backend.blocks import load_all_blocks
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
|
||||
@@ -14,17 +21,36 @@ from backend.server.v2.builder.model import (
|
||||
BlockResponse,
|
||||
BlockType,
|
||||
CountResponse,
|
||||
FilterType,
|
||||
Provider,
|
||||
ProviderResponse,
|
||||
SearchBlocksResponse,
|
||||
SearchEntry,
|
||||
)
|
||||
from backend.util.cache import cached
|
||||
from backend.util.models import Pagination
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]
|
||||
_static_counts_cache: dict | None = None
|
||||
_suggested_blocks: list[BlockInfo] | None = None
|
||||
|
||||
MAX_LIBRARY_AGENT_RESULTS = 100
|
||||
MAX_MARKETPLACE_AGENT_RESULTS = 100
|
||||
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
|
||||
|
||||
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
|
||||
|
||||
|
||||
@dataclass
|
||||
class _ScoredItem:
|
||||
item: SearchResultItem
|
||||
filter_type: FilterType
|
||||
score: float
|
||||
sort_key: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class _SearchCacheEntry:
|
||||
items: list[SearchResultItem]
|
||||
total_items: dict[FilterType, int]
|
||||
|
||||
|
||||
def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]:
|
||||
@@ -130,71 +156,244 @@ def get_block_by_id(block_id: str) -> BlockInfo | None:
|
||||
return None
|
||||
|
||||
|
||||
def search_blocks(
|
||||
include_blocks: bool = True,
|
||||
include_integrations: bool = True,
|
||||
query: str = "",
|
||||
page: int = 1,
|
||||
page_size: int = 50,
|
||||
) -> SearchBlocksResponse:
|
||||
async def update_search(user_id: str, search: SearchEntry) -> str:
|
||||
"""
|
||||
Get blocks based on the filter and query.
|
||||
`providers` only applies for `integrations` filter.
|
||||
Upsert a search request for the user and return the search ID.
|
||||
"""
|
||||
blocks: list[AnyBlockSchema] = []
|
||||
query = query.lower()
|
||||
if search.search_id:
|
||||
# Update existing search
|
||||
await prisma.models.BuilderSearchHistory.prisma().update(
|
||||
where={
|
||||
"id": search.search_id,
|
||||
},
|
||||
data={
|
||||
"searchQuery": search.search_query or "",
|
||||
"filter": search.filter or [], # type: ignore
|
||||
"byCreator": search.by_creator or [],
|
||||
},
|
||||
)
|
||||
return search.search_id
|
||||
else:
|
||||
# Create new search
|
||||
new_search = await prisma.models.BuilderSearchHistory.prisma().create(
|
||||
data={
|
||||
"userId": user_id,
|
||||
"searchQuery": search.search_query or "",
|
||||
"filter": search.filter or [], # type: ignore
|
||||
"byCreator": search.by_creator or [],
|
||||
}
|
||||
)
|
||||
return new_search.id
|
||||
|
||||
total = 0
|
||||
skip = (page - 1) * page_size
|
||||
take = page_size
|
||||
|
||||
async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]:
|
||||
"""
|
||||
Get the user's most recent search requests.
|
||||
"""
|
||||
searches = await prisma.models.BuilderSearchHistory.prisma().find_many(
|
||||
where={
|
||||
"userId": user_id,
|
||||
},
|
||||
order={
|
||||
"updatedAt": "desc",
|
||||
},
|
||||
take=limit,
|
||||
)
|
||||
return [
|
||||
SearchEntry(
|
||||
search_query=s.searchQuery,
|
||||
filter=s.filter, # type: ignore
|
||||
by_creator=s.byCreator,
|
||||
search_id=s.id,
|
||||
)
|
||||
for s in searches
|
||||
]
|
||||
|
||||
|
||||
async def get_sorted_search_results(
|
||||
*,
|
||||
user_id: str,
|
||||
search_query: str | None,
|
||||
filters: Sequence[FilterType],
|
||||
by_creator: Sequence[str] | None = None,
|
||||
) -> _SearchCacheEntry:
|
||||
normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or [])))
|
||||
normalized_creators: tuple[str, ...] = tuple(sorted(set(by_creator or [])))
|
||||
return await _build_cached_search_results(
|
||||
user_id=user_id,
|
||||
search_query=search_query or "",
|
||||
filters=normalized_filters,
|
||||
by_creator=normalized_creators,
|
||||
)
|
||||
|
||||
|
||||
@cached(ttl_seconds=300, shared_cache=True)
|
||||
async def _build_cached_search_results(
|
||||
user_id: str,
|
||||
search_query: str,
|
||||
filters: tuple[FilterType, ...],
|
||||
by_creator: tuple[str, ...],
|
||||
) -> _SearchCacheEntry:
|
||||
normalized_query = (search_query or "").strip().lower()
|
||||
|
||||
include_blocks = "blocks" in filters
|
||||
include_integrations = "integrations" in filters
|
||||
include_library_agents = "my_agents" in filters
|
||||
include_marketplace_agents = "marketplace_agents" in filters
|
||||
|
||||
scored_items: list[_ScoredItem] = []
|
||||
total_items: dict[FilterType, int] = {
|
||||
"blocks": 0,
|
||||
"integrations": 0,
|
||||
"marketplace_agents": 0,
|
||||
"my_agents": 0,
|
||||
}
|
||||
|
||||
block_results, block_total, integration_total = _collect_block_results(
|
||||
normalized_query=normalized_query,
|
||||
include_blocks=include_blocks,
|
||||
include_integrations=include_integrations,
|
||||
)
|
||||
scored_items.extend(block_results)
|
||||
total_items["blocks"] = block_total
|
||||
total_items["integrations"] = integration_total
|
||||
|
||||
if include_library_agents:
|
||||
library_response = await library_db.list_library_agents(
|
||||
user_id=user_id,
|
||||
search_term=search_query or None,
|
||||
page=1,
|
||||
page_size=MAX_LIBRARY_AGENT_RESULTS,
|
||||
)
|
||||
total_items["my_agents"] = library_response.pagination.total_items
|
||||
scored_items.extend(
|
||||
_build_library_items(
|
||||
agents=library_response.agents,
|
||||
normalized_query=normalized_query,
|
||||
)
|
||||
)
|
||||
|
||||
if include_marketplace_agents:
|
||||
marketplace_response = await store_db.get_store_agents(
|
||||
creators=list(by_creator) or None,
|
||||
search_query=search_query or None,
|
||||
page=1,
|
||||
page_size=MAX_MARKETPLACE_AGENT_RESULTS,
|
||||
)
|
||||
total_items["marketplace_agents"] = marketplace_response.pagination.total_items
|
||||
scored_items.extend(
|
||||
_build_marketplace_items(
|
||||
agents=marketplace_response.agents,
|
||||
normalized_query=normalized_query,
|
||||
)
|
||||
)
|
||||
|
||||
sorted_items = sorted(
|
||||
scored_items,
|
||||
key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type),
|
||||
)
|
||||
|
||||
return _SearchCacheEntry(
|
||||
items=[entry.item for entry in sorted_items],
|
||||
total_items=total_items,
|
||||
)
|
||||
|
||||
|
||||
def _collect_block_results(
|
||||
*,
|
||||
normalized_query: str,
|
||||
include_blocks: bool,
|
||||
include_integrations: bool,
|
||||
) -> tuple[list[_ScoredItem], int, int]:
|
||||
results: list[_ScoredItem] = []
|
||||
block_count = 0
|
||||
integration_count = 0
|
||||
|
||||
if not include_blocks and not include_integrations:
|
||||
return results, block_count, integration_count
|
||||
|
||||
for block_type in load_all_blocks().values():
|
||||
block: AnyBlockSchema = block_type()
|
||||
# Skip disabled blocks
|
||||
if block.disabled:
|
||||
continue
|
||||
# Skip blocks that don't match the query
|
||||
if (
|
||||
query not in block.name.lower()
|
||||
and query not in block.description.lower()
|
||||
and not _matches_llm_model(block.input_schema, query)
|
||||
):
|
||||
continue
|
||||
keep = False
|
||||
|
||||
block_info = block.get_info()
|
||||
credentials = list(block.input_schema.get_credentials_fields().values())
|
||||
if include_integrations and len(credentials) > 0:
|
||||
keep = True
|
||||
is_integration = len(credentials) > 0
|
||||
|
||||
if is_integration and not include_integrations:
|
||||
continue
|
||||
if not is_integration and not include_blocks:
|
||||
continue
|
||||
|
||||
score = _score_block(block, block_info, normalized_query)
|
||||
if not _should_include_item(score, normalized_query):
|
||||
continue
|
||||
|
||||
filter_type: FilterType = "integrations" if is_integration else "blocks"
|
||||
if is_integration:
|
||||
integration_count += 1
|
||||
if include_blocks and len(credentials) == 0:
|
||||
keep = True
|
||||
else:
|
||||
block_count += 1
|
||||
|
||||
if not keep:
|
||||
results.append(
|
||||
_ScoredItem(
|
||||
item=block_info,
|
||||
filter_type=filter_type,
|
||||
score=score,
|
||||
sort_key=_get_item_name(block_info),
|
||||
)
|
||||
)
|
||||
|
||||
return results, block_count, integration_count
|
||||
|
||||
|
||||
def _build_library_items(
|
||||
*,
|
||||
agents: list[library_model.LibraryAgent],
|
||||
normalized_query: str,
|
||||
) -> list[_ScoredItem]:
|
||||
results: list[_ScoredItem] = []
|
||||
|
||||
for agent in agents:
|
||||
score = _score_library_agent(agent, normalized_query)
|
||||
if not _should_include_item(score, normalized_query):
|
||||
continue
|
||||
|
||||
total += 1
|
||||
if skip > 0:
|
||||
skip -= 1
|
||||
continue
|
||||
if take > 0:
|
||||
take -= 1
|
||||
blocks.append(block)
|
||||
results.append(
|
||||
_ScoredItem(
|
||||
item=agent,
|
||||
filter_type="my_agents",
|
||||
score=score,
|
||||
sort_key=_get_item_name(agent),
|
||||
)
|
||||
)
|
||||
|
||||
return SearchBlocksResponse(
|
||||
blocks=BlockResponse(
|
||||
blocks=[b.get_info() for b in blocks],
|
||||
pagination=Pagination(
|
||||
total_items=total,
|
||||
total_pages=(total + page_size - 1) // page_size,
|
||||
current_page=page,
|
||||
page_size=page_size,
|
||||
),
|
||||
),
|
||||
total_block_count=block_count,
|
||||
total_integration_count=integration_count,
|
||||
)
|
||||
return results
|
||||
|
||||
|
||||
def _build_marketplace_items(
    *,
    agents: list[store_model.StoreAgent],
    normalized_query: str,
) -> list[_ScoredItem]:
    results: list[_ScoredItem] = []

    for agent in agents:
        score = _score_store_agent(agent, normalized_query)
        if not _should_include_item(score, normalized_query):
            continue

        results.append(
            _ScoredItem(
                item=agent,
                filter_type="marketplace_agents",
                score=score,
                sort_key=_get_item_name(agent),
            )
        )

    return results


def get_providers(
@@ -251,16 +450,12 @@ async def get_counts(user_id: str) -> CountResponse:
    )


@cached(ttl_seconds=3600)
async def _get_static_counts():
    """
    Get counts of blocks, integrations, and marketplace agents.
    This is cached to avoid unnecessary database queries and calculations.
    Can't use functools.cache here because the function is async.
    """
    global _static_counts_cache
    if _static_counts_cache is not None:
        return _static_counts_cache

    all_blocks = 0
    input_blocks = 0
    action_blocks = 0
@@ -287,7 +482,7 @@ async def _get_static_counts():

    marketplace_agents = await prisma.models.StoreAgent.prisma().count()

    _static_counts_cache = {
    return {
        "all_blocks": all_blocks,
        "input_blocks": input_blocks,
        "action_blocks": action_blocks,
@@ -296,8 +491,6 @@ async def _get_static_counts():
        "marketplace_agents": marketplace_agents,
    }

    return _static_counts_cache

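The retained docstring notes that functools.cache cannot be used because the function is async. A minimal standalone sketch of why (not part of the diff): functools.cache memoizes the coroutine object returned by the first call, and a coroutine can only be awaited once, so the second call fails instead of returning the cached counts.

import asyncio
import functools

@functools.cache
async def get_counts_cached():
    # Imagine the expensive block/agent count aggregation here.
    return {"all_blocks": 42}

async def main():
    await get_counts_cached()          # first await works
    try:
        await get_counts_cached()      # same cached coroutine, already awaited
    except RuntimeError as err:
        print(f"functools.cache breaks on async functions: {err}")

asyncio.run(main())

This is why the new code leans on the (presumably async-aware) @cached(ttl_seconds=3600) decorator and drops the hand-rolled _static_counts_cache global seen in the removed lines.
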
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    for field in schema_cls.model_fields.values():
@@ -308,6 +501,123 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
    return False


def _score_block(
    block: AnyBlockSchema,
    block_info: BlockInfo,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = block_info.name.lower()
    description = block_info.description.lower()
    score = _score_primary_fields(name, description, normalized_query)

    category_text = " ".join(
        category.get("category", "").lower() for category in block_info.categories
    )
    score += _score_additional_field(category_text, normalized_query, 12, 6)

    credentials_info = block.input_schema.get_credentials_fields_info().values()
    provider_names = [
        provider.value.lower()
        for info in credentials_info
        for provider in info.provider
    ]
    provider_text = " ".join(provider_names)
    score += _score_additional_field(provider_text, normalized_query, 15, 6)

    if _matches_llm_model(block.input_schema, normalized_query):
        score += 20

    return score


def _score_library_agent(
    agent: library_model.LibraryAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.name.lower()
    description = (agent.description or "").lower()
    instructions = (agent.instructions or "").lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(instructions, normalized_query, 15, 6)
    score += _score_additional_field(
        agent.creator_name.lower(), normalized_query, 10, 5
    )

    return score


def _score_store_agent(
    agent: store_model.StoreAgent,
    normalized_query: str,
) -> float:
    if not normalized_query:
        return 0.0

    name = agent.agent_name.lower()
    description = agent.description.lower()
    sub_heading = agent.sub_heading.lower()

    score = _score_primary_fields(name, description, normalized_query)
    score += _score_additional_field(sub_heading, normalized_query, 12, 6)
    score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5)

    return score


def _score_primary_fields(name: str, description: str, query: str) -> float:
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60

    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score


def _score_additional_field(
    value: str,
    query: str,
    contains_weight: float,
    similarity_weight: float,
) -> float:
    if not value or not query:
        return 0.0

    score = 0.0
    if query in value:
        score += contains_weight
    score += SequenceMatcher(None, value, query).ratio() * similarity_weight
    return score


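A small worked example of the scoring above (illustrative only; it reuses the same formula inline rather than importing the private helpers):

from difflib import SequenceMatcher

def primary_score(name: str, description: str, query: str) -> float:
    # Mirrors _score_primary_fields: exact match 120, prefix 90, substring 60,
    # plus fuzzy-similarity bonuses of up to 50 (name) and 30 + 25 (description).
    score = 0.0
    if name == query:
        score += 120
    elif name.startswith(query):
        score += 90
    elif query in name:
        score += 60
    score += SequenceMatcher(None, name, query).ratio() * 50
    if description:
        if query in description:
            score += 30
        score += SequenceMatcher(None, description, query).ratio() * 25
    return score

print(round(primary_score("github", "", "github"), 1))               # 170.0 (120 exact + 50 fuzzy)
print(round(primary_score("github issue reader", "", "github"), 1))  # ~114.0 (90 prefix + 0.48 * 50)
print(round(primary_score("send email", "", "github"), 1))           # ~6.2, well below the cutoff

Low scores like the last one are what the MIN_SCORE_FOR_FILTERED_RESULTS cutoff applied just below is meant to discard when a query is present.
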
def _should_include_item(score: float, normalized_query: str) -> bool:
    if not normalized_query:
        return True
    return score >= MIN_SCORE_FOR_FILTERED_RESULTS


def _get_item_name(item: SearchResultItem) -> str:
    if isinstance(item, BlockInfo):
        return item.name.lower()
    if isinstance(item, library_model.LibraryAgent):
        return item.name.lower()
    return item.agent_name.lower()


@cached(ttl_seconds=3600)
def _get_all_providers() -> dict[ProviderName, Provider]:
    providers: dict[ProviderName, Provider] = {}
@@ -329,13 +639,9 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
    return providers


@cached(ttl_seconds=3600)
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    global _suggested_blocks

    if _suggested_blocks is not None and len(_suggested_blocks) >= count:
        return _suggested_blocks[:count]

    _suggested_blocks = []
    suggested_blocks = []
    # Sum the number of executions for each block type
    # Prisma cannot group by nested relations, so we do a raw query
    # Calculate the cutoff timestamp
@@ -376,7 +682,7 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
    # Sort blocks by execution count
    blocks.sort(key=lambda x: x[1], reverse=True)

    _suggested_blocks = [block[0] for block in blocks]
    suggested_blocks = [block[0] for block in blocks]

    # Return the top blocks
    return _suggested_blocks[:count]
    return suggested_blocks[:count]

@@ -18,10 +18,17 @@ FilterType = Literal[
BlockType = Literal["all", "input", "action", "output"]


class SearchEntry(BaseModel):
    search_query: str | None = None
    filter: list[FilterType] | None = None
    by_creator: list[str] | None = None
    search_id: str | None = None


# Suggestions
class SuggestionsResponse(BaseModel):
    otto_suggestions: list[str]
    recent_searches: list[str]
    recent_searches: list[SearchEntry]
    providers: list[ProviderName]
    top_blocks: list[BlockInfo]

@@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel):
    total_blocks: int
    blocks: list[BlockInfo]

    model_config = {"use_enum_values": False}  # <== use enum names like "AI"
    model_config = {"use_enum_values": False}  # Use enum names like "AI"


# Input/Action/Output and see all for block categories
@@ -53,17 +60,11 @@ class ProviderResponse(BaseModel):
    pagination: Pagination


class SearchBlocksResponse(BaseModel):
    blocks: BlockResponse
    total_block_count: int
    total_integration_count: int


class SearchResponse(BaseModel):
    items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent]
    search_id: str
    total_items: dict[FilterType, int]
    page: int
    more_pages: bool
    pagination: Pagination


class CountResponse(BaseModel):

@@ -6,10 +6,6 @@ from autogpt_libs.auth.dependencies import get_user_id, requires_user

import backend.server.v2.builder.db as builder_db
import backend.server.v2.builder.model as builder_model
import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.db as store_db
import backend.server.v2.store.model as store_model
from backend.integrations.providers import ProviderName
from backend.util.models import Pagination

@@ -45,7 +41,9 @@ def sanitize_query(query: str | None) -> str | None:
    summary="Get Builder suggestions",
    response_model=builder_model.SuggestionsResponse,
)
async def get_suggestions() -> builder_model.SuggestionsResponse:
async def get_suggestions(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
) -> builder_model.SuggestionsResponse:
    """
    Get all suggestions for the Blocks Menu.
    """
@@ -55,11 +53,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse:
            "Help me create a list",
            "Help me feed my data to Google Maps",
        ],
        recent_searches=[
            "image generation",
            "deepfake",
            "competitor analysis",
        ],
        recent_searches=await builder_db.get_recent_searches(user_id),
        providers=[
            ProviderName.TWITTER,
            ProviderName.GITHUB,
@@ -147,7 +141,6 @@ async def get_providers(
    )


# Not using post method because on frontend, orval doesn't support Infinite Query with POST method.
@router.get(
    "/search",
    summary="Builder search",
@@ -157,7 +150,7 @@ async def get_providers(
async def search(
    user_id: Annotated[str, fastapi.Security(get_user_id)],
    search_query: Annotated[str | None, fastapi.Query()] = None,
    filter: Annotated[list[str] | None, fastapi.Query()] = None,
    filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
    search_id: Annotated[str | None, fastapi.Query()] = None,
    by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
    page: Annotated[int, fastapi.Query()] = 1,
@@ -176,69 +169,43 @@ async def search(
    ]
    search_query = sanitize_query(search_query)

    # Blocks&Integrations
    blocks = builder_model.SearchBlocksResponse(
        blocks=builder_model.BlockResponse(
            blocks=[],
            pagination=Pagination.empty(),
        ),
        total_block_count=0,
        total_integration_count=0,
    # Get all possible results
    cached_results = await builder_db.get_sorted_search_results(
        user_id=user_id,
        search_query=search_query,
        filters=filter,
        by_creator=by_creator,
    )
    if "blocks" in filter or "integrations" in filter:
        blocks = builder_db.search_blocks(
            include_blocks="blocks" in filter,
            include_integrations="integrations" in filter,
            query=search_query or "",
            page=page,
            page_size=page_size,
        )

    # Library Agents
    my_agents = library_model.LibraryAgentResponse(
        agents=[],
        pagination=Pagination.empty(),
    # Paginate results
    total_combined_items = len(cached_results.items)
    pagination = Pagination(
        total_items=total_combined_items,
        total_pages=(total_combined_items + page_size - 1) // page_size,
        current_page=page,
        page_size=page_size,
    )
    if "my_agents" in filter:
        my_agents = await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_query,
            page=page,
            page_size=page_size,
        )

    # Marketplace Agents
    marketplace_agents = store_model.StoreAgentsResponse(
        agents=[],
        pagination=Pagination.empty(),
    )
    if "marketplace_agents" in filter:
        marketplace_agents = await store_db.get_store_agents(
            creators=by_creator,
    start_idx = (page - 1) * page_size
    end_idx = start_idx + page_size
    paginated_items = cached_results.items[start_idx:end_idx]

    # Update the search entry by id
    search_id = await builder_db.update_search(
        user_id,
        builder_model.SearchEntry(
            search_query=search_query,
            page=page,
            page_size=page_size,
        )

    more_pages = False
    if (
        blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages
        or my_agents.pagination.current_page < my_agents.pagination.total_pages
        or marketplace_agents.pagination.current_page
        < marketplace_agents.pagination.total_pages
    ):
        more_pages = True
            filter=filter,
            by_creator=by_creator,
            search_id=search_id,
        ),
    )

    return builder_model.SearchResponse(
        items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents,
        total_items={
            "blocks": blocks.total_block_count,
            "integrations": blocks.total_integration_count,
            "marketplace_agents": marketplace_agents.pagination.total_items,
            "my_agents": my_agents.pagination.total_items,
        },
        page=page,
        more_pages=more_pages,
        items=paginated_items,
        search_id=search_id,
        total_items=cached_results.total_items,
        pagination=pagination,
    )


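The pagination math in the rewritten handler is plain ceiling division plus list slicing; a quick standalone check (not project code):

items = list(range(47))                                   # pretend: 47 cached search results
page, page_size = 3, 20
total_pages = (len(items) + page_size - 1) // page_size   # ceil(47 / 20) == 3
start_idx = (page - 1) * page_size                        # 40
end_idx = start_idx + page_size                           # 60 (slicing clamps past the end)
print(total_pages, items[start_idx:end_idx])              # 3 [40, 41, 42, 43, 44, 45, 46]
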
@@ -1,3 +1,4 @@
import functools
import logging
from collections.abc import AsyncGenerator
from datetime import UTC, datetime
@@ -32,7 +33,12 @@ from backend.util.exceptions import NotFoundError
logger = logging.getLogger(__name__)

config = backend.server.v2.chat.config.ChatConfig()
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


@functools.cache
def get_openai_client() -> AsyncOpenAI:
    """Lazily create the OpenAI client singleton."""
    return AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


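A tiny sketch of what the functools.cache wrapper buys here (illustrative, with a stand-in class instead of AsyncOpenAI): the client is no longer constructed at import time, and every later call gets the same instance.

import functools

class FakeClient:  # stand-in for AsyncOpenAI in this sketch
    def __init__(self) -> None:
        print("client constructed")

@functools.cache
def get_client() -> FakeClient:
    return FakeClient()

a = get_client()   # prints "client constructed" on first use only
b = get_client()   # cached: nothing printed
assert a is b
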
async def create_chat_session(
@@ -355,7 +361,7 @@ async def _stream_chat_chunks(
    logger.info("Creating OpenAI chat completion stream...")

    # Create the stream with proper types
    stream = await client.chat.completions.create(
    stream = await get_openai_client().chat.completions.create(
        model=model,
        messages=session.to_openai_messages(),
        tools=tools,

@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)


router = APIRouter(
    tags=["executions", "review", "private"],
    tags=["v2", "executions", "review"],
    dependencies=[Security(autogpt_auth_lib.requires_user)],
)

@@ -134,18 +134,14 @@ async def process_review_action(
    # Build review decisions map
    review_decisions = {}
    for review in request.reviews:
        if review.approved:
            review_decisions[review.node_exec_id] = (
                ReviewStatus.APPROVED,
                review.reviewed_data,
                review.message,
            )
        else:
            review_decisions[review.node_exec_id] = (
                ReviewStatus.REJECTED,
                None,
                review.message,
            )
        review_status = (
            ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED
        )
        review_decisions[review.node_exec_id] = (
            review_status,
            review.reviewed_data,
            review.message,
        )

    # Process all reviews
    updated_reviews = await process_all_reviews_for_execution(

@@ -1,13 +1,15 @@
import logging
from typing import Optional
from typing import Literal, Optional

import autogpt_libs.auth as autogpt_auth_lib
from fastapi import APIRouter, Body, HTTPException, Query, Security, status
from fastapi.responses import Response
from prisma.enums import OnboardingStep

import backend.server.v2.library.db as library_db
import backend.server.v2.library.model as library_model
import backend.server.v2.store.exceptions as store_exceptions
from backend.data.onboarding import complete_onboarding_step
from backend.util.exceptions import DatabaseError, NotFoundError

logger = logging.getLogger(__name__)
@@ -200,6 +202,9 @@ async def get_library_agent_by_store_listing_version_id(
)
async def add_marketplace_agent_to_library(
    store_listing_version_id: str = Body(embed=True),
    source: Literal["onboarding", "marketplace"] = Body(
        default="marketplace", embed=True
    ),
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> library_model.LibraryAgent:
    """
@@ -217,10 +222,15 @@ async def add_marketplace_agent_to_library(
        HTTPException(500): If a server/database error occurs.
    """
    try:
        return await library_db.add_store_agent_to_library(
        agent = await library_db.add_store_agent_to_library(
            store_listing_version_id=store_listing_version_id,
            user_id=user_id,
        )
        if source != "onboarding":
            await complete_onboarding_step(
                user_id, OnboardingStep.MARKETPLACE_ADD_AGENT
            )
        return agent

    except store_exceptions.AgentNotFoundError as e:
        logger.warning(

@@ -10,6 +10,7 @@ from backend.data.execution import GraphExecutionMeta
from backend.data.graph import get_graph
from backend.data.integrations import get_webhook
from backend.data.model import CredentialsMetaInput
from backend.data.onboarding import increment_runs
from backend.executor.utils import add_graph_execution, make_node_credentials_input_map
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.webhooks import get_webhook_manager
@@ -401,6 +402,8 @@ async def execute_preset(
    merged_node_input = preset.inputs | inputs
    merged_credential_inputs = preset.credentials | credential_inputs

    await increment_runs(user_id)

    return await add_graph_execution(
        user_id=user_id,
        graph_id=preset.graph_id,

@@ -1,5 +1,6 @@
import datetime
import json
from unittest.mock import AsyncMock

import fastapi.testclient
import pytest
@@ -225,6 +226,10 @@ def test_add_agent_to_library_success(
        "backend.server.v2.library.db.add_store_agent_to_library"
    )
    mock_db_call.return_value = mock_library_agent
    mock_complete_onboarding = mocker.patch(
        "backend.server.v2.library.routes.agents.complete_onboarding_step",
        new_callable=AsyncMock,
    )

    response = client.post(
        "/agents", json={"store_listing_version_id": "test-version-id"}
@@ -239,6 +244,7 @@ def test_add_agent_to_library_success(
    mock_db_call.assert_called_once_with(
        store_listing_version_id="test-version-id", user_id=test_user_id
    )
    mock_complete_onboarding.assert_awaited_once()


def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture, test_user_id: str):

@@ -16,7 +16,7 @@ import logging
import sys

from backend.data.db import connect, disconnect, query_raw_with_schema
from backend.integrations.embeddings import EmbeddingService, create_search_text
from backend.integrations.embeddings import create_search_text, get_embedding_service

logging.basicConfig(
    level=logging.INFO,
@@ -48,7 +48,7 @@ async def backfill_embeddings(
    await connect()

    try:
        embedding_service = EmbeddingService()
        embedding_service = get_embedding_service()

        # Get all versions without embeddings
        versions = await query_raw_with_schema(

@@ -27,8 +27,9 @@ async def _get_cached_store_agents(
    category: str | None,
    page: int,
    page_size: int,
    filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
):
    """Cached helper to get store agents."""
    """Cached helper to get store agents with hybrid search support."""
    return await backend.server.v2.store.db.get_store_agents(
        featured=featured,
        creators=[creator] if creator else None,
@@ -37,6 +38,7 @@ async def _get_cached_store_agents(
        category=category,
        page=page,
        page_size=page_size,
        filter_mode=filter_mode,
    )


@@ -39,6 +39,25 @@ settings = Settings()
DEFAULT_ADMIN_NAME = "AutoGPT Admin"
DEFAULT_ADMIN_EMAIL = "admin@autogpt.co"

# Minimum similarity threshold for vector search results
# Cosine similarity ranges from -1 to 1, where 1 is identical
# 0.4 filters loosely related results while keeping semantically relevant ones
VECTOR_SEARCH_SIMILARITY_THRESHOLD = 0.4

# Minimum relevance threshold for BM25 full-text search results
# ts_rank_cd returns values typically in range 0-1 (can exceed 1 for exact matches)
# 0.05 allows partial keyword matches
BM25_RELEVANCE_THRESHOLD = 0.05

# RRF constant (k) - standard value that balances influence of top vs lower ranks
# Higher k values reduce the influence of high-ranking items
RRF_K = 60

# Minimum RRF score threshold for combined mode
# Filters out results that rank poorly across all signals
# For reference: rank #1 in all = ~0.041, rank #100 in all = ~0.016
RRF_SCORE_THRESHOLD = 0.02


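The reference numbers in the comment above follow directly from the weighted RRF formula used in the SQL further down (weights 1.0 for BM25, 1.0 for vector, 0.5 for popularity). A quick standalone check:

RRF_K = 60

def rrf(bm25_rank: int, vector_rank: int, popularity_rank: int) -> float:
    # Same weighting as the rrf_scored CTE in the query below.
    return (
        1.0 / (RRF_K + bm25_rank)
        + 1.0 / (RRF_K + vector_rank)
        + 0.5 / (RRF_K + popularity_rank)
    )

print(round(rrf(1, 1, 1), 3))        # 0.041 - top of every list
print(round(rrf(100, 100, 100), 3))  # 0.016 - dropped by RRF_SCORE_THRESHOLD = 0.02
print(round(rrf(1, 200, 50), 3))     # 0.025 - one strong signal can still clear the bar
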
async def get_store_agents(
    featured: bool = False,
@@ -48,75 +67,223 @@ async def get_store_agents(
    category: str | None = None,
    page: int = 1,
    page_size: int = 20,
    filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
) -> backend.server.v2.store.model.StoreAgentsResponse:
    """
    Get PUBLIC store agents from the StoreAgent view
    Get PUBLIC store agents from the StoreAgent view.

    When search_query is provided, uses hybrid search combining:
    - BM25 full-text search (lexical matching via PostgreSQL tsvector)
    - Vector semantic similarity (meaning-based matching via pgvector)
    - Popularity signal (run counts as PageRank proxy)

    Results are ranked using Reciprocal Rank Fusion (RRF).

    Args:
        featured: Filter to only show featured agents.
        creators: Filter agents by creator usernames.
        sorted_by: Sort agents by "runs", "rating", "name", or "updated_at".
        search_query: Search query for hybrid search.
        category: Filter agents by category.
        page: Page number for pagination.
        page_size: Number of agents per page.
        filter_mode: Controls how results are filtered when searching:
            - "strict": Must match BOTH BM25 AND vector thresholds
            - "permissive": Must match EITHER BM25 OR vector threshold
            - "combined": No threshold filtering, rely on RRF score (default)

    Returns:
        StoreAgentsResponse with paginated list of agents.
    """
    logger.debug(
        f"Getting store agents. featured={featured}, creators={creators}, sorted_by={sorted_by}, search={search_query}, category={category}, page={page}"
        f"Getting store agents. featured={featured}, creators={creators}, "
        f"sorted_by={sorted_by}, search={search_query}, category={category}, "
        f"page={page}, filter_mode={filter_mode}"
    )

    try:
        # If search_query is provided, use vector similarity search
        # If search_query is provided, use hybrid search (BM25 + vector + popularity)
        if search_query:
            offset = (page - 1) * page_size

            # Generate embedding for search query
            embedding_service = await get_embedding_service()
            query_embedding = await embedding_service.generate_embedding(search_query)
            # Convert embedding to PostgreSQL array format
            embedding_str = "[" + ",".join(map(str, query_embedding)) + "]"
            # Try to generate embedding for vector search
            # Falls back to BM25-only if embedding service is not available
            query_embedding: list[float] | None = None
            try:
                embedding_service = get_embedding_service()
                query_embedding = await embedding_service.generate_embedding(
                    search_query
                )
            except (ValueError, Exception) as e:
                # Embedding service not configured or failed - use BM25 only
                logger.warning(f"Embedding generation failed, using BM25 only: {e}")

            # Whitelist allowed order_by columns
            # For vector search, we use similarity instead of rank
            ALLOWED_ORDER_BY = {
                "rating": "rating DESC, similarity DESC",
                "runs": "runs DESC, similarity DESC",
                "name": "agent_name ASC, similarity DESC",
                "updated_at": "updated_at DESC, similarity DESC",
            }

            # Validate and get order clause
            if sorted_by and sorted_by in ALLOWED_ORDER_BY:
                order_by_clause = ALLOWED_ORDER_BY[sorted_by]
            else:
                # Default: order by vector similarity (most similar first)
                order_by_clause = "similarity DESC, updated_at DESC"
            # Convert embedding to PostgreSQL array format (or None for BM25-only)
            embedding_str = (
                "[" + ",".join(map(str, query_embedding)) + "]"
                if query_embedding
                else None
            )

            # Build WHERE conditions and parameters list
            # When embedding is not available (no OpenAI key), $1 will be NULL
            where_parts: list[str] = []
            params: list[typing.Any] = [embedding_str]  # $1 - query embedding
            params: list[typing.Any] = [embedding_str]  # $1 - query embedding (or NULL)
            param_index = 2  # Start at $2 for next parameter

            # Always filter for available agents and agents with embeddings
            # Always filter for available agents
            where_parts.append("is_available = true")
            where_parts.append("embedding IS NOT NULL")

            # Require search signals to be present
            if embedding_str is None:
                # No embedding available - require BM25 search only
                where_parts.append("search IS NOT NULL")
            elif filter_mode == "strict":
                # Strict mode: require both embedding AND search to be available
                where_parts.append("embedding IS NOT NULL")
                where_parts.append("search IS NOT NULL")
            else:
                # Permissive/combined: require at least one signal
                where_parts.append("(embedding IS NOT NULL OR search IS NOT NULL)")

            if featured:
                where_parts.append("featured = true")

            if creators and creators:
            if creators:
                # Use ANY with array parameter
                where_parts.append(f"creator_username = ANY(${param_index})")
                params.append(creators)
                param_index += 1

            if category and category:
            if category:
                where_parts.append(f"${param_index} = ANY(categories)")
                params.append(category)
                param_index += 1

            # Add search query for BM25
            params.append(search_query)
            bm25_query_param = f"${param_index}"
            param_index += 1

            sql_where_clause: str = " AND ".join(where_parts) if where_parts else "1=1"

            # Build score filter based on filter_mode
            # This filter is applied BEFORE RRF ranking in the filtered_agents CTE
            if embedding_str is None:
                # No embedding - filter only on BM25 score
                score_filter = f"bm25_score >= {BM25_RELEVANCE_THRESHOLD}"
            elif filter_mode == "strict":
                score_filter = f"""
                    bm25_score >= {BM25_RELEVANCE_THRESHOLD}
                    AND vector_score >= {VECTOR_SEARCH_SIMILARITY_THRESHOLD}
                """
            elif filter_mode == "permissive":
                score_filter = f"""
                    bm25_score >= {BM25_RELEVANCE_THRESHOLD}
                    OR vector_score >= {VECTOR_SEARCH_SIMILARITY_THRESHOLD}
                """
            else:  # combined - no pre-filtering on individual scores
                score_filter = "1=1"

            # RRF score filter is applied AFTER ranking to filter irrelevant results
            rrf_score_filter = f"rrf_score >= {RRF_SCORE_THRESHOLD}"

            # Build ORDER BY clause - sorted_by takes precedence, rrf_score as secondary
            if sorted_by == "rating":
                order_by_clause = "rating DESC, rrf_score DESC"
            elif sorted_by == "runs":
                order_by_clause = "runs DESC, rrf_score DESC"
            elif sorted_by == "name":
                order_by_clause = "agent_name ASC, rrf_score DESC"
            elif sorted_by == "updated_at":
                order_by_clause = "updated_at DESC, rrf_score DESC"
            else:
                # Default: order by RRF relevance score
                order_by_clause = "rrf_score DESC, updated_at DESC"

            # Add pagination params
            params.extend([page_size, offset])
            limit_param = f"${param_index}"
            offset_param = f"${param_index + 1}"

            # Vector similarity search query using cosine distance
            # The <=> operator returns cosine distance (0 = identical, 2 = opposite)
            # We convert to similarity: 1 - distance/2 gives range [0, 1]
            # Hybrid search SQL with Reciprocal Rank Fusion (RRF)
            # CTEs: scored_agents -> filtered_agents -> ranked_agents -> rrf_scored
            sql_query = f"""
                WITH scored_agents AS (
                    SELECT
                        slug,
                        agent_name,
                        agent_image,
                        creator_username,
                        creator_avatar,
                        sub_heading,
                        description,
                        runs,
                        rating,
                        categories,
                        featured,
                        is_available,
                        updated_at,
                        -- BM25 score using ts_rank_cd (covers density normalization)
                        COALESCE(
                            ts_rank_cd(
                                search,
                                plainto_tsquery('english', {bm25_query_param}),
                                32  -- normalization: divide by document length
                            ),
                            0
                        ) AS bm25_score,
                        -- Vector similarity score (cosine: 1 - distance)
                        -- Returns 0 when query embedding ($1) is NULL (no OpenAI key)
                        CASE
                            WHEN $1 IS NOT NULL AND embedding IS NOT NULL
                            THEN 1 - (embedding <=> $1::vector)
                            ELSE 0
                        END AS vector_score,
                        -- Popularity score (log-normalized run count)
                        CASE
                            WHEN runs > 0
                            THEN LN(runs + 1)
                            ELSE 0
                        END AS popularity_score
                    FROM {{schema_prefix}}"StoreAgent"
                    WHERE {sql_where_clause}
                ),
                max_popularity AS (
                    SELECT GREATEST(MAX(popularity_score), 1) AS max_pop
                    FROM scored_agents
                ),
                normalized_agents AS (
                    SELECT
                        sa.*,
                        -- Normalize popularity to [0, 1] range
                        sa.popularity_score / mp.max_pop AS norm_popularity_score
                    FROM scored_agents sa
                    CROSS JOIN max_popularity mp
                ),
                filtered_agents AS (
                    SELECT *
                    FROM normalized_agents
                    WHERE {score_filter}
                ),
                ranked_agents AS (
                    SELECT
                        *,
                        ROW_NUMBER() OVER (ORDER BY bm25_score DESC NULLS LAST) AS bm25_rank,
                        ROW_NUMBER() OVER (ORDER BY vector_score DESC NULLS LAST) AS vector_rank,
                        ROW_NUMBER() OVER (ORDER BY norm_popularity_score DESC NULLS LAST) AS popularity_rank
                    FROM filtered_agents
                ),
                rrf_scored AS (
                    SELECT
                        *,
                        -- RRF formula with weighted contributions
                        -- BM25 and vector get full weight, popularity gets 0.5x weight
                        (1.0 / ({RRF_K} + bm25_rank)) +
                        (1.0 / ({RRF_K} + vector_rank)) +
                        (0.5 / ({RRF_K} + popularity_rank)) AS rrf_score
                    FROM ranked_agents
                )
                SELECT
                    slug,
                    agent_name,
@@ -131,21 +298,79 @@ async def get_store_agents(
                    featured,
                    is_available,
                    updated_at,
                    1 - (embedding <=> $1::vector) AS similarity
                FROM {{schema_prefix}}"StoreAgent"
                WHERE {sql_where_clause}
                    rrf_score
                FROM rrf_scored
                WHERE {rrf_score_filter}
                ORDER BY {order_by_clause}
                LIMIT {limit_param} OFFSET {offset_param}
            """

            # Count query for pagination
            # Count query (without pagination) - requires same CTE structure because:
            # 1. RRF scoring requires computing ranks across ALL matching results
            # 2. The rrf_score_filter threshold must be applied consistently
            # Note: This is inherent to RRF - there's no way to count without ranking
            count_query = f"""
                WITH scored_agents AS (
                    SELECT
                        runs,
                        COALESCE(
                            ts_rank_cd(
                                search,
                                plainto_tsquery('english', {bm25_query_param}),
                                32
                            ),
                            0
                        ) AS bm25_score,
                        CASE
                            WHEN $1 IS NOT NULL AND embedding IS NOT NULL
                            THEN 1 - (embedding <=> $1::vector)
                            ELSE 0
                        END AS vector_score,
                        CASE
                            WHEN runs > 0
                            THEN LN(runs + 1)
                            ELSE 0
                        END AS popularity_score
                    FROM {{schema_prefix}}"StoreAgent"
                    WHERE {sql_where_clause}
                ),
                max_popularity AS (
                    SELECT GREATEST(MAX(popularity_score), 1) AS max_pop
                    FROM scored_agents
                ),
                normalized_agents AS (
                    SELECT
                        sa.*,
                        sa.popularity_score / mp.max_pop AS norm_popularity_score
                    FROM scored_agents sa
                    CROSS JOIN max_popularity mp
                ),
                filtered_agents AS (
                    SELECT *
                    FROM normalized_agents
                    WHERE {score_filter}
                ),
                ranked_agents AS (
                    SELECT
                        *,
                        ROW_NUMBER() OVER (ORDER BY bm25_score DESC NULLS LAST) AS bm25_rank,
                        ROW_NUMBER() OVER (ORDER BY vector_score DESC NULLS LAST) AS vector_rank,
                        ROW_NUMBER() OVER (ORDER BY norm_popularity_score DESC NULLS LAST) AS popularity_rank
                    FROM filtered_agents
                ),
                rrf_scored AS (
                    SELECT
                        (1.0 / ({RRF_K} + bm25_rank)) +
                        (1.0 / ({RRF_K} + vector_rank)) +
                        (0.5 / ({RRF_K} + popularity_rank)) AS rrf_score
                    FROM ranked_agents
                )
                SELECT COUNT(*) as count
                FROM {{schema_prefix}}"StoreAgent"
                WHERE {sql_where_clause}
                FROM rrf_scored
                WHERE {rrf_score_filter}
            """

            # Execute both queries with parameters
            # Execute queries
            agents = await query_raw_with_schema(sql_query, *params)

            # For count, use params without pagination (last 2 params)
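For orientation, a hedged usage sketch of the three modes (the module path matches the one patched in the tests further down; the queries are illustrative only):

import backend.server.v2.store.db as store_db

async def demo_search():
    # Default: a hit needs to clear either the BM25 or the vector threshold.
    permissive = await store_db.get_store_agents(
        search_query="scrape twitter", filter_mode="permissive", page=1, page_size=20
    )
    # Precision-first: both signals must clear their thresholds.
    strict = await store_db.get_store_agents(
        search_query="scrape twitter", filter_mode="strict"
    )
    # Recall-first: no per-signal cutoff, only the fused rrf_score >= RRF_SCORE_THRESHOLD.
    combined = await store_db.get_store_agents(
        search_query="scrape twitter", filter_mode="combined"
    )
    return permissive, strict, combined
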
@@ -282,7 +507,7 @@ async def _generate_and_store_embedding(
        description: The agent description.
    """
    try:
        embedding_service = await get_embedding_service()
        embedding_service = get_embedding_service()
        search_text = create_search_text(name, sub_heading, description)

        if not search_text:
@@ -385,6 +610,7 @@ async def get_store_agent_details(
            slug=agent.slug,
            agent_name=agent.agent_name,
            agent_video=agent.agent_video or "",
            agent_output_demo=agent.agent_output_demo or "",
            agent_image=agent.agent_image,
            creator=agent.creator_username or "",
            creator_avatar=agent.creator_avatar or "",
@@ -455,6 +681,7 @@ async def get_store_agent_by_version_id(
            slug=agent.slug,
            agent_name=agent.agent_name,
            agent_video=agent.agent_video or "",
            agent_output_demo=agent.agent_output_demo or "",
            agent_image=agent.agent_image,
            creator=agent.creator_username or "",
            creator_avatar=agent.creator_avatar or "",
@@ -741,6 +968,7 @@ async def create_store_submission(
    slug: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
@@ -835,6 +1063,7 @@ async def create_store_submission(
                agentGraphVersion=agent_version,
                name=name,
                videoUrl=video_url,
                agentOutputDemoUrl=agent_output_demo_url,
                imageUrls=image_urls,
                description=description,
                instructions=instructions,
@@ -913,6 +1142,7 @@ async def edit_store_submission(
    store_listing_version_id: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    sub_heading: str = "",
@@ -994,6 +1224,7 @@ async def edit_store_submission(
                store_listing_id=current_version.storeListingId,
                name=name,
                video_url=video_url,
                agent_output_demo_url=agent_output_demo_url,
                image_urls=image_urls,
                description=description,
                sub_heading=sub_heading,
@@ -1011,6 +1242,7 @@ async def edit_store_submission(
            data=prisma.types.StoreListingVersionUpdateInput(
                name=name,
                videoUrl=video_url,
                agentOutputDemoUrl=agent_output_demo_url,
                imageUrls=image_urls,
                description=description,
                categories=categories,
@@ -1078,6 +1310,7 @@ async def create_store_version(
    store_listing_id: str,
    name: str,
    video_url: str | None = None,
    agent_output_demo_url: str | None = None,
    image_urls: list[str] = [],
    description: str = "",
    instructions: str | None = None,
@@ -1147,6 +1380,7 @@ async def create_store_version(
                agentGraphVersion=agent_version,
                name=name,
                videoUrl=video_url,
                agentOutputDemoUrl=agent_output_demo_url,
                imageUrls=image_urls,
                description=description,
                instructions=instructions,

@@ -407,12 +407,12 @@ async def test_get_store_agents_search_category_array_injection():
    assert isinstance(result.agents, list)


# Vector search tests
# Hybrid search tests (BM25 + vector + popularity with RRF ranking)


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_mocked(mocker):
    """Test vector search uses embedding service and executes query safely."""
async def test_get_store_agents_hybrid_search_mocked(mocker):
    """Test hybrid search uses embedding service and executes query safely."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
@@ -423,7 +423,7 @@ async def test_get_store_agents_vector_search_mocked(mocker):
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema to return empty results
@@ -444,8 +444,8 @@ async def test_get_store_agents_vector_search_mocked(mocker):


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_with_results(mocker):
    """Test vector search returns properly formatted results."""
async def test_get_store_agents_hybrid_search_with_results(mocker):
    """Test hybrid search returns properly formatted results with RRF scoring."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
@@ -456,10 +456,10 @@ async def test_get_store_agents_vector_search_with_results(mocker):
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query results
    # Mock query results (hybrid search returns rrf_score instead of similarity)
    mock_agents = [
        {
            "slug": "test-agent",
@@ -475,7 +475,7 @@ async def test_get_store_agents_vector_search_with_results(mocker):
            "featured": False,
            "is_available": True,
            "updated_at": datetime.now(),
            "similarity": 0.95,
            "rrf_score": 0.048,  # RRF score from combined rankings
        }
    ]
    mock_count = [{"count": 1}]
@@ -496,8 +496,8 @@ async def test_get_store_agents_vector_search_with_results(mocker):


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_vector_search_with_filters(mocker):
    """Test vector search works correctly with additional filters."""
async def test_get_store_agents_hybrid_search_with_filters(mocker):
    """Test hybrid search works correctly with additional filters."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
@@ -508,7 +508,7 @@ async def test_get_store_agents_vector_search_with_filters(mocker):
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
@@ -523,7 +523,6 @@ async def test_get_store_agents_vector_search_with_filters(mocker):
        featured=True,
        creators=["creator1", "creator2"],
        category="AI",
        sorted_by="rating",
    )

    # Verify query was called with parameterized values
@@ -534,13 +533,124 @@ async def test_get_store_agents_vector_search_with_filters(mocker):
    first_call_args = mock_query.call_args_list[0]
    sql_query = first_call_args[0][0]

    # Verify key elements of the query
    assert "embedding <=> $1::vector" in sql_query
    # Verify key elements of hybrid search query
    assert "embedding <=> $1::vector" in sql_query  # Vector search
    assert "ts_rank_cd" in sql_query  # BM25 search
    assert "rrf_score" in sql_query  # RRF ranking
    assert "featured = true" in sql_query
    assert "creator_username = ANY($" in sql_query
    assert "= ANY(categories)" in sql_query


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_strict_filter_mode(mocker):
    """Test hybrid search with strict filter mode requires both BM25 and vector matches."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
    mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
    mock_embedding_service = mocker.MagicMock()
    mock_embedding_service.generate_embedding = mocker.AsyncMock(
        return_value=mock_embedding
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
    mock_query = mocker.patch(
        "backend.server.v2.store.db.query_raw_with_schema",
        mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
    )

    # Call function with strict filter mode
    await db.get_store_agents(search_query="test query", filter_mode="strict")

    # Check that the SQL query includes strict filtering conditions
    first_call_args = mock_query.call_args_list[0]
    sql_query = first_call_args[0][0]

    # Strict mode requires both embedding AND search to be present
    assert "embedding IS NOT NULL" in sql_query
    assert "search IS NOT NULL" in sql_query
    # Strict score filter requires both thresholds to be met
    assert "bm25_score >=" in sql_query
    assert "AND vector_score >=" in sql_query


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_permissive_filter_mode(mocker):
    """Test hybrid search with permissive filter mode requires either BM25 or vector match."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
    mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
    mock_embedding_service = mocker.MagicMock()
    mock_embedding_service.generate_embedding = mocker.AsyncMock(
        return_value=mock_embedding
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
    mock_query = mocker.patch(
        "backend.server.v2.store.db.query_raw_with_schema",
        mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
    )

    # Call function with permissive filter mode
    await db.get_store_agents(search_query="test query", filter_mode="permissive")

    # Check that the SQL query includes permissive filtering conditions
    first_call_args = mock_query.call_args_list[0]
    sql_query = first_call_args[0][0]

    # Permissive mode requires at least one signal
    assert "(embedding IS NOT NULL OR search IS NOT NULL)" in sql_query
    # Permissive score filter requires either threshold to be met
    assert "OR vector_score >=" in sql_query


@pytest.mark.asyncio(loop_scope="session")
async def test_get_store_agents_hybrid_search_combined_filter_mode(mocker):
    """Test hybrid search with combined filter mode (default) filters by RRF score."""
    from backend.integrations.embeddings import EMBEDDING_DIMENSIONS

    # Mock embedding service
    mock_embedding = [0.1] * EMBEDDING_DIMENSIONS
    mock_embedding_service = mocker.MagicMock()
    mock_embedding_service.generate_embedding = mocker.AsyncMock(
        return_value=mock_embedding
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
    mock_query = mocker.patch(
        "backend.server.v2.store.db.query_raw_with_schema",
        mocker.AsyncMock(side_effect=[[], [{"count": 0}]]),
    )

    # Call function with combined filter mode (default)
    await db.get_store_agents(search_query="test query", filter_mode="combined")

    # Check that the SQL query includes combined filtering
    first_call_args = mock_query.call_args_list[0]
    sql_query = first_call_args[0][0]

    # Combined mode requires at least one signal
    assert "(embedding IS NOT NULL OR search IS NOT NULL)" in sql_query
    # Combined mode uses "1=1" as pre-filter (no individual score filtering)
    # But applies RRF score threshold to filter irrelevant results
    assert "rrf_score" in sql_query
    assert "rrf_score >=" in sql_query  # RRF threshold filter applied


@pytest.mark.asyncio(loop_scope="session")
async def test_generate_and_store_embedding_success(mocker):
    """Test that embedding generation and storage works correctly."""
@@ -554,7 +664,7 @@ async def test_generate_and_store_embedding_success(mocker):
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
@@ -592,7 +702,7 @@ async def test_generate_and_store_embedding_empty_text(mocker):
    mock_embedding_service.generate_embedding = mocker.AsyncMock()
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Mock query_raw_with_schema
@@ -626,7 +736,7 @@ async def test_generate_and_store_embedding_handles_error(mocker):
    )
    mocker.patch(
        "backend.server.v2.store.db.get_embedding_service",
        mocker.AsyncMock(return_value=mock_embedding_service),
        mocker.MagicMock(return_value=mock_embedding_service),
    )

    # Call should not raise - errors are logged but not propagated

@@ -1,4 +1,5 @@
import datetime
from enum import Enum
from typing import List

import prisma.enums
@@ -7,6 +8,19 @@ import pydantic
from backend.util.models import Pagination


class SearchFilterMode(str, Enum):
    """How to combine BM25 and vector search results for filtering.

    - STRICT: Must pass BOTH BM25 AND vector similarity thresholds
    - PERMISSIVE: Must pass EITHER BM25 OR vector similarity threshold
    - COMBINED: No pre-filtering, only the combined RRF score matters (default)
    """

    STRICT = "strict"
    PERMISSIVE = "permissive"
    COMBINED = "combined"


class MyAgent(pydantic.BaseModel):
    agent_id: str
    agent_version: int
@@ -44,6 +58,7 @@ class StoreAgentDetails(pydantic.BaseModel):
    slug: str
    agent_name: str
    agent_video: str
    agent_output_demo: str
    agent_image: list[str]
    creator: str
    creator_avatar: str
@@ -121,6 +136,7 @@ class StoreSubmission(pydantic.BaseModel):

    # Additional fields for editing
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    categories: list[str] = []


@@ -157,6 +173,7 @@ class StoreSubmissionRequest(pydantic.BaseModel):
    name: str
    sub_heading: str
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None
@@ -169,6 +186,7 @@ class StoreSubmissionEditRequest(pydantic.BaseModel):
    name: str
    sub_heading: str
    video_url: str | None = None
    agent_output_demo_url: str | None = None
    image_urls: list[str] = []
    description: str = ""
    instructions: str | None = None

@@ -62,6 +62,7 @@ def test_store_agent_details():
        slug="test-agent",
        agent_name="Test Agent",
        agent_video="video.mp4",
        agent_output_demo="demo.mp4",
        agent_image=["image1.jpg", "image2.jpg"],
        creator="creator1",
        creator_avatar="avatar.jpg",

@@ -99,18 +99,30 @@ async def get_agents(
    category: str | None = None,
    page: int = 1,
    page_size: int = 20,
    filter_mode: Literal["strict", "permissive", "combined"] = "permissive",
):
    """
    Get a paginated list of agents from the store with optional filtering and sorting.

    When search_query is provided, uses hybrid search combining:
    - BM25 full-text search (lexical matching)
    - Vector semantic similarity (meaning-based matching)
    - Popularity signal (run counts)

    Results are ranked using Reciprocal Rank Fusion (RRF).

    Args:
        featured (bool, optional): Filter to only show featured agents. Defaults to False.
        creator (str | None, optional): Filter agents by creator username. Defaults to None.
        sorted_by (str | None, optional): Sort agents by "runs" or "rating". Defaults to None.
        search_query (str | None, optional): Search agents by name, subheading and description. Defaults to None.
        search_query (str | None, optional): Search agents by name, subheading and description.
        category (str | None, optional): Filter agents by category. Defaults to None.
        page (int, optional): Page number for pagination. Defaults to 1.
        page_size (int, optional): Number of agents per page. Defaults to 20.
        filter_mode (str, optional): Controls result filtering when searching:
            - "strict": Must match BOTH BM25 AND vector thresholds
            - "permissive": Must match EITHER BM25 OR vector threshold
            - "combined": No threshold filtering, rely on RRF score (default)

    Returns:
        StoreAgentsResponse: Paginated list of agents matching the filters
@@ -144,6 +156,7 @@ async def get_agents(
        category=category,
        page=page,
        page_size=page_size,
        filter_mode=filter_mode,
    )
    return agents

@@ -438,6 +451,7 @@ async def create_submission(
        slug=submission_request.slug,
        name=submission_request.name,
        video_url=submission_request.video_url,
        agent_output_demo_url=submission_request.agent_output_demo_url,
        image_urls=submission_request.image_urls,
        description=submission_request.description,
        instructions=submission_request.instructions,
@@ -481,6 +495,7 @@ async def edit_submission(
        store_listing_version_id=store_listing_version_id,
        name=submission_request.name,
        video_url=submission_request.video_url,
        agent_output_demo_url=submission_request.agent_output_demo_url,
        image_urls=submission_request.image_urls,
        description=submission_request.description,
        instructions=submission_request.instructions,

@@ -65,6 +65,7 @@ def test_get_agents_defaults(
        category=None,
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -112,6 +113,7 @@ def test_get_agents_featured(
        category=None,
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -159,6 +161,7 @@ def test_get_agents_by_creator(
        category=None,
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -206,6 +209,7 @@ def test_get_agents_sorted(
        category=None,
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -253,6 +257,7 @@ def test_get_agents_search(
        category=None,
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -299,6 +304,7 @@ def test_get_agents_category(
        category="test-category",
        page=1,
        page_size=20,
        filter_mode="permissive",
    )


@@ -348,6 +354,7 @@ def test_get_agents_pagination(
        category=None,
        page=2,
        page_size=5,
        filter_mode="permissive",
    )


@@ -378,6 +385,7 @@ def test_get_agent_details(
        slug="test-agent",
        agent_name="Test Agent",
        agent_video="video.mp4",
        agent_output_demo="demo.mp4",
        agent_image=["image1.jpg", "image2.jpg"],
        creator="creator1",
        creator_avatar="avatar1.jpg",

@@ -5,6 +5,13 @@ from tiktoken import encoding_for_model

from backend.util import json

# ---------------------------------------------------------------------------#
#                                 CONSTANTS                                   #
# ---------------------------------------------------------------------------#

# Message prefixes for important system messages that should be protected during compression
MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: "

# ---------------------------------------------------------------------------#
#                             INTERNAL UTILITIES                              #
# ---------------------------------------------------------------------------#
@@ -63,6 +70,55 @@ def _msg_tokens(msg: dict, enc) -> int:
    return WRAPPER + content_tokens + tool_call_tokens


def _is_tool_message(msg: dict) -> bool:
    """Check if a message contains tool calls or results that should be protected."""
    content = msg.get("content")

    # Check for Anthropic-style tool messages
    if isinstance(content, list) and any(
        isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result")
        for item in content
    ):
        return True

    # Check for OpenAI-style tool calls in the message
    if "tool_calls" in msg or msg.get("role") == "tool":
        return True

    return False


def _is_objective_message(msg: dict) -> bool:
    """Check if a message contains objective/system prompts that should be absolutely protected."""
    content = msg.get("content", "")
    if isinstance(content, str):
        # Protect any message with the main objective prefix
        return content.startswith(MAIN_OBJECTIVE_PREFIX)
    return False


def _truncate_tool_message_content(msg: dict, enc, max_tokens: int) -> None:
    """
    Carefully truncate tool message content while preserving tool structure.
    Only truncates tool_result content, leaves tool_use intact.
    """
    content = msg.get("content")
    if not isinstance(content, list):
        return

    for item in content:
        # Only process tool_result items, leave tool_use blocks completely intact
        if not (isinstance(item, dict) and item.get("type") == "tool_result"):
            continue

        result_content = item.get("content", "")
        if (
            isinstance(result_content, str)
            and _tok_len(result_content, enc) > max_tokens
        ):
            item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens)


def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str:
|
||||
"""
|
||||
Return *text* shortened to ≈max_tok tokens by keeping the head & tail
|
||||
@@ -140,13 +196,21 @@ def compress_prompt(
|
||||
return sum(_msg_tokens(m, enc) for m in msgs)
|
||||
|
||||
original_token_count = total_tokens()
|
||||
|
||||
if original_token_count + reserve <= target_tokens:
|
||||
return msgs
|
||||
|
||||
# ---- STEP 0 : normalise content --------------------------------------
|
||||
# Convert non-string payloads to strings so token counting is coherent.
|
||||
for m in msgs[1:-1]: # keep the first & last intact
|
||||
for i, m in enumerate(msgs):
|
||||
if not isinstance(m.get("content"), str) and m.get("content") is not None:
|
||||
if _is_tool_message(m):
|
||||
continue
|
||||
|
||||
# Keep first and last messages intact (unless they're tool messages)
|
||||
if i == 0 or i == len(msgs) - 1:
|
||||
continue
|
||||
|
||||
# Reasonable 20k-char ceiling prevents pathological blobs
|
||||
content_str = json.dumps(m["content"], separators=(",", ":"))
|
||||
if len(content_str) > 20_000:
|
||||
@@ -157,34 +221,45 @@ def compress_prompt(
|
||||
cap = start_cap
|
||||
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
|
||||
for m in msgs[1:-1]: # keep first & last intact
|
||||
if _tok_len(m.get("content") or "", enc) > cap:
|
||||
m["content"] = _truncate_middle_tokens(m["content"], enc, cap)
|
||||
if _is_tool_message(m):
|
||||
# For tool messages, only truncate tool result content, preserve structure
|
||||
_truncate_tool_message_content(m, enc, cap)
|
||||
continue
|
||||
|
||||
if _is_objective_message(m):
|
||||
# Never truncate objective messages - they contain the core task
|
||||
continue
|
||||
|
||||
content = m.get("content") or ""
|
||||
if _tok_len(content, enc) > cap:
|
||||
m["content"] = _truncate_middle_tokens(content, enc, cap)
|
||||
cap //= 2 # tighten the screw
|
||||
|
||||
# ---- STEP 2 : middle-out deletion -----------------------------------
|
||||
while total_tokens() + reserve > target_tokens and len(msgs) > 2:
|
||||
# Identify all deletable messages (not first/last, not tool messages, not objective messages)
|
||||
deletable_indices = []
|
||||
for i in range(1, len(msgs) - 1): # Skip first and last
|
||||
if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]):
|
||||
deletable_indices.append(i)
|
||||
|
||||
if not deletable_indices:
|
||||
break # nothing more we can drop
|
||||
|
||||
# Delete from center outward - find the index closest to center
|
||||
centre = len(msgs) // 2
|
||||
# Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ...
|
||||
order = [centre] + [
|
||||
i
|
||||
for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1))
|
||||
for i in pair
|
||||
]
|
||||
removed = False
|
||||
for i in order:
|
||||
msg = msgs[i]
|
||||
if "tool_calls" in msg or msg.get("role") == "tool":
|
||||
continue # protect tool shells
|
||||
del msgs[i]
|
||||
removed = True
|
||||
break
|
||||
if not removed: # nothing more we can drop
|
||||
break
|
||||
to_delete = min(deletable_indices, key=lambda i: abs(i - centre))
|
||||
del msgs[to_delete]
|
||||
|
||||
# ---- STEP 3 : final safety-net trim on first & last ------------------
|
||||
cap = start_cap
|
||||
while total_tokens() + reserve > target_tokens and cap >= floor_cap:
|
||||
for idx in (0, -1): # first and last
|
||||
if _is_tool_message(msgs[idx]):
|
||||
# For tool messages at first/last position, truncate tool result content only
|
||||
_truncate_tool_message_content(msgs[idx], enc, cap)
|
||||
continue
|
||||
|
||||
text = msgs[idx].get("content") or ""
|
||||
if _tok_len(text, enc) > cap:
|
||||
msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap)
|
||||
|
||||
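The hunk above replaces the old symmetrical centre-out index walk with a simpler rule: collect the indices that are safe to drop (never the first or last message, never tool messages, never objective messages) and delete the one closest to the centre, recomputing token counts after each deletion. The following standalone sketch, which is not the repository's code, illustrates only that selection rule; is_protected stands in for _is_tool_message/_is_objective_message.

# Minimal sketch of the middle-out deletion order used by compress_prompt above.
# The real loop deletes one message at a time and re-measures tokens; this just
# shows the order in which unprotected middle messages would be considered.
def deletion_order(msgs: list[dict], is_protected) -> list[int]:
    deletable = [
        i for i in range(1, len(msgs) - 1)  # never the first or last message
        if not is_protected(msgs[i])
    ]
    centre = len(msgs) // 2
    # Closest to the centre goes first, so the least anchored context is dropped early.
    return sorted(deletable, key=lambda i: abs(i - centre))


if __name__ == "__main__":
    msgs = [{"role": "system"}, {"role": "user"}, {"role": "tool"},
            {"role": "assistant"}, {"role": "user"}, {"role": "assistant"}]
    print(deletion_order(msgs, lambda m: m.get("role") == "tool"))
    # prints [3, 4, 1]: index 2 (a tool message) and both ends are protected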
@@ -185,6 +185,12 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
        description="Number of top blocks with most errors to show when no blocks exceed threshold (0 to disable).",
    )

    # Execution Accuracy Monitoring
    execution_accuracy_check_interval_hours: int = Field(
        default=24,
        description="Interval in hours between execution accuracy alert checks.",
    )

    model_config = SettingsConfigDict(
        env_file=".env",
        extra="allow",
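Because the new field lives on a pydantic BaseSettings class with env_file=".env" and extra="allow", it should be tunable from the environment without a code change. A minimal sketch, assuming pydantic-settings' default env-name mapping (the field name, case-insensitive) and no env_prefix on Config; the variable name below is an assumption, not a documented setting of this project.

import os

# Run accuracy checks every 6 hours instead of the default 24.
os.environ["EXECUTION_ACCURACY_CHECK_INTERVAL_HOURS"] = "6"
# A Config() constructed after this point would report a 6-hour check interval.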
@@ -0,0 +1,64 @@
-- AlterTable
ALTER TABLE "StoreListingVersion" ADD COLUMN "agentOutputDemoUrl" TEXT;

-- Drop and recreate the StoreAgent view with agentOutputDemoUrl field
DROP VIEW IF EXISTS "StoreAgent";

CREATE OR REPLACE VIEW "StoreAgent" AS
WITH latest_versions AS (
    SELECT
        "storeListingId",
        MAX(version) AS max_version
    FROM "StoreListingVersion"
    WHERE "submissionStatus" = 'APPROVED'
    GROUP BY "storeListingId"
),
agent_versions AS (
    SELECT
        "storeListingId",
        array_agg(DISTINCT version::text ORDER BY version::text) AS versions
    FROM "StoreListingVersion"
    WHERE "submissionStatus" = 'APPROVED'
    GROUP BY "storeListingId"
)
SELECT
    sl.id AS listing_id,
    slv.id AS "storeListingVersionId",
    slv."createdAt" AS updated_at,
    sl.slug,
    COALESCE(slv.name, '') AS agent_name,
    slv."videoUrl" AS agent_video,
    slv."agentOutputDemoUrl" AS agent_output_demo,
    COALESCE(slv."imageUrls", ARRAY[]::text[]) AS agent_image,
    slv."isFeatured" AS featured,
    p.username AS creator_username, -- Allow NULL for malformed sub-agents
    p."avatarUrl" AS creator_avatar, -- Allow NULL for malformed sub-agents
    slv."subHeading" AS sub_heading,
    slv.description,
    slv.categories,
    slv.search,
    COALESCE(ar.run_count, 0::bigint) AS runs,
    COALESCE(rs.avg_rating, 0.0)::double precision AS rating,
    COALESCE(av.versions, ARRAY[slv.version::text]) AS versions,
    slv."isAvailable" AS is_available,
    COALESCE(sl."useForOnboarding", false) AS "useForOnboarding"
FROM "StoreListing" sl
JOIN latest_versions lv
    ON sl.id = lv."storeListingId"
JOIN "StoreListingVersion" slv
    ON slv."storeListingId" = lv."storeListingId"
    AND slv.version = lv.max_version
    AND slv."submissionStatus" = 'APPROVED'
JOIN "AgentGraph" a
    ON slv."agentGraphId" = a.id
    AND slv."agentGraphVersion" = a.version
LEFT JOIN "Profile" p
    ON sl."owningUserId" = p."userId"
LEFT JOIN "mv_review_stats" rs
    ON sl.id = rs."storeListingId"
LEFT JOIN "mv_agent_run_counts" ar
    ON a.id = ar."agentGraphId"
LEFT JOIN agent_versions av
    ON sl.id = av."storeListingId"
WHERE sl."isDeleted" = false
    AND sl."hasApprovedVersion" = true;
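The recreated view surfaces the new column as agent_output_demo alongside the existing agent_video. A hedged sketch of reading it through Prisma Client Python's raw-query API (the same client the loader script later in this diff uses); the function itself is illustrative and not taken from the codebase.

from prisma import Prisma

async def demo_url_for(db: Prisma, slug: str) -> str | None:
    # Read the demo URL exposed by the recreated "StoreAgent" view.
    rows = await db.query_raw(
        'SELECT agent_output_demo FROM "StoreAgent" WHERE slug = $1 LIMIT 1',
        slug,
    )
    return rows[0]["agent_output_demo"] if rows else None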
@@ -13,13 +13,6 @@ CREATE EXTENSION IF NOT EXISTS vector;
-- First drop the view that depends on the search column
DROP VIEW IF EXISTS "StoreAgent";

-- Remove full-text search infrastructure
DROP TRIGGER IF EXISTS "update_tsvector" ON "StoreListingVersion";
DROP FUNCTION IF EXISTS update_tsvector_column();

-- Drop the tsvector search column
ALTER TABLE "StoreListingVersion" DROP COLUMN IF EXISTS "search";

-- Add embedding column for vector search (1536 dimensions for text-embedding-3-small)
ALTER TABLE "StoreListingVersion"
    ADD COLUMN IF NOT EXISTS "embedding" vector(1536);

@@ -57,6 +50,7 @@ SELECT
    sl.slug,
    COALESCE(slv.name, '') AS agent_name,
    slv."videoUrl" AS agent_video,
    slv."agentOutputDemoUrl" AS agent_output_demo,
    COALESCE(slv."imageUrls", ARRAY[]::text[]) AS agent_image,
    slv."isFeatured" AS featured,
    p.username AS creator_username,

@@ -64,6 +58,7 @@ SELECT
    slv."subHeading" AS sub_heading,
    slv.description,
    slv.categories,
    slv.search,
    slv.embedding,
    COALESCE(ar.run_count, 0::bigint) AS runs,
    COALESCE(rs.avg_rating, 0.0)::double precision AS rating,
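The embedding vector(1536) column added above is the pgvector half of the search setup. A hedged sketch of the kind of nearest-neighbour lookup it enables, using pgvector's cosine-distance operator (<=>) and a text vector literal; the repository's actual search service may build this query differently.

from prisma import Prisma

async def similar_listings(db: Prisma, query_embedding: list[float], limit: int = 10):
    # pgvector accepts a bracketed text literal cast to ::vector.
    literal = "[" + ",".join(f"{x:.6f}" for x in query_embedding) + "]"
    return await db.query_raw(
        'SELECT id, name, embedding <=> $1::vector AS distance '
        'FROM "StoreListingVersion" '
        'WHERE embedding IS NOT NULL '
        'ORDER BY embedding <=> $1::vector '
        'LIMIT $2',
        literal,
        limit,
    )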
@@ -0,0 +1,15 @@
-- Create BuilderSearchHistory table
CREATE TABLE "BuilderSearchHistory" (
    "id" TEXT NOT NULL,
    "userId" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "searchQuery" TEXT NOT NULL,
    "filter" TEXT[] DEFAULT ARRAY[]::TEXT[],
    "byCreator" TEXT[] DEFAULT ARRAY[]::TEXT[],

    CONSTRAINT "BuilderSearchHistory_pkey" PRIMARY KEY ("id")
);

-- Define User foreign relation
ALTER TABLE "BuilderSearchHistory" ADD CONSTRAINT "BuilderSearchHistory_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -0,0 +1,35 @@
-- Migration: Add hybrid search infrastructure (BM25 + vector + popularity)
-- This migration:
-- 1. Creates/updates the tsvector trigger with weighted fields
-- 2. Adds GIN index for full-text search performance
-- 3. Backfills existing records with tsvector data

-- Create or replace the trigger function with WEIGHTED tsvector
-- Weight A = name (highest priority), B = subHeading, C = description
CREATE OR REPLACE FUNCTION update_tsvector_column() RETURNS TRIGGER AS $$
BEGIN
    NEW.search := setweight(to_tsvector('english', COALESCE(NEW.name, '')), 'A') ||
                  setweight(to_tsvector('english', COALESCE(NEW."subHeading", '')), 'B') ||
                  setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'C');
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Drop and recreate trigger to ensure it's active with the updated function
DROP TRIGGER IF EXISTS "update_tsvector" ON "StoreListingVersion";
CREATE TRIGGER "update_tsvector"
    BEFORE INSERT OR UPDATE OF name, "subHeading", description ON "StoreListingVersion"
    FOR EACH ROW
    EXECUTE FUNCTION update_tsvector_column();

-- Create GIN index for full-text search performance
CREATE INDEX IF NOT EXISTS idx_store_listing_version_search_gin
    ON "StoreListingVersion" USING GIN (search);

-- Backfill existing records with weighted tsvector
UPDATE "StoreListingVersion"
SET search = setweight(to_tsvector('english', COALESCE(name, '')), 'A') ||
             setweight(to_tsvector('english', COALESCE("subHeading", '')), 'B') ||
             setweight(to_tsvector('english', COALESCE(description, '')), 'C')
WHERE search IS NULL
   OR search = ''::tsvector;
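With the weighted tsvector and GIN index in place, a ranked keyword query becomes straightforward. A hedged sketch using stock PostgreSQL functions (websearch_to_tsquery, ts_rank) via Prisma Client Python's raw-query API; the repository's real hybrid ranking, which also folds in vector similarity and popularity, will differ.

from prisma import Prisma

async def keyword_search(db: Prisma, query: str, limit: int = 20):
    # The @@ match is served by the GIN index created above; ts_rank honours
    # the A/B/C weights set by the trigger (name > subHeading > description).
    return await db.query_raw(
        "SELECT id, name, ts_rank(search, websearch_to_tsquery('english', $1)) AS rank "
        'FROM "StoreListingVersion" '
        "WHERE search @@ websearch_to_tsquery('english', $1) "
        "ORDER BY rank DESC "
        "LIMIT $2",
        query,
        limit,
    )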
@@ -114,6 +114,7 @@ cli = "backend.cli:main"
format = "linter:format"
lint = "linter:lint"
test = "run_tests:test"
load-store-agents = "test.load_store_agents:run"

[tool.isort]
profile = "black"

@@ -137,3 +138,4 @@ filterwarnings = [

[tool.ruff]
target-version = "py310"
@@ -53,6 +53,7 @@ model User {

  Profile               Profile[]
  UserOnboarding        UserOnboarding?
  BuilderSearchHistory  BuilderSearchHistory[]
  StoreListings         StoreListing[]
  StoreListingReviews   StoreListingReview[]
  StoreVersionsReviewed StoreListingVersion[]

@@ -114,6 +115,19 @@ model UserOnboarding {
  User User @relation(fields: [userId], references: [id], onDelete: Cascade)
}

model BuilderSearchHistory {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @default(now()) @updatedAt

  searchQuery String
  filter      String[] @default([])
  byCreator   String[] @default([])

  userId String
  User   User   @relation(fields: [userId], references: [id], onDelete: Cascade)
}

// This model describes the Agent Graph/Flow (Multi Agent System).
model AgentGraph {
  id String @default(uuid())

@@ -701,10 +715,11 @@ view StoreAgent {
  storeListingVersionId String
  updated_at            DateTime

  slug        String
  agent_name  String
  agent_video String?
  agent_image String[]
  slug              String
  agent_name        String
  agent_video       String?
  agent_output_demo String?
  agent_image       String[]

  featured         Boolean @default(false)
  creator_username String?

@@ -833,13 +848,14 @@ model StoreListingVersion {
  AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])

  // Content fields
  name         String
  subHeading   String
  videoUrl     String?
  imageUrls    String[]
  description  String
  instructions String?
  categories   String[]
  name               String
  subHeading         String
  videoUrl           String?
  agentOutputDemoUrl String?
  imageUrls          String[]
  description        String
  instructions       String?
  categories         String[]

  isFeatured Boolean @default(false)

@@ -847,7 +863,10 @@ model StoreListingVersion {
  // Old versions can be made unavailable by the author if desired
  isAvailable Boolean @default(true)

  // Vector embedding for semantic search (replaces tsvector full-text search)
  // Full-text search tsvector column
  search Unsupported("tsvector")?

  // Vector embedding for semantic search
  embedding Unsupported("vector(1536)")?

  // Version workflow state
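The new BuilderSearchHistory model is written to with the same Prisma Client Python accessor style seen elsewhere in this diff (db.storelisting, db.agentnode). A hedged sketch; field names come from the schema above, but this exact call site is illustrative rather than the platform's actual handler.

from prisma import Prisma

async def record_builder_search(db: Prisma, user_id: str, query: str):
    # id, createdAt and updatedAt come from the schema defaults.
    await db.buildersearchhistory.create(
        data={
            "userId": user_id,
            "searchQuery": query,
            "filter": [],     # optional facet filters
            "byCreator": [],  # optional creator filters
        }
    )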
@@ -3,6 +3,7 @@
  "slug": "test-agent",
  "agent_name": "Test Agent",
  "agent_video": "video.mp4",
  "agent_output_demo": "demo.mp4",
  "agent_image": [
    "image1.jpg",
    "image2.jpg"

@@ -23,6 +23,7 @@
  "reviewed_at": null,
  "changes_summary": null,
  "video_url": "test.mp4",
  "agent_output_demo_url": null,
  "categories": [
    "test-category"
  ]
@@ -10,7 +10,7 @@ It creates:

Usage:
    cd backend
    poetry run python test/load_store_agents.py
    poetry run load-store-agents
"""

import asyncio

@@ -37,7 +37,8 @@ from prisma.types import (
AGENTS_DIR = Path(__file__).parent.parent / "agents"
CSV_FILE = AGENTS_DIR / "StoreAgent_rows.csv"

# Fixed user ID for the autogpt creator (test data, not production)
# User constants for the autogpt creator (test data, not production)
# Fixed uuid4 for idempotency - same user is reused across script runs
AUTOGPT_USER_ID = "79d96c73-e6f5-4656-a83a-185b41ee0d06"
AUTOGPT_EMAIL = "autogpt-test@agpt.co"
AUTOGPT_USERNAME = "autogpt"

@@ -253,7 +254,12 @@ async def create_agent_graph(
    for node in nodes:
        block_id = node["block_id"]
        # Ensure the block exists (create placeholder if needed)
        await ensure_block_exists(db, block_id, known_blocks)
        block_exists = await ensure_block_exists(db, block_id, known_blocks)
        if not block_exists:
            print(
                f" Skipping node {node['id']} - block {block_id} could not be created"
            )
            continue

        await db.agentnode.create(
            data=AgentNodeCreateInput(

@@ -353,7 +359,7 @@ async def create_store_listing(
    if is_approved:
        await db.storelisting.update(
            where={"id": listing_id},
            data={"activeVersionId": version_id},
            data={"ActiveVersion": {"connect": {"id": version_id}}},
        )

@@ -387,6 +393,7 @@ async def main():

    # Build mapping from version_id to json file
    loaded_graphs = {}  # graph_id -> (graph_id, version)
    failed_agents = []

    for json_file in json_files:
        # Extract the version ID from filename (agent_<version_id>.json)

@@ -399,17 +406,25 @@ async def main():
            continue

        metadata = csv_metadata[version_id]
        print(f"\nProcessing: {metadata['agent_name']}")
        agent_name = metadata["agent_name"]
        print(f"\nProcessing: {agent_name}")

        # Load and create the agent graph
        agent_data = await load_agent_json(json_file)
        graph_id, graph_version = await create_agent_graph(
            db, agent_data, known_blocks
        )
        loaded_graphs[graph_id] = (graph_id, graph_version)
        # Use a transaction per agent to prevent dangling resources
        try:
            async with db.tx() as tx:
                # Load and create the agent graph
                agent_data = await load_agent_json(json_file)
                graph_id, graph_version = await create_agent_graph(
                    tx, agent_data, known_blocks
                )
                loaded_graphs[graph_id] = (graph_id, graph_version)

        # Create store listing
        await create_store_listing(db, graph_id, graph_version, metadata)
                # Create store listing
                await create_store_listing(tx, graph_id, graph_version, metadata)
        except Exception as e:
            print(f" Error loading agent '{agent_name}': {e}")
            failed_agents.append(agent_name)
            continue

    # Step 4: Refresh materialized views
    print("\n[Step 4] Refreshing materialized views...")

@@ -421,11 +436,20 @@ async def main():

    print("\n" + "=" * 60)
    print(f"Successfully loaded {len(loaded_graphs)} agents")
    if failed_agents:
        print(
            f"Failed to load {len(failed_agents)} agents: {', '.join(failed_agents)}"
        )
    print("=" * 60)

    finally:
        await db.disconnect()


if __name__ == "__main__":
def run():
    """Entry point for poetry script."""
    asyncio.run(main())


if __name__ == "__main__":
    run()
@@ -3,6 +3,7 @@ import { withSentryConfig } from "@sentry/nextjs";
/** @type {import('next').NextConfig} */
const nextConfig = {
  productionBrowserSourceMaps: true,
  experimental: {
    serverActions: {
      bodySizeLimit: "256mb",
    },
    // Increase body size limit for API routes (file uploads) - 256MB to match backend limit
    proxyClientMaxBodySize: "256mb",
    middlewareClientMaxBodySize: "256mb",
  },
  images: {
    domains: [
      // We dont need to maintain alphabetical order here
@@ -82,7 +82,7 @@
    "lodash": "4.17.21",
    "lucide-react": "0.552.0",
    "moment": "2.30.1",
    "next": "15.4.7",
    "next": "15.4.10",
    "next-themes": "0.4.6",
    "nuqs": "2.7.2",
    "party-js": "2.2.0",

@@ -137,9 +137,8 @@
    "concurrently": "9.2.1",
    "cross-env": "10.1.0",
    "eslint": "8.57.1",
    "eslint-config-next": "15.5.2",
    "eslint-config-next": "15.5.7",
    "eslint-plugin-storybook": "9.1.5",
    "import-in-the-middle": "1.14.2",
    "msw": "2.11.6",
    "msw-storybook-addon": "2.0.6",
    "orval": "7.13.0",

@@ -159,3 +158,4 @@
  },
  "packageManager": "pnpm@10.20.0+sha512.cf9998222162dd85864d0a8102e7892e7ba4ceadebbf5a31f9c2fce48dfce317a9c53b9f6464d1ef9042cba2e02ae02a9f7c143a2b438cd93c91840f0192b9dd"
}
autogpt_platform/frontend/pnpm-lock.yaml (generated, 408 lines changed)
Lockfile regenerated for the dependency bumps above: next 15.4.7 -> 15.4.10 (with @next/env 15.4.10 and the @next/swc 15.4.8 platform binaries) and eslint-config-next 15.5.2 -> 15.5.7 (@next/eslint-plugin-next 15.5.7, @typescript-eslint/* 8.48.1, @rushstack/eslint-patch 1.15.0), plus transitive updates such as @emnapi 1.7.1, @tybys/wasm-util 0.10.1, @eslint-community/regexpp 4.12.2, napi-postinstall 0.3.4, resolve 1.22.11, and semver 7.7.3.
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
@@ -12565,28 +12674,28 @@ snapshots:
|
||||
'@nolyfill/is-core-module': 1.0.39
|
||||
debug: 4.4.3
|
||||
eslint: 8.57.1
|
||||
get-tsconfig: 4.10.1
|
||||
get-tsconfig: 4.13.0
|
||||
is-bun-module: 2.0.0
|
||||
stable-hash: 0.0.5
|
||||
tinyglobby: 0.2.15
|
||||
unrs-resolver: 1.11.1
|
||||
optionalDependencies:
|
||||
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
|
||||
eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
|
||||
eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
|
||||
dependencies:
|
||||
debug: 3.2.7
|
||||
optionalDependencies:
|
||||
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
|
||||
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
|
||||
eslint: 8.57.1
|
||||
eslint-import-resolver-node: 0.3.9
|
||||
eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
|
||||
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
|
||||
dependencies:
|
||||
'@rtsao/scc': 1.1.0
|
||||
array-includes: 3.1.9
|
||||
@@ -12597,7 +12706,7 @@ snapshots:
|
||||
doctrine: 2.1.0
|
||||
eslint: 8.57.1
|
||||
eslint-import-resolver-node: 0.3.9
|
||||
eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
|
||||
eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
|
||||
hasown: 2.0.2
|
||||
is-core-module: 2.16.1
|
||||
is-glob: 4.0.3
|
||||
@@ -12609,7 +12718,7 @@ snapshots:
|
||||
string.prototype.trimend: 1.0.9
|
||||
tsconfig-paths: 3.15.0
|
||||
optionalDependencies:
|
||||
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
|
||||
'@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3)
|
||||
transitivePeerDependencies:
|
||||
- eslint-import-resolver-typescript
|
||||
- eslint-import-resolver-webpack
|
||||
@@ -12954,9 +13063,11 @@ snapshots:
|
||||
|
||||
functions-have-names@1.2.3: {}
|
||||
|
||||
geist@1.5.1(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)):
|
||||
geist@1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)):
|
||||
dependencies:
|
||||
next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
|
||||
generator-function@2.0.1: {}
|
||||
|
||||
gensync@1.0.0-beta.2: {}
|
||||
|
||||
@@ -12990,7 +13101,7 @@ snapshots:
|
||||
es-errors: 1.3.0
|
||||
get-intrinsic: 1.3.0
|
||||
|
||||
get-tsconfig@4.10.1:
|
||||
get-tsconfig@4.13.0:
|
||||
dependencies:
|
||||
resolve-pkg-maps: 1.0.0
|
||||
|
||||
@@ -13274,13 +13385,6 @@ snapshots:
|
||||
parent-module: 1.0.1
|
||||
resolve-from: 4.0.0
|
||||
|
||||
import-in-the-middle@1.14.2:
|
||||
dependencies:
|
||||
acorn: 8.15.0
|
||||
acorn-import-attributes: 1.9.5(acorn@8.15.0)
|
||||
cjs-module-lexer: 1.4.3
|
||||
module-details-from-path: 1.0.4
|
||||
|
||||
import-in-the-middle@2.0.0:
|
||||
dependencies:
|
||||
acorn: 8.15.0
|
||||
@@ -13357,7 +13461,7 @@ snapshots:
|
||||
|
||||
is-bun-module@2.0.0:
|
||||
dependencies:
|
||||
semver: 7.7.2
|
||||
semver: 7.7.3
|
||||
|
||||
is-callable@1.2.7: {}
|
||||
|
||||
@@ -13395,6 +13499,14 @@ snapshots:
|
||||
has-tostringtag: 1.0.2
|
||||
safe-regex-test: 1.1.0
|
||||
|
||||
is-generator-function@1.1.2:
|
||||
dependencies:
|
||||
call-bound: 1.0.4
|
||||
generator-function: 2.0.1
|
||||
get-proto: 1.0.1
|
||||
has-tostringtag: 1.0.2
|
||||
safe-regex-test: 1.1.0
|
||||
|
||||
is-glob@4.0.3:
|
||||
dependencies:
|
||||
is-extglob: 2.1.1
|
||||
@@ -14215,7 +14327,7 @@ snapshots:
|
||||
|
||||
nanoid@3.3.11: {}
|
||||
|
||||
napi-postinstall@0.3.3: {}
|
||||
napi-postinstall@0.3.4: {}
|
||||
|
||||
natural-compare@1.4.0: {}
|
||||
|
||||
@@ -14226,9 +14338,9 @@ snapshots:
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
|
||||
next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
dependencies:
|
||||
'@next/env': 15.4.7
|
||||
'@next/env': 15.4.10
|
||||
'@swc/helpers': 0.5.15
|
||||
caniuse-lite: 1.0.30001741
|
||||
postcss: 8.4.31
|
||||
@@ -14236,14 +14348,14 @@ snapshots:
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
styled-jsx: 5.1.6(@babel/core@7.28.4)(react@18.3.1)
|
||||
optionalDependencies:
|
||||
'@next/swc-darwin-arm64': 15.4.7
|
||||
'@next/swc-darwin-x64': 15.4.7
|
||||
'@next/swc-linux-arm64-gnu': 15.4.7
|
||||
'@next/swc-linux-arm64-musl': 15.4.7
|
||||
'@next/swc-linux-x64-gnu': 15.4.7
|
||||
'@next/swc-linux-x64-musl': 15.4.7
|
||||
'@next/swc-win32-arm64-msvc': 15.4.7
|
||||
'@next/swc-win32-x64-msvc': 15.4.7
|
||||
'@next/swc-darwin-arm64': 15.4.8
|
||||
'@next/swc-darwin-x64': 15.4.8
|
||||
'@next/swc-linux-arm64-gnu': 15.4.8
|
||||
'@next/swc-linux-arm64-musl': 15.4.8
|
||||
'@next/swc-linux-x64-gnu': 15.4.8
|
||||
'@next/swc-linux-x64-musl': 15.4.8
|
||||
'@next/swc-win32-arm64-msvc': 15.4.8
|
||||
'@next/swc-win32-x64-msvc': 15.4.8
|
||||
'@opentelemetry/api': 1.9.0
|
||||
'@playwright/test': 1.56.1
|
||||
sharp: 0.34.3
|
||||
@@ -14321,12 +14433,12 @@ snapshots:
|
||||
dependencies:
|
||||
boolbase: 1.0.0
|
||||
|
||||
nuqs@2.7.2(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1):
|
||||
nuqs@2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1):
|
||||
dependencies:
|
||||
'@standard-schema/spec': 1.0.0
|
||||
react: 18.3.1
|
||||
optionalDependencies:
|
||||
next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
|
||||
oas-kit-common@1.0.8:
|
||||
dependencies:
|
||||
@@ -15185,6 +15297,12 @@ snapshots:
|
||||
path-parse: 1.0.7
|
||||
supports-preserve-symlinks-flag: 1.0.0
|
||||
|
||||
resolve@1.22.11:
|
||||
dependencies:
|
||||
is-core-module: 2.16.1
|
||||
path-parse: 1.0.7
|
||||
supports-preserve-symlinks-flag: 1.0.0
|
||||
|
||||
resolve@1.22.8:
|
||||
dependencies:
|
||||
is-core-module: 2.16.1
|
||||
@@ -15340,7 +15458,7 @@ snapshots:
|
||||
dependencies:
|
||||
color: 4.2.3
|
||||
detect-libc: 2.0.4
|
||||
semver: 7.7.2
|
||||
semver: 7.7.3
|
||||
optionalDependencies:
|
||||
'@img/sharp-darwin-arm64': 0.34.3
|
||||
'@img/sharp-darwin-x64': 0.34.3
|
||||
@@ -15996,7 +16114,7 @@ snapshots:
|
||||
|
||||
unrs-resolver@1.11.1:
|
||||
dependencies:
|
||||
napi-postinstall: 0.3.3
|
||||
napi-postinstall: 0.3.4
|
||||
optionalDependencies:
|
||||
'@unrs/resolver-binding-android-arm-eabi': 1.11.1
|
||||
'@unrs/resolver-binding-android-arm64': 1.11.1
|
||||
@@ -16224,7 +16342,7 @@ snapshots:
|
||||
is-async-function: 2.1.1
|
||||
is-date-object: 1.1.0
|
||||
is-finalizationregistry: 1.1.1
|
||||
is-generator-function: 1.1.0
|
||||
is-generator-function: 1.1.2
|
||||
is-regex: 1.2.1
|
||||
is-weakref: 1.1.1
|
||||
isarray: 2.0.5
|
||||
|
||||
@@ -1,6 +1,4 @@
"use client";
import { StoreAgentDetails } from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { isEmptyOrWhitespace } from "@/lib/utils";
import { useRouter } from "next/navigation";
import { useEffect, useState } from "react";
@@ -13,15 +11,17 @@ import {
  OnboardingStep,
} from "../components/OnboardingStep";
import { OnboardingText } from "../components/OnboardingText";
import { getV1RecommendedOnboardingAgents } from "@/app/api/__generated__/endpoints/onboarding/onboarding";
import { resolveResponse } from "@/app/api/helpers";
import { StoreAgentDetails } from "@/app/api/__generated__/models/storeAgentDetails";

export default function Page() {
  const { state, updateState, completeStep } = useOnboarding(4, "INTEGRATIONS");
  const [agents, setAgents] = useState<StoreAgentDetails[]>([]);
  const api = useBackendAPI();
  const router = useRouter();

  useEffect(() => {
    api.getOnboardingAgents().then((agents) => {
    resolveResponse(getV1RecommendedOnboardingAgents()).then((agents) => {
      if (agents.length < 2) {
        completeStep("CONGRATS");
        router.replace("/");

@@ -8,7 +8,6 @@ import {
  CardTitle,
} from "@/components/__legacy__/ui/card";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr";
import { Play } from "lucide-react";
import OnboardingButton from "../components/OnboardingButton";
@@ -79,20 +78,13 @@ export default function Page() {
        <CardContent className="flex flex-col gap-4">
          {Object.entries(agent?.input_schema.properties || {}).map(
            ([key, inputSubSchema]) => (
              <div key={key} className="flex flex-col space-y-2">
                <label className="flex items-center gap-1 text-sm font-medium">
                  {inputSubSchema.title || key}
                  <InformationTooltip
                    description={inputSubSchema.description}
                  />
                </label>
                <RunAgentInputs
                  schema={inputSubSchema}
                  value={onboarding.state?.agentInput?.[key]}
                  placeholder={inputSubSchema.description}
                  onChange={(value) => handleSetAgentInput(key, value)}
                />
              </div>
              <RunAgentInputs
                key={key}
                schema={inputSubSchema}
                value={onboarding.state?.agentInput?.[key]}
                placeholder={inputSubSchema.description}
                onChange={(value) => handleSetAgentInput(key, value)}
              />
            ),
          )}
          <AgentOnboardingCredentials

@@ -12,6 +12,9 @@ import {
  useGetV2GetAgentByVersion,
  useGetV2GetAgentGraph,
} from "@/app/api/__generated__/endpoints/store/store";
import { resolveResponse } from "@/app/api/helpers";
import { postV2AddMarketplaceAgent } from "@/app/api/__generated__/endpoints/library/library";
import { GraphID } from "@/lib/autogpt-server-api";

export function useOnboardingRunStep() {
  const onboarding = useOnboarding(undefined, "AGENT_CHOICE");
@@ -77,12 +80,7 @@ export function useOnboardingRunStep() {

    setShowInput(true);
    onboarding.setStep(6);
    onboarding.updateState({
      completedSteps: [
        ...(onboarding.state.completedSteps || []),
        "AGENT_NEW_RUN",
      ],
    });
    onboarding.completeStep("AGENT_NEW_RUN");
  }

  function handleSetAgentInput(key: string, value: string) {
@@ -111,21 +109,22 @@ export function useOnboardingRunStep() {
    setRunningAgent(true);

    try {
      const libraryAgent = await api.addMarketplaceAgentToLibrary(
        storeAgent?.store_listing_version_id || "",
      const libraryAgent = await resolveResponse(
        postV2AddMarketplaceAgent({
          store_listing_version_id: storeAgent?.store_listing_version_id || "",
          source: "onboarding",
        }),
      );

      const { id: runID } = await api.executeGraph(
        libraryAgent.graph_id,
        libraryAgent.graph_id as GraphID,
        libraryAgent.graph_version,
        onboarding.state.agentInput || {},
        inputCredentials,
        "onboarding",
      );

      onboarding.updateState({
        onboardingAgentExecutionId: runID,
        agentRuns: (onboarding.state.agentRuns || 0) + 1,
      });
      onboarding.updateState({ onboardingAgentExecutionId: runID });

      router.push("/onboarding/6-congrats");
    } catch (error) {

@@ -5,6 +5,9 @@ import { useRouter } from "next/navigation";
import * as party from "party-js";
import { useEffect, useRef, useState } from "react";
import { useOnboarding } from "../../../../providers/onboarding/onboarding-provider";
import { resolveResponse } from "@/app/api/helpers";
import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding";
import { postV2AddMarketplaceAgent } from "@/app/api/__generated__/endpoints/library/library";

export default function Page() {
  const { completeStep } = useOnboarding(7, "AGENT_INPUT");
@@ -37,11 +40,15 @@ export default function Page() {
    completeStep("CONGRATS");

    try {
      const onboarding = await api.getUserOnboarding();
      const onboarding = await resolveResponse(getV1OnboardingState());
      if (onboarding?.selectedStoreListingVersionId) {
        try {
          const libraryAgent = await api.addMarketplaceAgentToLibrary(
            onboarding.selectedStoreListingVersionId,
          const libraryAgent = await resolveResponse(
            postV2AddMarketplaceAgent({
              store_listing_version_id:
                onboarding.selectedStoreListingVersionId,
              source: "onboarding",
            }),
          );
          router.replace(`/library/agents/${libraryAgent.id}`);
        } catch (error) {

@@ -1,7 +1,7 @@
import { cn } from "@/lib/utils";
import StarRating from "./StarRating";
import { StoreAgentDetails } from "@/lib/autogpt-server-api";
import SmartImage from "@/components/__legacy__/SmartImage";
import { StoreAgentDetails } from "@/app/api/__generated__/models/storeAgentDetails";

type OnboardingAgentCardProps = {
  agent?: StoreAgentDetails;

@@ -1,24 +1,24 @@
"use client";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { useRouter } from "next/navigation";
import { useEffect } from "react";
import { resolveResponse, shouldShowOnboarding } from "@/app/api/helpers";
import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding";

export default function OnboardingPage() {
  const router = useRouter();
  const api = useBackendAPI();

  useEffect(() => {
    async function redirectToStep() {
      try {
        // Check if onboarding is enabled
        const isEnabled = await api.isOnboardingEnabled();
        const isEnabled = await shouldShowOnboarding();
        if (!isEnabled) {
          router.replace("/");
          return;
        }

        const onboarding = await api.getUserOnboarding();
        const onboarding = await resolveResponse(getV1OnboardingState());

        // Handle completed onboarding
        if (onboarding.completedSteps.includes("GET_RESULTS")) {
@@ -66,7 +66,7 @@ export default function OnboardingPage() {
    }

    redirectToStep();
  }, [api, router]);
  }, [router]);

  return <LoadingSpinner size="large" cover />;
}

@@ -1,6 +1,16 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useEffect } from "react";
|
||||
import {
|
||||
LineChart,
|
||||
Line,
|
||||
XAxis,
|
||||
YAxis,
|
||||
CartesianGrid,
|
||||
Tooltip,
|
||||
Legend,
|
||||
ResponsiveContainer,
|
||||
} from "recharts";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
@@ -18,9 +28,12 @@ import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import {
|
||||
usePostV2GenerateExecutionAnalytics,
|
||||
useGetV2GetExecutionAnalyticsConfiguration,
|
||||
useGetV2GetExecutionAccuracyTrendsAndAlerts,
|
||||
} from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
import type { ExecutionAnalyticsRequest } from "@/app/api/__generated__/models/executionAnalyticsRequest";
|
||||
import type { ExecutionAnalyticsResponse } from "@/app/api/__generated__/models/executionAnalyticsResponse";
|
||||
import type { AccuracyTrendsResponse } from "@/app/api/__generated__/models/accuracyTrendsResponse";
|
||||
import type { AccuracyLatestData } from "@/app/api/__generated__/models/accuracyLatestData";
|
||||
|
||||
// Use the generated type with minimal adjustment for form handling
|
||||
interface FormData extends Omit<ExecutionAnalyticsRequest, "created_after"> {
|
||||
@@ -33,8 +46,133 @@ export function ExecutionAnalyticsForm() {
|
||||
const [results, setResults] = useState<ExecutionAnalyticsResponse | null>(
|
||||
null,
|
||||
);
|
||||
const [trendsData, setTrendsData] = useState<AccuracyTrendsResponse | null>(
|
||||
null,
|
||||
);
|
||||
const { toast } = useToast();
|
||||
|
||||
// State for accuracy trends query parameters
|
||||
const [accuracyParams, setAccuracyParams] = useState<{
|
||||
graph_id: string;
|
||||
user_id?: string;
|
||||
days_back: number;
|
||||
drop_threshold: number;
|
||||
include_historical?: boolean;
|
||||
} | null>(null);
|
||||
|
||||
// Use the generated API client for accuracy trends (GET)
|
||||
const { data: accuracyApiResponse, error: accuracyError } =
|
||||
useGetV2GetExecutionAccuracyTrendsAndAlerts(
|
||||
accuracyParams || {
|
||||
graph_id: "",
|
||||
days_back: 30,
|
||||
drop_threshold: 10.0,
|
||||
include_historical: false,
|
||||
},
|
||||
{
|
||||
query: {
|
||||
enabled: !!accuracyParams?.graph_id,
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
// Update local state when data changes and handle success/error
|
||||
useEffect(() => {
|
||||
if (accuracyError) {
|
||||
console.error("Failed to fetch trends:", accuracyError);
|
||||
toast({
|
||||
title: "Trends Error",
|
||||
description:
|
||||
(accuracyError as any)?.message || "Failed to fetch accuracy trends",
|
||||
variant: "destructive",
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const data = accuracyApiResponse?.data;
|
||||
if (data && "latest_data" in data) {
|
||||
setTrendsData(data);
|
||||
|
||||
// Check for alerts
|
||||
if (data.alert) {
|
||||
toast({
|
||||
title: "🚨 Accuracy Alert Detected",
|
||||
description: `${data.alert.drop_percent.toFixed(1)}% accuracy drop detected for this agent`,
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
}, [accuracyApiResponse, accuracyError, toast]);
|
||||
|
||||
// Chart component for accuracy trends
|
||||
function AccuracyChart({ data }: { data: AccuracyLatestData[] }) {
|
||||
const chartData = data.map((item) => ({
|
||||
date: new Date(item.date).toLocaleDateString(),
|
||||
"Daily Score": item.daily_score,
|
||||
"3-Day Avg": item.three_day_avg,
|
||||
"7-Day Avg": item.seven_day_avg,
|
||||
"14-Day Avg": item.fourteen_day_avg,
|
||||
}));
|
||||
|
||||
return (
|
||||
<ResponsiveContainer width="100%" height={400}>
|
||||
<LineChart
|
||||
data={chartData}
|
||||
margin={{ top: 5, right: 30, left: 20, bottom: 5 }}
|
||||
>
|
||||
<CartesianGrid strokeDasharray="3 3" />
|
||||
<XAxis dataKey="date" />
|
||||
<YAxis domain={[0, 100]} />
|
||||
<Tooltip
|
||||
formatter={(value) => [`${Number(value).toFixed(2)}%`, ""]}
|
||||
/>
|
||||
<Legend />
|
||||
<Line
|
||||
type="monotone"
|
||||
dataKey="Daily Score"
|
||||
stroke="#3b82f6"
|
||||
strokeWidth={2}
|
||||
dot={{ r: 3 }}
|
||||
/>
|
||||
<Line
|
||||
type="monotone"
|
||||
dataKey="3-Day Avg"
|
||||
stroke="#10b981"
|
||||
strokeWidth={2}
|
||||
dot={{ r: 3 }}
|
||||
/>
|
||||
<Line
|
||||
type="monotone"
|
||||
dataKey="7-Day Avg"
|
||||
stroke="#f59e0b"
|
||||
strokeWidth={2}
|
||||
dot={{ r: 3 }}
|
||||
/>
|
||||
<Line
|
||||
type="monotone"
|
||||
dataKey="14-Day Avg"
|
||||
stroke="#8b5cf6"
|
||||
strokeWidth={2}
|
||||
dot={{ r: 3 }}
|
||||
/>
|
||||
</LineChart>
|
||||
</ResponsiveContainer>
|
||||
);
|
||||
}
|
||||
|
||||
// Function to fetch accuracy trends using generated API client
|
||||
const fetchAccuracyTrends = (graphId: string, userId?: string) => {
|
||||
if (!graphId.trim()) return;
|
||||
|
||||
setAccuracyParams({
|
||||
graph_id: graphId.trim(),
|
||||
user_id: userId?.trim() || undefined,
|
||||
days_back: 30,
|
||||
drop_threshold: 10.0,
|
||||
include_historical: showAccuracyChart, // Include historical data when chart is enabled
|
||||
});
|
||||
};
|
||||
|
||||
// Fetch configuration from API
|
||||
const {
|
||||
data: config,
|
||||
@@ -50,6 +188,7 @@ export function ExecutionAnalyticsForm() {
|
||||
}
|
||||
const result = res.data;
|
||||
setResults(result);
|
||||
|
||||
toast({
|
||||
title: "Analytics Generated",
|
||||
description: `Processed ${result.processed_executions} executions. ${result.successful_analytics} successful, ${result.failed_analytics} failed, ${result.skipped_executions} skipped.`,
|
||||
@@ -58,11 +197,21 @@ export function ExecutionAnalyticsForm() {
|
||||
},
|
||||
onError: (error: any) => {
|
||||
console.error("Analytics generation error:", error);
|
||||
|
||||
const errorMessage =
|
||||
error?.message || error?.detail || "An unexpected error occurred";
|
||||
const isOpenAIError = errorMessage.includes(
|
||||
"OpenAI API key not configured",
|
||||
);
|
||||
|
||||
toast({
|
||||
title: "Analytics Generation Failed",
|
||||
description:
|
||||
error?.message || error?.detail || "An unexpected error occurred",
|
||||
variant: "destructive",
|
||||
title: isOpenAIError
|
||||
? "Analytics Generation Skipped"
|
||||
: "Analytics Generation Failed",
|
||||
description: isOpenAIError
|
||||
? "Analytics generation requires OpenAI configuration, but accuracy trends are still available above."
|
||||
: errorMessage,
|
||||
variant: isOpenAIError ? "default" : "destructive",
|
||||
});
|
||||
},
|
||||
},
|
||||
@@ -77,6 +226,9 @@ export function ExecutionAnalyticsForm() {
|
||||
user_prompt: "", // Will use config default when empty
|
||||
});
|
||||
|
||||
// State for accuracy trends chart toggle
|
||||
const [showAccuracyChart, setShowAccuracyChart] = useState(true);
|
||||
|
||||
// Update form defaults when config loads
|
||||
useEffect(() => {
|
||||
if (config?.data && config.status === 200 && !formData.model_name) {
|
||||
@@ -101,6 +253,11 @@ export function ExecutionAnalyticsForm() {
|
||||
|
||||
setResults(null);
|
||||
|
||||
// Fetch accuracy trends if chart is enabled
|
||||
if (showAccuracyChart) {
|
||||
fetchAccuracyTrends(formData.graph_id, formData.user_id || undefined);
|
||||
}
|
||||
|
||||
// Prepare the request payload
|
||||
const payload: ExecutionAnalyticsRequest = {
|
||||
graph_id: formData.graph_id.trim(),
|
||||
@@ -262,6 +419,18 @@ export function ExecutionAnalyticsForm() {
|
||||
</Label>
|
||||
</div>
|
||||
|
||||
{/* Show Accuracy Chart Checkbox */}
|
||||
<div className="flex items-center space-x-2">
|
||||
<Checkbox
|
||||
id="show_accuracy_chart"
|
||||
checked={showAccuracyChart}
|
||||
onCheckedChange={(checked) => setShowAccuracyChart(!!checked)}
|
||||
/>
|
||||
<Label htmlFor="show_accuracy_chart" className="text-sm">
|
||||
Show accuracy trends chart and historical data visualization
|
||||
</Label>
|
||||
</div>
|
||||
|
||||
{/* Custom System Prompt */}
|
||||
<div className="space-y-2">
|
||||
<Label htmlFor="system_prompt">
|
||||
@@ -370,6 +539,98 @@ export function ExecutionAnalyticsForm() {
|
||||
</div>
|
||||
</form>
|
||||
|
||||
{/* Accuracy Trends Display */}
|
||||
{trendsData && (
|
||||
<div className="space-y-4">
|
||||
<h3 className="text-lg font-semibold">Execution Accuracy Trends</h3>
|
||||
|
||||
{/* Alert Section */}
|
||||
{trendsData.alert && (
|
||||
<div className="rounded-lg border-l-4 border-red-500 bg-red-50 p-4">
|
||||
<div className="flex items-start">
|
||||
<span className="text-2xl">🚨</span>
|
||||
<div className="ml-3 space-y-2">
|
||||
<h4 className="text-lg font-semibold text-red-800">
|
||||
Accuracy Alert Detected
|
||||
</h4>
|
||||
<p className="text-red-700">
|
||||
<strong>
|
||||
{trendsData.alert.drop_percent.toFixed(1)}% accuracy drop
|
||||
</strong>{" "}
|
||||
detected for agent{" "}
|
||||
<code className="rounded bg-red-100 px-1 text-sm">
|
||||
{formData.graph_id}
|
||||
</code>
|
||||
</p>
|
||||
<div className="space-y-1 text-sm text-red-600">
|
||||
<p>
|
||||
• 3-day average:{" "}
|
||||
<strong>
|
||||
{trendsData.alert.three_day_avg.toFixed(2)}%
|
||||
</strong>
|
||||
</p>
|
||||
<p>
|
||||
• 7-day average:{" "}
|
||||
<strong>
|
||||
{trendsData.alert.seven_day_avg.toFixed(2)}%
|
||||
</strong>
|
||||
</p>
|
||||
<p>
|
||||
• Detected at:{" "}
|
||||
<strong>
|
||||
{new Date(
|
||||
trendsData.alert.detected_at,
|
||||
).toLocaleString()}
|
||||
</strong>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Latest Data Summary */}
|
||||
<div className="grid grid-cols-2 gap-4 md:grid-cols-4">
|
||||
<div className="rounded-lg border bg-white p-4 text-center">
|
||||
<div className="text-2xl font-bold text-blue-600">
|
||||
{trendsData.latest_data.daily_score?.toFixed(2) || "N/A"}
|
||||
</div>
|
||||
<div className="text-sm text-gray-600">Daily Score</div>
|
||||
</div>
|
||||
<div className="rounded-lg border bg-white p-4 text-center">
|
||||
<div className="text-2xl font-bold text-green-600">
|
||||
{trendsData.latest_data.three_day_avg?.toFixed(2) || "N/A"}
|
||||
</div>
|
||||
<div className="text-sm text-gray-600">3-Day Avg</div>
|
||||
</div>
|
||||
<div className="rounded-lg border bg-white p-4 text-center">
|
||||
<div className="text-2xl font-bold text-orange-600">
|
||||
{trendsData.latest_data.seven_day_avg?.toFixed(2) || "N/A"}
|
||||
</div>
|
||||
<div className="text-sm text-gray-600">7-Day Avg</div>
|
||||
</div>
|
||||
<div className="rounded-lg border bg-white p-4 text-center">
|
||||
<div className="text-2xl font-bold text-purple-600">
|
||||
{trendsData.latest_data.fourteen_day_avg?.toFixed(2) || "N/A"}
|
||||
</div>
|
||||
<div className="text-sm text-gray-600">14-Day Avg</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Chart Section - only show when toggle is enabled and historical data exists */}
|
||||
{showAccuracyChart && trendsData?.historical_data && (
|
||||
<div className="mt-6">
|
||||
<h4 className="mb-4 text-lg font-semibold">
|
||||
Execution Accuracy Trends Chart
|
||||
</h4>
|
||||
<div className="rounded-lg border bg-white p-6">
|
||||
<AccuracyChart data={trendsData.historical_data} />
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{results && <AnalyticsResultsTable results={results} />}
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -17,12 +17,13 @@ function ExecutionAnalyticsDashboard() {
      </div>

      <div className="rounded-lg border bg-white p-6 shadow-sm">
        <h2 className="mb-4 text-xl font-semibold">Analytics Generation</h2>
        <h2 className="mb-4 text-xl font-semibold">
          Execution Analytics & Accuracy Monitoring
        </h2>
        <p className="mb-6 text-gray-600">
          This tool will identify completed executions missing activity
          summaries or success scores and generate them using AI. Only
          executions that meet the criteria and are missing these fields will
          be processed.
          Generate missing activity summaries and success scores for agent
          executions. After generation, accuracy trends and alerts will
          automatically be displayed to help monitor agent health over time.
        </p>

        <Suspense

@@ -1,4 +1,4 @@
import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
import { OAuthPopupResultMessage } from "@/components/renderers/input-renderer/fields/CredentialField/models/OAuthCredentialModal/useOAuthCredentialModal";
import { NextResponse } from "next/server";

// This route is intended to be used as the callback for integration OAuth flows,

@@ -9,6 +9,8 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { useState } from "react";
|
||||
import { useSaveGraph } from "@/app/(platform)/build/hooks/useSaveGraph";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { ApiError } from "@/lib/autogpt-server-api/helpers"; // Check if this exists
|
||||
|
||||
export const useRunGraph = () => {
|
||||
const { saveGraph, isSaving } = useSaveGraph({
|
||||
@@ -24,6 +26,13 @@ export const useRunGraph = () => {
|
||||
);
|
||||
const [openRunInputDialog, setOpenRunInputDialog] = useState(false);
|
||||
|
||||
const setNodeErrorsForBackendId = useNodeStore(
|
||||
useShallow((state) => state.setNodeErrorsForBackendId),
|
||||
);
|
||||
const clearAllNodeErrors = useNodeStore(
|
||||
useShallow((state) => state.clearAllNodeErrors),
|
||||
);
|
||||
|
||||
const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] =
|
||||
useQueryStates({
|
||||
flowID: parseAsString,
|
||||
@@ -35,19 +44,49 @@ export const useRunGraph = () => {
|
||||
usePostV1ExecuteGraphAgent({
|
||||
mutation: {
|
||||
onSuccess: (response: any) => {
|
||||
clearAllNodeErrors();
|
||||
const { id } = response.data as GraphExecutionMeta;
|
||||
setQueryStates({
|
||||
flowExecutionID: id,
|
||||
});
|
||||
},
|
||||
onError: (error: any) => {
|
||||
// Reset running state on error
|
||||
setIsGraphRunning(false);
|
||||
toast({
|
||||
title: (error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
if (error instanceof ApiError && error.isGraphValidationError?.()) {
|
||||
const errorData = error.response?.detail;
|
||||
|
||||
if (errorData?.node_errors) {
|
||||
Object.entries(errorData.node_errors).forEach(
|
||||
([backendId, nodeErrors]) => {
|
||||
setNodeErrorsForBackendId(
|
||||
backendId,
|
||||
nodeErrors as { [key: string]: string },
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
useNodeStore.getState().nodes.forEach((node) => {
|
||||
const backendId = node.data.metadata?.backend_id || node.id;
|
||||
if (!errorData.node_errors[backendId as string]) {
|
||||
useNodeStore.getState().updateNodeErrors(node.id, {});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
toast({
|
||||
title: errorData?.message || "Graph validation failed",
|
||||
description:
|
||||
"Please fix the validation errors on the highlighted nodes and try again.",
|
||||
variant: "destructive",
|
||||
});
|
||||
} else {
|
||||
toast({
|
||||
title:
|
||||
(error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
},
|
||||
},
|
||||
});
|
||||
@@ -77,7 +116,7 @@ export const useRunGraph = () => {
|
||||
await executeGraph({
|
||||
graphId: flowID ?? "",
|
||||
graphVersion: flowVersion || null,
|
||||
data: { inputs: {}, credentials_inputs: {} },
|
||||
data: { inputs: {}, credentials_inputs: {}, source: "builder" },
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -79,7 +79,11 @@ export const useRunInputDialog = ({
    await executeGraph({
      graphId: flowID ?? "",
      graphVersion: flowVersion || null,
      data: { inputs: inputValues, credentials_inputs: credentialValues },
      data: {
        inputs: inputValues,
        credentials_inputs: credentialValues,
        source: "builder",
      },
    });
    // Optimistically set running state immediately for responsive UI
    setIsGraphRunning(true);

@@ -1,24 +1,25 @@
|
||||
import { useCallback } from "react";
|
||||
import { useReactFlow } from "@xyflow/react";
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import { useNodeStore } from "../../../stores/nodeStore";
|
||||
import { useEdgeStore } from "../../../stores/edgeStore";
|
||||
import { CustomNode } from "../nodes/CustomNode/CustomNode";
|
||||
import { CustomEdge } from "../edges/CustomEdge";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
|
||||
interface CopyableData {
|
||||
nodes: CustomNode[];
|
||||
edges: CustomEdge[];
|
||||
}
|
||||
|
||||
const CLIPBOARD_PREFIX = "autogpt-flow-data:";
|
||||
|
||||
export function useCopyPaste() {
|
||||
// Only use useReactFlow for viewport (not managed by stores)
|
||||
const { getViewport } = useReactFlow();
|
||||
const { toast } = useToast();
|
||||
|
||||
const handleCopyPaste = useCallback(
|
||||
(event: KeyboardEvent) => {
|
||||
// Prevent copy/paste if any modal is open or if the focus is on an input element
|
||||
const activeElement = document.activeElement;
|
||||
const isInputField =
|
||||
activeElement?.tagName === "INPUT" ||
|
||||
@@ -28,7 +29,6 @@ export function useCopyPaste() {
|
||||
if (isInputField) return;
|
||||
|
||||
if (event.ctrlKey || event.metaKey) {
|
||||
// COPY: Ctrl+C or Cmd+C
|
||||
if (event.key === "c" || event.key === "C") {
|
||||
const { nodes } = useNodeStore.getState();
|
||||
const { edges } = useEdgeStore.getState();
|
||||
@@ -53,81 +53,102 @@ export function useCopyPaste() {
|
||||
edges: selectedEdges,
|
||||
};
|
||||
|
||||
storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData));
|
||||
const clipboardText = `${CLIPBOARD_PREFIX}${JSON.stringify(copiedData)}`;
|
||||
navigator.clipboard
|
||||
.writeText(clipboardText)
|
||||
.then(() => {
|
||||
toast({
|
||||
title: "Copied successfully",
|
||||
description: `${selectedNodes.length} node(s) copied to clipboard`,
|
||||
});
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error("Failed to copy to clipboard:", error);
|
||||
});
|
||||
}
|
||||
|
||||
// PASTE: Ctrl+V or Cmd+V
|
||||
if (event.key === "v" || event.key === "V") {
|
||||
const copiedDataString = storage.get(Key.COPIED_FLOW_DATA);
|
||||
if (copiedDataString) {
|
||||
const copiedData = JSON.parse(copiedDataString) as CopyableData;
|
||||
const oldToNewIdMap: Record<string, string> = {};
|
||||
navigator.clipboard
|
||||
.readText()
|
||||
.then((clipboardText) => {
|
||||
if (!clipboardText.startsWith(CLIPBOARD_PREFIX)) {
|
||||
return; // Not our data, ignore
|
||||
}
|
||||
|
||||
// Get fresh viewport values at paste time to ensure correct positioning
|
||||
const { x, y, zoom } = getViewport();
|
||||
const viewportCenter = {
|
||||
x: (window.innerWidth / 2 - x) / zoom,
|
||||
y: (window.innerHeight / 2 - y) / zoom,
|
||||
};
|
||||
const jsonString = clipboardText.slice(CLIPBOARD_PREFIX.length);
|
||||
const copiedData = JSON.parse(jsonString) as CopyableData;
|
||||
const oldToNewIdMap: Record<string, string> = {};
|
||||
|
||||
let minX = Infinity,
|
||||
minY = Infinity,
|
||||
maxX = -Infinity,
|
||||
maxY = -Infinity;
|
||||
copiedData.nodes.forEach((node) => {
|
||||
minX = Math.min(minX, node.position.x);
|
||||
minY = Math.min(minY, node.position.y);
|
||||
maxX = Math.max(maxX, node.position.x);
|
||||
maxY = Math.max(maxY, node.position.y);
|
||||
});
|
||||
|
||||
const offsetX = viewportCenter.x - (minX + maxX) / 2;
|
||||
const offsetY = viewportCenter.y - (minY + maxY) / 2;
|
||||
|
||||
// Deselect existing nodes first
|
||||
useNodeStore.setState((state) => ({
|
||||
nodes: state.nodes.map((node) => ({ ...node, selected: false })),
|
||||
}));
|
||||
|
||||
// Create and add new nodes with UNIQUE IDs using UUID
|
||||
copiedData.nodes.forEach((node) => {
|
||||
const newNodeId = uuidv4();
|
||||
oldToNewIdMap[node.id] = newNodeId;
|
||||
|
||||
const newNode: CustomNode = {
|
||||
...node,
|
||||
id: newNodeId,
|
||||
selected: true,
|
||||
position: {
|
||||
x: node.position.x + offsetX,
|
||||
y: node.position.y + offsetY,
|
||||
},
|
||||
const { x, y, zoom } = getViewport();
|
||||
const viewportCenter = {
|
||||
x: (window.innerWidth / 2 - x) / zoom,
|
||||
y: (window.innerHeight / 2 - y) / zoom,
|
||||
};
|
||||
|
||||
useNodeStore.getState().addNode(newNode);
|
||||
});
|
||||
|
||||
// Add edges with updated source/target IDs
|
||||
const { addEdge } = useEdgeStore.getState();
|
||||
copiedData.edges.forEach((edge) => {
|
||||
const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
|
||||
const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
|
||||
|
||||
addEdge({
|
||||
source: newSourceId,
|
||||
target: newTargetId,
|
||||
sourceHandle: edge.sourceHandle ?? "",
|
||||
targetHandle: edge.targetHandle ?? "",
|
||||
data: {
|
||||
...edge.data,
|
||||
},
|
||||
let minX = Infinity,
|
||||
minY = Infinity,
|
||||
maxX = -Infinity,
|
||||
maxY = -Infinity;
|
||||
copiedData.nodes.forEach((node) => {
|
||||
minX = Math.min(minX, node.position.x);
|
||||
minY = Math.min(minY, node.position.y);
|
||||
maxX = Math.max(maxX, node.position.x);
|
||||
maxY = Math.max(maxY, node.position.y);
|
||||
});
|
||||
|
||||
const offsetX = viewportCenter.x - (minX + maxX) / 2;
|
||||
const offsetY = viewportCenter.y - (minY + maxY) / 2;
|
||||
|
||||
// Deselect existing nodes first
|
||||
useNodeStore.setState((state) => ({
|
||||
nodes: state.nodes.map((node) => ({
|
||||
...node,
|
||||
selected: false,
|
||||
})),
|
||||
}));
|
||||
|
||||
// Create and add new nodes with UNIQUE IDs using UUID
|
||||
copiedData.nodes.forEach((node) => {
|
||||
const newNodeId = uuidv4();
|
||||
oldToNewIdMap[node.id] = newNodeId;
|
||||
|
||||
const newNode: CustomNode = {
|
||||
...node,
|
||||
id: newNodeId,
|
||||
selected: true,
|
||||
position: {
|
||||
x: node.position.x + offsetX,
|
||||
y: node.position.y + offsetY,
|
||||
},
|
||||
};
|
||||
|
||||
useNodeStore.getState().addNode(newNode);
|
||||
});
|
||||
|
||||
// Add edges with updated source/target IDs
|
||||
const { addEdge } = useEdgeStore.getState();
|
||||
copiedData.edges.forEach((edge) => {
|
||||
const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
|
||||
const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
|
||||
|
||||
addEdge({
|
||||
source: newSourceId,
|
||||
target: newTargetId,
|
||||
sourceHandle: edge.sourceHandle ?? "",
|
||||
targetHandle: edge.targetHandle ?? "",
|
||||
data: {
|
||||
...edge.data,
|
||||
},
|
||||
});
|
||||
});
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error("Failed to read from clipboard:", error);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
[getViewport],
|
||||
[getViewport, toast],
|
||||
);
|
||||
|
||||
return handleCopyPaste;
|
||||
|
||||
@@ -42,11 +42,12 @@ export const useFlow = () => {
  const setBlockMenuOpen = useControlPanelStore(
    useShallow((state) => state.setBlockMenuOpen),
  );
  const [{ flowID, flowVersion, flowExecutionID }] = useQueryStates({
    flowID: parseAsString,
    flowVersion: parseAsInteger,
    flowExecutionID: parseAsString,
  });
  const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] =
    useQueryStates({
      flowID: parseAsString,
      flowVersion: parseAsInteger,
      flowExecutionID: parseAsString,
    });

  const { data: executionDetails } = useGetV1GetExecutionDetails(
    flowID || "",
@@ -81,7 +82,7 @@ export const useFlow = () => {
    {
      query: {
        select: (res) => res.data as BlockInfo[],
        enabled: !!flowID && !!blockIds,
        enabled: !!flowID && !!blockIds && blockIds.length > 0,
      },
    },
  );
@@ -102,6 +103,9 @@ export const useFlow = () => {
  // load graph schemas
  useEffect(() => {
    if (graph) {
      setQueryStates({
        flowVersion: graph.version ?? 1,
      });
      setGraphSchemas(
        graph.input_schema as Record<string, any> | null,
        graph.credentials_input_schema as Record<string, any> | null,

@@ -37,6 +37,7 @@ export type CustomNodeData = {
  costs: BlockCost[];
  categories: BlockInfoCategoriesItem[];
  metadata?: NodeModelMetadata;
  errors?: { [key: string]: string };
};

export type CustomNode = XYNode<CustomNodeData, "custom">;
@@ -71,10 +72,24 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
      ? (data.hardcodedValues.output_schema ?? {})
      : data.outputSchema;

    const hasConfigErrors =
      data.errors &&
      Object.values(data.errors).some(
        (value) => value !== null && value !== undefined && value !== "",
      );

    const outputData = data.nodeExecutionResult?.output_data;
    const hasOutputError =
      typeof outputData === "object" &&
      outputData !== null &&
      "error" in outputData;

    const hasErrors = hasConfigErrors || hasOutputError;

    // Currently all blockTypes design are similar - that's why i am using the same component for all of them
    // If in future - if we need some drastic change in some blockTypes design - we can create separate components for them
    return (
      <NodeContainer selected={selected} nodeId={nodeId}>
      <NodeContainer selected={selected} nodeId={nodeId} hasErrors={hasErrors}>
        <div className="rounded-xlarge bg-white">
          <NodeHeader data={data} nodeId={nodeId} />
          {isWebhook && <WebhookDisclaimer nodeId={nodeId} />}
@@ -91,7 +106,11 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
          />
          <NodeAdvancedToggle nodeId={nodeId} />
          {data.uiType != BlockUIType.OUTPUT && (
            <OutputHandler outputSchema={outputSchema} nodeId={nodeId} />
            <OutputHandler
              uiType={data.uiType}
              outputSchema={outputSchema}
              nodeId={nodeId}
            />
          )}
          <NodeDataRenderer nodeId={nodeId} />
        </div>

@@ -3,15 +3,18 @@ import { nodeStyleBasedOnStatus } from "../helpers";

import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useShallow } from "zustand/react/shallow";
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";

export const NodeContainer = ({
  children,
  nodeId,
  selected,
  hasErrors, // these are configuration errors that occur before executing the graph -- more like validation errors
}: {
  children: React.ReactNode;
  nodeId: string;
  selected: boolean;
  hasErrors?: boolean;
}) => {
  const status = useNodeStore(
    useShallow((state) => state.getNodeStatus(nodeId)),
@@ -22,6 +25,7 @@ export const NodeContainer = ({
        "z-12 max-w-[370px] rounded-xlarge ring-1 ring-slate-200/60",
        selected && "shadow-lg ring-2 ring-slate-200",
        status && nodeStyleBasedOnStatus[status],
        hasErrors ? nodeStyleBasedOnStatus[AgentExecutionStatus.FAILED] : "",
      )}
    >
      {children}

@@ -28,6 +28,7 @@ export const NodeContextMenu = ({
      })),
    }));

    useCopyPasteStore.getState().copySelectedNodes();
    useCopyPasteStore.getState().pasteNodes();
  };


@@ -20,17 +20,32 @@ export const FormCreator = React.memo(
    className?: string;
  }) => {
    const updateNodeData = useNodeStore((state) => state.updateNodeData);

    const getHardCodedValues = useNodeStore(
      (state) => state.getHardCodedValues,
    );

    const handleChange = ({ formData }: any) => {
      if ("credentials" in formData && !formData.credentials?.id) {
        delete formData.credentials;
      }
      updateNodeData(nodeId, { hardcodedValues: formData });

      const updatedValues =
        uiType === BlockUIType.AGENT
          ? {
              ...getHardCodedValues(nodeId),
              inputs: formData,
            }
          : formData;

      updateNodeData(nodeId, { hardcodedValues: updatedValues });
    };

    const initialValues = getHardCodedValues(nodeId);
    const hardcodedValues = getHardCodedValues(nodeId);
    const initialValues =
      uiType === BlockUIType.AGENT
        ? (hardcodedValues.inputs ?? {})
        : hardcodedValues;

    return (
      <div className={className}>

@@ -14,13 +14,16 @@ import {
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { getTypeDisplayInfo } from "./helpers";
import { generateHandleId } from "../handlers/helpers";
import { BlockUIType } from "../../types";

export const OutputHandler = ({
  outputSchema,
  nodeId,
  uiType,
}: {
  outputSchema: RJSFSchema;
  nodeId: string;
  uiType: BlockUIType;
}) => {
  const { isOutputConnected } = useEdgeStore();
  const properties = outputSchema?.properties || {};
@@ -79,7 +82,9 @@ export const OutputHandler = ({
          </Text>

          <NodeHandle
            handleId={generateHandleId(key)}
            handleId={
              uiType === BlockUIType.AGENT ? key : generateHandleId(key)
            }
            isConnected={isConnected}
            side="right"
          />

@@ -1,24 +1,36 @@
|
||||
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
|
||||
import { useGetV2BuilderSearchInfinite } from "@/app/api/__generated__/endpoints/store/store";
|
||||
import { SearchResponse } from "@/app/api/__generated__/models/searchResponse";
|
||||
import { useState } from "react";
|
||||
import { useCallback, useEffect, useState } from "react";
|
||||
import { useAddAgentToBuilder } from "../hooks/useAddAgentToBuilder";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store";
|
||||
import {
|
||||
getGetV2ListLibraryAgentsQueryKey,
|
||||
getV2GetLibraryAgent,
|
||||
usePostV2AddMarketplaceAgent,
|
||||
} from "@/app/api/__generated__/endpoints/library/library";
|
||||
import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default";
|
||||
import {
|
||||
getGetV2GetBuilderItemCountsQueryKey,
|
||||
getGetV2GetBuilderSuggestionsQueryKey,
|
||||
} from "@/app/api/__generated__/endpoints/default/default";
|
||||
import { getQueryClient } from "@/lib/react-query/queryClient";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
|
||||
export const useBlockMenuSearch = () => {
|
||||
const { searchQuery } = useBlockMenuStore();
|
||||
const { searchQuery, searchId, setSearchId } = useBlockMenuStore();
|
||||
const { toast } = useToast();
|
||||
const { addAgentToBuilder, addLibraryAgentToBuilder } =
|
||||
useAddAgentToBuilder();
|
||||
const queryClient = getQueryClient();
|
||||
|
||||
const resetSearchSession = useCallback(() => {
|
||||
setSearchId(undefined);
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2GetBuilderSuggestionsQueryKey(),
|
||||
});
|
||||
}, [queryClient, setSearchId]);
|
||||
|
||||
const [addingLibraryAgentId, setAddingLibraryAgentId] = useState<
|
||||
string | null
|
||||
@@ -38,13 +50,19 @@ export const useBlockMenuSearch = () => {
|
||||
page: 1,
|
||||
page_size: 8,
|
||||
search_query: searchQuery,
|
||||
search_id: searchId,
|
||||
},
|
||||
{
|
||||
query: {
|
||||
getNextPageParam: (lastPage, allPages) => {
|
||||
const pagination = lastPage.data as SearchResponse;
|
||||
const isMore = pagination.more_pages;
|
||||
return isMore ? allPages.length + 1 : undefined;
|
||||
getNextPageParam: (lastPage) => {
|
||||
const response = lastPage.data as SearchResponse;
|
||||
const { pagination } = response;
|
||||
if (!pagination) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const { current_page, total_pages } = pagination;
|
||||
return current_page < total_pages ? current_page + 1 : undefined;
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -53,7 +71,6 @@ export const useBlockMenuSearch = () => {
|
||||
const { mutateAsync: addMarketplaceAgent } = usePostV2AddMarketplaceAgent({
|
||||
mutation: {
|
||||
onSuccess: () => {
|
||||
const queryClient = getQueryClient();
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListLibraryAgentsQueryKey(),
|
||||
});
|
||||
@@ -75,6 +92,24 @@ export const useBlockMenuSearch = () => {
|
||||
},
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
if (!searchData?.pages?.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
const latestPage = searchData.pages[searchData.pages.length - 1];
|
||||
const response = latestPage?.data as SearchResponse;
|
||||
if (response?.search_id && response.search_id !== searchId) {
|
||||
setSearchId(response.search_id);
|
||||
}
|
||||
}, [searchData, searchId, setSearchId]);
|
||||
|
||||
useEffect(() => {
|
||||
if (searchId && !searchQuery) {
|
||||
resetSearchSession();
|
||||
}
|
||||
}, [resetSearchSession, searchId, searchQuery]);
|
||||
|
||||
const allSearchData =
|
||||
searchData?.pages?.flatMap((page) => {
|
||||
const response = page.data as SearchResponse;
|
||||
@@ -117,7 +152,12 @@ export const useBlockMenuSearch = () => {
|
||||
});
|
||||
|
||||
const libraryAgent = response.data as LibraryAgent;
|
||||
addAgentToBuilder(libraryAgent);
|
||||
|
||||
const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
|
||||
libraryAgent.id,
|
||||
);
|
||||
|
||||
addAgentToBuilder(libraryAgentDetails as LibraryAgent);
|
||||
|
||||
toast({
|
||||
title: "Agent Added",
|
||||
|
||||
@@ -1,30 +1,32 @@
|
||||
import { debounce } from "lodash";
|
||||
import { useCallback, useEffect, useRef, useState } from "react";
|
||||
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
|
||||
import { getQueryClient } from "@/lib/react-query/queryClient";
|
||||
import { getGetV2GetBuilderSuggestionsQueryKey } from "@/app/api/__generated__/endpoints/default/default";
|
||||
|
||||
const SEARCH_DEBOUNCE_MS = 300;
|
||||
|
||||
export const useBlockMenuSearchBar = () => {
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const [localQuery, setLocalQuery] = useState("");
|
||||
const { setSearchQuery, setSearchId, searchId, searchQuery } =
|
||||
useBlockMenuStore();
|
||||
const { setSearchQuery, setSearchId, searchQuery } = useBlockMenuStore();
|
||||
const queryClient = getQueryClient();
|
||||
|
||||
const searchIdRef = useRef(searchId);
|
||||
useEffect(() => {
|
||||
searchIdRef.current = searchId;
|
||||
}, [searchId]);
|
||||
const clearSearchSession = useCallback(() => {
|
||||
setSearchId(undefined);
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2GetBuilderSuggestionsQueryKey(),
|
||||
});
|
||||
}, [queryClient, setSearchId]);
|
||||
|
||||
const debouncedSetSearchQuery = useCallback(
|
||||
debounce((value: string) => {
|
||||
setSearchQuery(value);
|
||||
if (value.length === 0) {
|
||||
setSearchId(undefined);
|
||||
} else if (!searchIdRef.current) {
|
||||
setSearchId(crypto.randomUUID());
|
||||
clearSearchSession();
|
||||
}
|
||||
}, SEARCH_DEBOUNCE_MS),
|
||||
[setSearchQuery, setSearchId],
|
||||
[clearSearchSession, setSearchQuery],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
@@ -36,13 +38,13 @@ export const useBlockMenuSearchBar = () => {
|
||||
const handleClear = () => {
|
||||
setLocalQuery("");
|
||||
setSearchQuery("");
|
||||
setSearchId(undefined);
|
||||
clearSearchSession();
|
||||
debouncedSetSearchQuery.cancel();
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setLocalQuery(searchQuery);
|
||||
}, []);
|
||||
}, [searchQuery]);
|
||||
|
||||
return {
|
||||
handleClear,
|
||||
|
||||
@@ -0,0 +1,109 @@
import React, { useEffect, useRef, useState } from "react";
import { ArrowLeftIcon, ArrowRightIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";

interface HorizontalScrollAreaProps {
  children: React.ReactNode;
  wrapperClassName?: string;
  scrollContainerClassName?: string;
  scrollAmount?: number;
  dependencyList?: React.DependencyList;
}

const defaultDependencies: React.DependencyList = [];
const baseScrollClasses =
  "flex gap-2 overflow-x-auto px-8 [scrollbar-width:none] [-ms-overflow-style:'none'] [&::-webkit-scrollbar]:hidden";

export const HorizontalScroll: React.FC<HorizontalScrollAreaProps> = ({
  children,
  wrapperClassName,
  scrollContainerClassName,
  scrollAmount = 300,
  dependencyList = defaultDependencies,
}) => {
  const scrollRef = useRef<HTMLDivElement | null>(null);
  const [canScrollLeft, setCanScrollLeft] = useState(false);
  const [canScrollRight, setCanScrollRight] = useState(false);

  const scrollByDelta = (delta: number) => {
    if (!scrollRef.current) {
      return;
    }
    scrollRef.current.scrollBy({ left: delta, behavior: "smooth" });
  };

  const updateScrollState = () => {
    const element = scrollRef.current;
    if (!element) {
      setCanScrollLeft(false);
      setCanScrollRight(false);
      return;
    }
    setCanScrollLeft(element.scrollLeft > 0);
    setCanScrollRight(
      Math.ceil(element.scrollLeft + element.clientWidth) < element.scrollWidth,
    );
  };

  useEffect(() => {
    updateScrollState();
    const element = scrollRef.current;
    if (!element) {
      return;
    }
    const handleScroll = () => updateScrollState();
    element.addEventListener("scroll", handleScroll);
    window.addEventListener("resize", handleScroll);
    return () => {
      element.removeEventListener("scroll", handleScroll);
      window.removeEventListener("resize", handleScroll);
    };
  }, dependencyList);

  return (
    <div className={wrapperClassName}>
      <div className="group relative">
        <div
          ref={scrollRef}
          className={cn(baseScrollClasses, scrollContainerClassName)}
        >
          {children}
        </div>
        {canScrollLeft && (
          <div className="pointer-events-none absolute inset-y-0 left-0 w-8 bg-gradient-to-r from-white via-white/80 to-white/0" />
        )}
        {canScrollRight && (
          <div className="pointer-events-none absolute inset-y-0 right-0 w-8 bg-gradient-to-l from-white via-white/80 to-white/0" />
        )}
        {canScrollLeft && (
          <button
            type="button"
            aria-label="Scroll left"
            className="pointer-events-none absolute left-2 top-5 -translate-y-1/2 opacity-0 transition-opacity duration-200 group-hover:pointer-events-auto group-hover:opacity-100"
            onClick={() => scrollByDelta(-scrollAmount)}
          >
            <ArrowLeftIcon
              size={28}
              className="rounded-full bg-zinc-700 p-1 text-white drop-shadow"
              weight="light"
            />
          </button>
        )}
        {canScrollRight && (
          <button
            type="button"
            aria-label="Scroll right"
            className="pointer-events-none absolute right-2 top-5 -translate-y-1/2 opacity-0 transition-opacity duration-200 group-hover:pointer-events-auto group-hover:opacity-100"
            onClick={() => scrollByDelta(scrollAmount)}
          >
            <ArrowRightIcon
              size={28}
              className="rounded-full bg-zinc-700 p-1 text-white drop-shadow"
              weight="light"
            />
          </button>
        )}
      </div>
    </div>
  );
};
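The new `HorizontalScroll` component takes arbitrary children and adds hover-revealed arrow buttons plus edge fades. A usage sketch assuming a made-up `TagRow` consumer; the props match the `HorizontalScrollAreaProps` interface above:

```tsx
import { HorizontalScroll } from "./HorizontalScroll";

// Hypothetical data and chip markup, just to show the props in use.
const tags = ["Email", "Slack", "Notion", "Google Sheets", "Discord"];

export function TagRow() {
  return (
    <HorizontalScroll
      wrapperClassName="-mx-8"
      scrollAmount={200}
      // Re-measure scrollability whenever the number of chips changes.
      dependencyList={[tags.length]}
    >
      {tags.map((tag) => (
        <span
          key={tag}
          className="whitespace-nowrap rounded-full bg-zinc-100 px-3 py-1 text-sm"
        >
          {tag}
        </span>
      ))}
    </HorizontalScroll>
  );
}
```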
@@ -1,6 +1,7 @@
import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default";
import {
  getGetV2ListLibraryAgentsQueryKey,
  getV2GetLibraryAgent,
  usePostV2AddMarketplaceAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
@@ -105,8 +106,16 @@ export const useMarketplaceAgentsContent = () => {
      },
    });

    // Here, libraryAgent has empty input and output schemas.
    // Not updating the endpoint because this endpoint is used elsewhere.
    // TODO: Create a new endpoint for builder specific to marketplace agents.
    const libraryAgent = response.data as LibraryAgent;
    addAgentToBuilder(libraryAgent);

    const { data: libraryAgentDetails } = await getV2GetLibraryAgent(
      libraryAgent.id,
    );

    addAgentToBuilder(libraryAgentDetails as LibraryAgent);

    toast({
      title: "Agent Added",
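As the TODO in this hunk explains, the add-to-library response arrives with empty input and output schemas, so the hook re-fetches the full library agent before pushing it into the builder. A generic sketch of that two-step shape, with placeholder types and function names rather than the generated client from the diff:

```tsx
// Sketch: the create endpoint returns a stub, so re-fetch the full record
// (with populated schemas) before handing it to the builder.
type LibraryAgentStub = { id: string };
type LibraryAgentFull = LibraryAgentStub & {
  input_schema: Record<string, unknown>;
  output_schema: Record<string, unknown>;
};

async function addThenHydrate(
  create: () => Promise<LibraryAgentStub>,
  fetchById: (id: string) => Promise<LibraryAgentFull>,
  addToBuilder: (agent: LibraryAgentFull) => void,
) {
  const stub = await create(); // schemas are empty here
  const full = await fetchById(stub.id); // schemas are populated here
  addToBuilder(full);
}
```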
@@ -6,10 +6,15 @@ import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { blockMenuContainerStyle } from "../style";
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
import { DefaultStateType } from "../types";
import { SearchHistoryChip } from "../SearchHistoryChip";
import { HorizontalScroll } from "../HorizontalScroll";

export const SuggestionContent = () => {
  const { setIntegration, setDefaultState } = useBlockMenuStore();
  const { setIntegration, setDefaultState, setSearchQuery, setSearchId } =
    useBlockMenuStore();
  const { data, isLoading, isError, error, refetch } = useSuggestionContent();
  const suggestions = data?.suggestions;
  const hasRecentSearches = (suggestions?.recent_searches?.length ?? 0) > 0;

  if (isError) {
    return (
@@ -29,11 +34,45 @@ export const SuggestionContent = () => {
    );
  }

  const suggestions = data?.suggestions;

  return (
    <div className={blockMenuContainerStyle}>
      <div className="w-full space-y-6 pb-4">
        {/* Recent searches */}
        {hasRecentSearches && (
          <div className="space-y-2.5 px-4">
            <p className="font-sans text-sm font-medium leading-[1.375rem] text-zinc-800">
              Recent searches
            </p>
            <HorizontalScroll
              wrapperClassName="-mx-8"
              scrollContainerClassName="flex gap-2 overflow-x-auto px-8 [scrollbar-width:none] [-ms-overflow-style:'none'] [&::-webkit-scrollbar]:hidden"
              dependencyList={[
                suggestions?.recent_searches?.length ?? 0,
                isLoading,
              ]}
            >
              {!isLoading && suggestions
                ? suggestions.recent_searches.map((entry, index) => (
                    <SearchHistoryChip
                      key={entry.search_id || `${entry.search_query}-${index}`}
                      content={entry.search_query || "Untitled search"}
                      onClick={() => {
                        setSearchQuery(entry.search_query || "");
                        setSearchId(entry.search_id || undefined);
                      }}
                    />
                  ))
                : Array(3)
                    .fill(0)
                    .map((_, index) => (
                      <SearchHistoryChip.Skeleton
                        key={`recent-search-skeleton-${index}`}
                      />
                    ))}
            </HorizontalScroll>
          </div>
        )}

        {/* Integrations */}
        <div className="space-y-2.5 px-4">
          <p className="font-sans text-sm font-medium leading-[1.375rem] text-zinc-800">
@@ -981,15 +981,17 @@ const NodeArrayInput: FC<{
    );
    return (
      <div key={entryKey}>
        <NodeHandle
          title={`#${index + 1}`}
          className="text-sm text-gray-500"
          keyName={entryKey}
          schema={schema.items!}
          isConnected={isConnected}
          isRequired={false}
          side="left"
        />
        {schema.items && (
          <NodeHandle
            title={`#${index + 1}`}
            className="text-sm text-gray-500"
            keyName={entryKey}
            schema={schema.items}
            isConnected={isConnected}
            isRequired={false}
            side="left"
          />
        )}
        <div className="mb-2 flex space-x-2">
          {!isConnected &&
            (schema.items ? (
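This hunk swaps the non-null assertion `schema.items!` for a conditional render, so nothing is drawn when an array schema has no item type. A tiny illustration of the same narrowing, using simplified stand-in types:

```tsx
type ArraySchema = { items?: { type: string } };

function describeItems(schema: ArraySchema): string | null {
  // Before: schema.items!.type compiles but throws at runtime when items is
  // missing. After: narrow first and render (or return) nothing otherwise.
  if (!schema.items) {
    return null;
  }
  return schema.items.type;
}

const empty = describeItems({}); // -> null
const typed = describeItems({ items: { type: "string" } }); // -> "string"
```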
@@ -83,7 +83,6 @@ export function RunnerInputDialog({
          onRun={doRun ? undefined : doClose}
          doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
          onCreateSchedule={doCreateSchedule ? undefined : doClose}
          runCount={0}
        />
      </DialogContent>
    </Dialog>
@@ -152,7 +152,9 @@ export const useSaveGraph = ({
      links: graphLinks,
    };

    const response = await createNewGraph({ data: { graph: data } });
    const response = await createNewGraph({
      data: { graph: data, source: "builder" },
    });
    const graphData = response.data as GraphModel;
    setGraphSchemas(
      graphData.input_schema,
@@ -53,6 +53,15 @@ type NodeStore = {
  getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined;
  getNodeBlockUIType: (nodeId: string) => BlockUIType;
  hasWebhookNodes: () => boolean;

  updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => void;
  clearNodeErrors: (nodeId: string) => void;
  getNodeErrors: (nodeId: string) => { [key: string]: string } | undefined;
  setNodeErrorsForBackendId: (
    backendId: string,
    errors: { [key: string]: string },
  ) => void;
  clearAllNodeErrors: () => void; // Add this
};

export const useNodeStore = create<NodeStore>((set, get) => ({
@@ -253,4 +262,47 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
      [BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType),
    );
  },

  updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => {
    set((state) => ({
      nodes: state.nodes.map((n) =>
        n.id === nodeId ? { ...n, data: { ...n.data, errors } } : n,
      ),
    }));
  },

  clearNodeErrors: (nodeId: string) => {
    set((state) => ({
      nodes: state.nodes.map((n) =>
        n.id === nodeId ? { ...n, data: { ...n.data, errors: undefined } } : n,
      ),
    }));
  },

  getNodeErrors: (nodeId: string) => {
    return get().nodes.find((n) => n.id === nodeId)?.data?.errors;
  },

  setNodeErrorsForBackendId: (
    backendId: string,
    errors: { [key: string]: string },
  ) => {
    set((state) => ({
      nodes: state.nodes.map((n) => {
        // Match by backend_id if nodes have it, or by id
        const matches =
          n.data.metadata?.backend_id === backendId || n.id === backendId;
        return matches ? { ...n, data: { ...n.data, errors } } : n;
      }),
    }));
  },

  clearAllNodeErrors: () => {
    set((state) => ({
      nodes: state.nodes.map((n) => ({
        ...n,
        data: { ...n.data, errors: undefined },
      })),
    }));
  },
}));
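The new store methods give the builder one place to push, read, and clear per-node validation errors. A usage sketch assuming the store shape added above; the import path and the `applyValidationErrors` helper are illustrative, not part of the diff:

```tsx
// Import path is assumed; adjust to wherever the builder's nodeStore lives.
import { useNodeStore } from "../stores/nodeStore";

// Surface backend validation errors on the matching nodes, then clear any
// stale errors from a previous attempt first.
export function applyValidationErrors(
  errorsByBackendId: Record<string, { [field: string]: string }>,
) {
  const { setNodeErrorsForBackendId, clearAllNodeErrors } =
    useNodeStore.getState();

  clearAllNodeErrors();
  for (const [backendId, errors] of Object.entries(errorsByBackendId)) {
    setNodeErrorsForBackendId(backendId, errors);
  }
}

// Inside a component, a single node's errors can be read reactively:
// const errors = useNodeStore((s) => s.getNodeErrors(nodeId));
```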
@@ -141,7 +141,6 @@ export function ChatCredentialsSetup({
          onSelectCredentials={(credMeta) =>
            handleCredentialSelect(cred.provider, credMeta)
          }
          hideIfSingleCredentialAvailable={false}
        />
      </div>
    );
@@ -1,6 +1,5 @@
|
||||
"use client";
|
||||
|
||||
import { Skeleton } from "@/components/__legacy__/ui/skeleton";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs";
|
||||
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
|
||||
@@ -11,20 +10,27 @@ import { AgentRunsLoading } from "./components/other/AgentRunsLoading";
|
||||
import { EmptySchedules } from "./components/other/EmptySchedules";
|
||||
import { EmptyTasks } from "./components/other/EmptyTasks";
|
||||
import { EmptyTemplates } from "./components/other/EmptyTemplates";
|
||||
import { EmptyTriggers } from "./components/other/EmptyTriggers";
|
||||
import { SectionWrap } from "./components/other/SectionWrap";
|
||||
import { LoadingSelectedContent } from "./components/selected-views/LoadingSelectedContent";
|
||||
import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView";
|
||||
import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView";
|
||||
import { SelectedTemplateView } from "./components/selected-views/SelectedTemplateView/SelectedTemplateView";
|
||||
import { SelectedTriggerView } from "./components/selected-views/SelectedTriggerView/SelectedTriggerView";
|
||||
import { SelectedViewLayout } from "./components/selected-views/SelectedViewLayout";
|
||||
import { SidebarRunsList } from "./components/sidebar/SidebarRunsList/SidebarRunsList";
|
||||
import { AGENT_LIBRARY_SECTION_PADDING_X } from "./helpers";
|
||||
import { useNewAgentLibraryView } from "./useNewAgentLibraryView";
|
||||
|
||||
export function NewAgentLibraryView() {
|
||||
const {
|
||||
agent,
|
||||
hasAnyItems,
|
||||
ready,
|
||||
error,
|
||||
agentId,
|
||||
agent,
|
||||
ready,
|
||||
activeTemplate,
|
||||
isTemplateLoading,
|
||||
error,
|
||||
hasAnyItems,
|
||||
activeItem,
|
||||
sidebarLoading,
|
||||
activeTab,
|
||||
@@ -32,6 +38,9 @@ export function NewAgentLibraryView() {
|
||||
handleSelectRun,
|
||||
handleCountsChange,
|
||||
handleClearSelectedRun,
|
||||
onRunInitiated,
|
||||
onTriggerSetup,
|
||||
onScheduleCreated,
|
||||
} = useNewAgentLibraryView();
|
||||
|
||||
if (error) {
|
||||
@@ -61,14 +70,19 @@ export function NewAgentLibraryView() {
|
||||
/>
|
||||
</div>
|
||||
<div className="flex min-h-0 flex-1">
|
||||
<EmptyTasks agent={agent} />
|
||||
<EmptyTasks
|
||||
agent={agent}
|
||||
onRun={onRunInitiated}
|
||||
onTriggerSetup={onTriggerSetup}
|
||||
onScheduleCreated={onScheduleCreated}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="ml-4 grid h-full grid-cols-1 gap-0 pt-3 md:gap-4 lg:grid-cols-[25%_70%]">
|
||||
<div className="mx-4 grid h-full grid-cols-1 gap-0 pt-3 md:ml-4 md:mr-0 md:gap-4 lg:grid-cols-[25%_70%]">
|
||||
<SectionWrap className="mb-3 block">
|
||||
<div
|
||||
className={cn(
|
||||
@@ -78,16 +92,21 @@ export function NewAgentLibraryView() {
|
||||
>
|
||||
<RunAgentModal
|
||||
triggerSlot={
|
||||
<Button variant="primary" size="large" className="w-full">
|
||||
<Button
|
||||
variant="primary"
|
||||
size="large"
|
||||
className="w-full"
|
||||
disabled={isTemplateLoading && activeTab === "templates"}
|
||||
>
|
||||
<PlusIcon size={20} /> New task
|
||||
</Button>
|
||||
}
|
||||
agent={agent}
|
||||
agentId={agent.id.toString()}
|
||||
onRunCreated={(execution) => handleSelectRun(execution.id, "runs")}
|
||||
onScheduleCreated={(schedule) =>
|
||||
handleSelectRun(schedule.id, "scheduled")
|
||||
}
|
||||
onRunCreated={onRunInitiated}
|
||||
onScheduleCreated={onScheduleCreated}
|
||||
onTriggerSetup={onTriggerSetup}
|
||||
initialInputValues={activeTemplate?.inputs}
|
||||
initialInputCredentials={activeTemplate?.credentials}
|
||||
/>
|
||||
</div>
|
||||
|
||||
@@ -101,49 +120,60 @@ export function NewAgentLibraryView() {
|
||||
/>
|
||||
</SectionWrap>
|
||||
|
||||
<SectionWrap className="mb-3">
|
||||
<div
|
||||
className={`${AGENT_LIBRARY_SECTION_PADDING_X} border-b border-zinc-100 pb-4`}
|
||||
>
|
||||
<Breadcrumbs
|
||||
items={[
|
||||
{ name: "My Library", link: "/library" },
|
||||
{ name: agent.name, link: `/library/agents/${agentId}` },
|
||||
]}
|
||||
{activeItem ? (
|
||||
activeTab === "scheduled" ? (
|
||||
<SelectedScheduleView
|
||||
agent={agent}
|
||||
scheduleId={activeItem}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
/>
|
||||
</div>
|
||||
<div className="flex min-h-0 flex-1 flex-col">
|
||||
{activeItem ? (
|
||||
activeTab === "scheduled" ? (
|
||||
<SelectedScheduleView
|
||||
agent={agent}
|
||||
scheduleId={activeItem}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
/>
|
||||
) : (
|
||||
<SelectedRunView
|
||||
agent={agent}
|
||||
runId={activeItem}
|
||||
onSelectRun={handleSelectRun}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
/>
|
||||
)
|
||||
) : sidebarLoading ? (
|
||||
<div className="flex flex-col gap-4">
|
||||
<Skeleton className="h-8 w-full bg-slate-100" />
|
||||
<Skeleton className="h-12 w-full bg-slate-100" />
|
||||
<Skeleton className="h-64 w-full bg-slate-100" />
|
||||
<Skeleton className="h-32 w-full bg-slate-100" />
|
||||
</div>
|
||||
) : activeTab === "scheduled" ? (
|
||||
<EmptySchedules />
|
||||
) : activeTab === "templates" ? (
|
||||
<EmptyTemplates />
|
||||
) : (
|
||||
<EmptyTasks agent={agent} />
|
||||
)}
|
||||
</div>
|
||||
</SectionWrap>
|
||||
) : activeTab === "templates" ? (
|
||||
<SelectedTemplateView
|
||||
agent={agent}
|
||||
templateId={activeItem}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
onRunCreated={(execution) => handleSelectRun(execution.id, "runs")}
|
||||
onSwitchToRunsTab={() => setActiveTab("runs")}
|
||||
/>
|
||||
) : activeTab === "triggers" ? (
|
||||
<SelectedTriggerView
|
||||
agent={agent}
|
||||
triggerId={activeItem}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
onSwitchToRunsTab={() => setActiveTab("runs")}
|
||||
/>
|
||||
) : (
|
||||
<SelectedRunView
|
||||
agent={agent}
|
||||
runId={activeItem}
|
||||
onSelectRun={handleSelectRun}
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
/>
|
||||
)
|
||||
) : sidebarLoading ? (
|
||||
<LoadingSelectedContent agentName={agent.name} agentId={agent.id} />
|
||||
) : activeTab === "scheduled" ? (
|
||||
<SelectedViewLayout agentName={agent.name} agentId={agent.id}>
|
||||
<EmptySchedules />
|
||||
</SelectedViewLayout>
|
||||
) : activeTab === "templates" ? (
|
||||
<SelectedViewLayout agentName={agent.name} agentId={agent.id}>
|
||||
<EmptyTemplates />
|
||||
</SelectedViewLayout>
|
||||
) : activeTab === "triggers" ? (
|
||||
<SelectedViewLayout agentName={agent.name} agentId={agent.id}>
|
||||
<EmptyTriggers />
|
||||
</SelectedViewLayout>
|
||||
) : (
|
||||
<SelectedViewLayout agentName={agent.name} agentId={agent.id}>
|
||||
<EmptyTasks
|
||||
agent={agent}
|
||||
onRun={onRunInitiated}
|
||||
onTriggerSetup={onTriggerSetup}
|
||||
onScheduleCreated={onScheduleCreated}
|
||||
/>
|
||||
</SelectedViewLayout>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
"use client";
|
||||
|
||||
import React from "react";
|
||||
import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
|
||||
import { toDisplayName } from "@/providers/agent-credentials/helper";
|
||||
import {
|
||||
getAgentCredentialsFields,
|
||||
getAgentInputFields,
|
||||
getCredentialTypeDisplayName,
|
||||
renderValue,
|
||||
} from "./helpers";
|
||||
import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs";
|
||||
import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs";
|
||||
import { getAgentCredentialsFields, getAgentInputFields } from "./helpers";
|
||||
|
||||
type Props = {
|
||||
agent: LibraryAgent;
|
||||
@@ -22,16 +18,28 @@ export function AgentInputsReadOnly({
|
||||
inputs,
|
||||
credentialInputs,
|
||||
}: Props) {
|
||||
const fields = getAgentInputFields(agent);
|
||||
const credentialFields = getAgentCredentialsFields(agent);
|
||||
const inputEntries = Object.entries(fields);
|
||||
const credentialEntries = Object.entries(credentialFields);
|
||||
const inputFields = getAgentInputFields(agent);
|
||||
const credentialFieldEntries = Object.entries(
|
||||
getAgentCredentialsFields(agent),
|
||||
);
|
||||
|
||||
const hasInputs = inputs && inputEntries.length > 0;
|
||||
const hasCredentials = credentialInputs && credentialEntries.length > 0;
|
||||
const inputEntries =
|
||||
inputs &&
|
||||
Object.entries(inputs).map(([key, value]) => ({
|
||||
key,
|
||||
schema: inputFields[key],
|
||||
value,
|
||||
}));
|
||||
|
||||
const hasInputs = inputEntries && inputEntries.length > 0;
|
||||
const hasCredentials = credentialInputs && credentialFieldEntries.length > 0;
|
||||
|
||||
if (!hasInputs && !hasCredentials) {
|
||||
return <div className="text-neutral-600">No input for this run.</div>;
|
||||
return (
|
||||
<Text variant="body" className="text-zinc-700">
|
||||
No input for this run.
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
@@ -39,14 +47,20 @@ export function AgentInputsReadOnly({
|
||||
{/* Regular inputs */}
|
||||
{hasInputs && (
|
||||
<div className="flex flex-col gap-4">
|
||||
{inputEntries.map(([key, sub]) => (
|
||||
<div key={key} className="flex flex-col gap-1.5">
|
||||
<label className="text-sm font-medium">{sub?.title || key}</label>
|
||||
<p className="whitespace-pre-wrap break-words text-sm text-neutral-700">
|
||||
{renderValue((inputs as Record<string, any>)[key])}
|
||||
</p>
|
||||
</div>
|
||||
))}
|
||||
{inputEntries.map(({ key, schema, value }) => {
|
||||
if (!schema) return null;
|
||||
|
||||
return (
|
||||
<RunAgentInputs
|
||||
key={key}
|
||||
schema={schema}
|
||||
value={value}
|
||||
placeholder={schema.description}
|
||||
onChange={() => {}}
|
||||
readOnly={true}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -54,32 +68,18 @@ export function AgentInputsReadOnly({
|
||||
{hasCredentials && (
|
||||
<div className="flex flex-col gap-6">
|
||||
{hasInputs && <div className="border-t border-neutral-200 pt-4" />}
|
||||
{credentialEntries.map(([key, _sub]) => {
|
||||
{credentialFieldEntries.map(([key, inputSubSchema]) => {
|
||||
const credential = credentialInputs![key];
|
||||
if (!credential) return null;
|
||||
|
||||
return (
|
||||
<div key={key} className="flex flex-col gap-4">
|
||||
<h3 className="text-lg font-medium text-neutral-900">
|
||||
{toDisplayName(credential.provider)} credentials
|
||||
</h3>
|
||||
<div className="flex flex-col gap-3">
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<span className="text-neutral-600">Name</span>
|
||||
<span className="text-neutral-600">
|
||||
{getCredentialTypeDisplayName(credential.type)}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<span className="text-neutral-900">
|
||||
{credential.title || "Untitled"}
|
||||
</span>
|
||||
<span className="font-mono text-neutral-400">
|
||||
{"*".repeat(25)}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<CredentialsInput
|
||||
key={key}
|
||||
schema={{ ...inputSubSchema, discriminator: undefined } as any}
|
||||
selectedCredentials={credential}
|
||||
onSelectCredentials={() => {}}
|
||||
readOnly={true}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
|
||||
@@ -13,7 +13,8 @@ export function getCredentialTypeDisplayName(type: string): string {
}

export function getAgentInputFields(agent: LibraryAgent): Record<string, any> {
  const schema = agent.input_schema as unknown as {
  const schema = (agent.trigger_setup_info?.config_schema ??
    agent.input_schema) as unknown as {
    properties?: Record<string, any>;
  } | null;
  if (!schema || !schema.properties) return {};
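With this change the read-only input view prefers a trigger's `config_schema` and only falls back to the agent's `input_schema`. A small sketch of that fallback with made-up fixtures:

```tsx
// Simplified stand-in for the helper's input, showing the ?? fallback.
type AgentLike = {
  input_schema: { properties?: Record<string, unknown> } | null;
  trigger_setup_info?: {
    config_schema?: { properties?: Record<string, unknown> };
  };
};

function getInputFields(agent: AgentLike): Record<string, unknown> {
  const schema = agent.trigger_setup_info?.config_schema ?? agent.input_schema;
  if (!schema || !schema.properties) return {};
  return schema.properties;
}

// Triggered agent: the trigger's config schema wins.
const triggered = getInputFields({
  input_schema: { properties: { prompt: {} } },
  trigger_setup_info: { config_schema: { properties: { webhook_url: {} } } },
}); // -> { webhook_url: {} }

// Plain agent: falls back to input_schema.
const plain = getInputFields({
  input_schema: { properties: { prompt: {} } },
}); // -> { prompt: {} }
```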
@@ -1,189 +1,59 @@
|
||||
import {
|
||||
IconKey,
|
||||
IconKeyPlus,
|
||||
IconUserPlus,
|
||||
} from "@/components/__legacy__/ui/icons";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectSeparator,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/__legacy__/ui/select";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import useCredentials from "@/hooks/useCredentials";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import {
|
||||
BlockIOCredentialsSubSchema,
|
||||
CredentialsMetaInput,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { getHostFromUrl } from "@/lib/utils/url";
|
||||
import { NotionLogoIcon } from "@radix-ui/react-icons";
|
||||
import { FC, useEffect, useMemo, useState } from "react";
|
||||
import {
|
||||
FaDiscord,
|
||||
FaGithub,
|
||||
FaGoogle,
|
||||
FaHubspot,
|
||||
FaKey,
|
||||
FaMedium,
|
||||
FaTwitter,
|
||||
} from "react-icons/fa";
|
||||
import { APIKeyCredentialsModal } from "./APIKeyCredentialsModal/APIKeyCredentialsModal";
|
||||
import { HostScopedCredentialsModal } from "./HotScopedCredentialsModal/HotScopedCredentialsModal";
|
||||
import { OAuthFlowWaitingModal } from "./OAuthWaitingModal/OAuthWaitingModal";
|
||||
import { PasswordCredentialsModal } from "./PasswordCredentialsModal/PasswordCredentialsModal";
|
||||
import { toDisplayName } from "@/providers/agent-credentials/helper";
|
||||
import { APIKeyCredentialsModal } from "./components/APIKeyCredentialsModal/APIKeyCredentialsModal";
|
||||
import { CredentialRow } from "./components/CredentialRow/CredentialRow";
|
||||
import { CredentialsSelect } from "./components/CredentialsSelect/CredentialsSelect";
|
||||
import { DeleteConfirmationModal } from "./components/DeleteConfirmationModal/DeleteConfirmationModal";
|
||||
import { HostScopedCredentialsModal } from "./components/HotScopedCredentialsModal/HotScopedCredentialsModal";
|
||||
import { OAuthFlowWaitingModal } from "./components/OAuthWaitingModal/OAuthWaitingModal";
|
||||
import { PasswordCredentialsModal } from "./components/PasswordCredentialsModal/PasswordCredentialsModal";
|
||||
import { getCredentialDisplayName } from "./helpers";
|
||||
import { useCredentialsInputs } from "./useCredentialsInputs";
|
||||
|
||||
const fallbackIcon = FaKey;
|
||||
type UseCredentialsInputsReturn = ReturnType<typeof useCredentialsInputs>;
|
||||
|
||||
// --8<-- [start:ProviderIconsEmbed]
|
||||
// Provider icons mapping - uses fallback for unknown providers
|
||||
export const providerIcons: Partial<
|
||||
Record<string, React.FC<{ className?: string }>>
|
||||
> = {
|
||||
aiml_api: fallbackIcon,
|
||||
anthropic: fallbackIcon,
|
||||
apollo: fallbackIcon,
|
||||
e2b: fallbackIcon,
|
||||
github: FaGithub,
|
||||
google: FaGoogle,
|
||||
groq: fallbackIcon,
|
||||
http: fallbackIcon,
|
||||
notion: NotionLogoIcon,
|
||||
nvidia: fallbackIcon,
|
||||
discord: FaDiscord,
|
||||
d_id: fallbackIcon,
|
||||
google_maps: FaGoogle,
|
||||
jina: fallbackIcon,
|
||||
ideogram: fallbackIcon,
|
||||
linear: fallbackIcon,
|
||||
medium: FaMedium,
|
||||
mem0: fallbackIcon,
|
||||
ollama: fallbackIcon,
|
||||
openai: fallbackIcon,
|
||||
openweathermap: fallbackIcon,
|
||||
open_router: fallbackIcon,
|
||||
llama_api: fallbackIcon,
|
||||
pinecone: fallbackIcon,
|
||||
enrichlayer: fallbackIcon,
|
||||
slant3d: fallbackIcon,
|
||||
screenshotone: fallbackIcon,
|
||||
smtp: fallbackIcon,
|
||||
replicate: fallbackIcon,
|
||||
reddit: fallbackIcon,
|
||||
fal: fallbackIcon,
|
||||
revid: fallbackIcon,
|
||||
twitter: FaTwitter,
|
||||
unreal_speech: fallbackIcon,
|
||||
exa: fallbackIcon,
|
||||
hubspot: FaHubspot,
|
||||
smartlead: fallbackIcon,
|
||||
todoist: fallbackIcon,
|
||||
zerobounce: fallbackIcon,
|
||||
};
|
||||
// --8<-- [end:ProviderIconsEmbed]
|
||||
function isLoaded(
|
||||
data: UseCredentialsInputsReturn,
|
||||
): data is Extract<UseCredentialsInputsReturn, { isLoading: false }> {
|
||||
return data.isLoading === false;
|
||||
}
|
||||
|
||||
export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & (
|
||||
| {
|
||||
success: true;
|
||||
code: string;
|
||||
state: string;
|
||||
}
|
||||
| {
|
||||
success: false;
|
||||
message: string;
|
||||
}
|
||||
);
|
||||
|
||||
export const CredentialsInput: FC<{
|
||||
type Props = {
|
||||
schema: BlockIOCredentialsSubSchema;
|
||||
className?: string;
|
||||
selectedCredentials?: CredentialsMetaInput;
|
||||
onSelectCredentials: (newValue?: CredentialsMetaInput) => void;
|
||||
siblingInputs?: Record<string, any>;
|
||||
hideIfSingleCredentialAvailable?: boolean;
|
||||
onSelectCredentials: (newValue?: CredentialsMetaInput) => void;
|
||||
onLoaded?: (loaded: boolean) => void;
|
||||
}> = ({
|
||||
readOnly?: boolean;
|
||||
};
|
||||
|
||||
export function CredentialsInput({
|
||||
schema,
|
||||
className,
|
||||
selectedCredentials,
|
||||
onSelectCredentials,
|
||||
siblingInputs,
|
||||
hideIfSingleCredentialAvailable = true,
|
||||
onLoaded,
|
||||
}) => {
|
||||
const [isAPICredentialsModalOpen, setAPICredentialsModalOpen] =
|
||||
useState(false);
|
||||
const [
|
||||
isUserPasswordCredentialsModalOpen,
|
||||
setUserPasswordCredentialsModalOpen,
|
||||
] = useState(false);
|
||||
const [isHostScopedCredentialsModalOpen, setHostScopedCredentialsModalOpen] =
|
||||
useState(false);
|
||||
const [isOAuth2FlowInProgress, setOAuth2FlowInProgress] = useState(false);
|
||||
const [oAuthPopupController, setOAuthPopupController] =
|
||||
useState<AbortController | null>(null);
|
||||
const [oAuthError, setOAuthError] = useState<string | null>(null);
|
||||
readOnly = false,
|
||||
}: Props) {
|
||||
const hookData = useCredentialsInputs({
|
||||
schema,
|
||||
selectedCredentials,
|
||||
onSelectCredentials,
|
||||
siblingInputs,
|
||||
onLoaded,
|
||||
readOnly,
|
||||
});
|
||||
|
||||
const api = useBackendAPI();
|
||||
const credentials = useCredentials(schema, siblingInputs);
|
||||
|
||||
// Report loaded state to parent
|
||||
useEffect(() => {
|
||||
if (onLoaded) {
|
||||
onLoaded(Boolean(credentials && credentials.isLoading === false));
|
||||
}
|
||||
}, [credentials, onLoaded]);
|
||||
|
||||
// Deselect credentials if they do not exist (e.g. provider was changed)
|
||||
useEffect(() => {
|
||||
if (!credentials || !("savedCredentials" in credentials)) return;
|
||||
if (
|
||||
selectedCredentials &&
|
||||
!credentials.savedCredentials.some((c) => c.id === selectedCredentials.id)
|
||||
) {
|
||||
onSelectCredentials(undefined);
|
||||
}
|
||||
}, [credentials, selectedCredentials, onSelectCredentials]);
|
||||
|
||||
const { hasRelevantCredentials, singleCredential } = useMemo(() => {
|
||||
if (!credentials || !("savedCredentials" in credentials)) {
|
||||
return {
|
||||
hasRelevantCredentials: false,
|
||||
singleCredential: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Simple logic: if we have any saved credentials, we have relevant credentials
|
||||
const hasRelevant = credentials.savedCredentials.length > 0;
|
||||
|
||||
// Auto-select single credential if only one exists
|
||||
const single =
|
||||
credentials.savedCredentials.length === 1
|
||||
? credentials.savedCredentials[0]
|
||||
: null;
|
||||
|
||||
return {
|
||||
hasRelevantCredentials: hasRelevant,
|
||||
singleCredential: single,
|
||||
};
|
||||
}, [credentials]);
|
||||
|
||||
// If only 1 credential is available, auto-select it and hide this input
|
||||
useEffect(() => {
|
||||
if (singleCredential && !selectedCredentials) {
|
||||
onSelectCredentials(singleCredential);
|
||||
}
|
||||
}, [singleCredential, selectedCredentials, onSelectCredentials]);
|
||||
|
||||
if (
|
||||
!credentials ||
|
||||
credentials.isLoading ||
|
||||
(singleCredential && hideIfSingleCredentialAvailable)
|
||||
) {
|
||||
if (!isLoaded(hookData)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -194,309 +64,158 @@ export const CredentialsInput: FC<{
|
||||
supportsOAuth2,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
savedCredentials,
|
||||
oAuthCallback,
|
||||
} = credentials;
|
||||
credentialsToShow,
|
||||
oAuthError,
|
||||
isAPICredentialsModalOpen,
|
||||
isUserPasswordCredentialsModalOpen,
|
||||
isHostScopedCredentialsModalOpen,
|
||||
isOAuth2FlowInProgress,
|
||||
oAuthPopupController,
|
||||
credentialToDelete,
|
||||
deleteCredentialsMutation,
|
||||
actionButtonText,
|
||||
setAPICredentialsModalOpen,
|
||||
setUserPasswordCredentialsModalOpen,
|
||||
setHostScopedCredentialsModalOpen,
|
||||
setCredentialToDelete,
|
||||
handleActionButtonClick,
|
||||
handleCredentialSelect,
|
||||
handleDeleteCredential,
|
||||
handleDeleteConfirm,
|
||||
} = hookData;
|
||||
|
||||
async function handleOAuthLogin() {
|
||||
setOAuthError(null);
|
||||
const { login_url, state_token } = await api.oAuthLogin(
|
||||
provider,
|
||||
schema.credentials_scopes,
|
||||
);
|
||||
setOAuth2FlowInProgress(true);
|
||||
const popup = window.open(login_url, "_blank", "popup=true");
|
||||
const displayName = toDisplayName(provider);
|
||||
const hasCredentialsToShow = credentialsToShow.length > 0;
|
||||
|
||||
if (!popup) {
|
||||
throw new Error(
|
||||
"Failed to open popup window. Please allow popups for this site.",
|
||||
);
|
||||
}
|
||||
|
||||
const controller = new AbortController();
|
||||
setOAuthPopupController(controller);
|
||||
controller.signal.onabort = () => {
|
||||
console.debug("OAuth flow aborted");
|
||||
setOAuth2FlowInProgress(false);
|
||||
popup.close();
|
||||
};
|
||||
|
||||
const handleMessage = async (e: MessageEvent<OAuthPopupResultMessage>) => {
|
||||
console.debug("Message received:", e.data);
|
||||
if (
|
||||
typeof e.data != "object" ||
|
||||
!("message_type" in e.data) ||
|
||||
e.data.message_type !== "oauth_popup_result"
|
||||
) {
|
||||
console.debug("Ignoring irrelevant message");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!e.data.success) {
|
||||
console.error("OAuth flow failed:", e.data.message);
|
||||
setOAuthError(`OAuth flow failed: ${e.data.message}`);
|
||||
setOAuth2FlowInProgress(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (e.data.state !== state_token) {
|
||||
console.error("Invalid state token received");
|
||||
setOAuthError("Invalid state token received");
|
||||
setOAuth2FlowInProgress(false);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
console.debug("Processing OAuth callback");
|
||||
const credentials = await oAuthCallback(e.data.code, e.data.state);
|
||||
console.debug("OAuth callback processed successfully");
|
||||
onSelectCredentials({
|
||||
id: credentials.id,
|
||||
type: "oauth2",
|
||||
title: credentials.title,
|
||||
provider,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error("Error in OAuth callback:", error);
|
||||
setOAuthError(
|
||||
// type of error is unkown so we need to use String(error)
|
||||
`Error in OAuth callback: ${
|
||||
error instanceof Error ? error.message : String(error)
|
||||
}`,
|
||||
);
|
||||
} finally {
|
||||
console.debug("Finalizing OAuth flow");
|
||||
setOAuth2FlowInProgress(false);
|
||||
controller.abort("success");
|
||||
}
|
||||
};
|
||||
|
||||
console.debug("Adding message event listener");
|
||||
window.addEventListener("message", handleMessage, {
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
setTimeout(
|
||||
() => {
|
||||
console.debug("OAuth flow timed out");
|
||||
controller.abort("timeout");
|
||||
setOAuth2FlowInProgress(false);
|
||||
setOAuthError("OAuth flow timed out");
|
||||
},
|
||||
5 * 60 * 1000,
|
||||
);
|
||||
}
|
||||
|
||||
const ProviderIcon = providerIcons[provider] || fallbackIcon;
|
||||
const modals = (
|
||||
<>
|
||||
{supportsApiKey && (
|
||||
<APIKeyCredentialsModal
|
||||
schema={schema}
|
||||
open={isAPICredentialsModalOpen}
|
||||
onClose={() => setAPICredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(credsMeta) => {
|
||||
onSelectCredentials(credsMeta);
|
||||
setAPICredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
)}
|
||||
{supportsOAuth2 && (
|
||||
<OAuthFlowWaitingModal
|
||||
open={isOAuth2FlowInProgress}
|
||||
onClose={() => oAuthPopupController?.abort("canceled")}
|
||||
providerName={providerName}
|
||||
/>
|
||||
)}
|
||||
{supportsUserPassword && (
|
||||
<PasswordCredentialsModal
|
||||
schema={schema}
|
||||
open={isUserPasswordCredentialsModalOpen}
|
||||
onClose={() => setUserPasswordCredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onSelectCredentials(creds);
|
||||
setUserPasswordCredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
)}
|
||||
{supportsHostScoped && (
|
||||
<HostScopedCredentialsModal
|
||||
schema={schema}
|
||||
open={isHostScopedCredentialsModalOpen}
|
||||
onClose={() => setHostScopedCredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onSelectCredentials(creds);
|
||||
setHostScopedCredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
|
||||
const fieldHeader = (
|
||||
<div className="mb-2 flex gap-1">
|
||||
<span className="text-m green text-gray-900">
|
||||
{providerName} Credentials
|
||||
</span>
|
||||
<InformationTooltip description={schema.description} />
|
||||
</div>
|
||||
);
|
||||
|
||||
// Show credentials creation UI when no relevant credentials exist
|
||||
if (!hasRelevantCredentials) {
|
||||
return (
|
||||
<div className="mb-4">
|
||||
{fieldHeader}
|
||||
|
||||
<div className={cn("flex flex-row space-x-2", className)}>
|
||||
{supportsOAuth2 && (
|
||||
<Button onClick={handleOAuthLogin} size="small">
|
||||
<ProviderIcon className="mr-2 h-4 w-4" />
|
||||
{"Sign in with " + providerName}
|
||||
</Button>
|
||||
)}
|
||||
{supportsApiKey && (
|
||||
<Button
|
||||
onClick={() => setAPICredentialsModalOpen(true)}
|
||||
size="small"
|
||||
>
|
||||
<ProviderIcon className="mr-2 h-4 w-4" />
|
||||
Enter API key
|
||||
</Button>
|
||||
)}
|
||||
{supportsUserPassword && (
|
||||
<Button
|
||||
onClick={() => setUserPasswordCredentialsModalOpen(true)}
|
||||
size="small"
|
||||
>
|
||||
<ProviderIcon className="mr-2 h-4 w-4" />
|
||||
Enter username and password
|
||||
</Button>
|
||||
)}
|
||||
{supportsHostScoped && credentials.discriminatorValue && (
|
||||
<Button
|
||||
onClick={() => setHostScopedCredentialsModalOpen(true)}
|
||||
size="small"
|
||||
>
|
||||
<ProviderIcon className="mr-2 h-4 w-4" />
|
||||
{`Enter sensitive headers for ${getHostFromUrl(credentials.discriminatorValue)}`}
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
{modals}
|
||||
{oAuthError && (
|
||||
<div className="mt-2 text-red-500">Error: {oAuthError}</div>
|
||||
return (
|
||||
<div className={cn("mb-6", className)}>
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<Text variant="large-medium">{displayName} credentials</Text>
|
||||
{schema.description && (
|
||||
<InformationTooltip description={schema.description} />
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function handleValueChange(newValue: string) {
|
||||
if (newValue === "sign-in") {
|
||||
// Trigger OAuth2 sign in flow
|
||||
handleOAuthLogin();
|
||||
} else if (newValue === "add-api-key") {
|
||||
// Open API key dialog
|
||||
setAPICredentialsModalOpen(true);
|
||||
} else if (newValue === "add-user-password") {
|
||||
// Open user password dialog
|
||||
setUserPasswordCredentialsModalOpen(true);
|
||||
} else if (newValue === "add-host-scoped") {
|
||||
// Open host-scoped credentials dialog
|
||||
setHostScopedCredentialsModalOpen(true);
|
||||
} else {
|
||||
const selectedCreds = savedCredentials.find((c) => c.id == newValue)!;
|
||||
{hasCredentialsToShow ? (
|
||||
<>
|
||||
{credentialsToShow.length > 1 && !readOnly ? (
|
||||
<CredentialsSelect
|
||||
credentials={credentialsToShow}
|
||||
provider={provider}
|
||||
displayName={displayName}
|
||||
selectedCredentials={selectedCredentials}
|
||||
onSelectCredential={handleCredentialSelect}
|
||||
readOnly={readOnly}
|
||||
/>
|
||||
) : (
|
||||
<div className="mb-4 space-y-2">
|
||||
{credentialsToShow.map((credential) => {
|
||||
return (
|
||||
<CredentialRow
|
||||
key={credential.id}
|
||||
credential={credential}
|
||||
provider={provider}
|
||||
displayName={displayName}
|
||||
onSelect={() => handleCredentialSelect(credential.id)}
|
||||
onDelete={() =>
|
||||
handleDeleteCredential({
|
||||
id: credential.id,
|
||||
title: getCredentialDisplayName(
|
||||
credential,
|
||||
displayName,
|
||||
),
|
||||
})
|
||||
}
|
||||
readOnly={readOnly}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
{!readOnly && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={handleActionButtonClick}
|
||||
className="w-fit"
|
||||
>
|
||||
{actionButtonText}
|
||||
</Button>
|
||||
)}
|
||||
</>
|
||||
) : (
|
||||
!readOnly && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={handleActionButtonClick}
|
||||
className="w-fit"
|
||||
>
|
||||
{actionButtonText}
|
||||
</Button>
|
||||
)
|
||||
)}
|
||||
|
||||
onSelectCredentials({
|
||||
id: selectedCreds.id,
|
||||
type: selectedCreds.type,
|
||||
provider: provider,
|
||||
// title: customTitle, // TODO: add input for title
|
||||
});
|
||||
}
|
||||
}
|
||||
{!readOnly && (
|
||||
<>
|
||||
{supportsApiKey ? (
|
||||
<APIKeyCredentialsModal
|
||||
schema={schema}
|
||||
open={isAPICredentialsModalOpen}
|
||||
onClose={() => setAPICredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(credsMeta) => {
|
||||
onSelectCredentials(credsMeta);
|
||||
setAPICredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
) : null}
|
||||
{supportsOAuth2 ? (
|
||||
<OAuthFlowWaitingModal
|
||||
open={isOAuth2FlowInProgress}
|
||||
onClose={() => oAuthPopupController?.abort("canceled")}
|
||||
providerName={providerName}
|
||||
/>
|
||||
) : null}
|
||||
{supportsUserPassword ? (
|
||||
<PasswordCredentialsModal
|
||||
schema={schema}
|
||||
open={isUserPasswordCredentialsModalOpen}
|
||||
onClose={() => setUserPasswordCredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onSelectCredentials(creds);
|
||||
setUserPasswordCredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
) : null}
|
||||
{supportsHostScoped ? (
|
||||
<HostScopedCredentialsModal
|
||||
schema={schema}
|
||||
open={isHostScopedCredentialsModalOpen}
|
||||
onClose={() => setHostScopedCredentialsModalOpen(false)}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onSelectCredentials(creds);
|
||||
setHostScopedCredentialsModalOpen(false);
|
||||
}}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
// Saved credentials exist
|
||||
return (
|
||||
<div>
|
||||
{fieldHeader}
|
||||
{oAuthError ? (
|
||||
<Text variant="body" className="mt-2 text-red-500">
|
||||
Error: {oAuthError}
|
||||
</Text>
|
||||
) : null}
|
||||
|
||||
<Select value={selectedCredentials?.id} onValueChange={handleValueChange}>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={schema.placeholder} />
|
||||
</SelectTrigger>
|
||||
<SelectContent className="nodrag">
|
||||
{savedCredentials
|
||||
.filter((c) => c.type == "oauth2")
|
||||
.map((credentials, index) => (
|
||||
<SelectItem key={index} value={credentials.id}>
|
||||
<ProviderIcon className="mr-2 inline h-4 w-4" />
|
||||
{credentials.title ||
|
||||
credentials.username ||
|
||||
`Your ${providerName} account`}
|
||||
</SelectItem>
|
||||
))}
|
||||
{savedCredentials
|
||||
.filter((c) => c.type == "api_key")
|
||||
.map((credentials, index) => (
|
||||
<SelectItem key={index} value={credentials.id}>
|
||||
<ProviderIcon className="mr-2 inline h-4 w-4" />
|
||||
<IconKey className="mr-1.5 inline" />
|
||||
{credentials.title}
|
||||
</SelectItem>
|
||||
))}
|
||||
{savedCredentials
|
||||
.filter((c) => c.type == "user_password")
|
||||
.map((credentials, index) => (
|
||||
<SelectItem key={index} value={credentials.id}>
|
||||
<ProviderIcon className="mr-2 inline h-4 w-4" />
|
||||
<IconUserPlus className="mr-1.5 inline" />
|
||||
{credentials.title}
|
||||
</SelectItem>
|
||||
))}
|
||||
{savedCredentials
|
||||
.filter((c) => c.type == "host_scoped")
|
||||
.map((credentials, index) => (
|
||||
<SelectItem key={index} value={credentials.id}>
|
||||
<ProviderIcon className="mr-2 inline h-4 w-4" />
|
||||
<IconKey className="mr-1.5 inline" />
|
||||
{credentials.title}
|
||||
</SelectItem>
|
||||
))}
|
||||
<SelectSeparator />
|
||||
{supportsOAuth2 && (
|
||||
<SelectItem value="sign-in">
|
||||
<IconUserPlus className="mr-1.5 inline" />
|
||||
Sign in with {providerName}
|
||||
</SelectItem>
|
||||
)}
|
||||
{supportsApiKey && (
|
||||
<SelectItem value="add-api-key">
|
||||
<IconKeyPlus className="mr-1.5 inline" />
|
||||
Add new API key
|
||||
</SelectItem>
|
||||
)}
|
||||
{supportsUserPassword && (
|
||||
<SelectItem value="add-user-password">
|
||||
<IconUserPlus className="mr-1.5 inline" />
|
||||
Add new user password
|
||||
</SelectItem>
|
||||
)}
|
||||
{supportsHostScoped && (
|
||||
<SelectItem value="add-host-scoped">
|
||||
<IconKey className="mr-1.5 inline" />
|
||||
Add host-scoped headers
|
||||
</SelectItem>
|
||||
)}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
{modals}
|
||||
{oAuthError && (
|
||||
<div className="mt-2 text-red-500">Error: {oAuthError}</div>
|
||||
<DeleteConfirmationModal
|
||||
credentialToDelete={credentialToDelete}
|
||||
isDeleting={deleteCredentialsMutation.isPending}
|
||||
onClose={() => setCredentialToDelete(null)}
|
||||
onConfirm={handleDeleteConfirm}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
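The refactored `CredentialsInput` shown in the last hunks moves its state into `useCredentialsInputs` and gains a `readOnly` flag, which is how `AgentInputsReadOnly` renders saved credentials without edit affordances. A usage sketch based on the new props; the wrapper component and import path are assumptions:

```tsx
// Import path is illustrative; AgentInputsReadOnly imports it from
// "../CredentialsInputs/CredentialsInputs".
import { CredentialsInput } from "./CredentialsInputs";
import type {
  BlockIOCredentialsSubSchema,
  CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";

type Props = {
  schema: BlockIOCredentialsSubSchema;
  credential: CredentialsMetaInput;
};

// Read-only rendering of an already-selected credential: no selection
// callback is needed, and the add/delete affordances stay hidden.
export function SavedCredentialSummary({ schema, credential }: Props) {
  return (
    <CredentialsInput
      schema={schema}
      selectedCredentials={credential}
      onSelectCredentials={() => {}}
      readOnly={true}
    />
  );
}
```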
Some files were not shown because too many files have changed in this diff.