Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-02-13 08:14:58 -05:00

Compare commits: add-llm-ma ... abhi/folde (5 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | a6c2f645f1 |  |
|  | dce3d26d0a |  |
|  | 4337b67149 |  |
|  | 62bc325d79 |  |
|  | e02687ad09 |  |
@@ -5,13 +5,42 @@
!docs/

# Platform - Libs
!autogpt_platform/autogpt_libs/
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md

# Platform - Backend
!autogpt_platform/backend/
!autogpt_platform/backend/backend/
!autogpt_platform/backend/test/e2e_test_data.py
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md
!autogpt_platform/backend/.env
!autogpt_platform/backend/gen_prisma_types_stub.py

# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md

# Platform - Frontend
!autogpt_platform/frontend/
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/scripts/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*
!autogpt_platform/frontend/.env

# Classic - AutoGPT
!classic/original_autogpt/autogpt/

@@ -35,38 +64,6 @@
# Classic - Frontend
!classic/frontend/build/web/

# Explicitly re-ignore unwanted files from whitelisted directories
# Note: These patterns MUST come after the whitelist rules to take effect

# Hidden files and directories (but keep frontend .env files needed for build)
**/.*
!autogpt_platform/frontend/.env
!autogpt_platform/frontend/.env.default
!autogpt_platform/frontend/.env.production

# Python artifacts
**/__pycache__/
**/*.pyc
**/*.pyo
**/.venv/
**/.ruff_cache/
**/.pytest_cache/
**/.coverage
**/htmlcov/

# Node artifacts
**/node_modules/
**/.next/
**/storybook-static/
**/playwright-report/
**/test-results/

# Build artifacts
**/dist/
**/build/
!autogpt_platform/frontend/src/**/build/
**/target/

# Logs and temp files
**/*.log
**/*.tmp
# Explicitly re-ignore some folders
.*
**/__pycache__
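The re-ignore block above only works because ignore files are evaluated top to bottom, with the last matching pattern winning. The toy Python sketch below (not Docker's actual matcher; pattern semantics are simplified with `fnmatch`) illustrates why the hidden-file re-ignore must come after the whitelist entries, and why the frontend `.env` files have to be whitelisted again afterwards.

```python
# Toy "last matching rule wins" evaluator, for illustration only.
# Real .dockerignore semantics (**, directory handling, etc.) differ.
from fnmatch import fnmatch

RULES = [
    ("*", True),                                # ignore everything by default
    ("autogpt_platform/frontend/.env", False),  # whitelist (a "!" rule)
    ("**/.*", True),                            # re-ignore hidden files afterwards...
    ("autogpt_platform/frontend/.env", False),  # ...so .env must be re-whitelisted last
]

def is_ignored(path: str) -> bool:
    ignored = False
    for pattern, ignore in RULES:
        if fnmatch(path, pattern):
            ignored = ignore  # later rules override earlier ones
    return ignored

print(is_ignored("autogpt_platform/frontend/.env"))   # False: final whitelist rule wins
print(is_ignored("autogpt_platform/backend/.venv"))   # True: re-ignored hidden directory
print(is_ignored("docs/index.md"))                    # True: only the catch-all "*" matches
```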
.github/workflows/platform-frontend-ci.yml (vendored, 249 changed lines)
@@ -26,6 +26,7 @@ jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
components-changed: ${{ steps.filter.outputs.components }}

steps:
@@ -40,17 +41,28 @@ jobs:
components:
- 'autogpt_platform/frontend/src/components/**'

- name: Enable corepack
run: corepack enable

- name: Set up Node
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

- name: Install dependencies to populate cache
- name: Enable corepack
run: corepack enable

- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT

- name: Cache dependencies
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-

- name: Install dependencies
run: pnpm install --frozen-lockfile

lint:
@@ -61,15 +73,22 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6

- name: Enable corepack
run: corepack enable

- name: Set up Node
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

- name: Enable corepack
run: corepack enable

- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-

- name: Install dependencies
run: pnpm install --frozen-lockfile
@@ -92,15 +111,22 @@ jobs:
with:
fetch-depth: 0

- name: Enable corepack
run: corepack enable

- name: Set up Node
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

- name: Enable corepack
run: corepack enable

- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-

- name: Install dependencies
run: pnpm install --frozen-lockfile
@@ -115,8 +141,10 @@ jobs:
exitOnceUploaded: true

e2e_test:
name: end-to-end tests
runs-on: big-boi
needs: setup
strategy:
fail-fast: false

steps:
- name: Checkout repository
@@ -124,11 +152,19 @@ jobs:
with:
submodules: recursive

- name: Set up Platform - Copy default supabase .env
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"

- name: Enable corepack
run: corepack enable

- name: Copy default supabase .env
run: |
cp ../.env.default ../.env

- name: Set up Platform - Copy backend .env and set OpenAI API key
- name: Copy backend .env and set OpenAI API key
run: |
cp ../backend/.env.default ../backend/.env
echo "OPENAI_INTERNAL_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> ../backend/.env
@@ -136,125 +172,77 @@ jobs:
# Used by E2E test data script to generate embeddings for approved store agents
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

- name: Set up Platform - Set up Docker Buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
driver-opts: network=host

- name: Set up Platform - Expose GHA cache to docker buildx CLI
uses: crazy-max/ghaction-github-runtime@v3

- name: Set up Platform - Build Docker images (with cache)
working-directory: autogpt_platform
run: |
pip install pyyaml

# Resolve extends and generate a flat compose file that bake can understand
docker compose -f docker-compose.yml config > docker-compose.resolved.yml

# Add cache configuration to the resolved compose file
python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
--source docker-compose.resolved.yml \
--cache-from "type=gha" \
--cache-to "type=gha,mode=max" \
--backend-hash "${{ hashFiles('autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/poetry.lock', 'autogpt_platform/backend/backend') }}" \
--frontend-hash "${{ hashFiles('autogpt_platform/frontend/Dockerfile', 'autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/src') }}" \
--git-ref "${{ github.ref }}"

# Build with bake using the resolved compose file (now includes cache config)
docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
env:
NEXT_PUBLIC_PW_TEST: true

- name: Set up tests - Cache E2E test data
id: e2e-data-cache
- name: Cache Docker layers
uses: actions/cache@v5
with:
path: /tmp/e2e_test_data.sql
key: e2e-test-data-${{ hashFiles('autogpt_platform/backend/test/e2e_test_data.py', 'autogpt_platform/backend/migrations/**', '.github/workflows/platform-frontend-ci.yml') }}
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
restore-keys: |
${{ runner.os }}-buildx-frontend-test-

- name: Set up Platform - Start Supabase DB + Auth
- name: Run docker compose
run: |
docker compose -f ../docker-compose.resolved.yml up -d db auth --no-build
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done'
echo "Waiting for auth service to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -c "SELECT 1 FROM auth.users LIMIT 1" 2>/dev/null; do sleep 2; done' || echo "Auth schema check timeout, continuing..."

- name: Set up Platform - Run migrations
run: |
echo "Running migrations..."
docker compose -f ../docker-compose.resolved.yml run --rm migrate
echo "✅ Migrations completed"
NEXT_PUBLIC_PW_TEST=true docker compose -f ../docker-compose.yml up -d
env:
NEXT_PUBLIC_PW_TEST: true
DOCKER_BUILDKIT: 1
BUILDX_CACHE_FROM: type=local,src=/tmp/.buildx-cache
BUILDX_CACHE_TO: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Set up tests - Load cached E2E test data
if: steps.e2e-data-cache.outputs.cache-hit == 'true'
- name: Move cache
run: |
echo "✅ Found cached E2E test data, restoring..."
{
echo "SET session_replication_role = 'replica';"
cat /tmp/e2e_test_data.sql
echo "SET session_replication_role = 'origin';"
} | docker compose -f ../docker-compose.resolved.yml exec -T db psql -U postgres -d postgres -b
# Refresh materialized views after restore
docker compose -f ../docker-compose.resolved.yml exec -T db \
psql -U postgres -d postgres -b -c "SET search_path TO platform; SELECT refresh_store_materialized_views();" || true
rm -rf /tmp/.buildx-cache
if [ -d "/tmp/.buildx-cache-new" ]; then
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
fi

echo "✅ E2E test data restored from cache"

- name: Set up Platform - Start (all other services)
- name: Wait for services to be ready
run: |
docker compose -f ../docker-compose.resolved.yml up -d --no-build
echo "Waiting for rest_server to be ready..."
timeout 60 sh -c 'until curl -f http://localhost:8006/health 2>/dev/null; do sleep 2; done' || echo "Rest server health check timeout, continuing..."
env:
NEXT_PUBLIC_PW_TEST: true
echo "Waiting for database to be ready..."
timeout 60 sh -c 'until docker compose -f ../docker-compose.yml exec -T db pg_isready -U postgres 2>/dev/null; do sleep 2; done' || echo "Database ready check timeout, continuing..."

- name: Set up tests - Create E2E test data
if: steps.e2e-data-cache.outputs.cache-hit != 'true'
- name: Create E2E test data
run: |
echo "Creating E2E test data..."
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.resolved.yml ps -q rest_server):/tmp/e2e_test_data.py
docker compose -f ../docker-compose.resolved.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.resolved.yml logs --tail=50 rest_server
exit 1
}
# First try to run the script from inside the container
if docker compose -f ../docker-compose.yml exec -T rest_server test -f /app/autogpt_platform/backend/test/e2e_test_data.py; then
echo "✅ Found e2e_test_data.py in container, running it..."
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python backend/test/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
else
echo "⚠️ e2e_test_data.py not found in container, copying and running..."
# Copy the script into the container and run it
docker cp ../backend/test/e2e_test_data.py $(docker compose -f ../docker-compose.yml ps -q rest_server):/tmp/e2e_test_data.py || {
echo "❌ Failed to copy script to container"
exit 1
}
docker compose -f ../docker-compose.yml exec -T rest_server sh -c "cd /app/autogpt_platform && python /tmp/e2e_test_data.py" || {
echo "❌ E2E test data creation failed!"
docker compose -f ../docker-compose.yml logs --tail=50 rest_server
exit 1
}
fi

# Dump auth.users + platform schema for cache (two separate dumps)
echo "Dumping database for cache..."
{
docker compose -f ../docker-compose.resolved.yml exec -T db \
pg_dump -U postgres --data-only --column-inserts \
--table='auth.users' postgres
docker compose -f ../docker-compose.resolved.yml exec -T db \
pg_dump -U postgres --data-only --column-inserts \
--schema=platform \
--exclude-table='platform._prisma_migrations' \
--exclude-table='platform.apscheduler_jobs' \
--exclude-table='platform.apscheduler_jobs_batched_notifications' \
postgres
} > /tmp/e2e_test_data.sql

echo "✅ Database dump created for caching ($(wc -l < /tmp/e2e_test_data.sql) lines)"

- name: Set up tests - Enable corepack
run: corepack enable

- name: Set up tests - Set up Node
uses: actions/setup-node@v6
- name: Restore dependencies cache
uses: actions/cache@v5
with:
node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-

- name: Set up tests - Install dependencies
- name: Install dependencies
run: pnpm install --frozen-lockfile

- name: Set up tests - Install browser 'chromium'
- name: Install Browser 'chromium'
run: pnpm playwright install --with-deps chromium

- name: Run Playwright tests
@@ -281,7 +269,7 @@ jobs:

- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.resolved.yml logs
run: docker compose -f ../docker-compose.yml logs

integration_test:
runs-on: ubuntu-latest
@@ -293,15 +281,22 @@ jobs:
with:
submodules: recursive

- name: Enable corepack
run: corepack enable

- name: Set up Node
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: "22.18.0"
cache: "pnpm"
cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

- name: Enable corepack
run: corepack enable

- name: Restore dependencies cache
uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-

- name: Install dependencies
run: pnpm install --frozen-lockfile
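The pnpm caching introduced above hinges on how actions/cache resolves keys: an exact key hit is preferred, and each restore-keys entry is then tried as a prefix match, so a lockfile-only hash or the bare "<os>-pnpm-" entry can still seed the store. The Python sketch below is a rough illustration of that lookup order, not GitHub's implementation; hash_files only loosely mimics hashFiles() and the prefix search ignores the recency ordering GitHub applies among matches.

```python
# Rough sketch of the cache key scheme used by the setup job above.
import hashlib
from pathlib import Path

def hash_files(*paths: str) -> str:
    """Loosely mimics hashFiles(): one digest over the contents of the given files."""
    digest = hashlib.sha256()
    for p in paths:
        digest.update(Path(p).read_bytes())
    return digest.hexdigest()[:16]

def resolve_cache(key: str, restore_keys: list[str], available: list[str]) -> str | None:
    if key in available:                 # exact hit: lockfile + package.json unchanged
        return key
    for prefix in restore_keys:          # fallback: first stored key matching a prefix
        for candidate in available:
            if candidate.startswith(prefix):
                return candidate
    return None

# Hypothetical stored keys: an older but compatible store is restored and updated by pnpm.
stored = ["Linux-pnpm-1234abcd", "Linux-pnpm-old"]
print(resolve_cache("Linux-pnpm-deadbeef", ["Linux-pnpm-1234abcd", "Linux-pnpm-"], stored))
```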
@@ -1,195 +0,0 @@
#!/usr/bin/env python3
"""
Add cache configuration to a resolved docker-compose file for all services
that have a build key, and ensure image names match what docker compose expects.
"""

import argparse

import yaml


DEFAULT_BRANCH = "dev"
CACHE_BUILDS_FOR_COMPONENTS = ["backend", "frontend"]


def main():
    parser = argparse.ArgumentParser(
        description="Add cache config to a resolved compose file"
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source compose file to read (should be output of `docker compose config`)",
    )
    parser.add_argument(
        "--cache-from",
        default="type=gha",
        help="Cache source configuration",
    )
    parser.add_argument(
        "--cache-to",
        default="type=gha,mode=max",
        help="Cache destination configuration",
    )
    for component in CACHE_BUILDS_FOR_COMPONENTS:
        parser.add_argument(
            f"--{component}-hash",
            default="",
            help=f"Hash for {component} cache scope (e.g., from hashFiles())",
        )
    parser.add_argument(
        "--git-ref",
        default="",
        help="Git ref for branch-based cache scope (e.g., refs/heads/master)",
    )
    args = parser.parse_args()

    # Normalize git ref to a safe scope name (e.g., refs/heads/master -> master)
    git_ref_scope = ""
    if args.git_ref:
        git_ref_scope = args.git_ref.replace("refs/heads/", "").replace("/", "-")

    with open(args.source, "r") as f:
        compose = yaml.safe_load(f)

    # Get project name from compose file or default
    project_name = compose.get("name", "autogpt_platform")

    def get_image_name(dockerfile: str, target: str) -> str:
        """Generate image name based on Dockerfile folder and build target."""
        dockerfile_parts = dockerfile.replace("\\", "/").split("/")
        if len(dockerfile_parts) >= 2:
            folder_name = dockerfile_parts[-2]  # e.g., "backend" or "frontend"
        else:
            folder_name = "app"
        return f"{project_name}-{folder_name}:{target}"

    def get_build_key(dockerfile: str, target: str) -> str:
        """Generate a unique key for a Dockerfile+target combination."""
        return f"{dockerfile}:{target}"

    def get_component(dockerfile: str) -> str | None:
        """Get component name (frontend/backend) from dockerfile path."""
        for component in CACHE_BUILDS_FOR_COMPONENTS:
            if component in dockerfile:
                return component
        return None

    # First pass: collect all services with build configs and identify duplicates
    # Track which (dockerfile, target) combinations we've seen
    build_key_to_first_service: dict[str, str] = {}
    services_to_build: list[str] = []
    services_to_dedupe: list[str] = []

    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "default")
        build_key = get_build_key(dockerfile, target)

        if build_key not in build_key_to_first_service:
            # First service with this build config - it will do the actual build
            build_key_to_first_service[build_key] = service_name
            services_to_build.append(service_name)
        else:
            # Duplicate - will just use the image from the first service
            services_to_dedupe.append(service_name)

    # Second pass: configure builds and deduplicate
    modified_services = []
    for service_name, service_config in compose.get("services", {}).items():
        if "build" not in service_config:
            continue

        build_config = service_config["build"]
        dockerfile = build_config.get("dockerfile", "Dockerfile")
        target = build_config.get("target", "latest")
        image_name = get_image_name(dockerfile, target)

        # Set image name for all services (needed for both builders and deduped)
        service_config["image"] = image_name

        if service_name in services_to_dedupe:
            # Remove build config - this service will use the pre-built image
            del service_config["build"]
            continue

        # This service will do the actual build - add cache config
        cache_from_list = []
        cache_to_list = []

        component = get_component(dockerfile)
        if not component:
            # Skip services that don't clearly match frontend/backend
            continue

        # Get the hash for this component
        component_hash = getattr(args, f"{component}_hash")

        # Scope format: platform-{component}-{target}-{hash|ref}
        # Example: platform-backend-server-abc123

        if "type=gha" in args.cache_from:
            # 1. Primary: exact hash match (most specific)
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_from_list.append(f"{args.cache_from},scope={hash_scope}")

            # 2. Fallback: branch-based cache
            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_from_list.append(f"{args.cache_from},scope={ref_scope}")

            # 3. Fallback: dev branch cache (for PRs/feature branches)
            if git_ref_scope and git_ref_scope != DEFAULT_BRANCH:
                master_scope = f"platform-{component}-{target}-{DEFAULT_BRANCH}"
                cache_from_list.append(f"{args.cache_from},scope={master_scope}")

        if "type=gha" in args.cache_to:
            # Write to both hash-based and branch-based scopes
            if component_hash:
                hash_scope = f"platform-{component}-{target}-{component_hash}"
                cache_to_list.append(f"{args.cache_to},scope={hash_scope}")

            if git_ref_scope:
                ref_scope = f"platform-{component}-{target}-{git_ref_scope}"
                cache_to_list.append(f"{args.cache_to},scope={ref_scope}")

        # Ensure we have at least one cache source/target
        if not cache_from_list:
            cache_from_list.append(args.cache_from)
        if not cache_to_list:
            cache_to_list.append(args.cache_to)

        build_config["cache_from"] = cache_from_list
        build_config["cache_to"] = cache_to_list
        modified_services.append(service_name)

    # Write back to the same file
    with open(args.source, "w") as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Added cache config to {len(modified_services)} services in {args.source}:")
    for svc in modified_services:
        svc_config = compose["services"][svc]
        build_cfg = svc_config.get("build", {})
        cache_from_list = build_cfg.get("cache_from", ["none"])
        cache_to_list = build_cfg.get("cache_to", ["none"])
        print(f" - {svc}")
        print(f" image: {svc_config.get('image', 'N/A')}")
        print(f" cache_from: {cache_from_list}")
        print(f" cache_to: {cache_to_list}")
    if services_to_dedupe:
        print(
            f"Deduplicated {len(services_to_dedupe)} services (will use pre-built images):"
        )
        for svc in services_to_dedupe:
            print(f" - {svc} -> {compose['services'][svc].get('image', 'N/A')}")


if __name__ == "__main__":
    main()
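For context, here is a hypothetical before/after of what the deleted compose cache script above produced for a single service. The service entry is simplified and the hash and branch placeholders are illustrative, not values from the real resolved compose file.

```python
# Illustration only: how the script rewrote one service in docker-compose.resolved.yml.
# <backend-hash> and <branch> stand in for the hashFiles() digest and the git ref scope.
import yaml

before = yaml.safe_load("""
services:
  rest_server:
    build:
      dockerfile: backend/Dockerfile
      target: server
""")

after = yaml.safe_load("""
services:
  rest_server:
    image: autogpt_platform-backend:server
    build:
      dockerfile: backend/Dockerfile
      target: server
      cache_from:
        - type=gha,scope=platform-backend-server-<backend-hash>
        - type=gha,scope=platform-backend-server-<branch>
        - type=gha,scope=platform-backend-server-dev
      cache_to:
        - type=gha,mode=max,scope=platform-backend-server-<backend-hash>
        - type=gha,mode=max,scope=platform-backend-server-<branch>
""")

print("before:\n" + yaml.safe_dump(before, sort_keys=False))
print("after:\n" + yaml.safe_dump(after, sort_keys=False))
```

Services that shared the same Dockerfile and target were deduplicated: only the first one kept its build block, and the rest were pointed at the pre-built image name.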
@@ -45,11 +45,6 @@ AutoGPT Platform is a monorepo containing:
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

### Branching Strategy

- **`dev`** is the main development branch. All PRs should target `dev`.
- **`master`** is the production branch. Only used for production releases.

### Creating Pull Requests

- Create the PR against the `dev` branch of the repository.

autogpt_platform/autogpt_libs/poetry.lock (generated, 169 changed lines)
@@ -448,61 +448,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "46.0.5"
|
||||
version = "46.0.4"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
optional = false
|
||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"},
|
||||
{file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"},
|
||||
{file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"},
|
||||
{file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"},
|
||||
{file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"},
|
||||
{file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
|
||||
{file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
|
||||
{file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
|
||||
{file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
|
||||
{file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
|
||||
{file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -516,7 +516,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
|
||||
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
||||
sdist = ["build (>=1.0.0)"]
|
||||
ssh = ["bcrypt (>=3.1.5)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test-randomorder = ["pytest-randomly"]
|
||||
|
||||
[[package]]
|
||||
@@ -570,25 +570,24 @@ tests = ["coverage", "coveralls", "dill", "mock", "nose"]
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.128.7"
|
||||
version = "0.128.0"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
|
||||
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
|
||||
{file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
|
||||
{file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
annotated-doc = ">=0.0.2"
|
||||
pydantic = ">=2.7.0"
|
||||
starlette = ">=0.40.0,<1.0.0"
|
||||
starlette = ">=0.40.0,<0.51.0"
|
||||
typing-extensions = ">=4.8.0"
|
||||
typing-inspection = ">=0.4.2"
|
||||
|
||||
[package.extras]
|
||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.9.3)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=5.8.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||
|
||||
@@ -1063,14 +1062,14 @@ urllib3 = ">=1.26.0,<3"
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-server-sdk"
|
||||
version = "9.15.0"
|
||||
version = "9.14.1"
|
||||
description = "LaunchDarkly SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
|
||||
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1479,14 +1478,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
|
||||
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
|
||||
{file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
|
||||
{file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2249,14 +2248,14 @@ cli = ["click (>=5.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "realtime"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
|
||||
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
|
||||
{file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
|
||||
{file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2437,14 +2436,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
||||
|
||||
[[package]]
|
||||
name = "storage3"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Supabase Storage client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
|
||||
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
|
||||
{file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
|
||||
{file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2488,35 +2487,35 @@ python-dateutil = ">=2.6.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Supabase client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
|
||||
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
|
||||
{file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
|
||||
{file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.26,<0.29"
|
||||
postgrest = "2.28.0"
|
||||
realtime = "2.28.0"
|
||||
storage3 = "2.28.0"
|
||||
supabase-auth = "2.28.0"
|
||||
supabase-functions = "2.28.0"
|
||||
postgrest = "2.27.2"
|
||||
realtime = "2.27.2"
|
||||
storage3 = "2.27.2"
|
||||
supabase-auth = "2.27.2"
|
||||
supabase-functions = "2.27.2"
|
||||
yarl = ">=1.22.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase-auth"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Python Client Library for Supabase Auth"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
|
||||
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
|
||||
{file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
|
||||
{file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2526,14 +2525,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||
|
||||
[[package]]
|
||||
name = "supabase-functions"
|
||||
version = "2.28.0"
|
||||
version = "2.27.2"
|
||||
description = "Library for Supabase Functions"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
||||
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
||||
{file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
|
||||
{file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2912,4 +2911,4 @@ type = ["pytest-mypy"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<4.0"
|
||||
content-hash = "9619cae908ad38fa2c48016a58bcf4241f6f5793aa0e6cc140276e91c433cbbb"
|
||||
content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
|
||||
|
||||
@@ -11,14 +11,14 @@ python = ">=3.10,<4.0"
colorama = "^0.4.6"
cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.7"
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
launchdarkly-server-sdk = "^9.15.0"
launchdarkly-server-sdk = "^9.14.1"
pydantic = "^2.12.5"
pydantic-settings = "^2.12.0"
pyjwt = { version = "^2.11.0", extras = ["crypto"] }
redis = "^6.2.0"
supabase = "^2.28.0"
supabase = "^2.27.2"
uvicorn = "^0.40.0"

[tool.poetry.group.dev.dependencies]
@@ -1,5 +1,3 @@
# ============================ DEPENDENCY BUILDER ============================ #

FROM debian:13-slim AS builder

# Set environment variables
@@ -53,9 +51,7 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
RUN poetry run prisma generate && poetry run gen-prisma-stub

# ============================== BACKEND SERVER ============================== #

FROM debian:13-slim AS server
FROM debian:13-slim AS server_dependencies

WORKDIR /app

@@ -67,14 +63,15 @@ ENV POETRY_HOME=/opt/poetry \
ENV PATH=/opt/poetry/bin:$PATH

# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
RUN apt-get update && apt-get install -y --no-install-recommends \
RUN apt-get update && apt-get install -y \
python3.13 \
python3-pip \
ffmpeg \
imagemagick \
&& rm -rf /var/lib/apt/lists/*

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
# Copy Node.js installation for Prisma
@@ -84,54 +81,30 @@ COPY --from=builder /usr/bin/npm /usr/bin/npm
COPY --from=builder /usr/bin/npx /usr/bin/npx
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

WORKDIR /app/autogpt_platform/backend

# Copy only the .venv from builder (not the entire /app directory)
# The .venv includes the generated Prisma client
COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"

# Copy dependency files + autogpt_libs (path dependency)
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
RUN mkdir -p /app/autogpt_platform/autogpt_libs
RUN mkdir -p /app/autogpt_platform/backend

# Copy backend code + docs (for Copilot docs search)
COPY autogpt_platform/backend ./
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs

COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS migrate

# Migration stage only needs schema and migrations - much lighter than full backend
COPY autogpt_platform/backend/schema.prisma /app/autogpt_platform/backend/
COPY autogpt_platform/backend/backend/data/partial_types.py /app/autogpt_platform/backend/backend/data/partial_types.py
COPY autogpt_platform/backend/migrations /app/autogpt_platform/backend/migrations

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend
COPY docs /app/docs
RUN poetry install --no-ansi --only-root

ENV PORT=8000

CMD ["poetry", "run", "rest"]

# =============================== DB MIGRATOR =============================== #

# Lightweight migrate stage - only needs Prisma CLI, not full Python environment
FROM debian:13-slim AS migrate

WORKDIR /app/autogpt_platform/backend

ENV DEBIAN_FRONTEND=noninteractive

# Install only what's needed for prisma migrate: Node.js and minimal Python for prisma-python
RUN apt-get update && apt-get install -y --no-install-recommends \
python3.13 \
python3-pip \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*

# Copy Node.js from builder (needed for Prisma CLI)
COPY --from=builder /usr/bin/node /usr/bin/node
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
COPY --from=builder /usr/bin/npm /usr/bin/npm

# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

# Install prisma-client-py directly (much smaller than copying full venv)
RUN pip3 install prisma>=0.15.0 --break-system-packages

COPY autogpt_platform/backend/schema.prisma ./
COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
COPY autogpt_platform/backend/migrations ./migrations
@@ -122,24 +122,6 @@ class ConnectionManager:

return len(connections)

async def broadcast_to_all(self, *, method: WSMethod, data: dict) -> int:
"""Broadcast a message to all active websocket connections."""
message = WSMessage(
method=method,
data=data,
).model_dump_json()

connections = tuple(self.active_connections)
if not connections:
return 0

await asyncio.gather(
*(connection.send_text(message) for connection in connections),
return_exceptions=True,
)

return len(connections)

async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
if channel_key not in self.subscriptions:
self.subscriptions[channel_key] = set()

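A minimal usage sketch of the broadcast helper above, assuming a ConnectionManager instance named manager and a WSMethod member invented here for illustration; because gather() runs with return_exceptions=True, the returned count is the number of attempted sends even if some sockets fail.

# Illustration only: broadcasting an event to every open websocket.
sent = await manager.broadcast_to_all(
    method=WSMethod.GRAPH_EXECUTION_EVENT,  # hypothetical method name
    data={"graph_exec_id": "...", "status": "COMPLETED"},
)
logger.info("Broadcast delivered to %d connection(s)", sent)
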
@@ -176,64 +176,30 @@ async def get_execution_analytics_config(
# Return with provider prefix for clarity
return f"{provider_name}: {model_name}"

# Get all models from the registry (dynamic, not hardcoded enum)
from backend.data import llm_registry
from backend.server.v2.llm import db as llm_db

# Get the recommended model from the database (configurable via admin UI)
recommended_model_slug = await llm_db.get_recommended_model_slug()

# Build the available models list
first_enabled_slug = None
for registry_model in llm_registry.iter_dynamic_models():
# Only include enabled models in the list
if not registry_model.is_enabled:
continue

# Track first enabled model as fallback
if first_enabled_slug is None:
first_enabled_slug = registry_model.slug

model = LlmModel(registry_model.slug)
# Include all LlmModel values (no more filtering by hardcoded list)
recommended_model = LlmModel.GPT4O_MINI.value
for model in LlmModel:
label = generate_model_label(model)
# Add "(Recommended)" suffix to the recommended model
if registry_model.slug == recommended_model_slug:
if model.value == recommended_model:
label += " (Recommended)"

available_models.append(
ModelInfo(
value=registry_model.slug,
value=model.value,
label=label,
provider=registry_model.metadata.provider,
provider=model.provider,
)
)

# Sort models by provider and name for better UX
available_models.sort(key=lambda x: (x.provider, x.label))

# Handle case where no models are available
if not available_models:
logger.warning(
"No enabled LLM models found in registry. "
"Ensure models are configured and enabled in the LLM Registry."
)
# Provide a placeholder entry so admins see meaningful feedback
available_models.append(
ModelInfo(
value="",
label="No models available - configure in LLM Registry",
provider="none",
)
)

# Use the DB recommended model, or fallback to first enabled model
final_recommended = recommended_model_slug or first_enabled_slug or ""

return ExecutionAnalyticsConfig(
available_models=available_models,
default_system_prompt=DEFAULT_SYSTEM_PROMPT,
default_user_prompt=DEFAULT_USER_PROMPT,
recommended_model=final_recommended,
recommended_model=recommended_model,
)

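An illustrative sketch of what the registry-driven dropdown entries end up looking like after the loop above, assuming two enabled models and a recommended slug of "claude-3-5-sonnet" stored in the database; all values are made up.

# Illustration only: resulting ModelInfo entries after labeling and sorting.
available_models = [
    ModelInfo(value="gpt-4o-mini", label="OpenAI: GPT-4o Mini", provider="openai"),
    ModelInfo(value="claude-3-5-sonnet", label="Anthropic: Claude 3.5 Sonnet (Recommended)", provider="anthropic"),
]
available_models.sort(key=lambda x: (x.provider, x.label))  # anthropic entries sort first
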
@@ -1,593 +0,0 @@
|
||||
import logging
|
||||
|
||||
import autogpt_libs.auth
|
||||
import fastapi
|
||||
|
||||
from backend.data import llm_registry
|
||||
from backend.data.block_cost_config import refresh_llm_costs
|
||||
from backend.server.v2.llm import db as llm_db
|
||||
from backend.server.v2.llm import model as llm_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = fastapi.APIRouter(
|
||||
tags=["llm", "admin"],
|
||||
dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
|
||||
)
|
||||
|
||||
|
||||
async def _refresh_runtime_state() -> None:
|
||||
"""Refresh the LLM registry and clear all related caches to ensure real-time updates."""
|
||||
logger.info("Refreshing LLM registry runtime state...")
|
||||
try:
|
||||
# Refresh registry from database
|
||||
await llm_registry.refresh_llm_registry()
|
||||
await refresh_llm_costs()
|
||||
|
||||
# Clear block schema caches so they're regenerated with updated model options
|
||||
from backend.blocks._base import BlockSchema
|
||||
|
||||
BlockSchema.clear_all_schema_caches()
|
||||
logger.info("Cleared all block schema caches")
|
||||
|
||||
# Clear the /blocks endpoint cache so frontend gets updated schemas
|
||||
try:
|
||||
from backend.api.features.v1 import _get_cached_blocks
|
||||
|
||||
_get_cached_blocks.cache_clear()
|
||||
logger.info("Cleared /blocks endpoint cache")
|
||||
except Exception as e:
|
||||
logger.warning("Failed to clear /blocks cache: %s", e)
|
||||
|
||||
# Clear the v2 builder caches
|
||||
try:
|
||||
from backend.api.features.builder import db as builder_db
|
||||
|
||||
builder_db._get_all_providers.cache_clear()
|
||||
logger.info("Cleared v2 builder providers cache")
|
||||
builder_db._build_cached_search_results.cache_clear()
|
||||
logger.info("Cleared v2 builder search results cache")
|
||||
except Exception as e:
|
||||
logger.debug("Could not clear v2 builder cache: %s", e)
|
||||
|
||||
# Notify all executor services to refresh their registry cache
|
||||
from backend.data.llm_registry import publish_registry_refresh_notification
|
||||
|
||||
await publish_registry_refresh_notification()
|
||||
logger.info("Published registry refresh notification")
|
||||
except Exception as exc:
|
||||
logger.exception(
|
||||
"LLM runtime state refresh failed; caches may be stale: %s", exc
|
||||
)
|
||||
|
||||
|
||||
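The refresh helper above invalidates several memoized lookups through their cache_clear() hooks. A minimal sketch of that pattern, using functools.lru_cache as a stand-in for the project's own caching decorator (an assumption made only for this example):

from functools import lru_cache

@lru_cache(maxsize=1)
def _get_cached_blocks() -> dict:
    # Expensive lookup, memoized until explicitly invalidated.
    return {"blocks": ["..."]}

_get_cached_blocks()              # populates the cache
_get_cached_blocks.cache_clear()  # what _refresh_runtime_state() relies on
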
@router.get(
|
||||
"/providers",
|
||||
summary="List LLM providers",
|
||||
response_model=llm_model.LlmProvidersResponse,
|
||||
)
|
||||
async def list_llm_providers(include_models: bool = True):
|
||||
providers = await llm_db.list_providers(include_models=include_models)
|
||||
return llm_model.LlmProvidersResponse(providers=providers)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/providers",
|
||||
summary="Create LLM provider",
|
||||
response_model=llm_model.LlmProvider,
|
||||
)
|
||||
async def create_llm_provider(request: llm_model.UpsertLlmProviderRequest):
|
||||
provider = await llm_db.upsert_provider(request=request)
|
||||
await _refresh_runtime_state()
|
||||
return provider
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/providers/{provider_id}",
|
||||
summary="Update LLM provider",
|
||||
response_model=llm_model.LlmProvider,
|
||||
)
|
||||
async def update_llm_provider(
|
||||
provider_id: str,
|
||||
request: llm_model.UpsertLlmProviderRequest,
|
||||
):
|
||||
provider = await llm_db.upsert_provider(request=request, provider_id=provider_id)
|
||||
await _refresh_runtime_state()
|
||||
return provider
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/providers/{provider_id}",
|
||||
summary="Delete LLM provider",
|
||||
response_model=dict,
|
||||
)
|
||||
async def delete_llm_provider(provider_id: str):
|
||||
"""
|
||||
Delete an LLM provider.
|
||||
|
||||
A provider can only be deleted if it has no associated models.
|
||||
Delete all models from the provider first before deleting the provider.
|
||||
"""
|
||||
try:
|
||||
await llm_db.delete_provider(provider_id)
|
||||
await _refresh_runtime_state()
|
||||
logger.info("Deleted LLM provider '%s'", provider_id)
|
||||
return {"success": True, "message": "Provider deleted successfully"}
|
||||
except ValueError as e:
|
||||
logger.warning("Failed to delete provider '%s': %s", provider_id, e)
|
||||
raise fastapi.HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.exception("Failed to delete provider '%s': %s", provider_id, e)
|
||||
raise fastapi.HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
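A hedged sketch of calling the provider-deletion endpoint; the "/admin/llm" prefix is taken from the test app later in this diff, while the base URL and admin token are placeholders.

import httpx

resp = httpx.delete(
    "http://localhost:8000/admin/llm/providers/provider-1",
    headers={"Authorization": "Bearer <admin-jwt>"},
)
resp.raise_for_status()  # returns 400 if the provider still has models attached
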
@router.get(
|
||||
"/models",
|
||||
summary="List LLM models",
|
||||
response_model=llm_model.LlmModelsResponse,
|
||||
)
|
||||
async def list_llm_models(
|
||||
provider_id: str | None = fastapi.Query(default=None),
|
||||
page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
|
||||
page_size: int = fastapi.Query(
|
||||
default=50, ge=1, le=100, description="Number of models per page"
|
||||
),
|
||||
):
|
||||
return await llm_db.list_models(
|
||||
provider_id=provider_id, page=page, page_size=page_size
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/models",
|
||||
summary="Create LLM model",
|
||||
response_model=llm_model.LlmModel,
|
||||
)
|
||||
async def create_llm_model(request: llm_model.CreateLlmModelRequest):
|
||||
model = await llm_db.create_model(request=request)
|
||||
await _refresh_runtime_state()
|
||||
return model
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/models/{model_id}",
|
||||
summary="Update LLM model",
|
||||
response_model=llm_model.LlmModel,
|
||||
)
|
||||
async def update_llm_model(
|
||||
model_id: str,
|
||||
request: llm_model.UpdateLlmModelRequest,
|
||||
):
|
||||
model = await llm_db.update_model(model_id=model_id, request=request)
|
||||
await _refresh_runtime_state()
|
||||
return model
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/models/{model_id}/toggle",
|
||||
summary="Toggle LLM model availability",
|
||||
response_model=llm_model.ToggleLlmModelResponse,
|
||||
)
|
||||
async def toggle_llm_model(
|
||||
model_id: str,
|
||||
request: llm_model.ToggleLlmModelRequest,
|
||||
):
|
||||
"""
|
||||
Toggle a model's enabled status, optionally migrating workflows when disabling.
|
||||
|
||||
If disabling a model and `migrate_to_slug` is provided, all workflows using
|
||||
this model will be migrated to the specified replacement model before disabling.
|
||||
A migration record is created which can be reverted later using the revert endpoint.
|
||||
|
||||
Optional fields:
|
||||
- `migration_reason`: Reason for the migration (e.g., "Provider outage")
|
||||
- `custom_credit_cost`: Custom pricing override for billing during migration
|
||||
"""
|
||||
try:
|
||||
result = await llm_db.toggle_model(
|
||||
model_id=model_id,
|
||||
is_enabled=request.is_enabled,
|
||||
migrate_to_slug=request.migrate_to_slug,
|
||||
migration_reason=request.migration_reason,
|
||||
custom_credit_cost=request.custom_credit_cost,
|
||||
)
|
||||
await _refresh_runtime_state()
|
||||
if result.nodes_migrated > 0:
|
||||
logger.info(
|
||||
"Toggled model '%s' to %s and migrated %d nodes to '%s' (migration_id=%s)",
|
||||
result.model.slug,
|
||||
"enabled" if request.is_enabled else "disabled",
|
||||
result.nodes_migrated,
|
||||
result.migrated_to_slug,
|
||||
result.migration_id,
|
||||
)
|
||||
return result
|
||||
except ValueError as exc:
|
||||
logger.warning("Model toggle validation failed: %s", exc)
|
||||
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to toggle LLM model %s: %s", model_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to toggle model availability",
|
||||
) from exc
|
||||
|
||||
|
||||
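A sketch of disabling a model while migrating its workflows, following the request fields described in the docstring above; the base URL and token are placeholders.

import httpx

resp = httpx.patch(
    "http://localhost:8000/admin/llm/models/model-1/toggle",
    headers={"Authorization": "Bearer <admin-jwt>"},
    json={
        "is_enabled": False,
        "migrate_to_slug": "gpt-4o-mini",
        "migration_reason": "Provider outage",
    },
)
data = resp.json()
print(data["nodes_migrated"], data["migration_id"])  # migration can be reverted later
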
@router.get(
|
||||
"/models/{model_id}/usage",
|
||||
summary="Get model usage count",
|
||||
response_model=llm_model.LlmModelUsageResponse,
|
||||
)
|
||||
async def get_llm_model_usage(model_id: str):
|
||||
"""Get the number of workflow nodes using this model."""
|
||||
try:
|
||||
return await llm_db.get_model_usage(model_id=model_id)
|
||||
except ValueError as exc:
|
||||
raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to get model usage %s: %s", model_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to get model usage",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/models/{model_id}",
|
||||
summary="Delete LLM model and migrate workflows",
|
||||
response_model=llm_model.DeleteLlmModelResponse,
|
||||
)
|
||||
async def delete_llm_model(
|
||||
model_id: str,
|
||||
replacement_model_slug: str | None = fastapi.Query(
|
||||
default=None,
|
||||
description="Slug of the model to migrate existing workflows to (required only if workflows use this model)",
|
||||
),
|
||||
):
|
||||
"""
|
||||
Delete a model and optionally migrate workflows using it to a replacement model.
|
||||
|
||||
If no workflows are using this model, it can be deleted without providing a
|
||||
replacement. If workflows exist, replacement_model_slug is required.
|
||||
|
||||
This endpoint:
|
||||
1. Counts how many workflow nodes use the model being deleted
|
||||
2. If nodes exist, validates the replacement model and migrates them
|
||||
3. Deletes the model record
|
||||
4. Refreshes all caches and notifies executors
|
||||
|
||||
Example: DELETE /api/llm/admin/models/{id}?replacement_model_slug=gpt-4o
|
||||
Example (no usage): DELETE /api/llm/admin/models/{id}
|
||||
"""
|
||||
try:
|
||||
result = await llm_db.delete_model(
|
||||
model_id=model_id, replacement_model_slug=replacement_model_slug
|
||||
)
|
||||
await _refresh_runtime_state()
|
||||
logger.info(
|
||||
"Deleted model '%s' and migrated %d nodes to '%s'",
|
||||
result.deleted_model_slug,
|
||||
result.nodes_migrated,
|
||||
result.replacement_model_slug,
|
||||
)
|
||||
return result
|
||||
except ValueError as exc:
|
||||
# Validation errors (model not found, replacement invalid, etc.)
|
||||
logger.warning("Model deletion validation failed: %s", exc)
|
||||
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to delete LLM model %s: %s", model_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to delete model and migrate workflows",
|
||||
) from exc
|
||||
|
||||
|
||||
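A sketch mirroring the docstring examples above for deleting a model that is still in use; the host and token are placeholders, and the response fields follow the DeleteLlmModelResponse shown in the tests later in this diff.

import httpx

resp = httpx.delete(
    "http://localhost:8000/admin/llm/models/model-1",
    params={"replacement_model_slug": "gpt-4o"},
    headers={"Authorization": "Bearer <admin-jwt>"},
)
data = resp.json()
print(data["deleted_model_slug"], data["nodes_migrated"], data["replacement_model_slug"])
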
# ============================================================================
|
||||
# Migration Management Endpoints
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@router.get(
|
||||
"/migrations",
|
||||
summary="List model migrations",
|
||||
response_model=llm_model.LlmMigrationsResponse,
|
||||
)
|
||||
async def list_llm_migrations(
|
||||
include_reverted: bool = fastapi.Query(
|
||||
default=False, description="Include reverted migrations in the list"
|
||||
),
|
||||
):
|
||||
"""
|
||||
List all model migrations.
|
||||
|
||||
Migrations are created when disabling a model with the migrate_to_slug option.
|
||||
They can be reverted to restore the original model configuration.
|
||||
"""
|
||||
try:
|
||||
migrations = await llm_db.list_migrations(include_reverted=include_reverted)
|
||||
return llm_model.LlmMigrationsResponse(migrations=migrations)
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to list migrations: %s", exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to list migrations",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.get(
|
||||
"/migrations/{migration_id}",
|
||||
summary="Get migration details",
|
||||
response_model=llm_model.LlmModelMigration,
|
||||
)
|
||||
async def get_llm_migration(migration_id: str):
|
||||
"""Get details of a specific migration."""
|
||||
try:
|
||||
migration = await llm_db.get_migration(migration_id)
|
||||
if not migration:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=404, detail=f"Migration '{migration_id}' not found"
|
||||
)
|
||||
return migration
|
||||
except fastapi.HTTPException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to get migration %s: %s", migration_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to get migration",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.post(
|
||||
"/migrations/{migration_id}/revert",
|
||||
summary="Revert a model migration",
|
||||
response_model=llm_model.RevertMigrationResponse,
|
||||
)
|
||||
async def revert_llm_migration(
|
||||
migration_id: str,
|
||||
request: llm_model.RevertMigrationRequest | None = None,
|
||||
):
|
||||
"""
|
||||
Revert a model migration, restoring affected workflows to their original model.
|
||||
|
||||
This only reverts the specific nodes that were part of the migration.
|
||||
The source model must exist for the revert to succeed.
|
||||
|
||||
Options:
|
||||
- `re_enable_source_model`: Whether to re-enable the source model if disabled (default: True)
|
||||
|
||||
Response includes:
|
||||
- `nodes_reverted`: Number of nodes successfully reverted
|
||||
- `nodes_already_changed`: Number of nodes that were modified since migration (not reverted)
|
||||
- `source_model_re_enabled`: Whether the source model was re-enabled
|
||||
|
||||
Requirements:
|
||||
- Migration must not already be reverted
|
||||
- Source model must exist
|
||||
"""
|
||||
try:
|
||||
re_enable = request.re_enable_source_model if request else True
|
||||
result = await llm_db.revert_migration(
|
||||
migration_id,
|
||||
re_enable_source_model=re_enable,
|
||||
)
|
||||
await _refresh_runtime_state()
|
||||
logger.info(
|
||||
"Reverted migration '%s': %d nodes restored from '%s' to '%s' "
|
||||
"(%d already changed, source re-enabled=%s)",
|
||||
migration_id,
|
||||
result.nodes_reverted,
|
||||
result.target_model_slug,
|
||||
result.source_model_slug,
|
||||
result.nodes_already_changed,
|
||||
result.source_model_re_enabled,
|
||||
)
|
||||
return result
|
||||
except ValueError as exc:
|
||||
logger.warning("Migration revert validation failed: %s", exc)
|
||||
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to revert migration %s: %s", migration_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to revert migration",
|
||||
) from exc
|
||||
|
||||
|
||||
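A sketch of reverting a migration, with the optional body field described in the docstring above; URL, migration id, and token are placeholders.

import httpx

resp = httpx.post(
    "http://localhost:8000/admin/llm/migrations/<migration-id>/revert",
    headers={"Authorization": "Bearer <admin-jwt>"},
    json={"re_enable_source_model": True},
)
result = resp.json()
print(result["nodes_reverted"], result["nodes_already_changed"])
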
# ============================================================================
|
||||
# Creator Management Endpoints
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@router.get(
|
||||
"/creators",
|
||||
summary="List model creators",
|
||||
response_model=llm_model.LlmCreatorsResponse,
|
||||
)
|
||||
async def list_llm_creators():
|
||||
"""
|
||||
List all model creators.
|
||||
|
||||
Creators are organizations that create/train models (e.g., OpenAI, Meta, Anthropic).
|
||||
This is distinct from providers who host/serve the models (e.g., OpenRouter).
|
||||
"""
|
||||
try:
|
||||
creators = await llm_db.list_creators()
|
||||
return llm_model.LlmCreatorsResponse(creators=creators)
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to list creators: %s", exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to list creators",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.get(
|
||||
"/creators/{creator_id}",
|
||||
summary="Get creator details",
|
||||
response_model=llm_model.LlmModelCreator,
|
||||
)
|
||||
async def get_llm_creator(creator_id: str):
|
||||
"""Get details of a specific model creator."""
|
||||
try:
|
||||
creator = await llm_db.get_creator(creator_id)
|
||||
if not creator:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=404, detail=f"Creator '{creator_id}' not found"
|
||||
)
|
||||
return creator
|
||||
except fastapi.HTTPException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to get creator %s: %s", creator_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to get creator",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.post(
|
||||
"/creators",
|
||||
summary="Create model creator",
|
||||
response_model=llm_model.LlmModelCreator,
|
||||
)
|
||||
async def create_llm_creator(request: llm_model.UpsertLlmCreatorRequest):
|
||||
"""
|
||||
Create a new model creator.
|
||||
|
||||
A creator represents an organization that creates/trains AI models,
|
||||
such as OpenAI, Anthropic, Meta, or Google.
|
||||
"""
|
||||
try:
|
||||
creator = await llm_db.upsert_creator(request=request)
|
||||
await _refresh_runtime_state()
|
||||
logger.info("Created model creator '%s' (%s)", creator.display_name, creator.id)
|
||||
return creator
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to create creator: %s", exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to create creator",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/creators/{creator_id}",
|
||||
summary="Update model creator",
|
||||
response_model=llm_model.LlmModelCreator,
|
||||
)
|
||||
async def update_llm_creator(
|
||||
creator_id: str,
|
||||
request: llm_model.UpsertLlmCreatorRequest,
|
||||
):
|
||||
"""Update an existing model creator."""
|
||||
try:
|
||||
creator = await llm_db.upsert_creator(request=request, creator_id=creator_id)
|
||||
await _refresh_runtime_state()
|
||||
logger.info("Updated model creator '%s' (%s)", creator.display_name, creator_id)
|
||||
return creator
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to update creator %s: %s", creator_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to update creator",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/creators/{creator_id}",
|
||||
summary="Delete model creator",
|
||||
response_model=dict,
|
||||
)
|
||||
async def delete_llm_creator(creator_id: str):
|
||||
"""
|
||||
Delete a model creator.
|
||||
|
||||
This will remove the creator association from all models that reference it
|
||||
(sets creatorId to NULL), but will not delete the models themselves.
|
||||
"""
|
||||
try:
|
||||
await llm_db.delete_creator(creator_id)
|
||||
await _refresh_runtime_state()
|
||||
logger.info("Deleted model creator '%s'", creator_id)
|
||||
return {"success": True, "message": f"Creator '{creator_id}' deleted"}
|
||||
except ValueError as exc:
|
||||
logger.warning("Creator deletion validation failed: %s", exc)
|
||||
raise fastapi.HTTPException(status_code=404, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to delete creator %s: %s", creator_id, exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to delete creator",
|
||||
) from exc
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Recommended Model Endpoints
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recommended-model",
|
||||
summary="Get recommended model",
|
||||
response_model=llm_model.RecommendedModelResponse,
|
||||
)
|
||||
async def get_recommended_model():
|
||||
"""
|
||||
Get the currently recommended LLM model.
|
||||
|
||||
The recommended model is shown to users as the default/suggested option
|
||||
in model selection dropdowns.
|
||||
"""
|
||||
try:
|
||||
model = await llm_db.get_recommended_model()
|
||||
return llm_model.RecommendedModelResponse(
|
||||
model=model,
|
||||
slug=model.slug if model else None,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to get recommended model: %s", exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to get recommended model",
|
||||
) from exc
|
||||
|
||||
|
||||
@router.post(
|
||||
"/recommended-model",
|
||||
summary="Set recommended model",
|
||||
response_model=llm_model.SetRecommendedModelResponse,
|
||||
)
|
||||
async def set_recommended_model(request: llm_model.SetRecommendedModelRequest):
|
||||
"""
|
||||
Set a model as the recommended model.
|
||||
|
||||
This clears the recommended flag from any other model and sets it on
|
||||
the specified model. The model must be enabled to be set as recommended.
|
||||
|
||||
The recommended model is displayed to users as the default/suggested
|
||||
option in model selection dropdowns throughout the platform.
|
||||
"""
|
||||
try:
|
||||
model, previous_slug = await llm_db.set_recommended_model(request.model_id)
|
||||
await _refresh_runtime_state()
|
||||
logger.info(
|
||||
"Set recommended model to '%s' (previous: %s)",
|
||||
model.slug,
|
||||
previous_slug or "none",
|
||||
)
|
||||
return llm_model.SetRecommendedModelResponse(
|
||||
model=model,
|
||||
previous_recommended_slug=previous_slug,
|
||||
message=f"Model '{model.display_name}' is now the recommended model",
|
||||
)
|
||||
except ValueError as exc:
|
||||
logger.warning("Set recommended model validation failed: %s", exc)
|
||||
raise fastapi.HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to set recommended model: %s", exc)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to set recommended model",
|
||||
) from exc
|
||||
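A sketch of promoting a model to "recommended" via the endpoint above; model_id, base URL, and token are placeholders, and the response message field matches SetRecommendedModelResponse as used above.

import httpx

resp = httpx.post(
    "http://localhost:8000/admin/llm/recommended-model",
    headers={"Authorization": "Bearer <admin-jwt>"},
    json={"model_id": "model-1"},
)
print(resp.json()["message"])
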
@@ -1,491 +0,0 @@
|
||||
import json
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
import backend.api.features.admin.llm_routes as llm_routes
|
||||
from backend.server.v2.llm import model as llm_model
|
||||
from backend.util.models import Pagination
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(llm_routes.router, prefix="/admin/llm")
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_app_admin_auth(mock_jwt_admin):
|
||||
"""Setup admin auth overrides for all tests in this module"""
|
||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_admin["get_jwt_payload"]
|
||||
yield
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
def test_list_llm_providers_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful listing of LLM providers"""
|
||||
# Mock the database function
|
||||
mock_providers = [
|
||||
{
|
||||
"id": "provider-1",
|
||||
"name": "openai",
|
||||
"display_name": "OpenAI",
|
||||
"description": "OpenAI LLM provider",
|
||||
"supports_tools": True,
|
||||
"supports_json_output": True,
|
||||
"supports_reasoning": False,
|
||||
"supports_parallel_tool": True,
|
||||
"metadata": {},
|
||||
"models": [],
|
||||
},
|
||||
{
|
||||
"id": "provider-2",
|
||||
"name": "anthropic",
|
||||
"display_name": "Anthropic",
|
||||
"description": "Anthropic LLM provider",
|
||||
"supports_tools": True,
|
||||
"supports_json_output": True,
|
||||
"supports_reasoning": False,
|
||||
"supports_parallel_tool": True,
|
||||
"metadata": {},
|
||||
"models": [],
|
||||
},
|
||||
]
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.list_providers",
|
||||
new=AsyncMock(return_value=mock_providers),
|
||||
)
|
||||
|
||||
response = client.get("/admin/llm/providers")
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert len(response_data["providers"]) == 2
|
||||
assert response_data["providers"][0]["name"] == "openai"
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"list_llm_providers_success.json",
|
||||
)
|
||||
|
||||
|
||||
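These tests serialize the response and compare it against a stored snapshot file. A minimal sketch of the underlying pytest-snapshot flow, assuming configured_snapshot wraps the library's built-in snapshot fixture; stored files are regenerated by running pytest with --snapshot-update.

import json

def test_payload_snapshot(snapshot):  # pytest-snapshot's built-in fixture
    snapshot.snapshot_dir = "snapshots"  # directory where the .json snapshots live
    payload = {"providers": [{"name": "openai"}]}
    snapshot.assert_match(json.dumps(payload, indent=2, sort_keys=True), "payload.json")
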
def test_list_llm_models_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful listing of LLM models with pagination"""
|
||||
# Mock the database function - now returns LlmModelsResponse
|
||||
mock_model = llm_model.LlmModel(
|
||||
id="model-1",
|
||||
slug="gpt-4o",
|
||||
display_name="GPT-4o",
|
||||
description="GPT-4 Optimized",
|
||||
provider_id="provider-1",
|
||||
context_window=128000,
|
||||
max_output_tokens=16384,
|
||||
is_enabled=True,
|
||||
capabilities={},
|
||||
metadata={},
|
||||
costs=[
|
||||
llm_model.LlmModelCost(
|
||||
id="cost-1",
|
||||
credit_cost=10,
|
||||
credential_provider="openai",
|
||||
metadata={},
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
mock_response = llm_model.LlmModelsResponse(
|
||||
models=[mock_model],
|
||||
pagination=Pagination(
|
||||
total_items=1,
|
||||
total_pages=1,
|
||||
current_page=1,
|
||||
page_size=50,
|
||||
),
|
||||
)
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.list_models",
|
||||
new=AsyncMock(return_value=mock_response),
|
||||
)
|
||||
|
||||
response = client.get("/admin/llm/models")
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert len(response_data["models"]) == 1
|
||||
assert response_data["models"][0]["slug"] == "gpt-4o"
|
||||
assert response_data["pagination"]["total_items"] == 1
|
||||
assert response_data["pagination"]["page_size"] == 50
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"list_llm_models_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_create_llm_provider_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful creation of LLM provider"""
|
||||
mock_provider = {
|
||||
"id": "new-provider-id",
|
||||
"name": "groq",
|
||||
"display_name": "Groq",
|
||||
"description": "Groq LLM provider",
|
||||
"supports_tools": True,
|
||||
"supports_json_output": True,
|
||||
"supports_reasoning": False,
|
||||
"supports_parallel_tool": False,
|
||||
"metadata": {},
|
||||
}
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.upsert_provider",
|
||||
new=AsyncMock(return_value=mock_provider),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"name": "groq",
|
||||
"display_name": "Groq",
|
||||
"description": "Groq LLM provider",
|
||||
"supports_tools": True,
|
||||
"supports_json_output": True,
|
||||
"supports_reasoning": False,
|
||||
"supports_parallel_tool": False,
|
||||
"metadata": {},
|
||||
}
|
||||
|
||||
response = client.post("/admin/llm/providers", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["name"] == "groq"
|
||||
assert response_data["display_name"] == "Groq"
|
||||
|
||||
# Verify refresh was called
|
||||
mock_refresh.assert_called_once()
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"create_llm_provider_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_create_llm_model_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful creation of LLM model"""
|
||||
mock_model = {
|
||||
"id": "new-model-id",
|
||||
"slug": "gpt-4.1-mini",
|
||||
"display_name": "GPT-4.1 Mini",
|
||||
"description": "Latest GPT-4.1 Mini model",
|
||||
"provider_id": "provider-1",
|
||||
"context_window": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"is_enabled": True,
|
||||
"capabilities": {},
|
||||
"metadata": {},
|
||||
"costs": [
|
||||
{
|
||||
"id": "cost-id",
|
||||
"credit_cost": 5,
|
||||
"credential_provider": "openai",
|
||||
"metadata": {},
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.create_model",
|
||||
new=AsyncMock(return_value=mock_model),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"slug": "gpt-4.1-mini",
|
||||
"display_name": "GPT-4.1 Mini",
|
||||
"description": "Latest GPT-4.1 Mini model",
|
||||
"provider_id": "provider-1",
|
||||
"context_window": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"is_enabled": True,
|
||||
"capabilities": {},
|
||||
"metadata": {},
|
||||
"costs": [
|
||||
{
|
||||
"credit_cost": 5,
|
||||
"credential_provider": "openai",
|
||||
"metadata": {},
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
response = client.post("/admin/llm/models", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["slug"] == "gpt-4.1-mini"
|
||||
assert response_data["is_enabled"] is True
|
||||
|
||||
# Verify refresh was called
|
||||
mock_refresh.assert_called_once()
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"create_llm_model_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_update_llm_model_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful update of LLM model"""
|
||||
mock_model = {
|
||||
"id": "model-1",
|
||||
"slug": "gpt-4o",
|
||||
"display_name": "GPT-4o Updated",
|
||||
"description": "Updated description",
|
||||
"provider_id": "provider-1",
|
||||
"context_window": 256000,
|
||||
"max_output_tokens": 32768,
|
||||
"is_enabled": True,
|
||||
"capabilities": {},
|
||||
"metadata": {},
|
||||
"costs": [
|
||||
{
|
||||
"id": "cost-1",
|
||||
"credit_cost": 15,
|
||||
"credential_provider": "openai",
|
||||
"metadata": {},
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.update_model",
|
||||
new=AsyncMock(return_value=mock_model),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
request_data = {
|
||||
"display_name": "GPT-4o Updated",
|
||||
"description": "Updated description",
|
||||
"context_window": 256000,
|
||||
"max_output_tokens": 32768,
|
||||
}
|
||||
|
||||
response = client.patch("/admin/llm/models/model-1", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["display_name"] == "GPT-4o Updated"
|
||||
assert response_data["context_window"] == 256000
|
||||
|
||||
# Verify refresh was called
|
||||
mock_refresh.assert_called_once()
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"update_llm_model_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_toggle_llm_model_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful toggling of LLM model enabled status"""
|
||||
# Create a proper mock model object
|
||||
mock_model = llm_model.LlmModel(
|
||||
id="model-1",
|
||||
slug="gpt-4o",
|
||||
display_name="GPT-4o",
|
||||
description="GPT-4 Optimized",
|
||||
provider_id="provider-1",
|
||||
context_window=128000,
|
||||
max_output_tokens=16384,
|
||||
is_enabled=False,
|
||||
capabilities={},
|
||||
metadata={},
|
||||
costs=[],
|
||||
)
|
||||
|
||||
# Create a proper ToggleLlmModelResponse
|
||||
mock_response = llm_model.ToggleLlmModelResponse(
|
||||
model=mock_model,
|
||||
nodes_migrated=0,
|
||||
migrated_to_slug=None,
|
||||
migration_id=None,
|
||||
)
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.toggle_model",
|
||||
new=AsyncMock(return_value=mock_response),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
request_data = {"is_enabled": False}
|
||||
|
||||
response = client.patch("/admin/llm/models/model-1/toggle", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["model"]["is_enabled"] is False
|
||||
|
||||
# Verify refresh was called
|
||||
mock_refresh.assert_called_once()
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"toggle_llm_model_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_delete_llm_model_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
configured_snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test successful deletion of LLM model with migration"""
|
||||
# Create a proper DeleteLlmModelResponse
|
||||
mock_response = llm_model.DeleteLlmModelResponse(
|
||||
deleted_model_slug="gpt-3.5-turbo",
|
||||
deleted_model_display_name="GPT-3.5 Turbo",
|
||||
replacement_model_slug="gpt-4o-mini",
|
||||
nodes_migrated=42,
|
||||
message="Successfully deleted model 'GPT-3.5 Turbo' (gpt-3.5-turbo) "
|
||||
"and migrated 42 workflow node(s) to 'gpt-4o-mini'.",
|
||||
)
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
||||
new=AsyncMock(return_value=mock_response),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
response = client.delete(
|
||||
"/admin/llm/models/model-1?replacement_model_slug=gpt-4o-mini"
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["deleted_model_slug"] == "gpt-3.5-turbo"
|
||||
assert response_data["nodes_migrated"] == 42
|
||||
assert response_data["replacement_model_slug"] == "gpt-4o-mini"
|
||||
|
||||
# Verify refresh was called
|
||||
mock_refresh.assert_called_once()
|
||||
|
||||
# Snapshot test the response (must be string)
|
||||
configured_snapshot.assert_match(
|
||||
json.dumps(response_data, indent=2, sort_keys=True),
|
||||
"delete_llm_model_success.json",
|
||||
)
|
||||
|
||||
|
||||
def test_delete_llm_model_validation_error(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test deletion fails with proper error when validation fails"""
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
||||
new=AsyncMock(side_effect=ValueError("Replacement model 'invalid' not found")),
|
||||
)
|
||||
|
||||
response = client.delete("/admin/llm/models/model-1?replacement_model_slug=invalid")
|
||||
|
||||
assert response.status_code == 400
|
||||
assert "Replacement model 'invalid' not found" in response.json()["detail"]
|
||||
|
||||
|
||||
def test_delete_llm_model_no_replacement_with_usage(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test deletion fails when nodes exist but no replacement is provided"""
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
||||
new=AsyncMock(
|
||||
side_effect=ValueError(
|
||||
"Cannot delete model 'test-model': 5 workflow node(s) are using it. "
|
||||
"Please provide a replacement_model_slug to migrate them."
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
response = client.delete("/admin/llm/models/model-1")
|
||||
|
||||
assert response.status_code == 400
|
||||
assert "workflow node(s) are using it" in response.json()["detail"]
|
||||
|
||||
|
||||
def test_delete_llm_model_no_replacement_no_usage(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test deletion succeeds when no nodes use the model and no replacement is provided"""
|
||||
mock_response = llm_model.DeleteLlmModelResponse(
|
||||
deleted_model_slug="unused-model",
|
||||
deleted_model_display_name="Unused Model",
|
||||
replacement_model_slug=None,
|
||||
nodes_migrated=0,
|
||||
message="Successfully deleted model 'Unused Model' (unused-model). No workflows were using this model.",
|
||||
)
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.admin.llm_routes.llm_db.delete_model",
|
||||
new=AsyncMock(return_value=mock_response),
|
||||
)
|
||||
|
||||
mock_refresh = mocker.patch(
|
||||
"backend.api.features.admin.llm_routes._refresh_runtime_state",
|
||||
new=AsyncMock(),
|
||||
)
|
||||
|
||||
response = client.delete("/admin/llm/models/model-1")
|
||||
|
||||
assert response.status_code == 200
|
||||
response_data = response.json()
|
||||
assert response_data["deleted_model_slug"] == "unused-model"
|
||||
assert response_data["nodes_migrated"] == 0
|
||||
assert response_data["replacement_model_slug"] is None
|
||||
mock_refresh.assert_called_once()
|
||||
@@ -20,7 +20,6 @@ from backend.blocks._base import (
)
from backend.blocks.llm import LlmModel
from backend.data.db import query_raw_with_schema
from backend.data.llm_registry import get_all_model_slugs_for_validation
from backend.integrations.providers import ProviderName
from backend.util.cache import cached
from backend.util.models import Pagination
@@ -37,14 +36,7 @@ from .model import (
)

logger = logging.getLogger(__name__)


def _get_llm_models() -> list[str]:
"""Get LLM model names for search matching from the registry."""
return [
slug.lower().replace("-", " ") for slug in get_all_model_slugs_for_validation()
]

llm_models = [name.name.lower().replace("_", " ") for name in LlmModel]

MAX_LIBRARY_AGENT_RESULTS = 100
MAX_MARKETPLACE_AGENT_RESULTS = 100
@@ -509,8 +501,8 @@ async def _get_static_counts():
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
for field in schema_cls.model_fields.values():
if field.annotation == LlmModel:
# Check if query matches any value in llm_models from registry
if any(query in name for name in _get_llm_models()):
# Check if query matches any value in llm_models
if any(query in name for name in llm_models):
return True
return False

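An illustration of the normalization both branches above rely on: slugs or enum names are lowercased with their separators turned into spaces, so a space-separated search query can substring-match them. Slug values are made up.

slugs = ["gpt-4o-mini", "claude-3-5-sonnet"]
names = [slug.lower().replace("-", " ") for slug in slugs]  # ["gpt 4o mini", "claude 3 5 sonnet"]
query = "gpt 4o"
print(any(query in name for name in names))  # True
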
@@ -24,7 +24,6 @@ from .tools.models import (
|
||||
AgentPreviewResponse,
|
||||
AgentSavedResponse,
|
||||
AgentsFoundResponse,
|
||||
BlockDetailsResponse,
|
||||
BlockListResponse,
|
||||
BlockOutputResponse,
|
||||
ClarificationNeededResponse,
|
||||
@@ -972,7 +971,6 @@ ToolResponseUnion = (
|
||||
| AgentSavedResponse
|
||||
| ClarificationNeededResponse
|
||||
| BlockListResponse
|
||||
| BlockDetailsResponse
|
||||
| BlockOutputResponse
|
||||
| DocSearchResultsResponse
|
||||
| DocPageResponse
|
||||
|
||||
@@ -7,6 +7,7 @@ from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
|
||||
from backend.api.features.chat.tools.models import (
|
||||
BlockInfoSummary,
|
||||
BlockInputFieldInfo,
|
||||
BlockListResponse,
|
||||
ErrorResponse,
|
||||
NoResultsResponse,
|
||||
@@ -54,8 +55,7 @@ class FindBlockTool(BaseTool):
|
||||
"Blocks are reusable components that perform specific tasks like "
|
||||
"sending emails, making API calls, processing text, etc. "
|
||||
"IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
|
||||
"The response includes each block's id, name, and description. "
|
||||
"Call run_block with the block's id **with no inputs** to see detailed inputs/outputs and execute it."
|
||||
"The response includes each block's id, required_inputs, and input_schema."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -124,7 +124,7 @@ class FindBlockTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Enrich results with block information
|
||||
# Enrich results with full block information
|
||||
blocks: list[BlockInfoSummary] = []
|
||||
for result in results:
|
||||
block_id = result["content_id"]
|
||||
@@ -141,11 +141,65 @@ class FindBlockTool(BaseTool):
|
||||
):
|
||||
continue
|
||||
|
||||
# Get input/output schemas
|
||||
input_schema = {}
|
||||
output_schema = {}
|
||||
try:
|
||||
input_schema = block.input_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to generate input schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
try:
|
||||
output_schema = block.output_schema.jsonschema()
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to generate output schema for block %s: %s",
|
||||
block_id,
|
||||
e,
|
||||
)
|
||||
|
||||
# Get categories from block instance
|
||||
categories = []
|
||||
if hasattr(block, "categories") and block.categories:
|
||||
categories = [cat.value for cat in block.categories]
|
||||
|
||||
# Extract required inputs for easier use
|
||||
required_inputs: list[BlockInputFieldInfo] = []
|
||||
if input_schema:
|
||||
properties = input_schema.get("properties", {})
|
||||
required_fields = set(input_schema.get("required", []))
|
||||
# Get credential field names to exclude from required inputs
|
||||
credentials_fields = set(
|
||||
block.input_schema.get_credentials_fields().keys()
|
||||
)
|
||||
|
||||
for field_name, field_schema in properties.items():
|
||||
# Skip credential fields - they're handled separately
|
||||
if field_name in credentials_fields:
|
||||
continue
|
||||
|
||||
required_inputs.append(
|
||||
BlockInputFieldInfo(
|
||||
name=field_name,
|
||||
type=field_schema.get("type", "string"),
|
||||
description=field_schema.get("description", ""),
|
||||
required=field_name in required_fields,
|
||||
default=field_schema.get("default"),
|
||||
)
|
||||
)
|
||||
|
||||
blocks.append(
|
||||
BlockInfoSummary(
|
||||
id=block_id,
|
||||
name=block.name,
|
||||
description=block.description or "",
|
||||
categories=categories,
|
||||
input_schema=input_schema,
|
||||
output_schema=output_schema,
|
||||
required_inputs=required_inputs,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -174,7 +228,8 @@ class FindBlockTool(BaseTool):
|
||||
return BlockListResponse(
|
||||
message=(
|
||||
f"Found {len(blocks)} block(s) matching '{query}'. "
|
||||
"To see a block's inputs/outputs and execute it, use run_block with the block's 'id' - providing no inputs."
|
||||
"To execute a block, use run_block with the block's 'id' field "
|
||||
"and provide 'input_data' matching the block's input_schema."
|
||||
),
|
||||
blocks=blocks,
|
||||
count=len(blocks),
|
||||
|
||||
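An illustrative shape of one enriched entry produced by the loop above, using the BlockInfoSummary and BlockInputFieldInfo models imported from backend.api.features.chat.tools.models in this file; all field values are made up.

BlockInfoSummary(
    id="http-block-id",
    name="Send Web Request",
    description="Send an HTTP request to a URL",
    categories=["DEVELOPER_TOOLS"],
    input_schema={"properties": {"url": {"type": "string"}}, "required": ["url"]},
    output_schema={"properties": {"response": {"type": "object"}}},
    required_inputs=[
        BlockInputFieldInfo(
            name="url",
            type="string",
            description="The URL to send the request to",
            required=True,
            default=None,
        )
    ],
)
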
@@ -18,13 +18,7 @@ _TEST_USER_ID = "test-user-find-block"
|
||||
|
||||
|
||||
def make_mock_block(
|
||||
block_id: str,
|
||||
name: str,
|
||||
block_type: BlockType,
|
||||
disabled: bool = False,
|
||||
input_schema: dict | None = None,
|
||||
output_schema: dict | None = None,
|
||||
credentials_fields: dict | None = None,
|
||||
block_id: str, name: str, block_type: BlockType, disabled: bool = False
|
||||
):
|
||||
"""Create a mock block for testing."""
|
||||
mock = MagicMock()
|
||||
@@ -34,13 +28,10 @@ def make_mock_block(
|
||||
mock.block_type = block_type
|
||||
mock.disabled = disabled
|
||||
mock.input_schema = MagicMock()
|
||||
mock.input_schema.jsonschema.return_value = input_schema or {
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
|
||||
mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
|
||||
mock.input_schema.get_credentials_fields.return_value = {}
|
||||
mock.output_schema = MagicMock()
|
||||
mock.output_schema.jsonschema.return_value = output_schema or {}
|
||||
mock.output_schema.jsonschema.return_value = {}
|
||||
mock.categories = []
|
||||
return mock
|
||||
|
||||
@@ -146,241 +137,3 @@ class TestFindBlockFiltering:
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert len(response.blocks) == 1
|
||||
assert response.blocks[0].id == "normal-block-id"
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_response_size_average_chars_per_block(self):
|
||||
"""Measure average chars per block in the serialized response."""
|
||||
session = make_session(user_id=_TEST_USER_ID)
|
||||
|
||||
# Realistic block definitions modeled after real blocks
|
||||
block_defs = [
|
||||
{
|
||||
"id": "http-block-id",
|
||||
"name": "Send Web Request",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The URL to send the request to",
|
||||
},
|
||||
"method": {
|
||||
"type": "string",
|
||||
"description": "The HTTP method to use",
|
||||
},
|
||||
"headers": {
|
||||
"type": "object",
|
||||
"description": "Headers to include in the request",
|
||||
},
|
||||
"json_format": {
|
||||
"type": "boolean",
|
||||
"description": "If true, send the body as JSON",
|
||||
},
|
||||
"body": {
|
||||
"type": "object",
|
||||
"description": "Form/JSON body payload",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "HTTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["url", "method"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "object",
|
||||
"description": "The response from the server",
|
||||
},
|
||||
"client_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 4xx status codes",
|
||||
},
|
||||
"server_error": {
|
||||
"type": "object",
|
||||
"description": "Errors on 5xx status codes",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Errors for all other exceptions",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "email-block-id",
|
||||
"name": "Send Email",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"to_email": {
|
||||
"type": "string",
|
||||
"description": "Recipient email address",
|
||||
},
|
||||
"subject": {
|
||||
"type": "string",
|
||||
"description": "Subject of the email",
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "Body of the email",
|
||||
},
|
||||
"config": {
|
||||
"type": "object",
|
||||
"description": "SMTP Config",
|
||||
},
|
||||
"credentials": {
|
||||
"type": "object",
|
||||
"description": "SMTP credentials",
|
||||
},
|
||||
},
|
||||
"required": ["to_email", "subject", "body", "credentials"],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Status of the email sending operation",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if sending failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {"credentials": True},
|
||||
},
|
||||
{
|
||||
"id": "claude-code-block-id",
|
||||
"name": "Claude Code",
|
||||
"input_schema": {
|
||||
"properties": {
|
||||
"e2b_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for E2B platform",
|
||||
},
|
||||
"anthropic_credentials": {
|
||||
"type": "object",
|
||||
"description": "API key for Anthropic",
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "Task or instruction for Claude Code",
|
||||
},
|
||||
"timeout": {
|
||||
"type": "integer",
|
||||
"description": "Sandbox timeout in seconds",
|
||||
},
|
||||
"setup_commands": {
|
||||
"type": "array",
|
||||
"description": "Shell commands to run before execution",
|
||||
},
|
||||
"working_directory": {
|
||||
"type": "string",
|
||||
"description": "Working directory for Claude Code",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID to resume a conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "Sandbox ID to reconnect to",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Previous conversation history",
|
||||
},
|
||||
"dispose_sandbox": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to dispose sandbox after execution",
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"e2b_credentials",
|
||||
"anthropic_credentials",
|
||||
"prompt",
|
||||
],
|
||||
},
|
||||
"output_schema": {
|
||||
"properties": {
|
||||
"response": {
|
||||
"type": "string",
|
||||
"description": "Output from Claude Code execution",
|
||||
},
|
||||
"files": {
|
||||
"type": "array",
|
||||
"description": "Files created/modified by Claude Code",
|
||||
},
|
||||
"conversation_history": {
|
||||
"type": "string",
|
||||
"description": "Full conversation history",
|
||||
},
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Session ID for this conversation",
|
||||
},
|
||||
"sandbox_id": {
|
||||
"type": "string",
|
||||
"description": "ID of the sandbox instance",
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message if execution failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"credentials_fields": {
|
||||
"e2b_credentials": True,
|
||||
"anthropic_credentials": True,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
search_results = [
|
||||
{"content_id": d["id"], "score": 0.9 - i * 0.1}
|
||||
for i, d in enumerate(block_defs)
|
||||
]
|
||||
mock_blocks = {
|
||||
d["id"]: make_mock_block(
|
||||
block_id=d["id"],
|
||||
name=d["name"],
|
||||
block_type=BlockType.STANDARD,
|
||||
input_schema=d["input_schema"],
|
||||
output_schema=d["output_schema"],
|
||||
credentials_fields=d["credentials_fields"],
|
||||
)
|
||||
for d in block_defs
|
||||
}
|
||||
|
||||
with patch(
|
||||
"backend.api.features.chat.tools.find_block.unified_hybrid_search",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(search_results, len(search_results)),
|
||||
), patch(
|
||||
"backend.api.features.chat.tools.find_block.get_block",
|
||||
side_effect=lambda bid: mock_blocks.get(bid),
|
||||
):
|
||||
tool = FindBlockTool()
|
||||
response = await tool._execute(
|
||||
user_id=_TEST_USER_ID, session=session, query="test"
|
||||
)
|
||||
|
||||
assert isinstance(response, BlockListResponse)
|
||||
assert response.count == len(block_defs)
|
||||
|
||||
total_chars = len(response.model_dump_json())
|
||||
avg_chars = total_chars // response.count
|
||||
|
||||
# Print for visibility in test output
|
||||
print(f"\nTotal response size: {total_chars} chars")
|
||||
print(f"Number of blocks: {response.count}")
|
||||
print(f"Average chars per block: {avg_chars}")
|
||||
|
||||
# The old response was ~90K for 10 blocks (~9K per block).
|
||||
# Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
|
||||
# Now with only id/name/description, we expect ~300 chars per block.
|
||||
assert avg_chars < 500, (
|
||||
f"Average chars per block ({avg_chars}) exceeds 500. "
|
||||
f"Total response: {total_chars} chars for {response.count} blocks."
|
||||
)
|
||||
|
||||
@@ -25,7 +25,6 @@ class ResponseType(str, Enum):
    AGENT_SAVED = "agent_saved"
    CLARIFICATION_NEEDED = "clarification_needed"
    BLOCK_LIST = "block_list"
    BLOCK_DETAILS = "block_details"
    BLOCK_OUTPUT = "block_output"
    DOC_SEARCH_RESULTS = "doc_search_results"
    DOC_PAGE = "doc_page"
@@ -335,6 +334,13 @@ class BlockInfoSummary(BaseModel):
    id: str
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
    )


class BlockListResponse(ToolResponseBase):
@@ -344,25 +350,10 @@ class BlockListResponse(ToolResponseBase):
    blocks: list[BlockInfoSummary]
    count: int
    query: str


class BlockDetails(BaseModel):
    """Detailed block information."""

    id: str
    name: str
    description: str
    inputs: dict[str, Any] = {}
    outputs: dict[str, Any] = {}
    credentials: list[CredentialsMetaInput] = []


class BlockDetailsResponse(ToolResponseBase):
    """Response for block details (first run_block attempt)."""

    type: ResponseType = ResponseType.BLOCK_DETAILS
    block: BlockDetails
    user_authenticated: bool = False
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
    )


class BlockOutputResponse(ToolResponseBase):

@@ -23,11 +23,8 @@ from backend.util.exceptions import BlockError
from .base import BaseTool
from .helpers import get_inputs_from_schema
from .models import (
    BlockDetails,
    BlockDetailsResponse,
    BlockOutputResponse,
    ErrorResponse,
    InputValidationErrorResponse,
    SetupInfo,
    SetupRequirementsResponse,
    ToolResponseBase,
@@ -54,8 +51,8 @@ class RunBlockTool(BaseTool):
            "Execute a specific block with the provided input data. "
            "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
            "do NOT guess or make up block IDs. "
            "On first attempt (without input_data), returns detailed schema showing "
            "required inputs and outputs. Then call again with proper input_data to execute."
            "Use the 'id' from find_block results and provide input_data "
            "matching the block's required_inputs."
        )

    @property
@@ -70,19 +67,11 @@ class RunBlockTool(BaseTool):
                        "NEVER guess this - always get it from find_block first."
                    ),
                },
                "block_name": {
                    "type": "string",
                    "description": (
                        "The block's human-readable name from find_block results. "
                        "Used for display purposes in the UI."
                    ),
                },
                "input_data": {
                    "type": "object",
                    "description": (
                        "Input values for the block. "
                        "First call with empty {} to see the block's schema, "
                        "then call again with proper values to execute."
                        "Input values for the block. Use the 'required_inputs' field "
                        "from find_block to see what fields are needed."
                    ),
                },
            },
@@ -167,34 +156,6 @@ class RunBlockTool(BaseTool):
            await self._resolve_block_credentials(user_id, block, input_data)
        )

        # Get block schemas for details/validation
        try:
            input_schema: dict[str, Any] = block.input_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate input schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid input schema",
                error=str(e),
                session_id=session_id,
            )
        try:
            output_schema: dict[str, Any] = block.output_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate output schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid output schema",
                error=str(e),
                session_id=session_id,
            )

        if missing_credentials:
            # Return setup requirements response with missing credentials
            credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -227,53 +188,6 @@ class RunBlockTool(BaseTool):
                graph_version=None,
            )

        # Check if this is a first attempt (required inputs missing)
        # Return block details so user can see what inputs are needed
        credentials_fields = set(block.input_schema.get_credentials_fields().keys())
        required_keys = set(input_schema.get("required", []))
        required_non_credential_keys = required_keys - credentials_fields
        provided_input_keys = set(input_data.keys()) - credentials_fields

        # Check for unknown input fields
        valid_fields = (
            set(input_schema.get("properties", {}).keys()) - credentials_fields
        )
        unrecognized_fields = provided_input_keys - valid_fields
        if unrecognized_fields:
            return InputValidationErrorResponse(
                message=(
                    f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. "
                    f"Block was not executed. Please use the correct field names from the schema."
                ),
                session_id=session_id,
                unrecognized_fields=sorted(unrecognized_fields),
                inputs=input_schema,
            )

        # Show details when not all required non-credential inputs are provided
        if not (required_non_credential_keys <= provided_input_keys):
            # Get credentials info for the response
            credentials_meta = []
            for field_name, cred_meta in matched_credentials.items():
                credentials_meta.append(cred_meta)

            return BlockDetailsResponse(
                message=(
                    f"Block '{block.name}' details. "
                    "Provide input_data matching the inputs schema to execute the block."
                ),
                session_id=session_id,
                block=BlockDetails(
                    id=block_id,
                    name=block.name,
                    description=block.description or "",
                    inputs=input_schema,
                    outputs=output_schema,
                    credentials=credentials_meta,
                ),
                user_authenticated=True,
            )

        try:
            # Get or create user's workspace for CoPilot file operations
            workspace = await get_or_create_workspace(user_id)

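The flow above makes run_block a two-step tool: a call whose input_data is missing required non-credential fields comes back as a BlockDetailsResponse carrying the full input/output schema, and a follow-up call with valid values actually executes the block. A minimal sketch of that interaction, assuming a block_id already obtained from find_block and an existing session (the field names in input_data are hypothetical):

    tool = RunBlockTool()
    # First call with empty input_data: the tool answers with the block's schema.
    details = await tool._execute(
        user_id=user_id, session=session, block_id=block_id, input_data={}
    )
    assert isinstance(details, BlockDetailsResponse)
    # Second call, supplying the required fields listed in details.block.inputs.
    result = await tool._execute(
        user_id=user_id,
        session=session,
        block_id=block_id,
        input_data={"prompt": "Write a haiku"},
    )
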
@@ -1,15 +1,10 @@
"""Tests for block execution guards and input validation in RunBlockTool."""
"""Tests for block execution guards in RunBlockTool."""

from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import (
    BlockDetailsResponse,
    BlockOutputResponse,
    ErrorResponse,
    InputValidationErrorResponse,
)
from backend.api.features.chat.tools.models import ErrorResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType

@@ -33,39 +28,6 @@ def make_mock_block(
    return mock


def make_mock_block_with_schema(
    block_id: str,
    name: str,
    input_properties: dict,
    required_fields: list[str],
    output_properties: dict | None = None,
):
    """Create a mock block with a defined input/output schema for validation tests."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.block_type = BlockType.STANDARD
    mock.disabled = False
    mock.description = f"Test block: {name}"

    input_schema = {
        "properties": input_properties,
        "required": required_fields,
    }
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = input_schema
    mock.input_schema.get_credentials_fields_info.return_value = {}
    mock.input_schema.get_credentials_fields.return_value = {}

    output_schema = {
        "properties": output_properties or {"result": {"type": "string"}},
    }
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = output_schema

    return mock


class TestRunBlockFiltering:
    """Tests for block execution guards in RunBlockTool."""

@@ -142,221 +104,3 @@ class TestRunBlockFiltering:
        # (may be other errors like missing credentials, but not the exclusion guard)
        if isinstance(response, ErrorResponse):
            assert "cannot be run directly in CoPilot" not in response.message


class TestRunBlockInputValidation:
    """Tests for input field validation in RunBlockTool.

    run_block rejects unknown input field names with InputValidationErrorResponse,
    preventing silent failures where incorrect keys would be ignored and the block
    would execute with default values instead of the caller's intended values.
    """

    @pytest.mark.asyncio(loop_scope="session")
    async def test_unknown_input_fields_are_rejected(self):
        """run_block rejects unknown input fields instead of silently ignoring them.

        Scenario: The AI Text Generator block has a field called 'model' (for LLM model
        selection), but the LLM calling the tool guesses wrong and sends 'LLM_Model'
        instead. The block should reject the request and return the valid schema.
        """
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string", "description": "The prompt to send"},
                "model": {
                    "type": "string",
                    "description": "The LLM model to use",
                    "default": "gpt-4o-mini",
                },
                "sys_prompt": {
                    "type": "string",
                    "description": "System prompt",
                    "default": "",
                },
            },
            required_fields=["prompt"],
            output_properties={"response": {"type": "string"}},
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # Provide 'prompt' (correct) but 'LLM_Model' instead of 'model' (wrong key)
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Write a haiku about coding",
                    "LLM_Model": "claude-opus-4-6",  # WRONG KEY - should be 'model'
                },
            )

        assert isinstance(response, InputValidationErrorResponse)
        assert "LLM_Model" in response.unrecognized_fields
        assert "Block was not executed" in response.message
        assert "inputs" in response.model_dump()  # valid schema included

    @pytest.mark.asyncio(loop_scope="session")
    async def test_multiple_wrong_keys_are_all_reported(self):
        """All unrecognized field names are reported in a single error response."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
                "sys_prompt": {"type": "string", "default": ""},
                "retry": {"type": "integer", "default": 3},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Hello",  # correct
                    "llm_model": "claude-opus-4-6",  # WRONG - should be 'model'
                    "system_prompt": "Be helpful",  # WRONG - should be 'sys_prompt'
                    "retries": 5,  # WRONG - should be 'retry'
                },
            )

        assert isinstance(response, InputValidationErrorResponse)
        assert set(response.unrecognized_fields) == {
            "llm_model",
            "system_prompt",
            "retries",
        }
        assert "Block was not executed" in response.message

    @pytest.mark.asyncio(loop_scope="session")
    async def test_unknown_fields_rejected_even_with_missing_required(self):
        """Unknown fields are caught before the missing-required-fields check."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # 'prompt' is missing AND 'LLM_Model' is an unknown field
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "LLM_Model": "claude-opus-4-6",  # wrong key, and 'prompt' is missing
                },
            )

        # Unknown fields are caught first
        assert isinstance(response, InputValidationErrorResponse)
        assert "LLM_Model" in response.unrecognized_fields

    @pytest.mark.asyncio(loop_scope="session")
    async def test_correct_inputs_still_execute(self):
        """Correct input field names pass validation and the block executes."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        async def mock_execute(input_data, **kwargs):
            yield "response", "Generated text"

        mock_block.execute = mock_execute

        with (
            patch(
                "backend.api.features.chat.tools.run_block.get_block",
                return_value=mock_block,
            ),
            patch(
                "backend.api.features.chat.tools.run_block.get_or_create_workspace",
                new_callable=AsyncMock,
                return_value=MagicMock(id="test-workspace-id"),
            ),
        ):
            tool = RunBlockTool()

            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "prompt": "Write a haiku",
                    "model": "gpt-4o-mini",  # correct field name
                },
            )

        assert isinstance(response, BlockOutputResponse)
        assert response.success is True

    @pytest.mark.asyncio(loop_scope="session")
    async def test_missing_required_fields_returns_details(self):
        """Missing required fields returns BlockDetailsResponse with schema."""
        session = make_session(user_id=_TEST_USER_ID)

        mock_block = make_mock_block_with_schema(
            block_id="ai-text-gen-id",
            name="AI Text Generator",
            input_properties={
                "prompt": {"type": "string"},
                "model": {"type": "string", "default": "gpt-4o-mini"},
            },
            required_fields=["prompt"],
        )

        with patch(
            "backend.api.features.chat.tools.run_block.get_block",
            return_value=mock_block,
        ):
            tool = RunBlockTool()

            # Only provide valid optional field, missing required 'prompt'
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="ai-text-gen-id",
                input_data={
                    "model": "gpt-4o-mini",  # valid but optional
                },
            )

        assert isinstance(response, BlockDetailsResponse)

@@ -1,153 +0,0 @@
"""Tests for BlockDetailsResponse in RunBlockTool."""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import BlockDetailsResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.blocks._base import BlockType
from backend.data.model import CredentialsMetaInput
from backend.integrations.providers import ProviderName

from ._test_data import make_session

_TEST_USER_ID = "test-user-run-block-details"


def make_mock_block_with_inputs(
    block_id: str, name: str, description: str = "Test description"
):
    """Create a mock block with input/output schemas for testing."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.description = description
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    # Input schema with non-credential fields
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "url": {"type": "string", "description": "URL to fetch"},
            "method": {"type": "string", "description": "HTTP method"},
        },
        "required": ["url"],
    }
    mock.input_schema.get_credentials_fields.return_value = {}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    # Output schema
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {
            "response": {"type": "object", "description": "HTTP response"},
            "error": {"type": "string", "description": "Error message"},
        }
    }

    return mock


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_no_input_provided():
    """When run_block is called without input_data, it should return BlockDetailsResponse."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with inputs
    http_block = make_mock_block_with_inputs(
        "http-block-id", "HTTP Request", "Send HTTP requests"
    )

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=http_block,
    ):
        # Mock credentials check to return no missing credentials
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=({}, []),  # (matched_credentials, missing_credentials)
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="http-block-id",
                input_data={},  # Empty input data
            )

    # Should return BlockDetailsResponse showing the schema
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "http-block-id"
    assert response.block.name == "HTTP Request"
    assert response.block.description == "Send HTTP requests"
    assert "url" in response.block.inputs["properties"]
    assert "method" in response.block.inputs["properties"]
    assert "response" in response.block.outputs["properties"]
    assert response.user_authenticated is True


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_only_credentials_provided():
    """When only credentials are provided (no actual input), should return details."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with both credential and non-credential inputs
    mock = MagicMock()
    mock.id = "api-block-id"
    mock.name = "API Call"
    mock.description = "Make API calls"
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "credentials": {"type": "object", "description": "API credentials"},
            "endpoint": {"type": "string", "description": "API endpoint"},
        },
        "required": ["credentials", "endpoint"],
    }
    mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {"result": {"type": "object"}}
    }

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=mock,
    ):
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=(
                {
                    "credentials": CredentialsMetaInput(
                        id="cred-id",
                        provider=ProviderName("test_provider"),
                        type="api_key",
                        title="Test Credential",
                    )
                },
                [],
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="api-block-id",
                input_data={"credentials": {"some": "cred"}},  # Only credential
            )

    # Should return details because no non-credential inputs provided
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "api-block-id"
    assert response.block.name == "API Call"
@@ -7,6 +7,7 @@ import prisma.errors
import prisma.models
import prisma.types

from backend.api.features.library.exceptions import FolderValidationError
import backend.api.features.store.exceptions as store_exceptions
import backend.api.features.store.image_gen as store_image_gen
import backend.api.features.store.media as store_media
@@ -42,6 +43,8 @@ async def list_library_agents(
    page: int = 1,
    page_size: int = 50,
    include_executions: bool = False,
    folder_id: Optional[str] = None,
    include_root_only: bool = False,
) -> library_model.LibraryAgentResponse:
    """
    Retrieves a paginated list of LibraryAgent records for a given user.
@@ -52,6 +55,8 @@ async def list_library_agents(
        sort_by: Sorting field (createdAt, updatedAt, isFavorite, isCreatedByUser).
        page: Current page (1-indexed).
        page_size: Number of items per page.
        folder_id: Filter by folder ID. If provided, only returns agents in this folder.
        include_root_only: If True, only returns agents without a folder (root-level).
        include_executions: Whether to include execution data for status calculation.
            Defaults to False for performance (UI fetches status separately).
            Set to True when accurate status/metrics are needed (e.g., agent generator).
@@ -82,6 +87,13 @@ async def list_library_agents(
        "isArchived": False,
    }

    # Apply folder filter
    if folder_id is not None:
        where_clause["folderId"] = folder_id
    elif include_root_only:
        where_clause["folderId"] = None

    # Build search filter if applicable
    if search_term:
        where_clause["OR"] = [
            {
@@ -634,6 +646,7 @@ async def update_library_agent(
    is_archived: Optional[bool] = None,
    is_deleted: Optional[Literal[False]] = None,
    settings: Optional[GraphSettings] = None,
    folder_id: Optional[str] = None,
) -> library_model.LibraryAgent:
    """
    Updates the specified LibraryAgent record.
@@ -646,6 +659,7 @@ async def update_library_agent(
        is_favorite: Whether this agent is marked as a favorite.
        is_archived: Whether this agent is archived.
        settings: User-specific settings for this library agent.
        folder_id: Folder ID to move agent to (empty string "" for root, None to skip).

    Returns:
        The updated LibraryAgent.
@@ -673,13 +687,7 @@ async def update_library_agent(
        )
        update_fields["isDeleted"] = is_deleted
    if settings is not None:
        existing_agent = await get_library_agent(id=library_agent_id, user_id=user_id)
        current_settings_dict = (
            existing_agent.settings.model_dump() if existing_agent.settings else {}
        )
        new_settings = settings.model_dump(exclude_unset=True)
        merged_settings = {**current_settings_dict, **new_settings}
        update_fields["settings"] = SafeJson(merged_settings)
        update_fields["settings"] = SafeJson(settings.model_dump())

    try:
        # If graph_version is provided, update to that specific version
@@ -918,6 +926,788 @@ async def add_store_agent_to_library(
        raise DatabaseError("Failed to add agent to library") from e


##############################################
############ Folder DB Functions #############
##############################################

MAX_FOLDER_DEPTH = 5


async def list_folders(
    user_id: str,
    parent_id: Optional[str] = None,
    include_counts: bool = True,
) -> list[library_model.LibraryFolder]:
    """
    Lists folders for a user, optionally filtered by parent.

    Args:
        user_id: The ID of the user.
        parent_id: If provided, only returns folders with this parent.
            If None, returns root-level folders.
        include_counts: Whether to include agent and subfolder counts.

    Returns:
        A list of LibraryFolder objects.
    """
    logger.debug(f"Listing folders for user #{user_id}, parent_id={parent_id}")

    try:
        where_clause: prisma.types.LibraryFolderWhereInput = {
            "userId": user_id,
            "isDeleted": False,
            "parentId": parent_id,
        }

        folders = await prisma.models.LibraryFolder.prisma().find_many(
            where=where_clause,
            order={"createdAt": "asc"},
            include=(
                {
                    "LibraryAgents": {"where": {"isDeleted": False}},
                    "Children": {"where": {"isDeleted": False}},
                }
                if include_counts
                else None
            ),
        )

        result = []
        for folder in folders:
            agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
            subfolder_count = len(folder.Children) if folder.Children else 0
            result.append(
                library_model.LibraryFolder.from_db(
                    folder,
                    agent_count=agent_count,
                    subfolder_count=subfolder_count,
                )
            )

        return result

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error listing folders: {e}")
        raise DatabaseError("Failed to list folders") from e


async def get_folder_tree(
    user_id: str,
) -> list[library_model.LibraryFolderTree]:
    """
    Gets the full folder tree for a user.

    Args:
        user_id: The ID of the user.

    Returns:
        A list of LibraryFolderTree objects (root folders with nested children).
    """
    logger.debug(f"Getting folder tree for user #{user_id}")

    try:
        # Fetch all folders for the user
        all_folders = await prisma.models.LibraryFolder.prisma().find_many(
            where={
                "userId": user_id,
                "isDeleted": False,
            },
            order={"createdAt": "asc"},
            include={
                "LibraryAgents": {"where": {"isDeleted": False}},
                "Children": {"where": {"isDeleted": False}},
            },
        )

        # Build a map of folder ID to folder data
        folder_map: dict[str, library_model.LibraryFolderTree] = {}
        for folder in all_folders:
            agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
            subfolder_count = len(folder.Children) if folder.Children else 0
            folder_map[folder.id] = library_model.LibraryFolderTree(
                **library_model.LibraryFolder.from_db(
                    folder,
                    agent_count=agent_count,
                    subfolder_count=subfolder_count,
                ).model_dump(),
                children=[],
            )

        # Build the tree structure
        root_folders: list[library_model.LibraryFolderTree] = []
        for folder in all_folders:
            tree_folder = folder_map[folder.id]
            if folder.parentId and folder.parentId in folder_map:
                folder_map[folder.parentId].children.append(tree_folder)
            else:
                root_folders.append(tree_folder)

        return root_folders

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error getting folder tree: {e}")
        raise DatabaseError("Failed to get folder tree") from e


async def get_folder(
    folder_id: str,
    user_id: str,
) -> library_model.LibraryFolder:
    """
    Gets a single folder by ID.

    Args:
        folder_id: The ID of the folder.
        user_id: The ID of the user (for ownership verification).

    Returns:
        The LibraryFolder object.

    Raises:
        NotFoundError: If the folder doesn't exist or doesn't belong to the user.
    """
    try:
        folder = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": folder_id,
                "userId": user_id,
                "isDeleted": False,
            },
            include={
                "LibraryAgents": {"where": {"isDeleted": False}},
                "Children": {"where": {"isDeleted": False}},
            },
        )

        if not folder:
            raise NotFoundError(f"Folder #{folder_id} not found")

        agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
        subfolder_count = len(folder.Children) if folder.Children else 0

        return library_model.LibraryFolder.from_db(
            folder,
            agent_count=agent_count,
            subfolder_count=subfolder_count,
        )

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error getting folder: {e}")
        raise DatabaseError("Failed to get folder") from e


async def get_folder_depth(folder_id: str, user_id: str) -> int:
    """
    Calculate the depth of a folder in the hierarchy (root=0).

    Args:
        folder_id: The ID of the folder.
        user_id: The ID of the user.

    Returns:
        The depth of the folder (0 for root-level folders).
    """
    depth = 0
    current_id: str | None = folder_id

    while current_id:
        folder = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": current_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not folder:
            break
        if folder.parentId:
            depth += 1
            current_id = folder.parentId
        else:
            break

    return depth


async def is_descendant_of(
    folder_id: str,
    potential_ancestor_id: str,
    user_id: str,
) -> bool:
    """
    Check if folder_id is a descendant of potential_ancestor_id.

    Args:
        folder_id: The ID of the folder to check.
        potential_ancestor_id: The ID of the potential ancestor.
        user_id: The ID of the user.

    Returns:
        True if folder_id is a descendant of potential_ancestor_id.
    """
    current_id: str | None = folder_id

    while current_id:
        if current_id == potential_ancestor_id:
            return True

        folder = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": current_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not folder or not folder.parentId:
            break
        current_id = folder.parentId

    return False


async def validate_folder_operation(
    folder_id: Optional[str],
    target_parent_id: Optional[str],
    user_id: str,
    max_depth: int = MAX_FOLDER_DEPTH,
) -> None:
    """
    Validate that a folder move/create operation is valid.

    Args:
        folder_id: The ID of the folder being moved (None for create).
        target_parent_id: The target parent ID (None for root).
        user_id: The ID of the user.
        max_depth: Maximum allowed nesting depth.

    Raises:
        FolderValidationError: If the operation is invalid.
    """
    # Cannot move folder into itself
    if folder_id and folder_id == target_parent_id:
        raise FolderValidationError("Cannot move folder into itself")

    # Check for circular reference
    if folder_id and target_parent_id:
        if await is_descendant_of(target_parent_id, folder_id, user_id):
            raise FolderValidationError("Cannot move folder into its own descendant")

    # Check depth limit
    if target_parent_id:
        parent_depth = await get_folder_depth(target_parent_id, user_id)
        if parent_depth + 1 >= max_depth:
            raise FolderValidationError(
                f"Maximum folder nesting depth of {max_depth} exceeded"
            )


async def create_folder(
    user_id: str,
    name: str,
    parent_id: Optional[str] = None,
    icon: Optional[str] = None,
    color: Optional[str] = None,
) -> library_model.LibraryFolder:
    """
    Creates a new folder for the user.

    Args:
        user_id: The ID of the user.
        name: The folder name.
        parent_id: Optional parent folder ID.
        icon: Optional icon identifier.
        color: Optional hex color code.

    Returns:
        The created LibraryFolder.

    Raises:
        FolderValidationError: If validation fails.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Creating folder '{name}' for user #{user_id}")

    try:
        # Validate operation
        await validate_folder_operation(
            folder_id=None,
            target_parent_id=parent_id,
            user_id=user_id,
        )

        # Verify parent exists if provided
        if parent_id:
            parent = await prisma.models.LibraryFolder.prisma().find_first(
                where={
                    "id": parent_id,
                    "userId": user_id,
                    "isDeleted": False,
                }
            )
            if not parent:
                raise NotFoundError(f"Parent folder #{parent_id} not found")

        # Build data dict conditionally - don't include Parent key if no parent_id
        create_data: dict = {
            "name": name,
            "User": {"connect": {"id": user_id}},
        }
        if icon is not None:
            create_data["icon"] = icon
        if color is not None:
            create_data["color"] = color
        if parent_id:
            create_data["Parent"] = {"connect": {"id": parent_id}}

        folder = await prisma.models.LibraryFolder.prisma().create(data=create_data)

        return library_model.LibraryFolder.from_db(folder)

    except prisma.errors.UniqueViolationError:
        raise FolderValidationError(
            "A folder with this name already exists in this location"
        )
    except prisma.errors.PrismaError as e:
        logger.error(f"Database error creating folder: {e}")
        raise DatabaseError("Failed to create folder") from e


async def create_folder_with_unique_name(
    user_id: str,
    base_name: str,
    parent_id: Optional[str] = None,
    icon: Optional[str] = None,
    color: Optional[str] = None,
) -> library_model.LibraryFolder:
    """
    Creates a folder, appending (2), (3), etc. if name exists.

    Args:
        user_id: The ID of the user.
        base_name: The base folder name.
        parent_id: Optional parent folder ID.
        icon: Optional icon identifier.
        color: Optional hex color code.

    Returns:
        The created LibraryFolder.
    """
    name = base_name
    suffix = 1

    while True:
        try:
            return await create_folder(
                user_id=user_id,
                name=name,
                parent_id=parent_id,
                icon=icon,
                color=color,
            )
        except FolderValidationError as e:
            if "already exists" in str(e):
                suffix += 1
                name = f"{base_name} ({suffix})"
            else:
                raise


async def update_folder(
    folder_id: str,
    user_id: str,
    name: Optional[str] = None,
    icon: Optional[str] = None,
    color: Optional[str] = None,
) -> library_model.LibraryFolder:
    """
    Updates a folder's properties.

    Args:
        folder_id: The ID of the folder to update.
        user_id: The ID of the user.
        name: New folder name.
        icon: New icon identifier.
        color: New hex color code.

    Returns:
        The updated LibraryFolder.

    Raises:
        NotFoundError: If the folder doesn't exist.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Updating folder #{folder_id} for user #{user_id}")

    try:
        # Verify folder exists and belongs to user
        existing = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": folder_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not existing:
            raise NotFoundError(f"Folder #{folder_id} not found")

        update_data: prisma.types.LibraryFolderUpdateInput = {}
        if name is not None:
            update_data["name"] = name
        if icon is not None:
            update_data["icon"] = icon
        if color is not None:
            update_data["color"] = color

        if not update_data:
            return await get_folder(folder_id, user_id)

        folder = await prisma.models.LibraryFolder.prisma().update(
            where={"id": folder_id},
            data=update_data,
            include={
                "LibraryAgents": {"where": {"isDeleted": False}},
                "Children": {"where": {"isDeleted": False}},
            },
        )

        if not folder:
            raise NotFoundError(f"Folder #{folder_id} not found")

        agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
        subfolder_count = len(folder.Children) if folder.Children else 0

        return library_model.LibraryFolder.from_db(
            folder,
            agent_count=agent_count,
            subfolder_count=subfolder_count,
        )

    except prisma.errors.UniqueViolationError:
        raise FolderValidationError(
            "A folder with this name already exists in this location"
        )
    except prisma.errors.PrismaError as e:
        logger.error(f"Database error updating folder: {e}")
        raise DatabaseError("Failed to update folder") from e


async def move_folder(
    folder_id: str,
    user_id: str,
    target_parent_id: Optional[str],
) -> library_model.LibraryFolder:
    """
    Moves a folder to a new parent.

    Args:
        folder_id: The ID of the folder to move.
        user_id: The ID of the user.
        target_parent_id: The target parent ID (None for root).

    Returns:
        The moved LibraryFolder.

    Raises:
        FolderValidationError: If the move is invalid.
        NotFoundError: If the folder doesn't exist.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Moving folder #{folder_id} to parent #{target_parent_id}")

    try:
        # Validate operation
        await validate_folder_operation(
            folder_id=folder_id,
            target_parent_id=target_parent_id,
            user_id=user_id,
        )

        # Verify folder exists
        existing = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": folder_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not existing:
            raise NotFoundError(f"Folder #{folder_id} not found")

        # Verify target parent exists if provided
        if target_parent_id:
            parent = await prisma.models.LibraryFolder.prisma().find_first(
                where={
                    "id": target_parent_id,
                    "userId": user_id,
                    "isDeleted": False,
                }
            )
            if not parent:
                raise NotFoundError(
                    f"Target parent folder #{target_parent_id} not found"
                )

        folder = await prisma.models.LibraryFolder.prisma().update(
            where={"id": folder_id},
            data={
                "parentId": target_parent_id,
            },
            include={
                "LibraryAgents": {"where": {"isDeleted": False}},
                "Children": {"where": {"isDeleted": False}},
            },
        )

        if not folder:
            raise NotFoundError(f"Folder #{folder_id} not found")

        agent_count = len(folder.LibraryAgents) if folder.LibraryAgents else 0
        subfolder_count = len(folder.Children) if folder.Children else 0

        return library_model.LibraryFolder.from_db(
            folder,
            agent_count=agent_count,
            subfolder_count=subfolder_count,
        )

    except prisma.errors.UniqueViolationError:
        raise FolderValidationError(
            "A folder with this name already exists in this location"
        )
    except prisma.errors.PrismaError as e:
        logger.error(f"Database error moving folder: {e}")
        raise DatabaseError("Failed to move folder") from e


async def delete_folder(
    folder_id: str,
    user_id: str,
    soft_delete: bool = True,
) -> None:
    """
    Deletes a folder and all its contents (cascade).

    Args:
        folder_id: The ID of the folder to delete.
        user_id: The ID of the user.
        soft_delete: If True, soft-deletes; otherwise hard-deletes.

    Raises:
        NotFoundError: If the folder doesn't exist.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Deleting folder #{folder_id} for user #{user_id}")

    try:
        # Verify folder exists
        existing = await prisma.models.LibraryFolder.prisma().find_first(
            where={
                "id": folder_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not existing:
            raise NotFoundError(f"Folder #{folder_id} not found")

        async with transaction() as tx:
            # Get all descendant folders recursively
            descendant_ids = await _get_descendant_folder_ids(folder_id, user_id, tx)
            all_folder_ids = [folder_id] + descendant_ids

            if soft_delete:
                # Soft-delete all agents in these folders
                await prisma.models.LibraryAgent.prisma(tx).update_many(
                    where={
                        "folderId": {"in": all_folder_ids},
                        "userId": user_id,
                    },
                    data={"isDeleted": True},
                )

                # Soft-delete all folders
                await prisma.models.LibraryFolder.prisma(tx).update_many(
                    where={
                        "id": {"in": all_folder_ids},
                        "userId": user_id,
                    },
                    data={"isDeleted": True},
                )
            else:
                # Move agents to root (or could hard-delete them)
                await prisma.models.LibraryAgent.prisma(tx).update_many(
                    where={
                        "folderId": {"in": all_folder_ids},
                        "userId": user_id,
                    },
                    data={"folderId": None},
                )

                # Hard-delete folders (children first due to FK constraints)
                for fid in reversed(all_folder_ids):
                    await prisma.models.LibraryFolder.prisma(tx).delete(
                        where={"id": fid}
                    )

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error deleting folder: {e}")
        raise DatabaseError("Failed to delete folder") from e


async def _get_descendant_folder_ids(
    folder_id: str,
    user_id: str,
    tx: Optional[prisma.Prisma] = None,
) -> list[str]:
    """
    Recursively get all descendant folder IDs.

    Args:
        folder_id: The ID of the parent folder.
        user_id: The ID of the user.
        tx: Optional transaction.

    Returns:
        A list of descendant folder IDs.
    """
    prisma_client = prisma.models.LibraryFolder.prisma(tx)
    children = await prisma_client.find_many(
        where={
            "parentId": folder_id,
            "userId": user_id,
            "isDeleted": False,
        }
    )

    result: list[str] = []
    for child in children:
        result.append(child.id)
        result.extend(await _get_descendant_folder_ids(child.id, user_id, tx))

    return result


async def move_agent_to_folder(
    library_agent_id: str,
    folder_id: Optional[str],
    user_id: str,
) -> library_model.LibraryAgent:
    """
    Moves a library agent to a folder.

    Args:
        library_agent_id: The ID of the library agent.
        folder_id: The target folder ID (None for root).
        user_id: The ID of the user.

    Returns:
        The updated LibraryAgent.

    Raises:
        NotFoundError: If the agent or folder doesn't exist.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Moving agent #{library_agent_id} to folder #{folder_id}")

    try:
        # Verify agent exists
        agent = await prisma.models.LibraryAgent.prisma().find_first(
            where={
                "id": library_agent_id,
                "userId": user_id,
                "isDeleted": False,
            }
        )
        if not agent:
            raise NotFoundError(f"Library agent #{library_agent_id} not found")

        # Verify folder exists if provided
        if folder_id:
            folder = await prisma.models.LibraryFolder.prisma().find_first(
                where={
                    "id": folder_id,
                    "userId": user_id,
                    "isDeleted": False,
                }
            )
            if not folder:
                raise NotFoundError(f"Folder #{folder_id} not found")

        await prisma.models.LibraryAgent.prisma().update(
            where={"id": library_agent_id},
            data={"folderId": folder_id},
        )

        return await get_library_agent(library_agent_id, user_id)

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error moving agent to folder: {e}")
        raise DatabaseError("Failed to move agent to folder") from e


async def bulk_move_agents_to_folder(
    agent_ids: list[str],
    folder_id: Optional[str],
    user_id: str,
) -> list[library_model.LibraryAgent]:
    """
    Moves multiple library agents to a folder.

    Args:
        agent_ids: The IDs of the library agents.
        folder_id: The target folder ID (None for root).
        user_id: The ID of the user.

    Returns:
        The updated LibraryAgents.

    Raises:
        NotFoundError: If any agent or the folder doesn't exist.
        DatabaseError: If there's a database error.
    """
    logger.debug(f"Bulk moving {len(agent_ids)} agents to folder #{folder_id}")

    try:
        # Verify folder exists if provided
        if folder_id:
            folder = await prisma.models.LibraryFolder.prisma().find_first(
                where={
                    "id": folder_id,
                    "userId": user_id,
                    "isDeleted": False,
                }
            )
            if not folder:
                raise NotFoundError(f"Folder #{folder_id} not found")

        # Update all agents
        await prisma.models.LibraryAgent.prisma().update_many(
            where={
                "id": {"in": agent_ids},
                "userId": user_id,
                "isDeleted": False,
            },
            data={"folderId": folder_id},
        )

        # Fetch and return updated agents
        agents = await prisma.models.LibraryAgent.prisma().find_many(
            where={
                "id": {"in": agent_ids},
                "userId": user_id,
            },
            include=library_agent_include(
                user_id, include_nodes=False, include_executions=False
            ),
        )

        return [library_model.LibraryAgent.from_db(agent) for agent in agents]

    except prisma.errors.PrismaError as e:
        logger.error(f"Database error bulk moving agents to folder: {e}")
        raise DatabaseError("Failed to bulk move agents to folder") from e


##############################################
########### Presets DB Functions #############
##############################################

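The folder helpers above funnel every create and move through validate_folder_operation, so self-moves, cycles, and nesting deeper than MAX_FOLDER_DEPTH are rejected before any write happens. An illustrative sketch of how a caller sees those guards (the folder IDs are made up for the example):

    # Moving a folder under one of its own descendants raises FolderValidationError.
    try:
        await move_folder(
            folder_id="folder-a", user_id=user_id, target_parent_id="child-of-a"
        )
    except FolderValidationError as e:
        print(e)  # "Cannot move folder into its own descendant"

    # Creating or moving past the depth limit (5 levels) fails the same way,
    # because validate_folder_operation checks get_folder_depth() first.
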
@@ -0,0 +1,4 @@
class FolderValidationError(Exception):
    """Raised when folder operations fail validation."""

    pass
@@ -26,6 +26,95 @@ class LibraryAgentStatus(str, Enum):
    ERROR = "ERROR"


# === Folder Models ===


class LibraryFolder(pydantic.BaseModel):
    """Represents a folder for organizing library agents."""

    id: str
    user_id: str
    name: str
    icon: str | None = None
    color: str | None = None
    parent_id: str | None = None
    created_at: datetime.datetime
    updated_at: datetime.datetime
    agent_count: int = 0  # Direct agents in folder
    subfolder_count: int = 0  # Direct child folders

    @staticmethod
    def from_db(
        folder: prisma.models.LibraryFolder,
        agent_count: int = 0,
        subfolder_count: int = 0,
    ) -> "LibraryFolder":
        """Factory method that constructs a LibraryFolder from a Prisma model."""
        return LibraryFolder(
            id=folder.id,
            user_id=folder.userId,
            name=folder.name,
            icon=folder.icon,
            color=folder.color,
            parent_id=folder.parentId,
            created_at=folder.createdAt,
            updated_at=folder.updatedAt,
            agent_count=agent_count,
            subfolder_count=subfolder_count,
        )


class LibraryFolderTree(LibraryFolder):
    """Folder with nested children for tree view."""

    children: list["LibraryFolderTree"] = []


class FolderCreateRequest(pydantic.BaseModel):
    """Request model for creating a folder."""

    name: str = pydantic.Field(..., min_length=1, max_length=100)
    icon: str | None = None
    color: str | None = pydantic.Field(
        None, pattern=r"^#[0-9A-Fa-f]{6}$", description="Hex color code (#RRGGBB)"
    )
    parent_id: str | None = None


class FolderUpdateRequest(pydantic.BaseModel):
    """Request model for updating a folder."""

    name: str | None = pydantic.Field(None, min_length=1, max_length=100)
    icon: str | None = None
    color: str | None = None


class FolderMoveRequest(pydantic.BaseModel):
    """Request model for moving a folder to a new parent."""

    target_parent_id: str | None = None  # None = move to root


class BulkMoveAgentsRequest(pydantic.BaseModel):
    """Request model for moving multiple agents to a folder."""

    agent_ids: list[str]
    folder_id: str | None = None  # None = move to root


class FolderListResponse(pydantic.BaseModel):
    """Response schema for a list of folders."""

    folders: list[LibraryFolder]
    pagination: Pagination


class FolderTreeResponse(pydantic.BaseModel):
    """Response schema for folder tree structure."""

    tree: list[LibraryFolderTree]


class MarketplaceListingCreator(pydantic.BaseModel):
    """Creator information for a marketplace listing."""

@@ -120,6 +209,9 @@ class LibraryAgent(pydantic.BaseModel):
    can_access_graph: bool
    is_latest_version: bool
    is_favorite: bool
    folder_id: str | None = None
    folder_name: str | None = None  # Denormalized for display

    recommended_schedule_cron: str | None = None
    settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)
    marketplace_listing: Optional["MarketplaceListing"] = None
@@ -228,6 +320,10 @@ class LibraryAgent(pydantic.BaseModel):
            creator=creator_data,
        )

        # Folder information
        folder_id = agent.folderId
        folder_name = agent.Folder.name if agent.Folder else None

        return LibraryAgent(
            id=agent.id,
            graph_id=agent.agentGraphId,
@@ -259,6 +355,8 @@ class LibraryAgent(pydantic.BaseModel):
            can_access_graph=can_access_graph,
            is_latest_version=is_latest_version,
            is_favorite=agent.isFavorite,
            folder_id=folder_id,
            folder_name=folder_name,
            recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
            settings=_parse_settings(agent.settings),
            marketplace_listing=marketplace_listing_data,
@@ -470,3 +568,7 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
    settings: Optional[GraphSettings] = pydantic.Field(
        default=None, description="User-specific settings for this library agent"
    )
    folder_id: Optional[str] = pydantic.Field(
        default=None,
        description="Folder ID to move agent to (empty string for root)",
    )

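The pydantic models above define the folder API payloads; FolderCreateRequest, for instance, enforces the 1-100 character name and the #RRGGBB color pattern at validation time. An illustrative request body (the values are invented):

    FolderCreateRequest(
        name="Marketing agents",
        color="#3366FF",  # must match ^#[0-9A-Fa-f]{6}$
        parent_id=None,  # None creates the folder at the root level
    )
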
@@ -1,9 +1,11 @@
import fastapi

from .agents import router as agents_router
from .folders import router as folders_router
from .presets import router as presets_router

router = fastapi.APIRouter()

router.include_router(presets_router)
router.include_router(folders_router)
router.include_router(agents_router)

@@ -41,17 +41,34 @@ async def list_library_agents(
        ge=1,
        description="Number of agents per page (must be >= 1)",
    ),
    folder_id: Optional[str] = Query(
        None,
        description="Filter by folder ID",
    ),
    include_root_only: bool = Query(
        False,
        description="Only return agents without a folder (root-level agents)",
    ),
) -> library_model.LibraryAgentResponse:
    """
    Get all agents in the user's library (both created and saved).
    """
    return await library_db.list_library_agents(
        user_id=user_id,
        search_term=search_term,
        sort_by=sort_by,
        page=page,
        page_size=page_size,
    )
    try:
        return await library_db.list_library_agents(
            user_id=user_id,
            search_term=search_term,
            sort_by=sort_by,
            page=page,
            page_size=page_size,
            folder_id=folder_id,
            include_root_only=include_root_only,
        )
    except Exception as e:
        logger.error(f"Could not list library agents for user #{user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e),
        ) from e


@router.get(
@@ -160,15 +177,34 @@ async def update_library_agent(
    """
    Update the library agent with the given fields.
    """
    return await library_db.update_library_agent(
        library_agent_id=library_agent_id,
        user_id=user_id,
        auto_update_version=payload.auto_update_version,
        graph_version=payload.graph_version,
        is_favorite=payload.is_favorite,
        is_archived=payload.is_archived,
        settings=payload.settings,
    )
    try:
        return await library_db.update_library_agent(
            library_agent_id=library_agent_id,
            user_id=user_id,
            auto_update_version=payload.auto_update_version,
            graph_version=payload.graph_version,
            is_favorite=payload.is_favorite,
            is_archived=payload.is_archived,
            settings=payload.settings,
            folder_id=payload.folder_id,
        )
    except NotFoundError as e:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=str(e),
        ) from e
    except DatabaseError as e:
        logger.error(f"Database error while updating library agent: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={"message": str(e), "hint": "Verify DB connection."},
        ) from e
    except Exception as e:
        logger.error(f"Unexpected error while updating library agent: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={"message": str(e), "hint": "Check server logs."},
        ) from e


@router.delete(

@@ -0,0 +1,408 @@
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
import autogpt_libs.auth as autogpt_auth_lib
|
||||
from fastapi import APIRouter, HTTPException, Query, Security, status
|
||||
from fastapi.responses import Response
|
||||
|
||||
from backend.util.exceptions import DatabaseError, NotFoundError
|
||||
|
||||
from .. import db as library_db
|
||||
from .. import model as library_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/folders",
|
||||
tags=["library", "folders", "private"],
|
||||
dependencies=[Security(autogpt_auth_lib.requires_user)],
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"",
|
||||
summary="List Library Folders",
|
||||
response_model=library_model.FolderListResponse,
|
||||
responses={
|
||||
200: {"description": "List of folders"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def list_folders(
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
parent_id: Optional[str] = Query(
|
||||
None,
|
||||
description="Filter by parent folder ID. If not provided, returns root-level folders.",
|
||||
),
|
||||
include_counts: bool = Query(
|
||||
True,
|
||||
description="Include agent and subfolder counts",
|
||||
),
|
||||
) -> library_model.FolderListResponse:
|
    """
    List folders for the authenticated user.

    Args:
        user_id: ID of the authenticated user.
        parent_id: Optional parent folder ID to filter by.
        include_counts: Whether to include agent and subfolder counts.

    Returns:
        A FolderListResponse containing folders.
    """
    try:
        folders = await library_db.list_folders(
            user_id=user_id,
            parent_id=parent_id,
            include_counts=include_counts,
        )
        return library_model.FolderListResponse(
            folders=folders,
            pagination=library_model.Pagination(
                total_items=len(folders),
                total_pages=1,
                current_page=1,
                page_size=len(folders),
            ),
        )
    except Exception as e:
        logger.error(f"Could not list folders for user #{user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e),
        ) from e


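A minimal usage sketch for this endpoint (assuming the folders router is mounted under the library prefix as /api/library/folders; the mount point is outside this hunk):

import httpx

async def list_root_folders(client: httpx.AsyncClient):
    # Omitting parent_id returns root-level folders; include_counts defaults to True.
    resp = await client.get("/api/library/folders", params={"include_counts": "true"})
    resp.raise_for_status()
    data = resp.json()
    # Pagination here is flat by construction: total_pages=1, page_size=len(folders).
    return data["folders"]
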
@router.get(
|
||||
"/tree",
|
||||
summary="Get Folder Tree",
|
||||
response_model=library_model.FolderTreeResponse,
|
||||
responses={
|
||||
200: {"description": "Folder tree structure"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def get_folder_tree(
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.FolderTreeResponse:
|
||||
"""
|
||||
Get the full folder tree for the authenticated user.
|
||||
|
||||
Args:
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
A FolderTreeResponse containing the nested folder structure.
|
||||
"""
|
||||
try:
|
||||
tree = await library_db.get_folder_tree(user_id=user_id)
|
||||
return library_model.FolderTreeResponse(tree=tree)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not get folder tree for user #{user_id}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{folder_id}",
|
||||
summary="Get Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder details"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def get_folder(
|
||||
folder_id: str,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Get a specific folder.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to retrieve.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The requested LibraryFolder.
|
||||
"""
|
||||
try:
|
||||
return await library_db.get_folder(folder_id=folder_id, user_id=user_id)
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except Exception as e:
|
||||
logger.error(f"Could not get folder #{folder_id}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
||||
|
||||
|
||||
@router.post(
|
||||
"",
|
||||
summary="Create Folder",
|
||||
status_code=status.HTTP_201_CREATED,
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
201: {"description": "Folder created successfully"},
|
||||
400: {"description": "Validation error"},
|
||||
404: {"description": "Parent folder not found"},
|
||||
409: {"description": "Folder name conflict"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def create_folder(
|
||||
payload: library_model.FolderCreateRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Create a new folder.
|
||||
|
||||
Args:
|
||||
payload: The folder creation request.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The created LibraryFolder.
|
||||
"""
|
||||
try:
|
||||
return await library_db.create_folder(
|
||||
user_id=user_id,
|
||||
name=payload.name,
|
||||
parent_id=payload.parent_id,
|
||||
icon=payload.icon,
|
||||
color=payload.color,
|
||||
)
|
||||
except library_db.FolderValidationError as e:
|
||||
if "already exists" in str(e):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=str(e),
|
||||
) from e
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error creating folder: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/{folder_id}",
|
||||
summary="Update Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder updated successfully"},
|
||||
400: {"description": "Validation error"},
|
||||
404: {"description": "Folder not found"},
|
||||
409: {"description": "Folder name conflict"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def update_folder(
|
||||
folder_id: str,
|
||||
payload: library_model.FolderUpdateRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Update a folder's properties.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to update.
|
||||
payload: The folder update request.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The updated LibraryFolder.
|
||||
"""
|
||||
try:
|
||||
return await library_db.update_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
name=payload.name,
|
||||
icon=payload.icon,
|
||||
color=payload.color,
|
||||
)
|
||||
except library_db.FolderValidationError as e:
|
||||
if "already exists" in str(e):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=str(e),
|
||||
) from e
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error updating folder #{folder_id}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
||||
|
||||
|
||||
@router.post(
|
||||
"/{folder_id}/move",
|
||||
summary="Move Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder moved successfully"},
|
||||
400: {"description": "Validation error (circular reference, depth exceeded)"},
|
||||
404: {"description": "Folder or target parent not found"},
|
||||
409: {"description": "Folder name conflict in target location"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def move_folder(
|
||||
folder_id: str,
|
||||
payload: library_model.FolderMoveRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Move a folder to a new parent.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to move.
|
||||
payload: The move request with target parent.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The moved LibraryFolder.
|
||||
"""
|
||||
try:
|
||||
return await library_db.move_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
target_parent_id=payload.target_parent_id,
|
||||
)
|
||||
except library_db.FolderValidationError as e:
|
||||
if "already exists" in str(e):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=str(e),
|
||||
) from e
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error moving folder #{folder_id}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
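The 400 case above covers the move-specific validation (circular reference, depth exceeded). A hypothetical sketch of the circular-reference rule, for illustration only; the real check lives in library_db.move_folder, which is not part of this hunk:

def is_circular_move(
    folder_id: str,
    target_parent_id: str | None,
    parent_of: dict[str, str | None],
) -> bool:
    # A folder may not be moved into itself or into any of its own descendants.
    current = target_parent_id
    while current is not None:
        if current == folder_id:
            return True
        current = parent_of.get(current)
    return False
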
||||
|
||||
|
||||
@router.delete(
|
||||
"/{folder_id}",
|
||||
summary="Delete Folder",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "Folder deleted successfully"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def delete_folder(
|
||||
folder_id: str,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> Response:
|
||||
"""
|
||||
Soft-delete a folder and all its contents.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to delete.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
204 No Content if successful.
|
||||
"""
|
||||
try:
|
||||
await library_db.delete_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
soft_delete=True,
|
||||
)
|
||||
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error deleting folder #{folder_id}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
||||
|
||||
|
||||
# === Bulk Agent Operations ===
|
||||
|
||||
|
||||
@router.post(
|
||||
"/agents/bulk-move",
|
||||
summary="Bulk Move Agents",
|
||||
response_model=list[library_model.LibraryAgent],
|
||||
responses={
|
||||
200: {"description": "Agents moved successfully"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def bulk_move_agents(
|
||||
payload: library_model.BulkMoveAgentsRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> list[library_model.LibraryAgent]:
|
||||
"""
|
||||
Move multiple agents to a folder.
|
||||
|
||||
Args:
|
||||
payload: The bulk move request with agent IDs and target folder.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The updated LibraryAgents.
|
||||
"""
|
||||
try:
|
||||
return await library_db.bulk_move_agents_to_folder(
|
||||
agent_ids=payload.agent_ids,
|
||||
folder_id=payload.folder_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
except NotFoundError as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail=str(e),
|
||||
) from e
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error bulk moving agents: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e),
|
||||
) from e
|
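A short usage sketch for the bulk-move endpoint (path assumes the same /api/library/folders mount as above; the body mirrors BulkMoveAgentsRequest with agent_ids and folder_id):

import httpx

async def move_agents(client: httpx.AsyncClient, agent_ids: list[str], folder_id: str | None):
    # Whether folder_id=None means "move back to root" is an assumption; the DB layer is not shown here.
    resp = await client.post(
        "/api/library/folders/agents/bulk-move",
        json={"agent_ids": agent_ids, "folder_id": folder_id},
    )
    resp.raise_for_status()
    return resp.json()  # list of updated LibraryAgent objects
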
||||
@@ -393,7 +393,6 @@ async def get_creators(
@router.get(
    "/creator/{username}",
    summary="Get creator details",
    operation_id="getV2GetCreatorDetails",
    tags=["store", "public"],
    response_model=store_model.CreatorDetails,
)

@@ -18,7 +18,6 @@ from prisma.errors import PrismaError

import backend.api.features.admin.credit_admin_routes
import backend.api.features.admin.execution_analytics_routes
import backend.api.features.admin.llm_routes
import backend.api.features.admin.store_admin_routes
import backend.api.features.builder
import backend.api.features.builder.routes
||||
@@ -39,15 +38,13 @@ import backend.data.db
|
||||
import backend.data.graph
|
||||
import backend.data.user
|
||||
import backend.integrations.webhooks.utils
|
||||
import backend.server.v2.llm.routes as public_llm_routes
|
||||
import backend.util.service
|
||||
import backend.util.settings
|
||||
from backend.api.features.chat.completion_consumer import (
|
||||
start_completion_consumer,
|
||||
stop_completion_consumer,
|
||||
)
|
||||
from backend.data import llm_registry
|
||||
from backend.data.block_cost_config import refresh_llm_costs
|
||||
from backend.blocks.llm import DEFAULT_LLM_MODEL
|
||||
from backend.data.model import Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.monitoring.instrumentation import instrument_fastapi
|
||||
@@ -118,27 +115,11 @@ async def lifespan_context(app: fastapi.FastAPI):
|
||||
|
||||
AutoRegistry.patch_integrations()
|
||||
|
||||
# Refresh LLM registry before initializing blocks so blocks can use registry data
|
||||
await llm_registry.refresh_llm_registry()
|
||||
await refresh_llm_costs()
|
||||
|
||||
# Clear block schema caches so they're regenerated with updated discriminator_mapping
|
||||
from backend.blocks._base import BlockSchema
|
||||
|
||||
BlockSchema.clear_all_schema_caches()
|
||||
|
||||
await backend.data.block.initialize_blocks()
|
||||
|
||||
await backend.data.user.migrate_and_encrypt_user_integrations()
|
||||
await backend.data.graph.fix_llm_provider_credentials()
|
||||
# migrate_llm_models uses registry default model
|
||||
from backend.blocks.llm import LlmModel
|
||||
|
||||
default_model_slug = llm_registry.get_default_model_slug()
|
||||
if default_model_slug:
|
||||
await backend.data.graph.migrate_llm_models(LlmModel(default_model_slug))
|
||||
else:
|
||||
logger.warning("Skipping LLM model migration: no default model available")
|
||||
await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
|
||||
await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
|
||||
|
||||
# Start chat completion consumer for Redis Streams notifications
|
||||
@@ -340,16 +321,6 @@ app.include_router(
|
||||
tags=["v2", "executions", "review"],
|
||||
prefix="/api/review",
|
||||
)
|
||||
app.include_router(
|
||||
backend.api.features.admin.llm_routes.router,
|
||||
tags=["v2", "admin", "llm"],
|
||||
prefix="/api/llm/admin",
|
||||
)
|
||||
app.include_router(
|
||||
public_llm_routes.router,
|
||||
tags=["v2", "llm"],
|
||||
prefix="/api",
|
||||
)
|
||||
app.include_router(
|
||||
backend.api.features.library.routes.router, tags=["v2"], prefix="/api/library"
|
||||
)
|
||||
|
||||
@@ -79,39 +79,7 @@ async def event_broadcaster(manager: ConnectionManager):
|
||||
payload=notification.payload,
|
||||
)
|
||||
|
||||
async def registry_refresh_worker():
|
||||
"""Listen for LLM registry refresh notifications and broadcast to all clients."""
|
||||
from backend.data.llm_registry import REGISTRY_REFRESH_CHANNEL
|
||||
from backend.data.redis_client import connect_async
|
||||
|
||||
redis = await connect_async()
|
||||
pubsub = redis.pubsub()
|
||||
await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
|
||||
logger.info(
|
||||
"Subscribed to LLM registry refresh notifications for WebSocket broadcast"
|
||||
)
|
||||
|
||||
async for message in pubsub.listen():
|
||||
if (
|
||||
message["type"] == "message"
|
||||
and message["channel"] == REGISTRY_REFRESH_CHANNEL
|
||||
):
|
||||
logger.info(
|
||||
"Broadcasting LLM registry refresh to all WebSocket clients"
|
||||
)
|
||||
await manager.broadcast_to_all(
|
||||
method=WSMethod.NOTIFICATION,
|
||||
data={
|
||||
"type": "LLM_REGISTRY_REFRESH",
|
||||
"event": "registry_updated",
|
||||
},
|
||||
)
|
||||
|
||||
await asyncio.gather(
|
||||
execution_worker(),
|
||||
notification_worker(),
|
||||
registry_refresh_worker(),
|
||||
)
|
||||
await asyncio.gather(execution_worker(), notification_worker())
|
||||
finally:
|
||||
# Ensure PubSub connections are closed on any exit to prevent leaks
|
||||
await execution_bus.close()
|
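For context on the registry_refresh_worker removed above: it subscribes to REGISTRY_REFRESH_CHANNEL over Redis pub/sub, so the producer side is a plain publish to that channel. A hedged sketch (the actual publisher is not part of this diff):

from backend.data.llm_registry import REGISTRY_REFRESH_CHANNEL
from backend.data.redis_client import connect_async

async def notify_registry_refresh() -> None:
    # Every subscribed WebSocket server then broadcasts an LLM_REGISTRY_REFRESH notification.
    redis = await connect_async()
    await redis.publish(REGISTRY_REFRESH_CHANNEL, "registry_updated")
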
@@ -133,26 +133,7 @@ class BlockInfo(BaseModel):
|
||||
|
||||
|
||||
class BlockSchema(BaseModel):
|
||||
cached_jsonschema: ClassVar[dict[str, Any] | None] = None
|
||||
|
||||
@classmethod
|
||||
def clear_schema_cache(cls) -> None:
|
||||
"""Clear the cached JSON schema for this class."""
|
||||
# Reset to None (not {}) so a cleared cache is unambiguously detected and the schema is regenerated
|
||||
cls.cached_jsonschema = None # type: ignore
|
||||
|
||||
@staticmethod
|
||||
def clear_all_schema_caches() -> None:
|
||||
"""Clear cached JSON schemas for all BlockSchema subclasses."""
|
||||
|
||||
def clear_recursive(cls: type) -> None:
|
||||
"""Recursively clear cache for class and all subclasses."""
|
||||
if hasattr(cls, "clear_schema_cache"):
|
||||
cls.clear_schema_cache()
|
||||
for subclass in cls.__subclasses__():
|
||||
clear_recursive(subclass)
|
||||
|
||||
clear_recursive(BlockSchema)
|
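For reference, the lifespan hook earlier in this compare calls clear_all_schema_caches right after refreshing the LLM registry; condensed, the sequence looks like this (a sketch, not the full hook):

async def lifespan_refresh_sketch():
    # Condensed from the lifespan code shown earlier in this compare.
    await llm_registry.refresh_llm_registry()
    await refresh_llm_costs()
    # Drop cached block schemas so discriminator_mapping is rebuilt from the refreshed registry.
    BlockSchema.clear_all_schema_caches()
    await backend.data.block.initialize_blocks()
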
||||
cached_jsonschema: ClassVar[dict[str, Any]]
|
||||
|
||||
@classmethod
|
||||
def jsonschema(cls) -> dict[str, Any]:
|
||||
|
||||
@@ -7,6 +7,7 @@ from backend.blocks._base import (
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.blocks.llm import (
|
||||
DEFAULT_LLM_MODEL,
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
AIBlockBase,
|
||||
@@ -15,7 +16,6 @@ from backend.blocks.llm import (
|
||||
LlmModel,
|
||||
LLMResponse,
|
||||
llm_call,
|
||||
llm_model_schema_extra,
|
||||
)
|
||||
from backend.data.model import APIKeyCredentials, NodeExecutionStats, SchemaField
|
||||
|
||||
@@ -50,10 +50,9 @@ class AIConditionBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for evaluating the condition.",
|
||||
advanced=False,
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
credentials: AICredentials = AICredentialsField()
|
||||
|
||||
@@ -83,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
|
||||
"condition": "the input is an email address",
|
||||
"yes_value": "Valid email",
|
||||
"no_value": "Not an email",
|
||||
"model": LlmModel.default(),
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
|
||||
@@ -4,17 +4,16 @@ import logging
|
||||
import re
|
||||
import secrets
|
||||
from abc import ABC
|
||||
from enum import Enum
|
||||
from enum import Enum, EnumMeta
|
||||
from json import JSONDecodeError
|
||||
from typing import Any, Iterable, List, Literal, Optional
|
||||
from typing import Any, Iterable, List, Literal, NamedTuple, Optional
|
||||
|
||||
import anthropic
|
||||
import ollama
|
||||
import openai
|
||||
from anthropic.types import ToolParam
|
||||
from groq import AsyncGroq
|
||||
from pydantic import BaseModel, GetCoreSchemaHandler, SecretStr
|
||||
from pydantic_core import CoreSchema, core_schema
|
||||
from pydantic import BaseModel, SecretStr
|
||||
|
||||
from backend.blocks._base import (
|
||||
Block,
|
||||
@@ -23,8 +22,6 @@ from backend.blocks._base import (
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.data import llm_registry
|
||||
from backend.data.llm_registry import ModelMetadata
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
@@ -69,123 +66,114 @@ TEST_CREDENTIALS_INPUT = {
|
||||
|
||||
|
||||
def AICredentialsField() -> AICredentials:
|
||||
"""
|
||||
Returns a CredentialsField for LLM providers.
|
||||
The discriminator_mapping will be refreshed when the schema is generated
|
||||
if it's empty, ensuring the LLM registry is loaded.
|
||||
"""
|
||||
# Get the mapping now - it may be empty initially, but will be refreshed
|
||||
# when the schema is generated via CredentialsMetaInput._add_json_schema_extra
|
||||
mapping = llm_registry.get_llm_discriminator_mapping()
|
||||
|
||||
return CredentialsField(
|
||||
description="API key for the LLM provider.",
|
||||
discriminator="model",
|
||||
discriminator_mapping=mapping, # May be empty initially, refreshed later
|
||||
discriminator_mapping={
|
||||
model.value: model.metadata.provider for model in LlmModel
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def llm_model_schema_extra() -> dict[str, Any]:
|
||||
return {"options": llm_registry.get_llm_model_schema_options()}
|
||||
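To make the discriminator_mapping above concrete: it maps each model slug to its provider so credential selection can follow the chosen model. An illustrative excerpt (slug/provider pairs taken from the metadata later in this file):

example_mapping = {
    "gpt-4o": "openai",
    "claude-3-haiku-20240307": "anthropic",
    "llama-3.1-8b-instant": "groq",
}
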
class ModelMetadata(NamedTuple):
|
||||
provider: str
|
||||
context_window: int
|
||||
max_output_tokens: int | None
|
||||
display_name: str
|
||||
provider_name: str
|
||||
creator_name: str
|
||||
price_tier: Literal[1, 2, 3]
|
||||
|
||||
|
||||
class LlmModelMeta(type):
|
||||
"""
|
||||
Metaclass for LlmModel that enables attribute-style access to dynamic models.
|
||||
|
||||
This allows code like `LlmModel.GPT4O` to work by converting the attribute
|
||||
name to a slug format:
|
||||
- GPT4O -> gpt-4o
|
||||
- GPT4O_MINI -> gpt-4o-mini
|
||||
- CLAUDE_3_5_SONNET -> claude-3-5-sonnet
|
||||
"""
|
||||
|
||||
def __getattr__(cls, name: str):
|
||||
# Don't intercept private/dunder attributes
|
||||
if name.startswith("_"):
|
||||
raise AttributeError(f"type object 'LlmModel' has no attribute '{name}'")
|
||||
|
||||
# Convert attribute name to slug format:
|
||||
# 1. Lowercase: GPT4O -> gpt4o
|
||||
# 2. Underscores to hyphens: GPT4O_MINI -> gpt4o-mini
|
||||
slug = name.lower().replace("_", "-")
|
||||
|
||||
# Check for exact match in registry first (e.g., "o1" stays "o1")
|
||||
registry_slugs = llm_registry.get_dynamic_model_slugs()
|
||||
if slug in registry_slugs:
|
||||
return cls(slug)
|
||||
|
||||
# If no exact match, try inserting hyphen between letter and digit
|
||||
# e.g., gpt4o -> gpt-4o
|
||||
transformed_slug = re.sub(r"([a-z])(\d)", r"\1-\2", slug)
|
||||
return cls(transformed_slug)
|
||||
|
||||
def __iter__(cls):
|
||||
"""Iterate over all models from the registry.
|
||||
|
||||
Yields LlmModel instances for each model in the dynamic registry.
|
||||
Used by __get_pydantic_json_schema__ to build model metadata.
|
||||
"""
|
||||
for model in llm_registry.iter_dynamic_models():
|
||||
yield cls(model.slug)
|
||||
class LlmModelMeta(EnumMeta):
|
||||
pass
|
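A quick illustration of the attribute-to-slug behaviour documented in the registry-based LlmModelMeta above; a sketch of the expected equivalences, not a test:

# Lowercase, underscores -> hyphens, then a hyphen between letter and digit when
# there is no exact registry match:
assert LlmModel.GPT4O == LlmModel("gpt-4o")
assert LlmModel.GPT4O_MINI == LlmModel("gpt-4o-mini")
assert LlmModel.O1 == LlmModel("o1")  # exact slug, no hyphen inserted
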
||||
|
||||
|
||||
class LlmModel(str, metaclass=LlmModelMeta):
|
||||
"""
|
||||
Dynamic LLM model type that accepts any model slug from the registry.
|
||||
|
||||
This is a string subclass (not an Enum) that allows any model slug value.
|
||||
All models are managed via the LLM Registry in the database.
|
||||
|
||||
Usage:
|
||||
model = LlmModel("gpt-4o") # Direct construction
|
||||
model = LlmModel.GPT4O # Attribute access (converted to "gpt-4o")
|
||||
model.value # Returns the slug string
|
||||
model.provider # Returns the provider from registry
|
||||
"""
|
||||
|
||||
def __new__(cls, value: str):
|
||||
if isinstance(value, LlmModel):
|
||||
return value
|
||||
return str.__new__(cls, value)
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, source_type: Any, handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
"""
|
||||
Tell Pydantic how to validate LlmModel.
|
||||
|
||||
Accepts strings and converts them to LlmModel instances.
|
||||
"""
|
||||
return core_schema.no_info_after_validator_function(
|
||||
cls, # The validator function (LlmModel constructor)
|
||||
core_schema.str_schema(), # Accept string input
|
||||
serialization=core_schema.to_string_ser_schema(), # Serialize as string
|
||||
)
|
||||
|
||||
@property
|
||||
def value(self) -> str:
|
||||
"""Return the model slug (for compatibility with enum-style access)."""
|
||||
return str(self)
|
||||
|
||||
@classmethod
|
||||
def default(cls) -> "LlmModel":
|
||||
"""
|
||||
Get the default model from the registry.
|
||||
|
||||
Returns the recommended model if set, otherwise gpt-4o if available
|
||||
and enabled, otherwise the first enabled model from the registry.
|
||||
Falls back to "gpt-4o" if registry is empty (e.g., at module import time).
|
||||
"""
|
||||
from backend.data.llm_registry import get_default_model_slug
|
||||
|
||||
slug = get_default_model_slug()
|
||||
if slug is None:
|
||||
# Registry is empty (e.g., at module import time before DB connection).
|
||||
# Fall back to gpt-4o for backward compatibility.
|
||||
slug = "gpt-4o"
|
||||
return cls(slug)
|
||||
class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
# OpenAI models
|
||||
O3_MINI = "o3-mini"
|
||||
O3 = "o3-2025-04-16"
|
||||
O1 = "o1"
|
||||
O1_MINI = "o1-mini"
|
||||
# GPT-5 models
|
||||
GPT5_2 = "gpt-5.2-2025-12-11"
|
||||
GPT5_1 = "gpt-5.1-2025-11-13"
|
||||
GPT5 = "gpt-5-2025-08-07"
|
||||
GPT5_MINI = "gpt-5-mini-2025-08-07"
|
||||
GPT5_NANO = "gpt-5-nano-2025-08-07"
|
||||
GPT5_CHAT = "gpt-5-chat-latest"
|
||||
GPT41 = "gpt-4.1-2025-04-14"
|
||||
GPT41_MINI = "gpt-4.1-mini-2025-04-14"
|
||||
GPT4O_MINI = "gpt-4o-mini"
|
||||
GPT4O = "gpt-4o"
|
||||
GPT4_TURBO = "gpt-4-turbo"
|
||||
GPT3_5_TURBO = "gpt-3.5-turbo"
|
||||
# Anthropic models
|
||||
CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
|
||||
CLAUDE_4_OPUS = "claude-opus-4-20250514"
|
||||
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
|
||||
CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
|
||||
CLAUDE_4_6_OPUS = "claude-opus-4-6"
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
# AI/ML API models
|
||||
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
|
||||
AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
|
||||
AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
|
||||
AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
|
||||
AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
|
||||
# Groq models
|
||||
LLAMA3_3_70B = "llama-3.3-70b-versatile"
|
||||
LLAMA3_1_8B = "llama-3.1-8b-instant"
|
||||
# Ollama models
|
||||
OLLAMA_LLAMA3_3 = "llama3.3"
|
||||
OLLAMA_LLAMA3_2 = "llama3.2"
|
||||
OLLAMA_LLAMA3_8B = "llama3"
|
||||
OLLAMA_LLAMA3_405B = "llama3.1:405b"
|
||||
OLLAMA_DOLPHIN = "dolphin-mistral:latest"
|
||||
# OpenRouter models
|
||||
OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
|
||||
OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
|
||||
GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
|
||||
GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
|
||||
GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
|
||||
GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
|
||||
GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
|
||||
GEMINI_2_0_FLASH_LITE = "google/gemini-2.0-flash-lite-001"
|
||||
MISTRAL_NEMO = "mistralai/mistral-nemo"
|
||||
COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
|
||||
COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
|
||||
DEEPSEEK_CHAT = "deepseek/deepseek-chat" # Actually: DeepSeek V3
|
||||
DEEPSEEK_R1_0528 = "deepseek/deepseek-r1-0528"
|
||||
PERPLEXITY_SONAR = "perplexity/sonar"
|
||||
PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
|
||||
PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
|
||||
NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
|
||||
NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
|
||||
AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1"
|
||||
AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
|
||||
AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
|
||||
MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
|
||||
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
|
||||
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
|
||||
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
|
||||
GROK_4 = "x-ai/grok-4"
|
||||
GROK_4_FAST = "x-ai/grok-4-fast"
|
||||
GROK_4_1_FAST = "x-ai/grok-4.1-fast"
|
||||
GROK_CODE_FAST_1 = "x-ai/grok-code-fast-1"
|
||||
KIMI_K2 = "moonshotai/kimi-k2"
|
||||
QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
|
||||
QWEN3_CODER = "qwen/qwen3-coder"
|
||||
# Llama API models
|
||||
LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
|
||||
LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
|
||||
LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
|
||||
LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
|
||||
# v0 by Vercel models
|
||||
V0_1_5_MD = "v0-1.5-md"
|
||||
V0_1_5_LG = "v0-1.5-lg"
|
||||
V0_1_0_MD = "v0-1.0-md"
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_json_schema__(cls, schema, handler):
|
||||
@@ -193,15 +181,7 @@ class LlmModel(str, metaclass=LlmModelMeta):
|
||||
llm_model_metadata = {}
|
||||
for model in cls:
|
||||
model_name = model.value
|
||||
# Skip disabled models - only show enabled models in the picker
|
||||
if not llm_registry.is_model_enabled(model_name):
|
||||
continue
|
||||
# Use registry directly with None check to gracefully handle
|
||||
# missing metadata during startup/import before registry is populated
|
||||
metadata = llm_registry.get_llm_model_metadata(model_name)
|
||||
if metadata is None:
|
||||
# Skip models without metadata (registry not yet populated)
|
||||
continue
|
||||
metadata = model.metadata
|
||||
llm_model_metadata[model_name] = {
|
||||
"creator": metadata.creator_name,
|
||||
"creator_name": metadata.creator_name,
|
||||
@@ -217,12 +197,7 @@ class LlmModel(str, metaclass=LlmModelMeta):
|
||||
|
||||
@property
|
||||
def metadata(self) -> ModelMetadata:
|
||||
metadata = llm_registry.get_llm_model_metadata(self.value)
|
||||
if metadata:
|
||||
return metadata
|
||||
raise ValueError(
|
||||
f"Missing metadata for model: {self.value}. Model not found in LLM registry."
|
||||
)
|
||||
return MODEL_METADATA[self]
|
||||
|
||||
@property
|
||||
def provider(self) -> str:
|
||||
@@ -237,9 +212,300 @@ class LlmModel(str, metaclass=LlmModelMeta):
|
||||
return self.metadata.max_output_tokens
|
||||
|
||||
|
||||
# Default model constant for backward compatibility
|
||||
# Uses the dynamic registry to get the default model
|
||||
DEFAULT_LLM_MODEL = LlmModel.default()
|
||||
MODEL_METADATA = {
|
||||
# https://platform.openai.com/docs/models
|
||||
LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
|
||||
LlmModel.O3_MINI: ModelMetadata(
|
||||
"openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
|
||||
), # o3-mini-2025-01-31
|
||||
LlmModel.O1: ModelMetadata(
|
||||
"openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
|
||||
), # o1-2024-12-17
|
||||
LlmModel.O1_MINI: ModelMetadata(
|
||||
"openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
|
||||
), # o1-mini-2024-09-12
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5_2: ModelMetadata(
|
||||
"openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
|
||||
),
|
||||
LlmModel.GPT5_1: ModelMetadata(
|
||||
"openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
|
||||
),
|
||||
LlmModel.GPT5: ModelMetadata(
|
||||
"openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
|
||||
),
|
||||
LlmModel.GPT5_MINI: ModelMetadata(
|
||||
"openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
|
||||
),
|
||||
LlmModel.GPT5_NANO: ModelMetadata(
|
||||
"openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
|
||||
),
|
||||
LlmModel.GPT5_CHAT: ModelMetadata(
|
||||
"openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
|
||||
),
|
||||
LlmModel.GPT41: ModelMetadata(
|
||||
"openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
|
||||
),
|
||||
LlmModel.GPT41_MINI: ModelMetadata(
|
||||
"openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
|
||||
),
|
||||
LlmModel.GPT4O_MINI: ModelMetadata(
|
||||
"openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1
|
||||
), # gpt-4o-mini-2024-07-18
|
||||
LlmModel.GPT4O: ModelMetadata(
|
||||
"openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
|
||||
), # gpt-4o-2024-08-06
|
||||
LlmModel.GPT4_TURBO: ModelMetadata(
|
||||
"openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
|
||||
), # gpt-4-turbo-2024-04-09
|
||||
LlmModel.GPT3_5_TURBO: ModelMetadata(
|
||||
"openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
|
||||
), # gpt-3.5-turbo-0125
|
||||
# https://docs.anthropic.com/en/docs/about-claude/models
|
||||
LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
|
||||
), # claude-opus-4-1-20250805
|
||||
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3
|
||||
), # claude-4-opus-20250514
|
||||
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
|
||||
), # claude-4-sonnet-20250514
|
||||
LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
|
||||
), # claude-opus-4-6
|
||||
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
|
||||
), # claude-opus-4-5-20251101
|
||||
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3
|
||||
), # claude-sonnet-4-5-20250929
|
||||
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
|
||||
), # claude-haiku-4-5-20251001
|
||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
|
||||
), # claude-3-haiku-20240307
|
||||
# https://docs.aimlapi.com/api-overview/model-database/text-models
|
||||
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata(
|
||||
"aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
|
||||
),
|
||||
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
|
||||
"aiml_api",
|
||||
128000,
|
||||
40000,
|
||||
"Llama 3.1 Nemotron 70B Instruct",
|
||||
"AI/ML",
|
||||
"Nvidia",
|
||||
1,
|
||||
),
|
||||
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
|
||||
"aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
|
||||
),
|
||||
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
|
||||
"aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
|
||||
),
|
||||
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
|
||||
"aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
|
||||
),
|
||||
# https://console.groq.com/docs/models
|
||||
LlmModel.LLAMA3_3_70B: ModelMetadata(
|
||||
"groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
|
||||
),
|
||||
LlmModel.LLAMA3_1_8B: ModelMetadata(
|
||||
"groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
|
||||
),
|
||||
# https://ollama.com/library
|
||||
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
|
||||
"ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
|
||||
),
|
||||
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
|
||||
"ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
|
||||
),
|
||||
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
|
||||
"ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
|
||||
),
|
||||
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
|
||||
"ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
|
||||
),
|
||||
LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
|
||||
"ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
|
||||
),
|
||||
# https://openrouter.ai/models
|
||||
LlmModel.GEMINI_2_5_PRO: ModelMetadata(
|
||||
"open_router",
|
||||
1050000,
|
||||
8192,
|
||||
"Gemini 2.5 Pro Preview 03.25",
|
||||
"OpenRouter",
|
||||
"Google",
|
||||
2,
|
||||
),
|
||||
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
|
||||
"open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
|
||||
),
|
||||
LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
|
||||
"open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
|
||||
),
|
||||
LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
|
||||
"open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
|
||||
),
|
||||
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
|
||||
"open_router",
|
||||
1048576,
|
||||
65535,
|
||||
"Gemini 2.5 Flash Lite Preview 06.17",
|
||||
"OpenRouter",
|
||||
"Google",
|
||||
1,
|
||||
),
|
||||
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
|
||||
"open_router",
|
||||
1048576,
|
||||
8192,
|
||||
"Gemini 2.0 Flash Lite 001",
|
||||
"OpenRouter",
|
||||
"Google",
|
||||
1,
|
||||
),
|
||||
LlmModel.MISTRAL_NEMO: ModelMetadata(
|
||||
"open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
|
||||
"open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
|
||||
"open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
|
||||
),
|
||||
LlmModel.DEEPSEEK_CHAT: ModelMetadata(
|
||||
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
|
||||
),
|
||||
LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
|
||||
"open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
|
||||
),
|
||||
LlmModel.PERPLEXITY_SONAR: ModelMetadata(
|
||||
"open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
|
||||
),
|
||||
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
|
||||
"open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
|
||||
),
|
||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
16000,
|
||||
"Sonar Deep Research",
|
||||
"OpenRouter",
|
||||
"Perplexity",
|
||||
3,
|
||||
),
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
|
||||
"open_router",
|
||||
131000,
|
||||
4096,
|
||||
"Hermes 3 Llama 3.1 405B",
|
||||
"OpenRouter",
|
||||
"Nous Research",
|
||||
1,
|
||||
),
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
|
||||
"open_router",
|
||||
12288,
|
||||
12288,
|
||||
"Hermes 3 Llama 3.1 70B",
|
||||
"OpenRouter",
|
||||
"Nous Research",
|
||||
1,
|
||||
),
|
||||
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
|
||||
"open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
|
||||
),
|
||||
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
|
||||
"open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
|
||||
),
|
||||
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
|
||||
"open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
|
||||
),
|
||||
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
|
||||
"open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
|
||||
),
|
||||
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
|
||||
"open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
|
||||
),
|
||||
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
|
||||
"open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
|
||||
),
|
||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
|
||||
"open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
|
||||
),
|
||||
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
|
||||
"open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
|
||||
),
|
||||
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
|
||||
"open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
|
||||
),
|
||||
LlmModel.GROK_4: ModelMetadata(
|
||||
"open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
|
||||
),
|
||||
LlmModel.GROK_4_FAST: ModelMetadata(
|
||||
"open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
|
||||
),
|
||||
LlmModel.GROK_4_1_FAST: ModelMetadata(
|
||||
"open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
|
||||
),
|
||||
LlmModel.GROK_CODE_FAST_1: ModelMetadata(
|
||||
"open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
|
||||
),
|
||||
LlmModel.KIMI_K2: ModelMetadata(
|
||||
"open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
|
||||
),
|
||||
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
|
||||
"open_router",
|
||||
262144,
|
||||
262144,
|
||||
"Qwen 3 235B A22B Thinking 2507",
|
||||
"OpenRouter",
|
||||
"Qwen",
|
||||
1,
|
||||
),
|
||||
LlmModel.QWEN3_CODER: ModelMetadata(
|
||||
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
|
||||
),
|
||||
# Llama API models
|
||||
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
|
||||
"llama_api",
|
||||
128000,
|
||||
4028,
|
||||
"Llama 4 Scout 17B 16E Instruct FP8",
|
||||
"Llama API",
|
||||
"Meta",
|
||||
1,
|
||||
),
|
||||
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
|
||||
"llama_api",
|
||||
128000,
|
||||
4028,
|
||||
"Llama 4 Maverick 17B 128E Instruct FP8",
|
||||
"Llama API",
|
||||
"Meta",
|
||||
1,
|
||||
),
|
||||
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
|
||||
"llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
|
||||
),
|
||||
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
|
||||
"llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
|
||||
),
|
||||
# v0 by Vercel models
|
||||
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
|
||||
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
|
||||
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
|
||||
}
|
||||
|
||||
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
|
||||
|
||||
for model in LlmModel:
|
||||
if model not in MODEL_METADATA:
|
||||
raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
|
||||
|
||||
|
||||
class ToolCall(BaseModel):
|
||||
@@ -332,11 +598,8 @@ def get_parallel_tool_calls_param(
|
||||
llm_model: LlmModel, parallel_tool_calls: bool | None
|
||||
) -> bool | openai.Omit:
|
||||
"""Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
|
||||
# Check for o-series models (o1, o1-mini, o3-mini, etc.) which don't support
|
||||
# parallel tool calls. Use regex to avoid false positives like "openai/gpt-oss".
|
||||
is_o_series = re.match(r"^o\d", llm_model) is not None
|
||||
if is_o_series or parallel_tool_calls is None:
|
||||
return openai.NOT_GIVEN
|
||||
if llm_model.startswith("o") or parallel_tool_calls is None:
|
||||
return openai.omit
|
||||
return parallel_tool_calls
|
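The regex guard and the plain startswith check differ exactly on slugs such as "openai/gpt-oss-120b"; a small comparison sketch:

import re

def is_o_series(slug: str) -> bool:
    # Matches o1, o1-mini, o3-mini, o3-2025-04-16, ... but not "openai/gpt-oss-120b".
    return re.match(r"^o\d", slug) is not None

assert is_o_series("o1-mini")
assert is_o_series("o3-2025-04-16")
assert not is_o_series("openai/gpt-oss-120b")  # startswith("o") would wrongly flag this
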
||||
|
||||
|
||||
@@ -371,93 +634,15 @@ async def llm_call(
|
||||
- prompt_tokens: The number of tokens used in the prompt.
|
||||
- completion_tokens: The number of tokens used in the completion.
|
||||
"""
|
||||
# Get model metadata and check if enabled - with fallback support
|
||||
# The model we'll actually use (may differ if original is disabled)
|
||||
model_to_use = llm_model.value
|
||||
|
||||
# Check if model is in registry and if it's enabled
|
||||
from backend.data.llm_registry import (
|
||||
get_fallback_model_for_disabled,
|
||||
get_model_info,
|
||||
)
|
||||
|
||||
model_info = get_model_info(llm_model.value)
|
||||
|
||||
if model_info and not model_info.is_enabled:
|
||||
# Model is disabled - try to find a fallback from the same provider
|
||||
fallback = get_fallback_model_for_disabled(llm_model.value)
|
||||
if fallback:
|
||||
logger.warning(
|
||||
f"Model '{llm_model.value}' is disabled. Using fallback model '{fallback.slug}' from the same provider ({fallback.metadata.provider})."
|
||||
)
|
||||
model_to_use = fallback.slug
|
||||
# Use fallback model's metadata
|
||||
provider = fallback.metadata.provider
|
||||
context_window = fallback.metadata.context_window
|
||||
model_max_output = fallback.metadata.max_output_tokens or int(2**15)
|
||||
else:
|
||||
# No fallback available - raise error
|
||||
raise ValueError(
|
||||
f"LLM model '{llm_model.value}' is disabled and no fallback model "
|
||||
f"from the same provider is available. Please enable the model or "
|
||||
f"select a different model in the block configuration."
|
||||
)
|
||||
else:
|
||||
# Model is enabled or not in registry (legacy/static model)
|
||||
try:
|
||||
provider = llm_model.metadata.provider
|
||||
context_window = llm_model.context_window
|
||||
model_max_output = llm_model.max_output_tokens or int(2**15)
|
||||
except ValueError:
|
||||
# Model not in cache - try refreshing the registry once if we have DB access
|
||||
logger.warning(f"Model {llm_model.value} not found in registry cache")
|
||||
|
||||
# Try refreshing the registry if we have database access
|
||||
from backend.data.db import is_connected
|
||||
|
||||
if is_connected():
|
||||
try:
|
||||
logger.info(
|
||||
f"Refreshing LLM registry and retrying lookup for {llm_model.value}"
|
||||
)
|
||||
await llm_registry.refresh_llm_registry()
|
||||
# Try again after refresh
|
||||
try:
|
||||
provider = llm_model.metadata.provider
|
||||
context_window = llm_model.context_window
|
||||
model_max_output = llm_model.max_output_tokens or int(2**15)
|
||||
logger.info(
|
||||
f"Successfully loaded model {llm_model.value} metadata after registry refresh"
|
||||
)
|
||||
except ValueError:
|
||||
# Still not found after refresh
|
||||
raise ValueError(
|
||||
f"LLM model '{llm_model.value}' not found in registry after refresh. "
|
||||
"Please ensure the model is added and enabled in the LLM registry via the admin UI."
|
||||
)
|
||||
except Exception as refresh_exc:
|
||||
logger.error(f"Failed to refresh LLM registry: {refresh_exc}")
|
||||
raise ValueError(
|
||||
f"LLM model '{llm_model.value}' not found in registry and failed to refresh. "
|
||||
"Please ensure the model is added to the LLM registry via the admin UI."
|
||||
) from refresh_exc
|
||||
else:
|
||||
# No DB access (e.g., in executor without direct DB connection)
|
||||
# The registry should have been loaded on startup
|
||||
raise ValueError(
|
||||
f"LLM model '{llm_model.value}' not found in registry cache. "
|
||||
"The registry may need to be refreshed. Please contact support or try again later."
|
||||
)
|
||||
|
||||
# Create effective model for model-specific parameter resolution (e.g., o-series check)
|
||||
# This uses the resolved model_to_use which may differ from llm_model if fallback occurred
|
||||
effective_model = LlmModel(model_to_use)
|
||||
provider = llm_model.metadata.provider
|
||||
context_window = llm_model.context_window
|
||||
|
||||
if compress_prompt_to_fit:
|
||||
result = await compress_context(
|
||||
messages=prompt,
|
||||
target_tokens=context_window // 2,
|
||||
target_tokens=llm_model.context_window // 2,
|
||||
client=None, # Truncation-only, no LLM summarization
|
||||
reserve=0, # Caller handles response token budget separately
|
||||
)
|
||||
if result.error:
|
||||
logger.warning(
|
||||
@@ -468,7 +653,7 @@ async def llm_call(
|
||||
|
||||
# Calculate available tokens based on context window and input length
|
||||
estimated_input_tokens = estimate_token_count(prompt)
|
||||
# model_max_output already set above
|
||||
model_max_output = llm_model.max_output_tokens or int(2**15)
|
||||
user_max = max_tokens or model_max_output
|
||||
available_tokens = max(context_window - estimated_input_tokens, 0)
|
||||
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
|
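As a worked example of the budget arithmetic above (numbers are hypothetical): with a 128000-token context window, 120000 estimated input tokens, a 16384-token model output cap and a caller limit of 4096:

available_tokens = max(128000 - 120000, 0)    # 8000
max_tokens = max(min(8000, 16384, 4096), 1)   # 4096 -> the caller limit wins
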
||||
@@ -479,14 +664,14 @@ async def llm_call(
|
||||
response_format = None
|
||||
|
||||
parallel_tool_calls = get_parallel_tool_calls_param(
|
||||
effective_model, parallel_tool_calls
|
||||
llm_model, parallel_tool_calls
|
||||
)
|
||||
|
||||
if force_json_output:
|
||||
response_format = {"type": "json_object"}
|
||||
|
||||
response = await oai_client.chat.completions.create(
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
max_completion_tokens=max_tokens,
|
||||
@@ -533,7 +718,7 @@ async def llm_call(
|
||||
)
|
||||
try:
|
||||
resp = await client.messages.create(
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
system=sysprompt,
|
||||
messages=messages,
|
||||
max_tokens=max_tokens,
|
||||
@@ -597,7 +782,7 @@ async def llm_call(
|
||||
client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
|
||||
response_format = {"type": "json_object"} if force_json_output else None
|
||||
response = await client.chat.completions.create(
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
@@ -619,7 +804,7 @@ async def llm_call(
|
||||
sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
|
||||
usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
|
||||
response = await client.generate(
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
prompt=f"{sys_messages}\n\n{usr_messages}",
|
||||
stream=False,
|
||||
options={"num_ctx": max_tokens},
|
||||
@@ -641,7 +826,7 @@ async def llm_call(
|
||||
)
|
||||
|
||||
parallel_tool_calls_param = get_parallel_tool_calls_param(
|
||||
effective_model, parallel_tool_calls
|
||||
llm_model, parallel_tool_calls
|
||||
)
|
||||
|
||||
response = await client.chat.completions.create(
|
||||
@@ -649,7 +834,7 @@ async def llm_call(
|
||||
"HTTP-Referer": "https://agpt.co",
|
||||
"X-Title": "AutoGPT",
|
||||
},
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
tools=tools_param, # type: ignore
|
||||
@@ -683,7 +868,7 @@ async def llm_call(
|
||||
)
|
||||
|
||||
parallel_tool_calls_param = get_parallel_tool_calls_param(
|
||||
effective_model, parallel_tool_calls
|
||||
llm_model, parallel_tool_calls
|
||||
)
|
||||
|
||||
response = await client.chat.completions.create(
|
||||
@@ -691,7 +876,7 @@ async def llm_call(
|
||||
"HTTP-Referer": "https://agpt.co",
|
||||
"X-Title": "AutoGPT",
|
||||
},
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
tools=tools_param, # type: ignore
|
||||
@@ -718,7 +903,7 @@ async def llm_call(
|
||||
reasoning=reasoning,
|
||||
)
|
||||
elif provider == "aiml_api":
|
||||
client = openai.AsyncOpenAI(
|
||||
client = openai.OpenAI(
|
||||
base_url="https://api.aimlapi.com/v2",
|
||||
api_key=credentials.api_key.get_secret_value(),
|
||||
default_headers={
|
||||
@@ -728,8 +913,8 @@ async def llm_call(
|
||||
},
|
||||
)
|
||||
|
||||
completion = await client.chat.completions.create(
|
||||
model=model_to_use,
|
||||
completion = client.chat.completions.create(
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
)
|
||||
@@ -757,11 +942,11 @@ async def llm_call(
|
||||
response_format = {"type": "json_object"}
|
||||
|
||||
parallel_tool_calls_param = get_parallel_tool_calls_param(
|
||||
effective_model, parallel_tool_calls
|
||||
llm_model, parallel_tool_calls
|
||||
)
|
||||
|
||||
response = await client.chat.completions.create(
|
||||
model=model_to_use,
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
max_tokens=max_tokens,
|
||||
@@ -812,10 +997,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
force_json_output: bool = SchemaField(
|
||||
title="Restrict LLM to pure JSON output",
|
||||
@@ -878,7 +1062,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
input_schema=AIStructuredResponseGeneratorBlock.Input,
|
||||
output_schema=AIStructuredResponseGeneratorBlock.Output,
|
||||
test_input={
|
||||
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"expected_format": {
|
||||
"key1": "value1",
|
||||
@@ -1244,10 +1428,9 @@ class AITextGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
credentials: AICredentials = AICredentialsField()
|
||||
sys_prompt: str = SchemaField(
|
||||
@@ -1341,9 +1524,8 @@ class AITextSummarizerBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for summarizing the text.",
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
focus: str = SchemaField(
|
||||
title="Focus",
|
||||
@@ -1559,9 +1741,8 @@ class AIConversationBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for the conversation.",
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
credentials: AICredentials = AICredentialsField()
|
||||
max_tokens: int | None = SchemaField(
|
||||
@@ -1598,7 +1779,7 @@ class AIConversationBlock(AIBlockBase):
|
||||
},
|
||||
{"role": "user", "content": "Where was it played?"},
|
||||
],
|
||||
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
@@ -1661,10 +1842,9 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=LlmModel.default,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for generating the list.",
|
||||
advanced=True,
|
||||
json_schema_extra=llm_model_schema_extra(),
|
||||
)
|
||||
credentials: AICredentials = AICredentialsField()
|
||||
max_retries: int = SchemaField(
|
||||
@@ -1719,7 +1899,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
"drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
|
||||
"fictional worlds."
|
||||
),
|
||||
"model": "gpt-4o", # Using string value - enum accepts any model slug dynamically
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"max_retries": 3,
|
||||
"force_json_output": False,
|
||||
|
||||
@@ -226,10 +226,9 @@ class SmartDecisionMakerBlock(Block):
|
||||
)
|
||||
model: llm.LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default_factory=llm.LlmModel.default,
|
||||
default=llm.DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
json_schema_extra=llm.llm_model_schema_extra(),
|
||||
)
|
||||
credentials: llm.AICredentials = llm.AICredentialsField()
|
||||
multiple_tool_calls: bool = SchemaField(
|
||||
|
||||
@@ -10,13 +10,13 @@ import stagehand.main
from stagehand import Stagehand

from backend.blocks.llm import (
    MODEL_METADATA,
    AICredentials,
    AICredentialsField,
    LlmModel,
    ModelMetadata,
)
from backend.blocks.stagehand._config import stagehand as stagehand_provider
from backend.data import llm_registry
from backend.sdk import (
    APIKeyCredentials,
    Block,
@@ -91,7 +91,7 @@ class StagehandRecommendedLlmModel(str, Enum):
        Returns the provider name for the model in the required format for Stagehand:
        provider/model_name
        """
        model_metadata = self.metadata
        model_metadata = MODEL_METADATA[LlmModel(self.value)]
        model_name = self.value

        if len(model_name.split("/")) == 1 and not self.value.startswith(
@@ -107,23 +107,19 @@ class StagehandRecommendedLlmModel(str, Enum):

    @property
    def provider(self) -> str:
        return self.metadata.provider
        return MODEL_METADATA[LlmModel(self.value)].provider

    @property
    def metadata(self) -> ModelMetadata:
        metadata = llm_registry.get_llm_model_metadata(self.value)
        if metadata:
            return metadata
        # Fallback to LlmModel enum if registry lookup fails
        return LlmModel(self.value).metadata
        return MODEL_METADATA[LlmModel(self.value)]

    @property
    def context_window(self) -> int:
        return self.metadata.context_window
        return MODEL_METADATA[LlmModel(self.value)].context_window

    @property
    def max_output_tokens(self) -> int | None:
        return self.metadata.max_output_tokens
        return MODEL_METADATA[LlmModel(self.value)].max_output_tokens


class StagehandObserveBlock(Block):
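# Illustrative sketch (not from the original diff): the "provider/model_name" rule
# described in the docstring above, reduced to a standalone helper. This is a
# simplified stand-in, not the Stagehand API itself; the provider string is assumed
# to come from MODEL_METADATA-style metadata.
def to_stagehand_model_name(provider: str, model_name: str) -> str:
    # Already-namespaced slugs (e.g. "openai/gpt-4o") are returned untouched.
    if "/" in model_name:
        return model_name
    return f"{provider}/{model_name}"

assert to_stagehand_model_name("openai", "gpt-4o") == "openai/gpt-4o"
assert to_stagehand_model_name("anthropic", "anthropic/claude-3-haiku") == "anthropic/claude-3-haiku"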
@@ -19,30 +19,6 @@ CompletedBlockOutput = dict[str, list[Any]]  # Completed stream, collected as a


async def initialize_blocks() -> None:
    # Refresh LLM registry before initializing blocks so blocks can use registry data
    # This ensures the registry cache is populated even in executor context
    try:
        from backend.data import llm_registry
        from backend.data.block_cost_config import refresh_llm_costs

        # Only refresh if we have DB access (check if Prisma is connected)
        from backend.data.db import is_connected

        if is_connected():
            await llm_registry.refresh_llm_registry()
            await refresh_llm_costs()
            logger.info("LLM registry refreshed during block initialization")
        else:
            logger.warning(
                "Prisma not connected, skipping LLM registry refresh during block initialization"
            )
    except Exception as exc:
        logger.warning(
            "Failed to refresh LLM registry during block initialization: %s", exc
        )

    # First, sync all provider costs to blocks
    # Imported here to avoid circular import
    from backend.blocks import get_blocks
    from backend.sdk.cost_integration import sync_all_provider_costs
    from backend.util.retry import func_retry
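# Illustrative sketch (not from the original diff): the guard pattern used above,
# reduced to its shape. The refresh is attempted only when the database is reachable,
# and any failure is logged rather than raised so block initialization never
# hard-fails on a registry problem. The callables are placeholders for the imports
# shown above.
async def _refresh_registry_if_possible(is_connected, refresh_registry, refresh_costs, logger) -> None:
    try:
        if not is_connected():
            logger.warning("Prisma not connected, skipping LLM registry refresh")
            return
        await refresh_registry()
        await refresh_costs()
        logger.info("LLM registry refreshed")
    except Exception as exc:  # never block startup on a registry error
        logger.warning("Failed to refresh LLM registry: %s", exc)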
@@ -1,8 +1,5 @@
|
||||
import logging
|
||||
from typing import Type
|
||||
|
||||
import prisma.models
|
||||
|
||||
from backend.blocks._base import Block, BlockCost, BlockCostType
|
||||
from backend.blocks.ai_image_customizer import AIImageCustomizerBlock, GeminiImageModel
|
||||
from backend.blocks.ai_image_generator_block import AIImageGeneratorBlock, ImageGenModel
|
||||
@@ -27,11 +24,13 @@ from backend.blocks.ideogram import IdeogramModelBlock
|
||||
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
|
||||
from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
|
||||
from backend.blocks.llm import (
|
||||
MODEL_METADATA,
|
||||
AIConversationBlock,
|
||||
AIListGeneratorBlock,
|
||||
AIStructuredResponseGeneratorBlock,
|
||||
AITextGeneratorBlock,
|
||||
AITextSummarizerBlock,
|
||||
LlmModel,
|
||||
)
|
||||
from backend.blocks.replicate.flux_advanced import ReplicateFluxAdvancedModelBlock
|
||||
from backend.blocks.replicate.replicate_block import ReplicateModelBlock
|
||||
@@ -39,7 +38,6 @@ from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
|
||||
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
|
||||
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
|
||||
from backend.blocks.video.narration import VideoNarrationBlock
|
||||
from backend.data import llm_registry
|
||||
from backend.integrations.credentials_store import (
|
||||
aiml_api_credentials,
|
||||
anthropic_credentials,
|
||||
@@ -59,112 +57,210 @@ from backend.integrations.credentials_store import (
|
||||
v0_credentials,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# =============== Configure the cost for each LLM Model call =============== #
|
||||
|
||||
PROVIDER_CREDENTIALS = {
|
||||
"openai": openai_credentials,
|
||||
"anthropic": anthropic_credentials,
|
||||
"groq": groq_credentials,
|
||||
"open_router": open_router_credentials,
|
||||
"llama_api": llama_api_credentials,
|
||||
"aiml_api": aiml_api_credentials,
|
||||
"v0": v0_credentials,
|
||||
MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.O3: 4,
|
||||
LlmModel.O3_MINI: 2,
|
||||
LlmModel.O1: 16,
|
||||
LlmModel.O1_MINI: 4,
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5_2: 6,
|
||||
LlmModel.GPT5_1: 5,
|
||||
LlmModel.GPT5: 2,
|
||||
LlmModel.GPT5_MINI: 1,
|
||||
LlmModel.GPT5_NANO: 1,
|
||||
LlmModel.GPT5_CHAT: 5,
|
||||
LlmModel.GPT41: 2,
|
||||
LlmModel.GPT41_MINI: 1,
|
||||
LlmModel.GPT4O_MINI: 1,
|
||||
LlmModel.GPT4O: 3,
|
||||
LlmModel.GPT4_TURBO: 10,
|
||||
LlmModel.GPT3_5_TURBO: 1,
|
||||
LlmModel.CLAUDE_4_1_OPUS: 21,
|
||||
LlmModel.CLAUDE_4_OPUS: 21,
|
||||
LlmModel.CLAUDE_4_SONNET: 5,
|
||||
LlmModel.CLAUDE_4_6_OPUS: 14,
|
||||
LlmModel.CLAUDE_4_5_HAIKU: 4,
|
||||
LlmModel.CLAUDE_4_5_OPUS: 14,
|
||||
LlmModel.CLAUDE_4_5_SONNET: 9,
|
||||
LlmModel.CLAUDE_3_HAIKU: 1,
|
||||
LlmModel.AIML_API_QWEN2_5_72B: 1,
|
||||
LlmModel.AIML_API_LLAMA3_1_70B: 1,
|
||||
LlmModel.AIML_API_LLAMA3_3_70B: 1,
|
||||
LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
|
||||
LlmModel.AIML_API_LLAMA_3_2_3B: 1,
|
||||
LlmModel.LLAMA3_3_70B: 1,
|
||||
LlmModel.LLAMA3_1_8B: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_3: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_2: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_8B: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_405B: 1,
|
||||
LlmModel.OLLAMA_DOLPHIN: 1,
|
||||
LlmModel.OPENAI_GPT_OSS_120B: 1,
|
||||
LlmModel.OPENAI_GPT_OSS_20B: 1,
|
||||
LlmModel.GEMINI_2_5_PRO: 4,
|
||||
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
|
||||
LlmModel.GEMINI_2_5_FLASH: 1,
|
||||
LlmModel.GEMINI_2_0_FLASH: 1,
|
||||
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
|
||||
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
|
||||
LlmModel.MISTRAL_NEMO: 1,
|
||||
LlmModel.COHERE_COMMAND_R_08_2024: 1,
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
|
||||
LlmModel.DEEPSEEK_CHAT: 2,
|
||||
LlmModel.DEEPSEEK_R1_0528: 1,
|
||||
LlmModel.PERPLEXITY_SONAR: 1,
|
||||
LlmModel.PERPLEXITY_SONAR_PRO: 5,
|
||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
|
||||
LlmModel.AMAZON_NOVA_LITE_V1: 1,
|
||||
LlmModel.AMAZON_NOVA_MICRO_V1: 1,
|
||||
LlmModel.AMAZON_NOVA_PRO_V1: 1,
|
||||
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
|
||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
|
||||
LlmModel.META_LLAMA_4_SCOUT: 1,
|
||||
LlmModel.META_LLAMA_4_MAVERICK: 1,
|
||||
LlmModel.LLAMA_API_LLAMA_4_SCOUT: 1,
|
||||
LlmModel.LLAMA_API_LLAMA4_MAVERICK: 1,
|
||||
LlmModel.LLAMA_API_LLAMA3_3_8B: 1,
|
||||
LlmModel.LLAMA_API_LLAMA3_3_70B: 1,
|
||||
LlmModel.GROK_4: 9,
|
||||
LlmModel.GROK_4_FAST: 1,
|
||||
LlmModel.GROK_4_1_FAST: 1,
|
||||
LlmModel.GROK_CODE_FAST_1: 1,
|
||||
LlmModel.KIMI_K2: 1,
|
||||
LlmModel.QWEN3_235B_A22B_THINKING: 1,
|
||||
LlmModel.QWEN3_CODER: 9,
|
||||
# v0 by Vercel models
|
||||
LlmModel.V0_1_5_MD: 1,
|
||||
LlmModel.V0_1_5_LG: 2,
|
||||
LlmModel.V0_1_0_MD: 1,
|
||||
}
|
||||
|
||||
# =============== Configure the cost for each LLM Model call =============== #
|
||||
# All LLM costs now come from the database via llm_registry
|
||||
|
||||
LLM_COST: list[BlockCost] = []
|
||||
for model in LlmModel:
|
||||
if model not in MODEL_COST:
|
||||
raise ValueError(f"Missing MODEL_COST for model: {model}")
|
||||
|
||||
|
||||
async def _build_llm_costs_from_registry() -> list[BlockCost]:
|
||||
"""
|
||||
Build BlockCost list from all models in the LLM registry.
|
||||
|
||||
This function checks for active model migrations with customCreditCost overrides.
|
||||
When a model has been migrated with a custom price, that price is used instead
|
||||
of the target model's default cost.
|
||||
"""
|
||||
# Query active migrations with custom pricing overrides
|
||||
migration_overrides: dict[str, int] = {}
|
||||
try:
|
||||
active_migrations = await prisma.models.LlmModelMigration.prisma().find_many(
|
||||
where={
|
||||
"isReverted": False,
|
||||
"customCreditCost": {"not": None},
|
||||
}
|
||||
)
|
||||
migration_overrides = {
|
||||
migration.sourceModelSlug: migration.customCreditCost
|
||||
for migration in active_migrations
|
||||
if migration.customCreditCost is not None
|
||||
}
|
||||
if migration_overrides:
|
||||
logger.info(
|
||||
"Found %d active model migrations with custom pricing overrides",
|
||||
len(migration_overrides),
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to query model migration overrides: %s. Proceeding with default costs.",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
costs: list[BlockCost] = []
|
||||
for model in llm_registry.iter_dynamic_models():
|
||||
for cost in model.costs:
|
||||
credentials = PROVIDER_CREDENTIALS.get(cost.credential_provider)
|
||||
if not credentials:
|
||||
logger.warning(
|
||||
"Skipping cost entry for %s due to unknown credentials provider %s",
|
||||
model.slug,
|
||||
cost.credential_provider,
|
||||
)
|
||||
continue
|
||||
|
||||
# Check if this model has a custom cost override from migration
|
||||
cost_amount = migration_overrides.get(model.slug, cost.credit_cost)
|
||||
|
||||
if model.slug in migration_overrides:
|
||||
logger.debug(
|
||||
"Applying custom cost override for model %s: %d credits (default: %d)",
|
||||
model.slug,
|
||||
cost_amount,
|
||||
cost.credit_cost,
|
||||
)
|
||||
|
||||
cost_filter = {
|
||||
"model": model.slug,
|
||||
LLM_COST = (
|
||||
# Anthropic Models
|
||||
[
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": credentials.id,
|
||||
"provider": credentials.provider,
|
||||
"type": credentials.type,
|
||||
"id": anthropic_credentials.id,
|
||||
"provider": anthropic_credentials.provider,
|
||||
"type": anthropic_credentials.type,
|
||||
},
|
||||
}
|
||||
costs.append(
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter=cost_filter,
|
||||
cost_amount=cost_amount,
|
||||
)
|
||||
)
|
||||
return costs
|
||||
|
||||
|
||||
async def refresh_llm_costs() -> None:
|
||||
"""
|
||||
Refresh LLM costs from the registry. All costs now come from the database.
|
||||
|
||||
This function also checks for active model migrations with custom pricing overrides
|
||||
and applies them to ensure accurate billing.
|
||||
"""
|
||||
LLM_COST.clear()
|
||||
LLM_COST.extend(await _build_llm_costs_from_registry())
|
||||
|
||||
|
||||
# Initial load will happen after registry is refreshed at startup
|
||||
# Don't call refresh_llm_costs() here - it will be called after registry refresh
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "anthropic"
|
||||
]
|
||||
# OpenAI Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": openai_credentials.id,
|
||||
"provider": openai_credentials.provider,
|
||||
"type": openai_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "openai"
|
||||
]
|
||||
# Groq Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {"id": groq_credentials.id},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "groq"
|
||||
]
|
||||
# Open Router Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": open_router_credentials.id,
|
||||
"provider": open_router_credentials.provider,
|
||||
"type": open_router_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "open_router"
|
||||
]
|
||||
# Llama API Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": llama_api_credentials.id,
|
||||
"provider": llama_api_credentials.provider,
|
||||
"type": llama_api_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "llama_api"
|
||||
]
|
||||
# v0 by Vercel Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": v0_credentials.id,
|
||||
"provider": v0_credentials.provider,
|
||||
"type": v0_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "v0"
|
||||
]
|
||||
# AI/ML Api Models
|
||||
+ [
|
||||
BlockCost(
|
||||
cost_type=BlockCostType.RUN,
|
||||
cost_filter={
|
||||
"model": model,
|
||||
"credentials": {
|
||||
"id": aiml_api_credentials.id,
|
||||
"provider": aiml_api_credentials.provider,
|
||||
"type": aiml_api_credentials.type,
|
||||
},
|
||||
},
|
||||
cost_amount=cost,
|
||||
)
|
||||
for model, cost in MODEL_COST.items()
|
||||
if MODEL_METADATA[model].provider == "aiml_api"
|
||||
]
|
||||
)
|
||||
|
||||
# =============== This is the exhaustive list of cost for each Block =============== #
|
||||
|
||||
|
||||
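# Illustrative sketch (not from the original diff): how the migration override described
# in _build_llm_costs_from_registry resolves a per-model credit cost. The dictionary
# values are invented for illustration; the real data comes from LlmModelMigration and
# LlmModelCost rows.
migration_overrides = {"gpt-4o": 5}                 # sourceModelSlug -> customCreditCost
default_costs = {"gpt-4o": 3, "gpt-4o-mini": 1}     # model slug -> default creditCost

def effective_cost(slug: str) -> int:
    # A migrated model keeps billing at its override; everything else uses its default.
    return migration_overrides.get(slug, default_costs[slug])

assert effective_cost("gpt-4o") == 5
assert effective_cost("gpt-4o-mini") == 1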
@@ -1625,10 +1625,8 @@ async def migrate_llm_models(migrate_to: LlmModel):
        if field.annotation == LlmModel:
            llm_model_fields[block.id] = field_name

    # Get all model slugs from the registry (dynamic, not hardcoded enum)
    from backend.data import llm_registry

    enum_values = list(llm_registry.get_all_model_slugs_for_validation())
    # Convert enum values to a list of strings for the SQL query
    enum_values = [v.value for v in LlmModel]
    escaped_enum_values = repr(tuple(enum_values))  # hack but works

    # Update each block
@@ -105,6 +105,7 @@ def library_agent_include(
    """
    result: prisma.types.LibraryAgentInclude = {
        "Creator": True,  # Always needed for creator info
        "Folder": True,  # Always needed for folder info
    }

    # Build AgentGraph include based on requested options
@@ -1,72 +0,0 @@
|
||||
"""
|
||||
LLM Registry module for managing LLM models, providers, and costs dynamically.
|
||||
|
||||
This module provides a database-driven registry system for LLM models,
|
||||
replacing hardcoded model configurations with a flexible admin-managed system.
|
||||
"""
|
||||
|
||||
from backend.data.llm_registry.model import ModelMetadata
|
||||
|
||||
# Re-export for backwards compatibility
|
||||
from backend.data.llm_registry.notifications import (
|
||||
REGISTRY_REFRESH_CHANNEL,
|
||||
publish_registry_refresh_notification,
|
||||
subscribe_to_registry_refresh,
|
||||
)
|
||||
from backend.data.llm_registry.registry import (
|
||||
RegistryModel,
|
||||
RegistryModelCost,
|
||||
RegistryModelCreator,
|
||||
get_all_model_slugs_for_validation,
|
||||
get_default_model_slug,
|
||||
get_dynamic_model_slugs,
|
||||
get_fallback_model_for_disabled,
|
||||
get_llm_discriminator_mapping,
|
||||
get_llm_model_cost,
|
||||
get_llm_model_metadata,
|
||||
get_llm_model_schema_options,
|
||||
get_model_info,
|
||||
is_model_enabled,
|
||||
iter_dynamic_models,
|
||||
refresh_llm_registry,
|
||||
register_static_costs,
|
||||
register_static_metadata,
|
||||
)
|
||||
from backend.data.llm_registry.schema_utils import (
|
||||
is_llm_model_field,
|
||||
refresh_llm_discriminator_mapping,
|
||||
refresh_llm_model_options,
|
||||
update_schema_with_llm_registry,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Types
|
||||
"ModelMetadata",
|
||||
"RegistryModel",
|
||||
"RegistryModelCost",
|
||||
"RegistryModelCreator",
|
||||
# Registry functions
|
||||
"get_all_model_slugs_for_validation",
|
||||
"get_default_model_slug",
|
||||
"get_dynamic_model_slugs",
|
||||
"get_fallback_model_for_disabled",
|
||||
"get_llm_discriminator_mapping",
|
||||
"get_llm_model_cost",
|
||||
"get_llm_model_metadata",
|
||||
"get_llm_model_schema_options",
|
||||
"get_model_info",
|
||||
"is_model_enabled",
|
||||
"iter_dynamic_models",
|
||||
"refresh_llm_registry",
|
||||
"register_static_costs",
|
||||
"register_static_metadata",
|
||||
# Notifications
|
||||
"REGISTRY_REFRESH_CHANNEL",
|
||||
"publish_registry_refresh_notification",
|
||||
"subscribe_to_registry_refresh",
|
||||
# Schema utilities
|
||||
"is_llm_model_field",
|
||||
"refresh_llm_discriminator_mapping",
|
||||
"refresh_llm_model_options",
|
||||
"update_schema_with_llm_registry",
|
||||
]
|
||||
@@ -1,25 +0,0 @@
"""Type definitions for LLM model metadata."""

from typing import Literal, NamedTuple


class ModelMetadata(NamedTuple):
    """Metadata for an LLM model.

    Attributes:
        provider: The provider identifier (e.g., "openai", "anthropic")
        context_window: Maximum context window size in tokens
        max_output_tokens: Maximum output tokens (None if unlimited)
        display_name: Human-readable name for the model
        provider_name: Human-readable provider name (e.g., "OpenAI", "Anthropic")
        creator_name: Name of the organization that created the model
        price_tier: Relative cost tier (1=cheapest, 2=medium, 3=expensive)
    """

    provider: str
    context_window: int
    max_output_tokens: int | None
    display_name: str
    provider_name: str
    creator_name: str
    price_tier: Literal[1, 2, 3]
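# Illustrative sketch (not from the original diff): constructing the NamedTuple above.
# Field values are placeholders, not authoritative numbers for any real model.
example = ModelMetadata(
    provider="openai",
    context_window=128_000,
    max_output_tokens=16_384,
    display_name="GPT-4o",
    provider_name="OpenAI",
    creator_name="OpenAI",
    price_tier=2,
)
# NamedTuple fields are accessible by name, e.g. example.context_window == 128_000.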
@@ -1,89 +0,0 @@
|
||||
"""
|
||||
Redis pub/sub notifications for LLM registry updates.
|
||||
|
||||
When models are added/updated/removed via the admin UI, this module
|
||||
publishes notifications to Redis that all executor services subscribe to,
|
||||
ensuring they refresh their registry cache in real-time.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.data.redis_client import connect_async
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Redis channel name for LLM registry refresh notifications
|
||||
REGISTRY_REFRESH_CHANNEL = "llm_registry:refresh"
|
||||
|
||||
|
||||
async def publish_registry_refresh_notification() -> None:
|
||||
"""
|
||||
Publish a notification to Redis that the LLM registry has been updated.
|
||||
All executor services subscribed to this channel will refresh their registry.
|
||||
"""
|
||||
try:
|
||||
redis = await connect_async()
|
||||
await redis.publish(REGISTRY_REFRESH_CHANNEL, "refresh")
|
||||
logger.info("Published LLM registry refresh notification to Redis")
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to publish LLM registry refresh notification: %s",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
async def subscribe_to_registry_refresh(
|
||||
on_refresh: Any, # Async callable that takes no args
|
||||
) -> None:
|
||||
"""
|
||||
Subscribe to Redis notifications for LLM registry updates.
|
||||
This runs in a loop and processes messages as they arrive.
|
||||
|
||||
Args:
|
||||
on_refresh: Async callable to execute when a refresh notification is received
|
||||
"""
|
||||
try:
|
||||
redis = await connect_async()
|
||||
pubsub = redis.pubsub()
|
||||
await pubsub.subscribe(REGISTRY_REFRESH_CHANNEL)
|
||||
logger.info(
|
||||
"Subscribed to LLM registry refresh notifications on channel: %s",
|
||||
REGISTRY_REFRESH_CHANNEL,
|
||||
)
|
||||
|
||||
# Process messages in a loop
|
||||
while True:
|
||||
try:
|
||||
message = await pubsub.get_message(
|
||||
ignore_subscribe_messages=True, timeout=1.0
|
||||
)
|
||||
if (
|
||||
message
|
||||
and message["type"] == "message"
|
||||
and message["channel"] == REGISTRY_REFRESH_CHANNEL
|
||||
):
|
||||
logger.info("Received LLM registry refresh notification")
|
||||
try:
|
||||
await on_refresh()
|
||||
except Exception as exc:
|
||||
logger.error(
|
||||
"Error refreshing LLM registry from notification: %s",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Error processing registry refresh message: %s", exc, exc_info=True
|
||||
)
|
||||
# Continue listening even if one message fails
|
||||
await asyncio.sleep(1)
|
||||
except Exception as exc:
|
||||
logger.error(
|
||||
"Failed to subscribe to LLM registry refresh notifications: %s",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
raise
|
||||
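# Illustrative sketch (not from the original diff): wiring the pub/sub helpers above
# from an executor-side event loop. The callback name is a placeholder; the real one
# lives in backend/executor/llm_registry_init.py.
async def _on_refresh() -> None:
    # Re-read models and costs here (e.g. refresh_llm_registry() + refresh_llm_costs()).
    ...

async def _run_subscriber() -> None:
    # subscribe_to_registry_refresh() loops forever, so it is typically scheduled as a
    # background task rather than awaited inline:
    #   asyncio.create_task(_run_subscriber())
    await subscribe_to_registry_refresh(_on_refresh)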
@@ -1,388 +0,0 @@
|
||||
"""Core LLM registry implementation for managing models dynamically."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Iterable
|
||||
|
||||
import prisma.models
|
||||
|
||||
from backend.data.llm_registry.model import ModelMetadata
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _json_to_dict(value: Any) -> dict[str, Any]:
|
||||
"""Convert Prisma Json type to dict, with fallback to empty dict."""
|
||||
if value is None:
|
||||
return {}
|
||||
if isinstance(value, dict):
|
||||
return value
|
||||
# Prisma Json type should always be a dict at runtime
|
||||
return dict(value) if value else {}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RegistryModelCost:
|
||||
"""Cost configuration for an LLM model."""
|
||||
|
||||
credit_cost: int
|
||||
credential_provider: str
|
||||
credential_id: str | None
|
||||
credential_type: str | None
|
||||
currency: str | None
|
||||
metadata: dict[str, Any]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RegistryModelCreator:
|
||||
"""Creator information for an LLM model."""
|
||||
|
||||
id: str
|
||||
name: str
|
||||
display_name: str
|
||||
description: str | None
|
||||
website_url: str | None
|
||||
logo_url: str | None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RegistryModel:
|
||||
"""Represents a model in the LLM registry."""
|
||||
|
||||
slug: str
|
||||
display_name: str
|
||||
description: str | None
|
||||
metadata: ModelMetadata
|
||||
capabilities: dict[str, Any]
|
||||
extra_metadata: dict[str, Any]
|
||||
provider_display_name: str
|
||||
is_enabled: bool
|
||||
is_recommended: bool = False
|
||||
costs: tuple[RegistryModelCost, ...] = field(default_factory=tuple)
|
||||
creator: RegistryModelCreator | None = None
|
||||
|
||||
|
||||
_static_metadata: dict[str, ModelMetadata] = {}
|
||||
_static_costs: dict[str, int] = {}
|
||||
_dynamic_models: dict[str, RegistryModel] = {}
|
||||
_schema_options: list[dict[str, str]] = []
|
||||
_discriminator_mapping: dict[str, str] = {}
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
|
||||
def register_static_metadata(metadata: dict[Any, ModelMetadata]) -> None:
|
||||
"""Register static metadata for legacy models (deprecated)."""
|
||||
_static_metadata.update({str(key): value for key, value in metadata.items()})
|
||||
_refresh_cached_schema()
|
||||
|
||||
|
||||
def register_static_costs(costs: dict[Any, int]) -> None:
|
||||
"""Register static costs for legacy models (deprecated)."""
|
||||
_static_costs.update({str(key): value for key, value in costs.items()})
|
||||
|
||||
|
||||
def _build_schema_options() -> list[dict[str, str]]:
|
||||
"""Build schema options for model selection dropdown. Only includes enabled models."""
|
||||
options: list[dict[str, str]] = []
|
||||
# Only include enabled models in the dropdown options
|
||||
for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
|
||||
if model.is_enabled:
|
||||
options.append(
|
||||
{
|
||||
"label": model.display_name,
|
||||
"value": model.slug,
|
||||
"group": model.metadata.provider,
|
||||
"description": model.description or "",
|
||||
}
|
||||
)
|
||||
|
||||
for slug, metadata in _static_metadata.items():
|
||||
if slug in _dynamic_models:
|
||||
continue
|
||||
options.append(
|
||||
{
|
||||
"label": slug,
|
||||
"value": slug,
|
||||
"group": metadata.provider,
|
||||
"description": "",
|
||||
}
|
||||
)
|
||||
return options
|
||||
|
||||
|
||||
async def refresh_llm_registry() -> None:
|
||||
"""Refresh the LLM registry from the database. Loads all models (enabled and disabled)."""
|
||||
async with _lock:
|
||||
try:
|
||||
records = await prisma.models.LlmModel.prisma().find_many(
|
||||
include={
|
||||
"Provider": True,
|
||||
"Costs": True,
|
||||
"Creator": True,
|
||||
}
|
||||
)
|
||||
logger.debug("Found %d LLM model records in database", len(records))
|
||||
except Exception as exc:
|
||||
logger.error(
|
||||
"Failed to refresh LLM registry from DB: %s", exc, exc_info=True
|
||||
)
|
||||
return
|
||||
|
||||
dynamic: dict[str, RegistryModel] = {}
|
||||
for record in records:
|
||||
provider_name = (
|
||||
record.Provider.name if record.Provider else record.providerId
|
||||
)
|
||||
provider_display_name = (
|
||||
record.Provider.displayName if record.Provider else record.providerId
|
||||
)
|
||||
# Creator name: prefer Creator.name, fallback to provider display name
|
||||
creator_name = (
|
||||
record.Creator.name if record.Creator else provider_display_name
|
||||
)
|
||||
# Price tier: default to 1 (cheapest) if not set
|
||||
price_tier = getattr(record, "priceTier", 1) or 1
|
||||
# Clamp to valid range 1-3
|
||||
price_tier = max(1, min(3, price_tier))
|
||||
|
||||
metadata = ModelMetadata(
|
||||
provider=provider_name,
|
||||
context_window=record.contextWindow,
|
||||
max_output_tokens=record.maxOutputTokens,
|
||||
display_name=record.displayName,
|
||||
provider_name=provider_display_name,
|
||||
creator_name=creator_name,
|
||||
price_tier=price_tier, # type: ignore[arg-type]
|
||||
)
|
||||
costs = tuple(
|
||||
RegistryModelCost(
|
||||
credit_cost=cost.creditCost,
|
||||
credential_provider=cost.credentialProvider,
|
||||
credential_id=cost.credentialId,
|
||||
credential_type=cost.credentialType,
|
||||
currency=cost.currency,
|
||||
metadata=_json_to_dict(cost.metadata),
|
||||
)
|
||||
for cost in (record.Costs or [])
|
||||
)
|
||||
|
||||
# Map creator if present
|
||||
creator = None
|
||||
if record.Creator:
|
||||
creator = RegistryModelCreator(
|
||||
id=record.Creator.id,
|
||||
name=record.Creator.name,
|
||||
display_name=record.Creator.displayName,
|
||||
description=record.Creator.description,
|
||||
website_url=record.Creator.websiteUrl,
|
||||
logo_url=record.Creator.logoUrl,
|
||||
)
|
||||
|
||||
dynamic[record.slug] = RegistryModel(
|
||||
slug=record.slug,
|
||||
display_name=record.displayName,
|
||||
description=record.description,
|
||||
metadata=metadata,
|
||||
capabilities=_json_to_dict(record.capabilities),
|
||||
extra_metadata=_json_to_dict(record.metadata),
|
||||
provider_display_name=(
|
||||
record.Provider.displayName
|
||||
if record.Provider
|
||||
else record.providerId
|
||||
),
|
||||
is_enabled=record.isEnabled,
|
||||
is_recommended=record.isRecommended,
|
||||
costs=costs,
|
||||
creator=creator,
|
||||
)
|
||||
|
||||
# Atomic swap - build new structures then replace references
|
||||
# This ensures readers never see partially updated state
|
||||
global _dynamic_models
|
||||
_dynamic_models = dynamic
|
||||
_refresh_cached_schema()
|
||||
logger.info(
|
||||
"LLM registry refreshed with %s dynamic models (enabled: %s, disabled: %s)",
|
||||
len(dynamic),
|
||||
sum(1 for m in dynamic.values() if m.is_enabled),
|
||||
sum(1 for m in dynamic.values() if not m.is_enabled),
|
||||
)
|
||||
|
||||
|
||||
def _refresh_cached_schema() -> None:
|
||||
"""Refresh cached schema options and discriminator mapping."""
|
||||
global _schema_options, _discriminator_mapping
|
||||
|
||||
# Build new structures
|
||||
new_options = _build_schema_options()
|
||||
new_mapping = {
|
||||
slug: entry.metadata.provider for slug, entry in _dynamic_models.items()
|
||||
}
|
||||
for slug, metadata in _static_metadata.items():
|
||||
new_mapping.setdefault(slug, metadata.provider)
|
||||
|
||||
# Atomic swap - replace references to ensure readers see consistent state
|
||||
_schema_options = new_options
|
||||
_discriminator_mapping = new_mapping
|
||||
|
||||
|
||||
def get_llm_model_metadata(slug: str) -> ModelMetadata | None:
|
||||
"""Get model metadata by slug. Checks dynamic models first, then static metadata."""
|
||||
if slug in _dynamic_models:
|
||||
return _dynamic_models[slug].metadata
|
||||
return _static_metadata.get(slug)
|
||||
|
||||
|
||||
def get_llm_model_cost(slug: str) -> tuple[RegistryModelCost, ...]:
|
||||
"""Get model cost configuration by slug."""
|
||||
if slug in _dynamic_models:
|
||||
return _dynamic_models[slug].costs
|
||||
cost_value = _static_costs.get(slug)
|
||||
if cost_value is None:
|
||||
return tuple()
|
||||
return (
|
||||
RegistryModelCost(
|
||||
credit_cost=cost_value,
|
||||
credential_provider="static",
|
||||
credential_id=None,
|
||||
credential_type=None,
|
||||
currency=None,
|
||||
metadata={},
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def get_llm_model_schema_options() -> list[dict[str, str]]:
|
||||
"""
|
||||
Get schema options for LLM model selection dropdown.
|
||||
|
||||
Returns a copy of cached schema options that are refreshed when the registry is
|
||||
updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
|
||||
"""
|
||||
# Return a copy to prevent external mutation
|
||||
return list(_schema_options)
|
||||
|
||||
|
||||
def get_llm_discriminator_mapping() -> dict[str, str]:
|
||||
"""
|
||||
Get discriminator mapping for LLM models.
|
||||
|
||||
Returns a copy of cached discriminator mapping that is refreshed when the registry
|
||||
is updated via refresh_llm_registry() (called on startup and via Redis pub/sub).
|
||||
"""
|
||||
# Return a copy to prevent external mutation
|
||||
return dict(_discriminator_mapping)
|
||||
|
||||
|
||||
def get_dynamic_model_slugs() -> set[str]:
|
||||
"""Get all dynamic model slugs from the registry."""
|
||||
return set(_dynamic_models.keys())
|
||||
|
||||
|
||||
def get_all_model_slugs_for_validation() -> set[str]:
|
||||
"""
|
||||
Get ALL model slugs (both enabled and disabled) for validation purposes.
|
||||
|
||||
This is used for JSON schema enum validation - we need to accept any known
|
||||
model value (even disabled ones) so that existing graphs don't fail validation.
|
||||
The actual fallback/enforcement happens at runtime in llm_call().
|
||||
"""
|
||||
all_slugs = set(_dynamic_models.keys())
|
||||
all_slugs.update(_static_metadata.keys())
|
||||
return all_slugs
|
||||
|
||||
|
||||
def iter_dynamic_models() -> Iterable[RegistryModel]:
|
||||
"""Iterate over all dynamic models in the registry."""
|
||||
return tuple(_dynamic_models.values())
|
||||
|
||||
|
||||
def get_fallback_model_for_disabled(disabled_model_slug: str) -> RegistryModel | None:
|
||||
"""
|
||||
Find a fallback model when the requested model is disabled.
|
||||
|
||||
Looks for an enabled model from the same provider. Prefers models with
|
||||
similar names or capabilities if possible.
|
||||
|
||||
Args:
|
||||
disabled_model_slug: The slug of the disabled model
|
||||
|
||||
Returns:
|
||||
An enabled RegistryModel from the same provider, or None if no fallback found
|
||||
"""
|
||||
disabled_model = _dynamic_models.get(disabled_model_slug)
|
||||
if not disabled_model:
|
||||
return None
|
||||
|
||||
provider = disabled_model.metadata.provider
|
||||
|
||||
# Find all enabled models from the same provider
|
||||
candidates = [
|
||||
model
|
||||
for model in _dynamic_models.values()
|
||||
if model.is_enabled and model.metadata.provider == provider
|
||||
]
|
||||
|
||||
if not candidates:
|
||||
return None
|
||||
|
||||
# Sort by: prefer models with similar context window, then by name
|
||||
candidates.sort(
|
||||
key=lambda m: (
|
||||
abs(m.metadata.context_window - disabled_model.metadata.context_window),
|
||||
m.display_name.lower(),
|
||||
)
|
||||
)
|
||||
|
||||
return candidates[0]
|
||||
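# Illustrative sketch (not from the original diff): the selection rule above on toy
# data. Candidates from the same provider are ranked by how close their context window
# is to the disabled model's, with display name as a tie-breaker.
_disabled_window = 128_000
_candidates = [("model-a", 200_000), ("model-b", 128_000), ("model-c", 8_000)]
_best = min(_candidates, key=lambda c: (abs(c[1] - _disabled_window), c[0]))
assert _best == ("model-b", 128_000)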
|
||||
|
||||
def is_model_enabled(model_slug: str) -> bool:
|
||||
"""Check if a model is enabled in the registry."""
|
||||
model = _dynamic_models.get(model_slug)
|
||||
if not model:
|
||||
# Model not in registry - assume it's a static/legacy model and allow it
|
||||
return True
|
||||
return model.is_enabled
|
||||
|
||||
|
||||
def get_model_info(model_slug: str) -> RegistryModel | None:
|
||||
"""Get model info from the registry."""
|
||||
return _dynamic_models.get(model_slug)
|
||||
|
||||
|
||||
def get_default_model_slug() -> str | None:
|
||||
"""
|
||||
Get the default model slug to use for block defaults.
|
||||
|
||||
Returns the recommended model if set (configured via admin UI),
|
||||
otherwise returns the first enabled model alphabetically.
|
||||
Returns None if no models are available or enabled.
|
||||
"""
|
||||
# Return the recommended model if one is set and enabled
|
||||
for model in _dynamic_models.values():
|
||||
if model.is_recommended and model.is_enabled:
|
||||
return model.slug
|
||||
|
||||
# No recommended model set - find first enabled model alphabetically
|
||||
for model in sorted(_dynamic_models.values(), key=lambda m: m.display_name.lower()):
|
||||
if model.is_enabled:
|
||||
logger.warning(
|
||||
"No recommended model set, using '%s' as default",
|
||||
model.slug,
|
||||
)
|
||||
return model.slug
|
||||
|
||||
# No enabled models available
|
||||
if _dynamic_models:
|
||||
logger.error(
|
||||
"No enabled models found in registry (%d models registered but all disabled)",
|
||||
len(_dynamic_models),
|
||||
)
|
||||
else:
|
||||
logger.error("No models registered in LLM registry")
|
||||
|
||||
return None
|
||||
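# Illustrative sketch (not from the original diff): the default-model resolution above,
# reduced to plain data. Tuples are (slug, display_name, is_enabled, is_recommended)
# and are invented for illustration.
_models = [
    ("claude-haiku", "Claude Haiku", True, False),
    ("gpt-4o", "GPT-4o", True, True),
    ("old-model", "Old Model", False, False),
]

def _default_slug(models) -> str | None:
    # Recommended-and-enabled wins; otherwise first enabled model alphabetically.
    for slug, _, enabled, recommended in models:
        if recommended and enabled:
            return slug
    for slug, _, enabled, _rec in sorted(models, key=lambda m: m[1].lower()):
        if enabled:
            return slug
    return None

assert _default_slug(_models) == "gpt-4o"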
@@ -1,130 +0,0 @@
|
||||
"""
|
||||
Helper utilities for LLM registry integration with block schemas.
|
||||
|
||||
This module handles the dynamic injection of discriminator mappings
|
||||
and model options from the LLM registry into block schemas.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.data.llm_registry.registry import (
|
||||
get_all_model_slugs_for_validation,
|
||||
get_default_model_slug,
|
||||
get_llm_discriminator_mapping,
|
||||
get_llm_model_schema_options,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def is_llm_model_field(field_name: str, field_info: Any) -> bool:
|
||||
"""
|
||||
Check if a field is an LLM model selection field.
|
||||
|
||||
Returns True if the field has 'options' in json_schema_extra
|
||||
(set by llm_model_schema_extra() in blocks/llm.py).
|
||||
"""
|
||||
if not hasattr(field_info, "json_schema_extra"):
|
||||
return False
|
||||
|
||||
extra = field_info.json_schema_extra
|
||||
if isinstance(extra, dict):
|
||||
return "options" in extra
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def refresh_llm_model_options(field_schema: dict[str, Any]) -> None:
|
||||
"""
|
||||
Refresh LLM model options from the registry.
|
||||
|
||||
Updates 'options' (for frontend dropdown) to show only enabled models,
|
||||
but keeps the 'enum' (for validation) inclusive of ALL known models.
|
||||
|
||||
This is important because:
|
||||
- Options: What users see in the dropdown (enabled models only)
|
||||
- Enum: What values pass validation (all known models, including disabled)
|
||||
|
||||
Existing graphs may have disabled models selected - they should pass validation
|
||||
and the fallback logic in llm_call() will handle using an alternative model.
|
||||
"""
|
||||
fresh_options = get_llm_model_schema_options()
|
||||
if not fresh_options:
|
||||
return
|
||||
|
||||
# Update options array (UI dropdown) - only enabled models
|
||||
if "options" in field_schema:
|
||||
field_schema["options"] = fresh_options
|
||||
|
||||
all_known_slugs = get_all_model_slugs_for_validation()
|
||||
if all_known_slugs and "enum" in field_schema:
|
||||
existing_enum = set(field_schema.get("enum", []))
|
||||
combined_enum = existing_enum | all_known_slugs
|
||||
field_schema["enum"] = sorted(combined_enum)
|
||||
|
||||
# Set the default value from the registry (gpt-4o if available, else first enabled)
|
||||
# This ensures new blocks have a sensible default pre-selected
|
||||
default_slug = get_default_model_slug()
|
||||
if default_slug:
|
||||
field_schema["default"] = default_slug
|
||||
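# Illustrative sketch (not from the original diff): the options/enum split above on a
# toy field_schema. Dropdown options list only enabled models, while the enum keeps
# every known slug so saved graphs with disabled models still validate. Values invented.
field_schema = {
    "options": [{"label": "Old", "value": "old-model"}],
    "enum": ["old-model"],
}
enabled_options = [{"label": "GPT-4o", "value": "gpt-4o"}]
all_known_slugs = {"gpt-4o", "old-model"}

field_schema["options"] = enabled_options                                    # UI: enabled only
field_schema["enum"] = sorted(set(field_schema["enum"]) | all_known_slugs)   # validation: all known
assert field_schema["enum"] == ["gpt-4o", "old-model"]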
|
||||
|
||||
def refresh_llm_discriminator_mapping(field_schema: dict[str, Any]) -> None:
|
||||
"""
|
||||
Refresh discriminator_mapping for fields that use model-based discrimination.
|
||||
|
||||
The discriminator is already set when AICredentialsField() creates the field.
|
||||
We only need to refresh the mapping when models are added/removed.
|
||||
"""
|
||||
if field_schema.get("discriminator") != "model":
|
||||
return
|
||||
|
||||
# Always refresh the mapping to get latest models
|
||||
fresh_mapping = get_llm_discriminator_mapping()
|
||||
if fresh_mapping is not None:
|
||||
field_schema["discriminator_mapping"] = fresh_mapping
|
||||
|
||||
|
||||
def update_schema_with_llm_registry(
|
||||
schema: dict[str, Any], model_class: type | None = None
|
||||
) -> None:
|
||||
"""
|
||||
Update a JSON schema with current LLM registry data.
|
||||
|
||||
Refreshes:
|
||||
1. Model options for LLM model selection fields (dropdown choices)
|
||||
2. Discriminator mappings for credentials fields (model → provider)
|
||||
|
||||
Args:
|
||||
schema: The JSON schema to update (mutated in-place)
|
||||
model_class: The Pydantic model class (optional, for field introspection)
|
||||
"""
|
||||
properties = schema.get("properties", {})
|
||||
|
||||
for field_name, field_schema in properties.items():
|
||||
if not isinstance(field_schema, dict):
|
||||
continue
|
||||
|
||||
# Refresh model options for LLM model fields
|
||||
if model_class and hasattr(model_class, "model_fields"):
|
||||
field_info = model_class.model_fields.get(field_name)
|
||||
if field_info and is_llm_model_field(field_name, field_info):
|
||||
try:
|
||||
refresh_llm_model_options(field_schema)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to refresh LLM options for field %s: %s",
|
||||
field_name,
|
||||
exc,
|
||||
)
|
||||
|
||||
# Refresh discriminator mapping for fields that use model discrimination
|
||||
try:
|
||||
refresh_llm_discriminator_mapping(field_schema)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to refresh discriminator mapping for field %s: %s",
|
||||
field_name,
|
||||
exc,
|
||||
)
|
||||
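# Illustrative sketch (not from the original diff): what the discriminator refresh
# above does to a credentials field schema. The slug -> provider mapping is invented.
credentials_schema = {"discriminator": "model", "discriminator_mapping": {}}
fresh_mapping = {"gpt-4o": "openai", "claude-3-haiku": "anthropic"}

if credentials_schema.get("discriminator") == "model":
    credentials_schema["discriminator_mapping"] = fresh_mapping

assert credentials_schema["discriminator_mapping"]["claude-3-haiku"] == "anthropic"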
@@ -39,7 +39,6 @@ from pydantic_core import (
|
||||
)
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from backend.data.llm_registry import update_schema_with_llm_registry
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.json import loads as json_loads
|
||||
from backend.util.request import parse_url
|
||||
@@ -551,9 +550,7 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
|
||||
else:
|
||||
schema["credentials_provider"] = allowed_providers
|
||||
schema["credentials_types"] = model_class.allowed_cred_types()
|
||||
|
||||
# Ensure LLM discriminators are populated (delegates to shared helper)
|
||||
update_schema_with_llm_registry(schema, model_class)
|
||||
# Do not return anything, just mutate schema in place
|
||||
|
||||
model_config = ConfigDict(
|
||||
json_schema_extra=_add_json_schema_extra, # type: ignore
|
||||
@@ -708,20 +705,16 @@ def CredentialsField(
|
||||
This is enforced by the `BlockSchema` base class.
|
||||
"""
|
||||
|
||||
# Build field_schema_extra - always include discriminator and mapping if discriminator is set
|
||||
field_schema_extra: dict[str, Any] = {}
|
||||
|
||||
# Always include discriminator if provided
|
||||
if discriminator is not None:
|
||||
field_schema_extra["discriminator"] = discriminator
|
||||
# Always include discriminator_mapping when discriminator is set (even if empty initially)
|
||||
field_schema_extra["discriminator_mapping"] = discriminator_mapping or {}
|
||||
|
||||
# Include other optional fields (only if not None)
|
||||
if required_scopes:
|
||||
field_schema_extra["credentials_scopes"] = list(required_scopes)
|
||||
if discriminator_values:
|
||||
field_schema_extra["discriminator_values"] = discriminator_values
|
||||
field_schema_extra = {
|
||||
k: v
|
||||
for k, v in {
|
||||
"credentials_scopes": list(required_scopes) or None,
|
||||
"discriminator": discriminator,
|
||||
"discriminator_mapping": discriminator_mapping,
|
||||
"discriminator_values": discriminator_values,
|
||||
}.items()
|
||||
if v is not None
|
||||
}
|
||||
|
||||
# Merge any json_schema_extra passed in kwargs
|
||||
if "json_schema_extra" in kwargs:
|
||||
|
||||
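# Illustrative sketch (not from the original diff): the comprehension introduced above
# keeps only keys whose value is not None. Inputs are placeholders.
required_scopes: set[str] = set()
discriminator = "model"
discriminator_mapping = {"gpt-4o": "openai"}
discriminator_values = None

field_schema_extra = {
    k: v
    for k, v in {
        "credentials_scopes": list(required_scopes) or None,  # empty set -> None -> dropped
        "discriminator": discriminator,
        "discriminator_mapping": discriminator_mapping,
        "discriminator_values": discriminator_values,
    }.items()
    if v is not None
}
assert field_schema_extra == {
    "discriminator": "model",
    "discriminator_mapping": {"gpt-4o": "openai"},
}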
@@ -1,67 +0,0 @@
|
||||
"""
|
||||
Helper functions for LLM registry initialization in executor context.
|
||||
|
||||
These functions handle refreshing the LLM registry when the executor starts
|
||||
and subscribing to real-time updates via Redis pub/sub.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from backend.blocks._base import BlockSchema
|
||||
from backend.data import db, llm_registry
|
||||
from backend.data.block import initialize_blocks
|
||||
from backend.data.block_cost_config import refresh_llm_costs
|
||||
from backend.data.llm_registry import subscribe_to_registry_refresh
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def initialize_registry_for_executor() -> None:
|
||||
"""
|
||||
Initialize blocks and refresh LLM registry in the executor context.
|
||||
|
||||
This must run in the executor's event loop to have access to the database.
|
||||
"""
|
||||
try:
|
||||
# Connect to database if not already connected
|
||||
if not db.is_connected():
|
||||
await db.connect()
|
||||
logger.info("[GraphExecutor] Connected to database for registry refresh")
|
||||
|
||||
# Initialize blocks (internally refreshes LLM registry and costs)
|
||||
await initialize_blocks()
|
||||
logger.info("[GraphExecutor] Blocks initialized")
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"[GraphExecutor] Failed to refresh LLM registry on startup: %s",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
async def refresh_registry_on_notification() -> None:
|
||||
"""Refresh LLM registry when notified via Redis pub/sub."""
|
||||
try:
|
||||
# Ensure DB is connected
|
||||
if not db.is_connected():
|
||||
await db.connect()
|
||||
|
||||
# Refresh registry and costs
|
||||
await llm_registry.refresh_llm_registry()
|
||||
await refresh_llm_costs()
|
||||
|
||||
# Clear block schema caches so they regenerate with new model options
|
||||
BlockSchema.clear_all_schema_caches()
|
||||
|
||||
logger.info("[GraphExecutor] LLM registry refreshed from notification")
|
||||
except Exception as exc:
|
||||
logger.error(
|
||||
"[GraphExecutor] Failed to refresh LLM registry from notification: %s",
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
async def subscribe_to_registry_updates() -> None:
|
||||
"""Subscribe to Redis pub/sub for LLM registry refresh notifications."""
|
||||
await subscribe_to_registry_refresh(refresh_registry_on_notification)
|
||||
@@ -708,20 +708,6 @@ class ExecutionProcessor:
        )
        self.node_execution_thread.start()
        self.node_evaluation_thread.start()

        # Initialize LLM registry and subscribe to updates
        from backend.executor.llm_registry_init import (
            initialize_registry_for_executor,
            subscribe_to_registry_updates,
        )

        asyncio.run_coroutine_threadsafe(
            initialize_registry_for_executor(), self.node_execution_loop
        )
        asyncio.run_coroutine_threadsafe(
            subscribe_to_registry_updates(), self.node_execution_loop
        )

        logger.info(f"[GraphExecutor] {self.tid} started")

    @error_logged(swallow=False)
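# Illustrative sketch (not from the original diff): the scheduling pattern above in
# isolation. run_coroutine_threadsafe submits a coroutine to an event loop owned by
# another thread and returns a concurrent.futures.Future; the startup code above
# discards that future.
import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def init() -> str:
    return "initialised"

future = asyncio.run_coroutine_threadsafe(init(), loop)
print(future.result(timeout=5))  # -> "initialised"
loop.call_soon_threadsafe(loop.stop)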
@@ -1,935 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Iterable, Sequence, cast
|
||||
|
||||
import prisma
|
||||
import prisma.models
|
||||
|
||||
from backend.data.db import transaction
|
||||
from backend.server.v2.llm import model as llm_model
|
||||
from backend.util.models import Pagination
|
||||
|
||||
|
||||
def _json_dict(value: Any | None) -> dict[str, Any]:
|
||||
if not value:
|
||||
return {}
|
||||
if isinstance(value, dict):
|
||||
return value
|
||||
return {}
|
||||
|
||||
|
||||
def _map_cost(record: prisma.models.LlmModelCost) -> llm_model.LlmModelCost:
|
||||
return llm_model.LlmModelCost(
|
||||
id=record.id,
|
||||
unit=record.unit,
|
||||
credit_cost=record.creditCost,
|
||||
credential_provider=record.credentialProvider,
|
||||
credential_id=record.credentialId,
|
||||
credential_type=record.credentialType,
|
||||
currency=record.currency,
|
||||
metadata=_json_dict(record.metadata),
|
||||
)
|
||||
|
||||
|
||||
def _map_creator(
|
||||
record: prisma.models.LlmModelCreator,
|
||||
) -> llm_model.LlmModelCreator:
|
||||
return llm_model.LlmModelCreator(
|
||||
id=record.id,
|
||||
name=record.name,
|
||||
display_name=record.displayName,
|
||||
description=record.description,
|
||||
website_url=record.websiteUrl,
|
||||
logo_url=record.logoUrl,
|
||||
metadata=_json_dict(record.metadata),
|
||||
)
|
||||
|
||||
|
||||
def _map_model(record: prisma.models.LlmModel) -> llm_model.LlmModel:
|
||||
costs = []
|
||||
if record.Costs:
|
||||
costs = [_map_cost(cost) for cost in record.Costs]
|
||||
|
||||
creator = None
|
||||
if hasattr(record, "Creator") and record.Creator:
|
||||
creator = _map_creator(record.Creator)
|
||||
|
||||
return llm_model.LlmModel(
|
||||
id=record.id,
|
||||
slug=record.slug,
|
||||
display_name=record.displayName,
|
||||
description=record.description,
|
||||
provider_id=record.providerId,
|
||||
creator_id=record.creatorId,
|
||||
creator=creator,
|
||||
context_window=record.contextWindow,
|
||||
max_output_tokens=record.maxOutputTokens,
|
||||
is_enabled=record.isEnabled,
|
||||
is_recommended=record.isRecommended,
|
||||
capabilities=_json_dict(record.capabilities),
|
||||
metadata=_json_dict(record.metadata),
|
||||
costs=costs,
|
||||
)
|
||||
|
||||
|
||||
def _map_provider(record: prisma.models.LlmProvider) -> llm_model.LlmProvider:
|
||||
models: list[llm_model.LlmModel] = []
|
||||
if record.Models:
|
||||
models = [_map_model(model) for model in record.Models]
|
||||
|
||||
return llm_model.LlmProvider(
|
||||
id=record.id,
|
||||
name=record.name,
|
||||
display_name=record.displayName,
|
||||
description=record.description,
|
||||
default_credential_provider=record.defaultCredentialProvider,
|
||||
default_credential_id=record.defaultCredentialId,
|
||||
default_credential_type=record.defaultCredentialType,
|
||||
supports_tools=record.supportsTools,
|
||||
supports_json_output=record.supportsJsonOutput,
|
||||
supports_reasoning=record.supportsReasoning,
|
||||
supports_parallel_tool=record.supportsParallelTool,
|
||||
metadata=_json_dict(record.metadata),
|
||||
models=models,
|
||||
)
|
||||
|
||||
|
||||
async def list_providers(
|
||||
include_models: bool = True, enabled_only: bool = False
|
||||
) -> list[llm_model.LlmProvider]:
|
||||
"""
|
||||
List all LLM providers.
|
||||
|
||||
Args:
|
||||
include_models: Whether to include models for each provider
|
||||
enabled_only: If True, only include enabled models (for public routes)
|
||||
"""
|
||||
include: Any = None
|
||||
if include_models:
|
||||
model_where = {"isEnabled": True} if enabled_only else None
|
||||
include = {
|
||||
"Models": {
|
||||
"include": {"Costs": True, "Creator": True},
|
||||
"where": model_where,
|
||||
}
|
||||
}
|
||||
records = await prisma.models.LlmProvider.prisma().find_many(include=include)
|
||||
return [_map_provider(record) for record in records]
|
||||
|
||||
|
||||
async def upsert_provider(
|
||||
request: llm_model.UpsertLlmProviderRequest,
|
||||
provider_id: str | None = None,
|
||||
) -> llm_model.LlmProvider:
|
||||
data: Any = {
|
||||
"name": request.name,
|
||||
"displayName": request.display_name,
|
||||
"description": request.description,
|
||||
"defaultCredentialProvider": request.default_credential_provider,
|
||||
"defaultCredentialId": request.default_credential_id,
|
||||
"defaultCredentialType": request.default_credential_type,
|
||||
"supportsTools": request.supports_tools,
|
||||
"supportsJsonOutput": request.supports_json_output,
|
||||
"supportsReasoning": request.supports_reasoning,
|
||||
"supportsParallelTool": request.supports_parallel_tool,
|
||||
"metadata": prisma.Json(request.metadata or {}),
|
||||
}
|
||||
include: Any = {"Models": {"include": {"Costs": True, "Creator": True}}}
|
||||
if provider_id:
|
||||
record = await prisma.models.LlmProvider.prisma().update(
|
||||
where={"id": provider_id},
|
||||
data=data,
|
||||
include=include,
|
||||
)
|
||||
else:
|
||||
record = await prisma.models.LlmProvider.prisma().create(
|
||||
data=data,
|
||||
include=include,
|
||||
)
|
||||
if record is None:
|
||||
raise ValueError("Failed to create/update provider")
|
||||
return _map_provider(record)
|
||||
|
||||
|
||||
async def delete_provider(provider_id: str) -> bool:
|
||||
"""
|
||||
Delete an LLM provider.
|
||||
|
||||
A provider can only be deleted if it has no associated models.
|
||||
Due to onDelete: Restrict on LlmModel.Provider, the database will
|
||||
block deletion if models exist.
|
||||
|
||||
Args:
|
||||
provider_id: UUID of the provider to delete
|
||||
|
||||
Returns:
|
||||
True if deleted successfully
|
||||
|
||||
Raises:
|
||||
ValueError: If provider not found or has associated models
|
||||
"""
|
||||
# Check if provider exists
|
||||
provider = await prisma.models.LlmProvider.prisma().find_unique(
|
||||
where={"id": provider_id},
|
||||
include={"Models": True},
|
||||
)
|
||||
if not provider:
|
||||
raise ValueError(f"Provider with id '{provider_id}' not found")
|
||||
|
||||
# Check if provider has any models
|
||||
model_count = len(provider.Models) if provider.Models else 0
|
||||
if model_count > 0:
|
||||
raise ValueError(
|
||||
f"Cannot delete provider '{provider.displayName}' because it has "
|
||||
f"{model_count} model(s). Delete all models first."
|
||||
)
|
||||
|
||||
# Safe to delete
|
||||
await prisma.models.LlmProvider.prisma().delete(where={"id": provider_id})
|
||||
return True
|
||||
|
||||
|
||||
async def list_models(
|
||||
provider_id: str | None = None,
|
||||
enabled_only: bool = False,
|
||||
page: int = 1,
|
||||
page_size: int = 50,
|
||||
) -> llm_model.LlmModelsResponse:
|
||||
"""
|
||||
List LLM models with pagination.
|
||||
|
||||
Args:
|
||||
provider_id: Optional filter by provider ID
|
||||
enabled_only: If True, only return enabled models (for public routes)
|
||||
page: Page number (1-indexed)
|
||||
page_size: Number of models per page
|
||||
"""
|
||||
where: Any = {}
|
||||
if provider_id:
|
||||
where["providerId"] = provider_id
|
||||
if enabled_only:
|
||||
where["isEnabled"] = True
|
||||
|
||||
# Get total count for pagination
|
||||
total_items = await prisma.models.LlmModel.prisma().count(
|
||||
where=where if where else None
|
||||
)
|
||||
|
||||
# Calculate pagination
|
||||
skip = (page - 1) * page_size
|
||||
total_pages = (total_items + page_size - 1) // page_size if total_items > 0 else 0
|
||||
|
||||
records = await prisma.models.LlmModel.prisma().find_many(
|
||||
where=where if where else None,
|
||||
include={"Costs": True, "Creator": True},
|
||||
skip=skip,
|
||||
take=page_size,
|
||||
)
|
||||
models = [_map_model(record) for record in records]
|
||||
|
||||
return llm_model.LlmModelsResponse(
|
||||
models=models,
|
||||
pagination=Pagination(
|
||||
total_items=total_items,
|
||||
total_pages=total_pages,
|
||||
current_page=page,
|
||||
page_size=page_size,
|
||||
),
|
||||
)
|
||||
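# Illustrative sketch (not from the original diff): the pagination arithmetic used in
# list_models above, on sample numbers.
total_items, page, page_size = 101, 3, 50
skip = (page - 1) * page_size                                                         # 100
total_pages = (total_items + page_size - 1) // page_size if total_items > 0 else 0    # 3
assert (skip, total_pages) == (100, 3)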
|
||||
|
||||
def _cost_create_payload(
|
||||
costs: Sequence[llm_model.LlmModelCostInput],
|
||||
) -> dict[str, Iterable[dict[str, Any]]]:
|
||||
|
||||
create_items = []
|
||||
for cost in costs:
|
||||
item: dict[str, Any] = {
|
||||
"unit": cost.unit,
|
||||
"creditCost": cost.credit_cost,
|
||||
"credentialProvider": cost.credential_provider,
|
||||
}
|
||||
# Only include optional fields if they have values
|
||||
if cost.credential_id:
|
||||
item["credentialId"] = cost.credential_id
|
||||
if cost.credential_type:
|
||||
item["credentialType"] = cost.credential_type
|
||||
if cost.currency:
|
||||
item["currency"] = cost.currency
|
||||
# Handle metadata - use Prisma Json type
|
||||
if cost.metadata is not None and cost.metadata != {}:
|
||||
item["metadata"] = prisma.Json(cost.metadata)
|
||||
create_items.append(item)
|
||||
return {"create": create_items}
|
||||
|
||||
|
||||
async def create_model(
|
||||
request: llm_model.CreateLlmModelRequest,
|
||||
) -> llm_model.LlmModel:
|
||||
data: Any = {
|
||||
"slug": request.slug,
|
||||
"displayName": request.display_name,
|
||||
"description": request.description,
|
||||
"Provider": {"connect": {"id": request.provider_id}},
|
||||
"contextWindow": request.context_window,
|
||||
"maxOutputTokens": request.max_output_tokens,
|
||||
"isEnabled": request.is_enabled,
|
||||
"capabilities": prisma.Json(request.capabilities or {}),
|
||||
"metadata": prisma.Json(request.metadata or {}),
|
||||
"Costs": _cost_create_payload(request.costs),
|
||||
}
|
||||
if request.creator_id:
|
||||
data["Creator"] = {"connect": {"id": request.creator_id}}
|
||||
|
||||
record = await prisma.models.LlmModel.prisma().create(
|
||||
data=data,
|
||||
include={"Costs": True, "Creator": True, "Provider": True},
|
||||
)
|
||||
return _map_model(record)
|
||||
|
||||
|
||||
async def update_model(
|
||||
model_id: str,
|
||||
request: llm_model.UpdateLlmModelRequest,
|
||||
) -> llm_model.LlmModel:
|
||||
# Build scalar field updates (non-relation fields)
|
||||
scalar_data: Any = {}
|
||||
if request.display_name is not None:
|
||||
scalar_data["displayName"] = request.display_name
|
||||
if request.description is not None:
|
||||
scalar_data["description"] = request.description
|
||||
if request.context_window is not None:
|
||||
scalar_data["contextWindow"] = request.context_window
|
||||
if request.max_output_tokens is not None:
|
||||
scalar_data["maxOutputTokens"] = request.max_output_tokens
|
||||
if request.is_enabled is not None:
|
||||
scalar_data["isEnabled"] = request.is_enabled
|
||||
if request.capabilities is not None:
|
||||
scalar_data["capabilities"] = request.capabilities
|
||||
if request.metadata is not None:
|
||||
scalar_data["metadata"] = request.metadata
|
||||
# Foreign keys can be updated directly as scalar fields
|
||||
if request.provider_id is not None:
|
||||
scalar_data["providerId"] = request.provider_id
|
||||
if request.creator_id is not None:
|
||||
# Empty string means remove the creator
|
||||
scalar_data["creatorId"] = request.creator_id if request.creator_id else None
|
||||
|
||||
# If we have costs to update, we need to handle them separately
|
||||
# because nested writes have different constraints
|
||||
if request.costs is not None:
|
||||
# Wrap cost replacement in a transaction for atomicity
|
||||
async with transaction() as tx:
|
||||
# First update scalar fields
|
||||
if scalar_data:
|
||||
await tx.llmmodel.update(
|
||||
where={"id": model_id},
|
||||
data=scalar_data,
|
||||
)
|
||||
# Then handle costs: delete existing and create new
|
||||
await tx.llmmodelcost.delete_many(where={"llmModelId": model_id})
|
||||
if request.costs:
|
||||
cost_payload = _cost_create_payload(request.costs)
|
||||
for cost_item in cost_payload["create"]:
|
||||
cost_item["llmModelId"] = model_id
|
||||
await tx.llmmodelcost.create(data=cast(Any, cost_item))
|
||||
# Fetch the updated record (outside transaction)
|
||||
record = await prisma.models.LlmModel.prisma().find_unique(
|
||||
where={"id": model_id},
|
||||
include={"Costs": True, "Creator": True},
|
||||
)
|
||||
else:
|
||||
# No costs update - simple update
|
||||
record = await prisma.models.LlmModel.prisma().update(
|
||||
where={"id": model_id},
|
||||
data=scalar_data,
|
||||
include={"Costs": True, "Creator": True},
|
||||
)
|
||||
|
||||
if not record:
|
||||
raise ValueError(f"Model with id '{model_id}' not found")
|
||||
return _map_model(record)


async def toggle_model(
    model_id: str,
    is_enabled: bool,
    migrate_to_slug: str | None = None,
    migration_reason: str | None = None,
    custom_credit_cost: int | None = None,
) -> llm_model.ToggleLlmModelResponse:
    """
    Toggle a model's enabled status, optionally migrating workflows when disabling.

    Args:
        model_id: UUID of the model to toggle
        is_enabled: New enabled status
        migrate_to_slug: If disabling and this is provided, migrate all workflows
            using this model to the specified replacement model
        migration_reason: Optional reason for the migration (e.g., "Provider outage")
        custom_credit_cost: Optional custom pricing override for migrated workflows.
            When set, the billing system should use this cost instead
            of the target model's cost for affected nodes.

    Returns:
        ToggleLlmModelResponse with the updated model and optional migration stats
    """
    import json

    # Get the model being toggled
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    nodes_migrated = 0
    migration_id: str | None = None

    # If disabling with migration, perform migration first
    if not is_enabled and migrate_to_slug:
        # Validate replacement model exists and is enabled
        replacement = await prisma.models.LlmModel.prisma().find_unique(
            where={"slug": migrate_to_slug}
        )
        if not replacement:
            raise ValueError(f"Replacement model '{migrate_to_slug}' not found")
        if not replacement.isEnabled:
            raise ValueError(
                f"Replacement model '{migrate_to_slug}' is disabled. "
                f"Please enable it before using it as a replacement."
            )

        # Perform all operations atomically within a single transaction
        # This ensures no nodes are missed between query and update
        async with transaction() as tx:
            # Get the IDs of nodes that will be migrated (inside transaction for consistency)
            node_ids_result = await tx.query_raw(
                """
                SELECT id
                FROM "AgentNode"
                WHERE "constantInput"::jsonb->>'model' = $1
                FOR UPDATE
                """,
                model.slug,
            )
            migrated_node_ids = (
                [row["id"] for row in node_ids_result] if node_ids_result else []
            )
            nodes_migrated = len(migrated_node_ids)

            if nodes_migrated > 0:
                # Update by IDs to ensure we only update the exact nodes we queried
                # Use JSON array and jsonb_array_elements_text for safe parameterization
                node_ids_json = json.dumps(migrated_node_ids)
                await tx.execute_raw(
                    """
                    UPDATE "AgentNode"
                    SET "constantInput" = JSONB_SET(
                        "constantInput"::jsonb,
                        '{model}',
                        to_jsonb($1::text)
                    )
                    WHERE id::text IN (
                        SELECT jsonb_array_elements_text($2::jsonb)
                    )
                    """,
                    migrate_to_slug,
                    node_ids_json,
                )

            record = await tx.llmmodel.update(
                where={"id": model_id},
                data={"isEnabled": is_enabled},
                include={"Costs": True},
            )

            # Create migration record for revert capability
            if nodes_migrated > 0:
                migration_data: Any = {
                    "sourceModelSlug": model.slug,
                    "targetModelSlug": migrate_to_slug,
                    "reason": migration_reason,
                    "migratedNodeIds": json.dumps(migrated_node_ids),
                    "nodeCount": nodes_migrated,
                    "customCreditCost": custom_credit_cost,
                }
                migration_record = await tx.llmmodelmigration.create(
                    data=migration_data
                )
                migration_id = migration_record.id
    else:
        # Simple toggle without migration
        record = await prisma.models.LlmModel.prisma().update(
            where={"id": model_id},
            data={"isEnabled": is_enabled},
            include={"Costs": True},
        )

    if record is None:
        raise ValueError(f"Model with id '{model_id}' not found")
    return llm_model.ToggleLlmModelResponse(
        model=_map_model(record),
        nodes_migrated=nodes_migrated,
        migrated_to_slug=migrate_to_slug if nodes_migrated > 0 else None,
        migration_id=migration_id,
    )
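
# Example (illustrative only): disabling a model and routing its workflows to a
# replacement. The UUID and slug are placeholders, not values from this codebase.
async def _example_disable_with_migration() -> llm_model.ToggleLlmModelResponse:
    return await toggle_model(
        model_id="00000000-0000-0000-0000-000000000000",
        is_enabled=False,
        migrate_to_slug="gpt-4o-mini",
        migration_reason="Provider outage",
    )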


async def get_model_usage(model_id: str) -> llm_model.LlmModelUsageResponse:
    """Get usage count for a model."""
    import prisma as prisma_module

    model = await prisma.models.LlmModel.prisma().find_unique(where={"id": model_id})
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        model.slug,
    )
    node_count = int(count_result[0]["count"]) if count_result else 0

    return llm_model.LlmModelUsageResponse(model_slug=model.slug, node_count=node_count)
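
# Example (illustrative only): checking how many workflow nodes reference a model
# before deciding whether it is safe to disable or delete it.
async def _example_is_model_unused(model_id: str) -> bool:
    usage = await get_model_usage(model_id)
    # True when no AgentNode references the model's slug.
    return usage.node_count == 0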


async def delete_model(
    model_id: str, replacement_model_slug: str | None = None
) -> llm_model.DeleteLlmModelResponse:
    """
    Delete a model and optionally migrate all AgentNodes using it to a replacement model.

    This performs an atomic operation within a database transaction:
    1. Validates the model exists
    2. Counts affected nodes
    3. If nodes exist, validates replacement model and migrates them
    4. Deletes the LlmModel record (CASCADE deletes costs)

    Args:
        model_id: UUID of the model to delete
        replacement_model_slug: Slug of the model to migrate to (required only if nodes use this model)

    Returns:
        DeleteLlmModelResponse with migration stats

    Raises:
        ValueError: If model not found, nodes exist but no replacement provided,
            replacement not found, or replacement is disabled
    """
    # 1. Get the model being deleted (validation - outside transaction)
    model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}, include={"Costs": True}
    )
    if not model:
        raise ValueError(f"Model with id '{model_id}' not found")

    deleted_slug = model.slug
    deleted_display_name = model.displayName

    # 2. Count affected nodes first to determine if replacement is needed
    import prisma as prisma_module

    count_result = await prisma_module.get_client().query_raw(
        """
        SELECT COUNT(*) as count
        FROM "AgentNode"
        WHERE "constantInput"::jsonb->>'model' = $1
        """,
        deleted_slug,
    )
    nodes_to_migrate = int(count_result[0]["count"]) if count_result else 0

    # 3. Validate replacement model only if there are nodes to migrate
    if nodes_to_migrate > 0:
        if not replacement_model_slug:
            raise ValueError(
                f"Cannot delete model '{deleted_slug}': {nodes_to_migrate} workflow node(s) "
                f"are using it. Please provide a replacement_model_slug to migrate them."
            )
        replacement = await prisma.models.LlmModel.prisma().find_unique(
            where={"slug": replacement_model_slug}
        )
        if not replacement:
            raise ValueError(f"Replacement model '{replacement_model_slug}' not found")
        if not replacement.isEnabled:
            raise ValueError(
                f"Replacement model '{replacement_model_slug}' is disabled. "
                f"Please enable it before using it as a replacement."
            )

    # 4. Perform migration (if needed) and deletion atomically within a transaction
    async with transaction() as tx:
        # Migrate all AgentNode.constantInput->model to replacement
        if nodes_to_migrate > 0 and replacement_model_slug:
            await tx.execute_raw(
                """
                UPDATE "AgentNode"
                SET "constantInput" = JSONB_SET(
                    "constantInput"::jsonb,
                    '{model}',
                    to_jsonb($1::text)
                )
                WHERE "constantInput"::jsonb->>'model' = $2
                """,
                replacement_model_slug,
                deleted_slug,
            )

        # Delete the model (CASCADE will delete costs automatically)
        await tx.llmmodel.delete(where={"id": model_id})

    # Build appropriate message based on whether migration happened
    if nodes_to_migrate > 0:
        message = (
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}) "
            f"and migrated {nodes_to_migrate} workflow node(s) to '{replacement_model_slug}'."
        )
    else:
        message = (
            f"Successfully deleted model '{deleted_display_name}' ({deleted_slug}). "
            f"No workflows were using this model."
        )

    return llm_model.DeleteLlmModelResponse(
        deleted_model_slug=deleted_slug,
        deleted_model_display_name=deleted_display_name,
        replacement_model_slug=replacement_model_slug,
        nodes_migrated=nodes_to_migrate,
        message=message,
    )
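
# Example (illustrative only): deleting a model while migrating its remaining
# workflow nodes. The UUID and replacement slug below are placeholders.
async def _example_delete_model() -> llm_model.DeleteLlmModelResponse:
    return await delete_model(
        model_id="00000000-0000-0000-0000-000000000000",
        replacement_model_slug="gpt-4o-mini",
    )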


def _map_migration(
    record: prisma.models.LlmModelMigration,
) -> llm_model.LlmModelMigration:
    return llm_model.LlmModelMigration(
        id=record.id,
        source_model_slug=record.sourceModelSlug,
        target_model_slug=record.targetModelSlug,
        reason=record.reason,
        node_count=record.nodeCount,
        custom_credit_cost=record.customCreditCost,
        is_reverted=record.isReverted,
        created_at=record.createdAt.isoformat(),
        reverted_at=record.revertedAt.isoformat() if record.revertedAt else None,
    )


async def list_migrations(
    include_reverted: bool = False,
) -> list[llm_model.LlmModelMigration]:
    """
    List model migrations, optionally including reverted ones.

    Args:
        include_reverted: If True, include reverted migrations. Default is False.

    Returns:
        List of LlmModelMigration records
    """
    where: Any = None if include_reverted else {"isReverted": False}
    records = await prisma.models.LlmModelMigration.prisma().find_many(
        where=where,
        order={"createdAt": "desc"},
    )
    return [_map_migration(record) for record in records]


async def get_migration(migration_id: str) -> llm_model.LlmModelMigration | None:
    """Get a specific migration by ID."""
    record = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    return _map_migration(record) if record else None


async def revert_migration(
    migration_id: str,
    re_enable_source_model: bool = True,
) -> llm_model.RevertMigrationResponse:
    """
    Revert a model migration, restoring affected nodes to their original model.

    This only reverts the specific nodes that were migrated, not all nodes
    currently using the target model.

    Args:
        migration_id: UUID of the migration to revert
        re_enable_source_model: Whether to re-enable the source model if it's disabled

    Returns:
        RevertMigrationResponse with revert stats

    Raises:
        ValueError: If migration not found, already reverted, or source model not available
    """
    import json
    from datetime import datetime, timezone

    # Get the migration record
    migration = await prisma.models.LlmModelMigration.prisma().find_unique(
        where={"id": migration_id}
    )
    if not migration:
        raise ValueError(f"Migration with id '{migration_id}' not found")

    if migration.isReverted:
        raise ValueError(
            f"Migration '{migration_id}' has already been reverted "
            f"on {migration.revertedAt.isoformat() if migration.revertedAt else 'unknown date'}"
        )

    # Check if source model exists
    source_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"slug": migration.sourceModelSlug}
    )
    if not source_model:
        raise ValueError(
            f"Source model '{migration.sourceModelSlug}' no longer exists. "
            f"Cannot revert migration."
        )

    # Get the migrated node IDs (Prisma auto-parses JSONB to list)
    migrated_node_ids: list[str] = (
        migration.migratedNodeIds
        if isinstance(migration.migratedNodeIds, list)
        else json.loads(migration.migratedNodeIds)  # type: ignore
    )
    if not migrated_node_ids:
        raise ValueError("No nodes to revert in this migration")

    # Track if we need to re-enable the source model
    source_model_was_disabled = not source_model.isEnabled
    should_re_enable = source_model_was_disabled and re_enable_source_model
    source_model_re_enabled = False

    # Perform revert atomically
    async with transaction() as tx:
        # Re-enable the source model if requested and it was disabled
        if should_re_enable:
            await tx.llmmodel.update(
                where={"id": source_model.id},
                data={"isEnabled": True},
            )
            source_model_re_enabled = True

        # Update only the specific nodes that were migrated
        # We need to check that they still have the target model (haven't been changed since)
        # Use a single batch update for efficiency
        # Use JSON array and jsonb_array_elements_text for safe parameterization
        node_ids_json = json.dumps(migrated_node_ids)
        result = await tx.execute_raw(
            """
            UPDATE "AgentNode"
            SET "constantInput" = JSONB_SET(
                "constantInput"::jsonb,
                '{model}',
                to_jsonb($1::text)
            )
            WHERE id::text IN (
                SELECT jsonb_array_elements_text($2::jsonb)
            )
            AND "constantInput"::jsonb->>'model' = $3
            """,
            migration.sourceModelSlug,
            node_ids_json,
            migration.targetModelSlug,
        )
        nodes_reverted = result if result else 0

        # Mark migration as reverted
        await tx.llmmodelmigration.update(
            where={"id": migration_id},
            data={
                "isReverted": True,
                "revertedAt": datetime.now(timezone.utc),
            },
        )

    # Calculate nodes that were already changed since migration
    nodes_already_changed = len(migrated_node_ids) - nodes_reverted

    # Build appropriate message
    message_parts = [
        f"Successfully reverted migration: {nodes_reverted} node(s) restored "
        f"from '{migration.targetModelSlug}' to '{migration.sourceModelSlug}'."
    ]
    if nodes_already_changed > 0:
        message_parts.append(
            f" {nodes_already_changed} node(s) were already changed and not reverted."
        )
    if source_model_re_enabled:
        message_parts.append(
            f" Model '{migration.sourceModelSlug}' has been re-enabled."
        )

    return llm_model.RevertMigrationResponse(
        migration_id=migration_id,
        source_model_slug=migration.sourceModelSlug,
        target_model_slug=migration.targetModelSlug,
        nodes_reverted=nodes_reverted,
        nodes_already_changed=nodes_already_changed,
        source_model_re_enabled=source_model_re_enabled,
        message="".join(message_parts),
    )
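
# Example (illustrative only): reverting a migration recorded by toggle_model,
# re-enabling the source model if it had been disabled. The ID is a placeholder.
async def _example_revert(migration_id: str) -> llm_model.RevertMigrationResponse:
    return await revert_migration(migration_id, re_enable_source_model=True)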


# ============================================================================
# Creator CRUD operations
# ============================================================================


async def list_creators() -> list[llm_model.LlmModelCreator]:
    """List all LLM model creators."""
    records = await prisma.models.LlmModelCreator.prisma().find_many(
        order={"displayName": "asc"}
    )
    return [_map_creator(record) for record in records]


async def get_creator(creator_id: str) -> llm_model.LlmModelCreator | None:
    """Get a specific creator by ID."""
    record = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    return _map_creator(record) if record else None


async def upsert_creator(
    request: llm_model.UpsertLlmCreatorRequest,
    creator_id: str | None = None,
) -> llm_model.LlmModelCreator:
    """Create or update a model creator."""
    data: Any = {
        "name": request.name,
        "displayName": request.display_name,
        "description": request.description,
        "websiteUrl": request.website_url,
        "logoUrl": request.logo_url,
        "metadata": prisma.Json(request.metadata or {}),
    }
    if creator_id:
        record = await prisma.models.LlmModelCreator.prisma().update(
            where={"id": creator_id},
            data=data,
        )
    else:
        record = await prisma.models.LlmModelCreator.prisma().create(data=data)
    if record is None:
        raise ValueError("Failed to create/update creator")
    return _map_creator(record)
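
# Example (illustrative only): registering a new creator entry. The field values are
# placeholders; calling upsert_creator without creator_id creates a new row.
async def _example_add_creator() -> llm_model.LlmModelCreator:
    request = llm_model.UpsertLlmCreatorRequest(
        name="example_lab",
        display_name="Example Lab",
        website_url="https://example.com",
    )
    return await upsert_creator(request)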


async def delete_creator(creator_id: str) -> bool:
    """
    Delete a model creator.

    This will set creatorId to NULL on all associated models (due to onDelete: SetNull).

    Args:
        creator_id: UUID of the creator to delete

    Returns:
        True if deleted successfully

    Raises:
        ValueError: If creator not found
    """
    creator = await prisma.models.LlmModelCreator.prisma().find_unique(
        where={"id": creator_id}
    )
    if not creator:
        raise ValueError(f"Creator with id '{creator_id}' not found")

    await prisma.models.LlmModelCreator.prisma().delete(where={"id": creator_id})
    return True


async def get_recommended_model() -> llm_model.LlmModel | None:
    """
    Get the currently recommended LLM model.

    Returns:
        The recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
        include={"Costs": True, "Creator": True},
    )
    return _map_model(record) if record else None


async def set_recommended_model(
    model_id: str,
) -> tuple[llm_model.LlmModel, str | None]:
    """
    Set a model as the recommended model.

    This will clear the isRecommended flag from any other model and set it
    on the specified model. The model must be enabled.

    Args:
        model_id: UUID of the model to set as recommended

    Returns:
        Tuple of (the updated model, previous recommended model slug or None)

    Raises:
        ValueError: If model not found or not enabled
    """
    # First, verify the model exists and is enabled
    target_model = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id}
    )
    if not target_model:
        raise ValueError(f"Model with id '{model_id}' not found")
    if not target_model.isEnabled:
        raise ValueError(
            f"Cannot set disabled model '{target_model.slug}' as recommended"
        )

    # Get the current recommended model (if any)
    current_recommended = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True}
    )
    previous_slug = current_recommended.slug if current_recommended else None

    # Use a transaction to ensure atomicity
    async with transaction() as tx:
        # Clear isRecommended from all models
        await tx.llmmodel.update_many(
            where={"isRecommended": True},
            data={"isRecommended": False},
        )
        # Set the new recommended model
        await tx.llmmodel.update(
            where={"id": model_id},
            data={"isRecommended": True},
        )

    # Fetch and return the updated model
    updated_record = await prisma.models.LlmModel.prisma().find_unique(
        where={"id": model_id},
        include={"Costs": True, "Creator": True},
    )
    if not updated_record:
        raise ValueError("Failed to fetch updated model")

    return _map_model(updated_record), previous_slug
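
# Example (illustrative only): promoting a model to "recommended" and reporting
# which model previously held the flag. The model_id is a placeholder UUID.
async def _example_set_recommended(model_id: str) -> str:
    model, previous_slug = await set_recommended_model(model_id)
    return f"Recommended model is now '{model.slug}' (was '{previous_slug}')."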


async def get_recommended_model_slug() -> str | None:
    """
    Get the slug of the currently recommended LLM model.

    Returns:
        The slug of the recommended model, or None if no model is marked as recommended.
    """
    record = await prisma.models.LlmModel.prisma().find_first(
        where={"isRecommended": True, "isEnabled": True},
    )
    return record.slug if record else None
@@ -1,235 +0,0 @@
from __future__ import annotations

import re
from datetime import datetime
from typing import Any, Optional

import prisma.enums
import pydantic

from backend.util.models import Pagination

# Pattern for valid model slugs: alphanumeric start, then alphanumeric, dots, underscores, slashes, hyphens
SLUG_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9._/-]*$")


class LlmModelCost(pydantic.BaseModel):
    id: str
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = None
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCreator(pydantic.BaseModel):
    """Represents the organization that created/trained the model (e.g., OpenAI, Meta)."""

    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModel(pydantic.BaseModel):
    id: str
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    creator: Optional[LlmModelCreator] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    is_recommended: bool = False
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCost] = pydantic.Field(default_factory=list)


class LlmProvider(pydantic.BaseModel):
    id: str
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = None
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    models: list[LlmModel] = pydantic.Field(default_factory=list)


class LlmProvidersResponse(pydantic.BaseModel):
    providers: list[LlmProvider]


class LlmModelsResponse(pydantic.BaseModel):
    models: list[LlmModel]
    pagination: Optional[Pagination] = None


class LlmCreatorsResponse(pydantic.BaseModel):
    creators: list[LlmModelCreator]


class UpsertLlmProviderRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    default_credential_provider: Optional[str] = None
    default_credential_id: Optional[str] = None
    default_credential_type: Optional[str] = "api_key"
    supports_tools: bool = True
    supports_json_output: bool = True
    supports_reasoning: bool = False
    supports_parallel_tool: bool = False
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class UpsertLlmCreatorRequest(pydantic.BaseModel):
    name: str
    display_name: str
    description: Optional[str] = None
    website_url: Optional[str] = None
    logo_url: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class LlmModelCostInput(pydantic.BaseModel):
    unit: prisma.enums.LlmCostUnit = prisma.enums.LlmCostUnit.RUN
    credit_cost: int
    credential_provider: str
    credential_id: Optional[str] = None
    credential_type: Optional[str] = "api_key"
    currency: Optional[str] = None
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)


class CreateLlmModelRequest(pydantic.BaseModel):
    slug: str
    display_name: str
    description: Optional[str] = None
    provider_id: str
    creator_id: Optional[str] = None
    context_window: int
    max_output_tokens: Optional[int] = None
    is_enabled: bool = True
    capabilities: dict[str, Any] = pydantic.Field(default_factory=dict)
    metadata: dict[str, Any] = pydantic.Field(default_factory=dict)
    costs: list[LlmModelCostInput]

    @pydantic.field_validator("slug")
    @classmethod
    def validate_slug(cls, v: str) -> str:
        if not v or len(v) > 100:
            raise ValueError("Slug must be 1-100 characters")
        if not SLUG_PATTERN.match(v):
            raise ValueError(
                "Slug must start with alphanumeric and contain only "
                "alphanumeric characters, dots, underscores, slashes, or hyphens"
            )
        return v
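
# Example (illustrative only): building a request payload that passes the slug
# validator above. The provider_id is a placeholder UUID, not a real row.
def _example_create_request() -> CreateLlmModelRequest:
    return CreateLlmModelRequest(
        slug="example/new-model-v1",
        display_name="Example New Model",
        provider_id="00000000-0000-0000-0000-000000000000",
        context_window=128000,
        costs=[LlmModelCostInput(credit_cost=1, credential_provider="open_router")],
    )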


class UpdateLlmModelRequest(pydantic.BaseModel):
    display_name: Optional[str] = None
    description: Optional[str] = None
    context_window: Optional[int] = None
    max_output_tokens: Optional[int] = None
    is_enabled: Optional[bool] = None
    capabilities: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    provider_id: Optional[str] = None
    creator_id: Optional[str] = None
    costs: Optional[list[LlmModelCostInput]] = None


class ToggleLlmModelRequest(pydantic.BaseModel):
    is_enabled: bool
    migrate_to_slug: Optional[str] = None
    migration_reason: Optional[str] = None  # e.g., "Provider outage"
    # Custom pricing override for migrated workflows. When set, billing should use
    # this cost instead of the target model's cost for affected nodes.
    # See LlmModelMigration in schema.prisma for full documentation.
    custom_credit_cost: Optional[int] = None


class ToggleLlmModelResponse(pydantic.BaseModel):
    model: LlmModel
    nodes_migrated: int = 0
    migrated_to_slug: Optional[str] = None
    migration_id: Optional[str] = None  # ID of the migration record for revert


class DeleteLlmModelResponse(pydantic.BaseModel):
    deleted_model_slug: str
    deleted_model_display_name: str
    replacement_model_slug: Optional[str] = None
    nodes_migrated: int
    message: str


class LlmModelUsageResponse(pydantic.BaseModel):
    model_slug: str
    node_count: int


# Migration tracking models
class LlmModelMigration(pydantic.BaseModel):
    id: str
    source_model_slug: str
    target_model_slug: str
    reason: Optional[str] = None
    node_count: int
    # Custom pricing override - billing should use this instead of target model's cost
    custom_credit_cost: Optional[int] = None
    is_reverted: bool = False
    created_at: datetime
    reverted_at: Optional[datetime] = None


class LlmMigrationsResponse(pydantic.BaseModel):
    migrations: list[LlmModelMigration]


class RevertMigrationRequest(pydantic.BaseModel):
    re_enable_source_model: bool = (
        True  # Whether to re-enable the source model if disabled
    )


class RevertMigrationResponse(pydantic.BaseModel):
    migration_id: str
    source_model_slug: str
    target_model_slug: str
    nodes_reverted: int
    nodes_already_changed: int = (
        0  # Nodes that were modified since migration (not reverted)
    )
    source_model_re_enabled: bool = False  # Whether the source model was re-enabled
    message: str


class SetRecommendedModelRequest(pydantic.BaseModel):
    model_id: str


class SetRecommendedModelResponse(pydantic.BaseModel):
    model: LlmModel
    previous_recommended_slug: Optional[str] = None
    message: str


class RecommendedModelResponse(pydantic.BaseModel):
    model: Optional[LlmModel] = None
    slug: Optional[str] = None
@@ -1,29 +0,0 @@
import autogpt_libs.auth
import fastapi

from backend.server.v2.llm import db as llm_db
from backend.server.v2.llm import model as llm_model

router = fastapi.APIRouter(
    prefix="/llm",
    tags=["llm"],
    dependencies=[fastapi.Security(autogpt_libs.auth.requires_user)],
)


@router.get("/models", response_model=llm_model.LlmModelsResponse)
async def list_models(
    page: int = fastapi.Query(default=1, ge=1, description="Page number (1-indexed)"),
    page_size: int = fastapi.Query(
        default=50, ge=1, le=100, description="Number of models per page"
    ),
):
    """List all enabled LLM models available to users."""
    return await llm_db.list_models(enabled_only=True, page=page, page_size=page_size)


@router.get("/providers", response_model=llm_model.LlmProvidersResponse)
async def list_providers():
    """List all LLM providers with their enabled models."""
    providers = await llm_db.list_providers(include_models=True, enabled_only=True)
    return llm_model.LlmProvidersResponse(providers=providers)
@@ -1,81 +0,0 @@
-- CreateEnum
CREATE TYPE "LlmCostUnit" AS ENUM ('RUN', 'TOKENS');

-- CreateTable
CREATE TABLE "LlmProvider" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "defaultCredentialProvider" TEXT,
    "defaultCredentialId" TEXT,
    "defaultCredentialType" TEXT,
    "supportsTools" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsJsonOutput" BOOLEAN NOT NULL DEFAULT TRUE,
    "supportsReasoning" BOOLEAN NOT NULL DEFAULT FALSE,
    "supportsParallelTool" BOOLEAN NOT NULL DEFAULT FALSE,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmProvider_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmProvider_name_key" UNIQUE ("name")
);

-- CreateTable
CREATE TABLE "LlmModel" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "slug" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "providerId" TEXT NOT NULL,
    "contextWindow" INTEGER NOT NULL,
    "maxOutputTokens" INTEGER,
    "isEnabled" BOOLEAN NOT NULL DEFAULT TRUE,
    "capabilities" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,

    CONSTRAINT "LlmModel_pkey" PRIMARY KEY ("id"),
    CONSTRAINT "LlmModel_slug_key" UNIQUE ("slug")
);

-- CreateTable
CREATE TABLE "LlmModelCost" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "unit" "LlmCostUnit" NOT NULL DEFAULT 'RUN',
    "creditCost" INTEGER NOT NULL,
    "credentialProvider" TEXT NOT NULL,
    "credentialId" TEXT,
    "credentialType" TEXT,
    "currency" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}'::jsonb,
    "llmModelId" TEXT NOT NULL,

    CONSTRAINT "LlmModelCost_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModel_providerId_isEnabled_idx" ON "LlmModel"("providerId", "isEnabled");

-- CreateIndex
CREATE INDEX "LlmModel_slug_idx" ON "LlmModel"("slug");

-- CreateIndex
CREATE INDEX "LlmModelCost_llmModelId_idx" ON "LlmModelCost"("llmModelId");

-- CreateIndex
CREATE INDEX "LlmModelCost_credentialProvider_idx" ON "LlmModelCost"("credentialProvider");

-- CreateIndex
CREATE UNIQUE INDEX "LlmModelCost_llmModelId_credentialProvider_unit_key" ON "LlmModelCost"("llmModelId", "credentialProvider", "unit");

-- AddForeignKey
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_providerId_fkey" FOREIGN KEY ("providerId") REFERENCES "LlmProvider"("id") ON DELETE RESTRICT ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "LlmModelCost" ADD CONSTRAINT "LlmModelCost_llmModelId_fkey" FOREIGN KEY ("llmModelId") REFERENCES "LlmModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -1,226 +0,0 @@
-- Seed LLM Registry from existing hard-coded data
-- This migration populates the LlmProvider, LlmModel, and LlmModelCost tables
-- with data from the existing MODEL_METADATA and MODEL_COST dictionaries

-- Insert Providers
INSERT INTO "LlmProvider" ("id", "name", "displayName", "description", "defaultCredentialProvider", "defaultCredentialType", "supportsTools", "supportsJsonOutput", "supportsReasoning", "supportsParallelTool", "metadata")
VALUES
    (gen_random_uuid(), 'openai', 'OpenAI', 'OpenAI language models', 'openai', 'api_key', true, true, true, true, '{}'::jsonb),
    (gen_random_uuid(), 'anthropic', 'Anthropic', 'Anthropic Claude models', 'anthropic', 'api_key', true, true, true, false, '{}'::jsonb),
    (gen_random_uuid(), 'groq', 'Groq', 'Groq inference API', 'groq', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'open_router', 'OpenRouter', 'OpenRouter unified API', 'open_router', 'api_key', true, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'aiml_api', 'AI/ML API', 'AI/ML API models', 'aiml_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'ollama', 'Ollama', 'Ollama local models', 'ollama', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'llama_api', 'Llama API', 'Llama API models', 'llama_api', 'api_key', false, true, false, false, '{}'::jsonb),
    (gen_random_uuid(), 'v0', 'v0', 'v0 by Vercel models', 'v0', 'api_key', true, true, false, false, '{}'::jsonb)
ON CONFLICT ("name") DO NOTHING;

-- Insert Models (using CTEs to reference provider IDs)
WITH provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    model_slug,
    model_display_name,
    NULL,
    p."id",
    context_window,
    max_output_tokens,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM (VALUES
    -- OpenAI models
    ('o3', 'O3', 'openai', 200000, 100000),
    ('o3-mini', 'O3 Mini', 'openai', 200000, 100000),
    ('o1', 'O1', 'openai', 200000, 100000),
    ('o1-mini', 'O1 Mini', 'openai', 128000, 65536),
    ('gpt-5-2025-08-07', 'GPT 5', 'openai', 400000, 128000),
    ('gpt-5.1-2025-11-13', 'GPT 5.1', 'openai', 400000, 128000),
    ('gpt-5-mini-2025-08-07', 'GPT 5 Mini', 'openai', 400000, 128000),
    ('gpt-5-nano-2025-08-07', 'GPT 5 Nano', 'openai', 400000, 128000),
    ('gpt-5-chat-latest', 'GPT 5 Chat', 'openai', 400000, 16384),
    ('gpt-4.1-2025-04-14', 'GPT 4.1', 'openai', 1000000, 32768),
    ('gpt-4.1-mini-2025-04-14', 'GPT 4.1 Mini', 'openai', 1047576, 32768),
    ('gpt-4o-mini', 'GPT 4o Mini', 'openai', 128000, 16384),
    ('gpt-4o', 'GPT 4o', 'openai', 128000, 16384),
    ('gpt-4-turbo', 'GPT 4 Turbo', 'openai', 128000, 4096),
    ('gpt-3.5-turbo', 'GPT 3.5 Turbo', 'openai', 16385, 4096),
    -- Anthropic models
    ('claude-opus-4-1-20250805', 'Claude 4.1 Opus', 'anthropic', 200000, 32000),
    ('claude-opus-4-20250514', 'Claude 4 Opus', 'anthropic', 200000, 32000),
    ('claude-sonnet-4-20250514', 'Claude 4 Sonnet', 'anthropic', 200000, 64000),
    ('claude-opus-4-5-20251101', 'Claude 4.5 Opus', 'anthropic', 200000, 64000),
    ('claude-sonnet-4-5-20250929', 'Claude 4.5 Sonnet', 'anthropic', 200000, 64000),
    ('claude-haiku-4-5-20251001', 'Claude 4.5 Haiku', 'anthropic', 200000, 64000),
    ('claude-3-7-sonnet-20250219', 'Claude 3.7 Sonnet', 'anthropic', 200000, 64000),
    ('claude-3-haiku-20240307', 'Claude 3 Haiku', 'anthropic', 200000, 4096),
    -- AI/ML API models
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 'Qwen 2.5 72B', 'aiml_api', 32000, 8000),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 'Llama 3.1 Nemotron 70B', 'aiml_api', 128000, 40000),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 'Llama 3.3 70B', 'aiml_api', 128000, NULL),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'Meta Llama 3.1 70B', 'aiml_api', 131000, 2000),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 'Llama 3.2 3B', 'aiml_api', 128000, NULL),
    -- Groq models
    ('llama-3.3-70b-versatile', 'Llama 3.3 70B', 'groq', 128000, 32768),
    ('llama-3.1-8b-instant', 'Llama 3.1 8B', 'groq', 128000, 8192),
    -- Ollama models
    ('llama3.3', 'Llama 3.3', 'ollama', 8192, NULL),
    ('llama3.2', 'Llama 3.2', 'ollama', 8192, NULL),
    ('llama3', 'Llama 3', 'ollama', 8192, NULL),
    ('llama3.1:405b', 'Llama 3.1 405B', 'ollama', 8192, NULL),
    ('dolphin-mistral:latest', 'Dolphin Mistral', 'ollama', 32768, NULL),
    -- OpenRouter models
    ('google/gemini-2.5-pro-preview-03-25', 'Gemini 2.5 Pro', 'open_router', 1050000, 8192),
    ('google/gemini-3-pro-preview', 'Gemini 3 Pro Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.5-flash', 'Gemini 2.5 Flash', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-001', 'Gemini 2.0 Flash', 'open_router', 1048576, 8192),
    ('google/gemini-2.5-flash-lite-preview-06-17', 'Gemini 2.5 Flash Lite Preview', 'open_router', 1048576, 65535),
    ('google/gemini-2.0-flash-lite-001', 'Gemini 2.0 Flash Lite', 'open_router', 1048576, 8192),
    ('mistralai/mistral-nemo', 'Mistral Nemo', 'open_router', 128000, 4096),
    ('cohere/command-r-08-2024', 'Command R', 'open_router', 128000, 4096),
    ('cohere/command-r-plus-08-2024', 'Command R Plus', 'open_router', 128000, 4096),
    ('deepseek/deepseek-chat', 'DeepSeek Chat', 'open_router', 64000, 2048),
    ('deepseek/deepseek-r1-0528', 'DeepSeek R1', 'open_router', 163840, 163840),
    ('perplexity/sonar', 'Perplexity Sonar', 'open_router', 127000, 8000),
    ('perplexity/sonar-pro', 'Perplexity Sonar Pro', 'open_router', 200000, 8000),
    ('perplexity/sonar-deep-research', 'Perplexity Sonar Deep Research', 'open_router', 128000, 16000),
    ('nousresearch/hermes-3-llama-3.1-405b', 'Hermes 3 Llama 3.1 405B', 'open_router', 131000, 4096),
    ('nousresearch/hermes-3-llama-3.1-70b', 'Hermes 3 Llama 3.1 70B', 'open_router', 12288, 12288),
    ('openai/gpt-oss-120b', 'GPT OSS 120B', 'open_router', 131072, 131072),
    ('openai/gpt-oss-20b', 'GPT OSS 20B', 'open_router', 131072, 32768),
    ('amazon/nova-lite-v1', 'Amazon Nova Lite', 'open_router', 300000, 5120),
    ('amazon/nova-micro-v1', 'Amazon Nova Micro', 'open_router', 128000, 5120),
    ('amazon/nova-pro-v1', 'Amazon Nova Pro', 'open_router', 300000, 5120),
    ('microsoft/wizardlm-2-8x22b', 'WizardLM 2 8x22B', 'open_router', 65536, 4096),
    ('gryphe/mythomax-l2-13b', 'MythoMax L2 13B', 'open_router', 4096, 4096),
    ('meta-llama/llama-4-scout', 'Llama 4 Scout', 'open_router', 131072, 131072),
    ('meta-llama/llama-4-maverick', 'Llama 4 Maverick', 'open_router', 1048576, 1000000),
    ('x-ai/grok-4', 'Grok 4', 'open_router', 256000, 256000),
    ('x-ai/grok-4-fast', 'Grok 4 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-4.1-fast', 'Grok 4.1 Fast', 'open_router', 2000000, 30000),
    ('x-ai/grok-code-fast-1', 'Grok Code Fast 1', 'open_router', 256000, 10000),
    ('moonshotai/kimi-k2', 'Kimi K2', 'open_router', 131000, 131000),
    ('qwen/qwen3-235b-a22b-thinking-2507', 'Qwen 3 235B Thinking', 'open_router', 262144, 262144),
    ('qwen/qwen3-coder', 'Qwen 3 Coder', 'open_router', 262144, 262144),
    -- Llama API models
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 'Llama 4 Scout', 'llama_api', 128000, 4028),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 'Llama 4 Maverick', 'llama_api', 128000, 4028),
    ('Llama-3.3-8B-Instruct', 'Llama 3.3 8B', 'llama_api', 128000, 4028),
    ('Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'llama_api', 128000, 4028),
    -- v0 models
    ('v0-1.5-md', 'v0 1.5 MD', 'v0', 128000, 64000),
    ('v0-1.5-lg', 'v0 1.5 LG', 'v0', 512000, 64000),
    ('v0-1.0-md', 'v0 1.0 MD', 'v0', 128000, 64000)
) AS models(model_slug, model_display_name, provider_name, context_window, max_output_tokens)
JOIN provider_ids p ON p."name" = models.provider_name
ON CONFLICT ("slug") DO NOTHING;

-- Insert Costs (using CTEs to reference model IDs)
WITH model_ids AS (
    SELECT "id", "slug", "providerId" FROM "LlmModel"
),
provider_ids AS (
    SELECT "id", "name" FROM "LlmProvider"
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    cost,
    p."name",
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM (VALUES
    -- OpenAI costs
    ('o3', 4),
    ('o3-mini', 2),
    ('o1', 16),
    ('o1-mini', 4),
    ('gpt-5-2025-08-07', 2),
    ('gpt-5.1-2025-11-13', 5),
    ('gpt-5-mini-2025-08-07', 1),
    ('gpt-5-nano-2025-08-07', 1),
    ('gpt-5-chat-latest', 5),
    ('gpt-4.1-2025-04-14', 2),
    ('gpt-4.1-mini-2025-04-14', 1),
    ('gpt-4o-mini', 1),
    ('gpt-4o', 3),
    ('gpt-4-turbo', 10),
    ('gpt-3.5-turbo', 1),
    -- Anthropic costs
    ('claude-opus-4-1-20250805', 21),
    ('claude-opus-4-20250514', 21),
    ('claude-sonnet-4-20250514', 5),
    ('claude-haiku-4-5-20251001', 4),
    ('claude-opus-4-5-20251101', 14),
    ('claude-sonnet-4-5-20250929', 9),
    ('claude-3-7-sonnet-20250219', 5),
    ('claude-3-haiku-20240307', 1),
    -- AI/ML API costs
    ('Qwen/Qwen2.5-72B-Instruct-Turbo', 1),
    ('nvidia/llama-3.1-nemotron-70b-instruct', 1),
    ('meta-llama/Llama-3.3-70B-Instruct-Turbo', 1),
    ('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 1),
    ('meta-llama/Llama-3.2-3B-Instruct-Turbo', 1),
    -- Groq costs
    ('llama-3.3-70b-versatile', 1),
    ('llama-3.1-8b-instant', 1),
    -- Ollama costs
    ('llama3.3', 1),
    ('llama3.2', 1),
    ('llama3', 1),
    ('llama3.1:405b', 1),
    ('dolphin-mistral:latest', 1),
    -- OpenRouter costs
    ('google/gemini-2.5-pro-preview-03-25', 4),
    ('google/gemini-3-pro-preview', 5),
    ('mistralai/mistral-nemo', 1),
    ('cohere/command-r-08-2024', 1),
    ('cohere/command-r-plus-08-2024', 3),
    ('deepseek/deepseek-chat', 2),
    ('perplexity/sonar', 1),
    ('perplexity/sonar-pro', 5),
    ('perplexity/sonar-deep-research', 10),
    ('nousresearch/hermes-3-llama-3.1-405b', 1),
    ('nousresearch/hermes-3-llama-3.1-70b', 1),
    ('amazon/nova-lite-v1', 1),
    ('amazon/nova-micro-v1', 1),
    ('amazon/nova-pro-v1', 1),
    ('microsoft/wizardlm-2-8x22b', 1),
    ('gryphe/mythomax-l2-13b', 1),
    ('meta-llama/llama-4-scout', 1),
    ('meta-llama/llama-4-maverick', 1),
    ('x-ai/grok-4', 9),
    ('x-ai/grok-4-fast', 1),
    ('x-ai/grok-4.1-fast', 1),
    ('x-ai/grok-code-fast-1', 1),
    ('moonshotai/kimi-k2', 1),
    ('qwen/qwen3-235b-a22b-thinking-2507', 1),
    ('qwen/qwen3-coder', 9),
    ('google/gemini-2.5-flash', 1),
    ('google/gemini-2.0-flash-001', 1),
    ('google/gemini-2.5-flash-lite-preview-06-17', 1),
    ('google/gemini-2.0-flash-lite-001', 1),
    ('deepseek/deepseek-r1-0528', 1),
    ('openai/gpt-oss-120b', 1),
    ('openai/gpt-oss-20b', 1),
    -- Llama API costs
    ('Llama-4-Scout-17B-16E-Instruct-FP8', 1),
    ('Llama-4-Maverick-17B-128E-Instruct-FP8', 1),
    ('Llama-3.3-8B-Instruct', 1),
    ('Llama-3.3-70B-Instruct', 1),
    -- v0 costs
    ('v0-1.5-md', 1),
    ('v0-1.5-lg', 2),
    ('v0-1.0-md', 1)
) AS costs(model_slug, cost)
JOIN model_ids m ON m."slug" = costs.model_slug
JOIN provider_ids p ON p."id" = m."providerId"
ON CONFLICT ("llmModelId", "credentialProvider", "unit") DO NOTHING;
@@ -1,25 +0,0 @@
-- CreateTable
CREATE TABLE "LlmModelMigration" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "sourceModelSlug" TEXT NOT NULL,
    "targetModelSlug" TEXT NOT NULL,
    "reason" TEXT,
    "migratedNodeIds" JSONB NOT NULL DEFAULT '[]',
    "nodeCount" INTEGER NOT NULL,
    "customCreditCost" INTEGER,
    "isReverted" BOOLEAN NOT NULL DEFAULT false,
    "revertedAt" TIMESTAMP(3),

    CONSTRAINT "LlmModelMigration_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LlmModelMigration_sourceModelSlug_idx" ON "LlmModelMigration"("sourceModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_targetModelSlug_idx" ON "LlmModelMigration"("targetModelSlug");

-- CreateIndex
CREATE INDEX "LlmModelMigration_isReverted_idx" ON "LlmModelMigration"("isReverted");
@@ -1,127 +0,0 @@
-- Add LlmModelCreator table
-- Creator represents who made/trained the model (e.g., OpenAI, Meta)
-- This is distinct from Provider who hosts/serves the model (e.g., OpenRouter)

-- Create the LlmModelCreator table
CREATE TABLE "LlmModelCreator" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "name" TEXT NOT NULL,
    "displayName" TEXT NOT NULL,
    "description" TEXT,
    "websiteUrl" TEXT,
    "logoUrl" TEXT,
    "metadata" JSONB NOT NULL DEFAULT '{}',

    CONSTRAINT "LlmModelCreator_pkey" PRIMARY KEY ("id")
);

-- Create unique index on name
CREATE UNIQUE INDEX "LlmModelCreator_name_key" ON "LlmModelCreator"("name");

-- Add creatorId column to LlmModel
ALTER TABLE "LlmModel" ADD COLUMN "creatorId" TEXT;

-- Add foreign key constraint
ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
    FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;

-- Create index on creatorId
CREATE INDEX "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");

-- Seed creators based on known model creators
INSERT INTO "LlmModelCreator" ("id", "updatedAt", "name", "displayName", "description", "websiteUrl", "metadata")
VALUES
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'openai', 'OpenAI', 'Creator of GPT models', 'https://openai.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'anthropic', 'Anthropic', 'Creator of Claude models', 'https://anthropic.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'meta', 'Meta', 'Creator of Llama models', 'https://ai.meta.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'google', 'Google', 'Creator of Gemini models', 'https://deepmind.google', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'mistral', 'Mistral AI', 'Creator of Mistral models', 'https://mistral.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cohere', 'Cohere', 'Creator of Command models', 'https://cohere.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'deepseek', 'DeepSeek', 'Creator of DeepSeek models', 'https://deepseek.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'perplexity', 'Perplexity AI', 'Creator of Sonar models', 'https://perplexity.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'qwen', 'Qwen (Alibaba)', 'Creator of Qwen models', 'https://qwenlm.github.io', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'xai', 'xAI', 'Creator of Grok models', 'https://x.ai', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'amazon', 'Amazon', 'Creator of Nova models', 'https://aws.amazon.com/bedrock', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'microsoft', 'Microsoft', 'Creator of WizardLM models', 'https://microsoft.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'moonshot', 'Moonshot AI', 'Creator of Kimi models', 'https://moonshot.cn', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nvidia', 'NVIDIA', 'Creator of Nemotron models', 'https://nvidia.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'nous_research', 'Nous Research', 'Creator of Hermes models', 'https://nousresearch.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'vercel', 'Vercel', 'Creator of v0 models', 'https://vercel.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'cognitive_computations', 'Cognitive Computations', 'Creator of Dolphin models', 'https://erichartford.com', '{}'),
    (gen_random_uuid(), CURRENT_TIMESTAMP, 'gryphe', 'Gryphe', 'Creator of MythoMax models', 'https://huggingface.co/Gryphe', '{}')
ON CONFLICT ("name") DO NOTHING;

-- Update existing models with their creators
-- OpenAI models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'openai')
WHERE "slug" LIKE 'gpt-%' OR "slug" LIKE 'o1%' OR "slug" LIKE 'o3%' OR "slug" LIKE 'openai/%';

-- Anthropic models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'anthropic')
WHERE "slug" LIKE 'claude-%';

-- Meta/Llama models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'meta')
WHERE "slug" LIKE 'llama%' OR "slug" LIKE 'Llama%' OR "slug" LIKE 'meta-llama/%' OR "slug" LIKE '%/llama-%';

-- Google models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'google')
WHERE "slug" LIKE 'google/%' OR "slug" LIKE 'gemini%';

-- Mistral models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'mistral')
WHERE "slug" LIKE 'mistral%' OR "slug" LIKE 'mistralai/%';

-- Cohere models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cohere')
WHERE "slug" LIKE 'cohere/%' OR "slug" LIKE 'command-%';

-- DeepSeek models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'deepseek')
WHERE "slug" LIKE 'deepseek/%' OR "slug" LIKE 'deepseek-%';

-- Perplexity models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'perplexity')
WHERE "slug" LIKE 'perplexity/%' OR "slug" LIKE 'sonar%';

-- Qwen models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'qwen')
WHERE "slug" LIKE 'Qwen/%' OR "slug" LIKE 'qwen/%';

-- xAI/Grok models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'xai')
WHERE "slug" LIKE 'x-ai/%' OR "slug" LIKE 'grok%';

-- Amazon models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'amazon')
WHERE "slug" LIKE 'amazon/%' OR "slug" LIKE 'nova-%';

-- Microsoft models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'microsoft')
WHERE "slug" LIKE 'microsoft/%' OR "slug" LIKE 'wizardlm%';

-- Moonshot models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'moonshot')
WHERE "slug" LIKE 'moonshotai/%' OR "slug" LIKE 'kimi%';

-- NVIDIA models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nvidia')
WHERE "slug" LIKE 'nvidia/%' OR "slug" LIKE '%nemotron%';

-- Nous Research models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'nous_research')
WHERE "slug" LIKE 'nousresearch/%' OR "slug" LIKE 'hermes%';

-- Vercel/v0 models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'vercel')
WHERE "slug" LIKE 'v0-%';

-- Dolphin models (Cognitive Computations / Eric Hartford)
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'cognitive_computations')
WHERE "slug" LIKE 'dolphin-%';

-- Gryphe models
UPDATE "LlmModel" SET "creatorId" = (SELECT "id" FROM "LlmModelCreator" WHERE "name" = 'gryphe')
WHERE "slug" LIKE 'gryphe/%' OR "slug" LIKE 'mythomax%';
@@ -1,4 +0,0 @@
-- CreateIndex
-- Index for efficient LLM model lookups on AgentNode.constantInput->>'model'
-- This improves performance of model migration queries in the LLM registry
CREATE INDEX "AgentNode_constantInput_model_idx" ON "AgentNode" ((("constantInput"->>'model')));
@@ -1,52 +0,0 @@
-- Add GPT-5.2 model and update O3 slug
-- This migration adds the new GPT-5.2 model added in dev branch

-- Update O3 slug to match dev branch format
UPDATE "LlmModel"
SET "slug" = 'o3-2025-04-16'
WHERE "slug" = 'o3';

-- Update cost reference for O3 if needed
-- (costs are linked by model ID, so no update needed)

-- Add GPT-5.2 model
WITH provider_id AS (
    SELECT "id" FROM "LlmProvider" WHERE "name" = 'openai'
)
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
SELECT
    gen_random_uuid(),
    'gpt-5.2-2025-12-11',
    'GPT 5.2',
    'OpenAI GPT-5.2 model',
    p."id",
    400000,
    128000,
    true,
    '{}'::jsonb,
    '{}'::jsonb
FROM provider_id p
ON CONFLICT ("slug") DO NOTHING;

-- Add cost for GPT-5.2
WITH model_id AS (
    SELECT m."id", p."name" as provider_name
    FROM "LlmModel" m
    JOIN "LlmProvider" p ON p."id" = m."providerId"
    WHERE m."slug" = 'gpt-5.2-2025-12-11'
)
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
SELECT
    gen_random_uuid(),
    'RUN'::"LlmCostUnit",
    3, -- Same cost tier as GPT-5.1
    m.provider_name,
    NULL,
    'api_key',
    NULL,
    '{}'::jsonb,
    m."id"
FROM model_id m
WHERE NOT EXISTS (
    SELECT 1 FROM "LlmModelCost" c WHERE c."llmModelId" = m."id"
);
@@ -1,11 +0,0 @@
-- Add isRecommended field to LlmModel table
-- This allows admins to mark a model as the recommended default

ALTER TABLE "LlmModel" ADD COLUMN "isRecommended" BOOLEAN NOT NULL DEFAULT false;

-- Set gpt-4o-mini as the default recommended model (if it exists)
UPDATE "LlmModel" SET "isRecommended" = true WHERE "slug" = 'gpt-4o-mini' AND "isEnabled" = true;

-- Create unique partial index to enforce only one recommended model at the database level
-- This prevents multiple rows from having isRecommended = true
CREATE UNIQUE INDEX "LlmModel_single_recommended_idx" ON "LlmModel" ("isRecommended") WHERE "isRecommended" = true;
@@ -1,61 +0,0 @@
-- Add new columns to LlmModel table for extended model metadata
-- These columns support the LLM Picker UI enhancements

-- Add priceTier column: 1=cheapest, 2=medium, 3=expensive
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "priceTier" INTEGER NOT NULL DEFAULT 1;

-- Add creatorId column for model creator relationship (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "creatorId" TEXT;

-- Add isRecommended column (if not exists)
ALTER TABLE "LlmModel" ADD COLUMN IF NOT EXISTS "isRecommended" BOOLEAN NOT NULL DEFAULT FALSE;

-- Add index on creatorId if not exists
CREATE INDEX IF NOT EXISTS "LlmModel_creatorId_idx" ON "LlmModel"("creatorId");

-- Add foreign key for creatorId if not exists
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'LlmModel_creatorId_fkey') THEN
        -- Only add FK if LlmModelCreator table exists
        IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'LlmModelCreator') THEN
            ALTER TABLE "LlmModel" ADD CONSTRAINT "LlmModel_creatorId_fkey"
                FOREIGN KEY ("creatorId") REFERENCES "LlmModelCreator"("id") ON DELETE SET NULL ON UPDATE CASCADE;
        END IF;
    END IF;
END $$;

-- Update priceTier values for existing models based on original MODEL_METADATA
-- Tier 1 = cheapest, Tier 2 = medium, Tier 3 = expensive

-- OpenAI models
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o3';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'o3-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'o1';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'o1-mini';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-5.2';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5.1';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-mini';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-5-nano';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-5-chat-latest';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" LIKE 'gpt-4.1%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-4o-mini';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" = 'gpt-4o';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'gpt-4-turbo';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'gpt-3.5-turbo';

-- Anthropic models
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude-opus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE 'claude%-4-5-sonnet%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'claude%-haiku%';
UPDATE "LlmModel" SET "priceTier" = 1 WHERE "slug" = 'claude-3-haiku-20240307';

-- OpenRouter models - Pro/expensive tiers
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE 'google/gemini%-pro%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%command-r-plus%';
UPDATE "LlmModel" SET "priceTier" = 2 WHERE "slug" LIKE '%sonar-pro%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%sonar-deep-research%';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" = 'x-ai/grok-4';
UPDATE "LlmModel" SET "priceTier" = 3 WHERE "slug" LIKE '%qwen3-coder%';
@@ -0,0 +1,50 @@
/*
  Warnings:

  - You are about to drop the column `search` on the `StoreListingVersion` table. All the data in the column will be lost.

*/
-- DropIndex
DROP INDEX "UnifiedContentEmbedding_search_idx";

-- AlterTable
ALTER TABLE "LibraryAgent" ADD COLUMN "folderId" TEXT;

-- AlterTable
ALTER TABLE "StoreListingVersion" DROP COLUMN "search";

-- CreateTable
CREATE TABLE "LibraryFolder" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "userId" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "icon" TEXT,
    "color" TEXT,
    "parentId" TEXT,
    "isDeleted" BOOLEAN NOT NULL DEFAULT false,

    CONSTRAINT "LibraryFolder_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "LibraryFolder_userId_isDeleted_idx" ON "LibraryFolder"("userId", "isDeleted");

-- CreateIndex
CREATE INDEX "LibraryFolder_parentId_idx" ON "LibraryFolder"("parentId");

-- CreateIndex
CREATE UNIQUE INDEX "LibraryFolder_userId_parentId_name_key" ON "LibraryFolder"("userId", "parentId", "name");

-- CreateIndex
CREATE INDEX "LibraryAgent_folderId_idx" ON "LibraryAgent"("folderId");

-- AddForeignKey
ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_folderId_fkey" FOREIGN KEY ("folderId") REFERENCES "LibraryFolder"("id") ON DELETE RESTRICT ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "LibraryFolder"("id") ON DELETE CASCADE ON UPDATE CASCADE;
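The parentId self-reference created above makes folders nestable. A minimal sketch of walking one user's folder tree from the roots down with a recursive CTE (the user id is a placeholder):

-- Illustrative only; lists every non-deleted folder of the user with its depth.
WITH RECURSIVE folder_tree AS (
    SELECT "id", "name", "parentId", 0 AS depth
    FROM "LibraryFolder"
    WHERE "userId" = 'user-id' AND "parentId" IS NULL AND "isDeleted" = false
    UNION ALL
    SELECT f."id", f."name", f."parentId", t.depth + 1
    FROM "LibraryFolder" f
    JOIN folder_tree t ON f."parentId" = t."id"
    WHERE f."isDeleted" = false
)
SELECT * FROM folder_tree ORDER BY depth, "name";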
@@ -1,6 +0,0 @@
-- Add composite index on LlmModelMigration for optimized active migration queries
-- This index improves performance when querying for non-reverted migrations by model slug
-- Used by the billing system to apply customCreditCost overrides

-- CreateIndex
CREATE INDEX "LlmModelMigration_sourceModelSlug_isReverted_idx" ON "LlmModelMigration"("sourceModelSlug", "isReverted");
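The composite index above targets lookups that filter on both columns at once. A minimal sketch of the active-migration query it is meant to serve (the source slug is a placeholder):

-- Illustrative only.
SELECT "targetModelSlug", "customCreditCost"
FROM "LlmModelMigration"
WHERE "sourceModelSlug" = 'gpt-4o' AND "isReverted" = false;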
@@ -1,61 +0,0 @@
|
||||
-- Sync LLM models with latest dev branch changes
|
||||
-- This migration adds new models and removes deprecated ones
|
||||
|
||||
-- Remove models that were deleted from dev
|
||||
DELETE FROM "LlmModelCost" WHERE "llmModelId" IN (
|
||||
SELECT "id" FROM "LlmModel" WHERE "slug" IN ('o3', 'o3-mini', 'claude-3-7-sonnet-20250219')
|
||||
);
|
||||
|
||||
DELETE FROM "LlmModel" WHERE "slug" IN ('o3', 'o3-mini', 'claude-3-7-sonnet-20250219');
|
||||
|
||||
-- Add new models from dev
|
||||
WITH provider_ids AS (
|
||||
SELECT "id", "name" FROM "LlmProvider"
|
||||
)
|
||||
INSERT INTO "LlmModel" ("id", "slug", "displayName", "description", "providerId", "contextWindow", "maxOutputTokens", "isEnabled", "capabilities", "metadata")
|
||||
SELECT
|
||||
gen_random_uuid(),
|
||||
model_slug,
|
||||
model_display_name,
|
||||
NULL,
|
||||
p."id",
|
||||
context_window,
|
||||
max_output_tokens,
|
||||
true,
|
||||
'{}'::jsonb,
|
||||
'{}'::jsonb
|
||||
FROM (VALUES
|
||||
-- New OpenAI model
|
||||
('gpt-5.2-2025-12-11', 'GPT 5.2', 'openai', 400000, 128000),
|
||||
-- New Anthropic model
|
||||
('claude-opus-4-6', 'Claude 4.6 Opus', 'anthropic', 200000, 64000)
|
||||
) AS models(model_slug, model_display_name, provider_name, context_window, max_output_tokens)
|
||||
JOIN provider_ids p ON p."name" = models.provider_name
|
||||
ON CONFLICT ("slug") DO NOTHING;
|
||||
|
||||
-- Add costs for new models
|
||||
WITH model_ids AS (
|
||||
SELECT "id", "slug", "providerId" FROM "LlmModel"
|
||||
),
|
||||
provider_ids AS (
|
||||
SELECT "id", "name" FROM "LlmProvider"
|
||||
)
|
||||
INSERT INTO "LlmModelCost" ("id", "unit", "creditCost", "credentialProvider", "credentialId", "credentialType", "currency", "metadata", "llmModelId")
|
||||
SELECT
|
||||
gen_random_uuid(),
|
||||
'RUN'::"LlmCostUnit",
|
||||
cost,
|
||||
p."name",
|
||||
NULL,
|
||||
'api_key',
|
||||
NULL,
|
||||
'{}'::jsonb,
|
||||
m."id"
|
||||
FROM (VALUES
|
||||
-- New model costs (estimate based on similar models)
|
||||
('gpt-5.2-2025-12-11', 5), -- Similar to GPT 5.1
|
||||
('claude-opus-4-6', 21) -- Similar to other Opus 4.x models
|
||||
) AS costs(model_slug, cost)
|
||||
JOIN model_ids m ON m."slug" = costs.model_slug
|
||||
JOIN provider_ids p ON p."id" = m."providerId"
|
||||
ON CONFLICT ("llmModelId", "credentialProvider", "unit") DO NOTHING;
|
||||
68
autogpt_platform/backend/poetry.lock
generated
@@ -441,14 +441,14 @@ develop = true
|
||||
colorama = "^0.4.6"
|
||||
cryptography = "^46.0"
|
||||
expiringdict = "^1.2.2"
|
||||
fastapi = "^0.128.7"
|
||||
fastapi = "^0.128.0"
|
||||
google-cloud-logging = "^3.13.0"
|
||||
launchdarkly-server-sdk = "^9.15.0"
|
||||
launchdarkly-server-sdk = "^9.14.1"
|
||||
pydantic = "^2.12.5"
|
||||
pydantic-settings = "^2.12.0"
|
||||
pyjwt = {version = "^2.11.0", extras = ["crypto"]}
|
||||
redis = "^6.2.0"
|
||||
supabase = "^2.28.0"
|
||||
supabase = "^2.27.2"
|
||||
uvicorn = "^0.40.0"
|
||||
|
||||
[package.source]
|
||||
@@ -1382,14 +1382,14 @@ tzdata = "*"
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.128.7"
|
||||
version = "0.128.6"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fastapi-0.128.7-py3-none-any.whl", hash = "sha256:6bd9bd31cb7047465f2d3fa3ba3f33b0870b17d4eaf7cdb36d1576ab060ad662"},
|
||||
{file = "fastapi-0.128.7.tar.gz", hash = "sha256:783c273416995486c155ad2c0e2b45905dedfaf20b9ef8d9f6a9124670639a24"},
|
||||
{file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
|
||||
{file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3117,14 +3117,14 @@ urllib3 = ">=1.26.0,<3"
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-server-sdk"
|
||||
version = "9.15.0"
|
||||
version = "9.14.1"
|
||||
description = "LaunchDarkly SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "launchdarkly_server_sdk-9.15.0-py3-none-any.whl", hash = "sha256:c267e29bfa3fb5e2a06a208448ada6ed5557a2924979b8d79c970b45d227c668"},
|
||||
{file = "launchdarkly_server_sdk-9.15.0.tar.gz", hash = "sha256:f31441b74bc1a69c381db57c33116509e407a2612628ad6dff0a7dbb39d5020b"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1-py3-none-any.whl", hash = "sha256:a9e2bd9ecdef845cd631ae0d4334a1115e5b44257c42eb2349492be4bac7815c"},
|
||||
{file = "launchdarkly_server_sdk-9.14.1.tar.gz", hash = "sha256:1df44baf0a0efa74d8c1dad7a00592b98bce7d19edded7f770da8dbc49922213"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -4728,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "postgrest-2.28.0-py3-none-any.whl", hash = "sha256:7bca2f24dd1a1bf8a3d586c7482aba6cd41662da6733045fad585b63b7f7df75"},
|
||||
{file = "postgrest-2.28.0.tar.gz", hash = "sha256:c36b38646d25ea4255321d3d924ce70f8d20ec7799cb42c1221d6a818d4f6515"},
|
||||
{file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
|
||||
{file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -6260,14 +6260,14 @@ all = ["numpy"]
|
||||
|
||||
[[package]]
|
||||
name = "realtime"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "realtime-2.28.0-py3-none-any.whl", hash = "sha256:db1bd59bab9b1fcc9f9d3b1a073bed35bf4994d720e6751f10031a58d57a3836"},
|
||||
{file = "realtime-2.28.0.tar.gz", hash = "sha256:d18cedcebd6a8f22fcd509bc767f639761eb218b7b2b6f14fc4205b6259b50fc"},
|
||||
{file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
|
||||
{file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7024,14 +7024,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
||||
|
||||
[[package]]
|
||||
name = "storage3"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Supabase Storage client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "storage3-2.28.0-py3-none-any.whl", hash = "sha256:ecb50efd2ac71dabbdf97e99ad346eafa630c4c627a8e5a138ceb5fbbadae716"},
|
||||
{file = "storage3-2.28.0.tar.gz", hash = "sha256:bc1d008aff67de7a0f2bd867baee7aadbcdb6f78f5a310b4f7a38e8c13c19865"},
|
||||
{file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
|
||||
{file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7091,35 +7091,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}
|
||||
|
||||
[[package]]
|
||||
name = "supabase"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Supabase client for Python."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase-2.28.0-py3-none-any.whl", hash = "sha256:42776971c7d0ccca16034df1ab96a31c50228eb1eb19da4249ad2f756fc20272"},
|
||||
{file = "supabase-2.28.0.tar.gz", hash = "sha256:aea299aaab2a2eed3c57e0be7fc035c6807214194cce795a3575add20268ece1"},
|
||||
{file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
|
||||
{file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.26,<0.29"
|
||||
postgrest = "2.28.0"
|
||||
realtime = "2.28.0"
|
||||
storage3 = "2.28.0"
|
||||
supabase-auth = "2.28.0"
|
||||
supabase-functions = "2.28.0"
|
||||
postgrest = "2.27.3"
|
||||
realtime = "2.27.3"
|
||||
storage3 = "2.27.3"
|
||||
supabase-auth = "2.27.3"
|
||||
supabase-functions = "2.27.3"
|
||||
yarl = ">=1.22.0"
|
||||
|
||||
[[package]]
|
||||
name = "supabase-auth"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Python Client Library for Supabase Auth"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_auth-2.28.0-py3-none-any.whl", hash = "sha256:2ac85026cc285054c7fa6d41924f3a333e9ec298c013e5b5e1754039ba7caec9"},
|
||||
{file = "supabase_auth-2.28.0.tar.gz", hash = "sha256:2bb8f18ff39934e44b28f10918db965659f3735cd6fbfcc022fe0b82dbf8233e"},
|
||||
{file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
|
||||
{file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7129,14 +7129,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
|
||||
|
||||
[[package]]
|
||||
name = "supabase-functions"
|
||||
version = "2.28.0"
|
||||
version = "2.27.3"
|
||||
description = "Library for Supabase Functions"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase_functions-2.28.0-py3-none-any.whl", hash = "sha256:30bf2d586f8df285faf0621bb5d5bb3ec3157234fc820553ca156f009475e4ae"},
|
||||
{file = "supabase_functions-2.28.0.tar.gz", hash = "sha256:db3dddfc37aca5858819eb461130968473bd8c75bd284581013958526dac718b"},
|
||||
{file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
|
||||
{file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "fa9c5deadf593e815dd2190f58e22152373900603f5f244b9616cd721de84d2f"
|
||||
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
|
||||
|
||||
@@ -65,7 +65,7 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
sqlalchemy = "^2.0.40"
strenum = "^0.4.9"
stripe = "^11.5.0"
supabase = "2.28.0"
supabase = "2.27.3"
tenacity = "^9.1.4"
todoist-api-python = "^2.1.7"
tweepy = "^4.16.0"
@@ -51,6 +51,7 @@ model User {
ChatSessions ChatSession[]
AgentPresets AgentPreset[]
LibraryAgents LibraryAgent[]
LibraryFolders LibraryFolder[]

Profile Profile[]
UserOnboarding UserOnboarding?
@@ -395,6 +396,9 @@ model LibraryAgent {
creatorId String?
Creator Profile? @relation(fields: [creatorId], references: [id])

folderId String?
Folder LibraryFolder? @relation(fields: [folderId], references: [id], onDelete: Restrict)

useGraphIsActiveVersion Boolean @default(false)

isFavorite Boolean @default(false)
@@ -407,6 +411,32 @@ model LibraryAgent {
@@unique([userId, agentGraphId, agentGraphVersion])
@@index([agentGraphId, agentGraphVersion])
@@index([creatorId])
@@index([folderId])
}

model LibraryFolder {
  id        String   @id @default(uuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @default(now()) @updatedAt

  userId String
  User   User   @relation(fields: [userId], references: [id], onDelete: Cascade)

  name  String
  icon  String?
  color String?

  parentId String?
  Parent   LibraryFolder?  @relation("FolderHierarchy", fields: [parentId], references: [id], onDelete: Cascade)
  Children LibraryFolder[] @relation("FolderHierarchy")

  isDeleted Boolean @default(false)

  LibraryAgents LibraryAgent[]

  @@unique([userId, parentId, name]) // Name unique per parent per user
  @@index([userId, isDeleted])
  @@index([parentId])
}

////////////////////////////////////////////////////////////
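One detail worth noting in the relations above: LibraryAgent.folderId uses onDelete: Restrict while the folder's own parent link cascades, so a folder that still contains agents cannot be hard-deleted. A minimal sketch of the required order of operations (the folder id is a placeholder):

-- Illustrative only: detach agents (or move them to another folder) before removing the folder.
UPDATE "LibraryAgent" SET "folderId" = NULL WHERE "folderId" = 'folder-id';
DELETE FROM "LibraryFolder" WHERE "id" = 'folder-id';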
@@ -1143,153 +1173,6 @@ enum APIKeyStatus {
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
///////////// LLM REGISTRY AND BILLING DATA /////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
// LlmCostUnit: Defines how LLM MODEL costs are calculated (per run or per token).
|
||||
// This is distinct from BlockCostType (in backend/data/block.py) which defines
|
||||
// how BLOCK EXECUTION costs are calculated (per run, per byte, or per second).
|
||||
// LlmCostUnit is for pricing individual LLM model API calls in the registry,
|
||||
// while BlockCostType is for billing platform block executions.
|
||||
enum LlmCostUnit {
|
||||
RUN
|
||||
TOKENS
|
||||
}
|
||||
|
||||
model LlmModelCreator {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
name String @unique // e.g., "openai", "anthropic", "meta"
|
||||
displayName String // e.g., "OpenAI", "Anthropic", "Meta"
|
||||
description String?
|
||||
websiteUrl String? // Link to creator's website
|
||||
logoUrl String? // URL to creator's logo
|
||||
|
||||
metadata Json @default("{}")
|
||||
|
||||
Models LlmModel[]
|
||||
}
|
||||
|
||||
model LlmProvider {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
name String @unique
|
||||
displayName String
|
||||
description String?
|
||||
|
||||
defaultCredentialProvider String?
|
||||
defaultCredentialId String?
|
||||
defaultCredentialType String?
|
||||
|
||||
supportsTools Boolean @default(true)
|
||||
supportsJsonOutput Boolean @default(true)
|
||||
supportsReasoning Boolean @default(false)
|
||||
supportsParallelTool Boolean @default(false)
|
||||
|
||||
metadata Json @default("{}")
|
||||
|
||||
Models LlmModel[]
|
||||
}
|
||||
|
||||
model LlmModel {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
slug String @unique
|
||||
displayName String
|
||||
description String?
|
||||
|
||||
providerId String
|
||||
Provider LlmProvider @relation(fields: [providerId], references: [id], onDelete: Restrict)
|
||||
|
||||
// Creator is the organization that created/trained the model (e.g., OpenAI, Meta)
|
||||
// This is distinct from the provider who hosts/serves the model (e.g., OpenRouter)
|
||||
creatorId String?
|
||||
Creator LlmModelCreator? @relation(fields: [creatorId], references: [id], onDelete: SetNull)
|
||||
|
||||
contextWindow Int
|
||||
maxOutputTokens Int?
|
||||
priceTier Int @default(1) // 1=cheapest, 2=medium, 3=expensive
|
||||
isEnabled Boolean @default(true)
|
||||
isRecommended Boolean @default(false)
|
||||
|
||||
capabilities Json @default("{}")
|
||||
metadata Json @default("{}")
|
||||
|
||||
Costs LlmModelCost[]
|
||||
|
||||
@@index([providerId, isEnabled])
|
||||
@@index([creatorId])
|
||||
@@index([slug])
|
||||
}
|
||||
|
||||
model LlmModelCost {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
unit LlmCostUnit @default(RUN)
|
||||
|
||||
creditCost Int
|
||||
|
||||
credentialProvider String
|
||||
credentialId String?
|
||||
credentialType String?
|
||||
currency String?
|
||||
|
||||
metadata Json @default("{}")
|
||||
|
||||
llmModelId String
|
||||
Model LlmModel @relation(fields: [llmModelId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@unique([llmModelId, credentialProvider, unit])
|
||||
@@index([llmModelId])
|
||||
@@index([credentialProvider])
|
||||
}
|
||||
|
||||
// Tracks model migrations for revert capability
|
||||
// When a model is disabled with migration, we record which nodes were affected
|
||||
// so they can be reverted when the original model is back online
|
||||
model LlmModelMigration {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
sourceModelSlug String // The original model that was disabled
|
||||
targetModelSlug String // The model workflows were migrated to
|
||||
reason String? // Why the migration happened (e.g., "Provider outage")
|
||||
|
||||
// Track affected nodes as JSON array of node IDs
|
||||
// Format: ["node-uuid-1", "node-uuid-2", ...]
|
||||
migratedNodeIds Json @default("[]")
|
||||
nodeCount Int // Number of nodes migrated
|
||||
|
||||
// Custom pricing override for migrated workflows during the migration period.
|
||||
// Use case: When migrating users from an expensive model (e.g., GPT-4) to a cheaper
|
||||
// one (e.g., GPT-3.5), you may want to temporarily maintain the original pricing
|
||||
// to avoid billing surprises, or offer a discount during the transition.
|
||||
//
|
||||
// IMPORTANT: This field is intended for integration with the billing system.
|
||||
// When billing calculates costs for nodes affected by this migration, it should
|
||||
// check if customCreditCost is set and use it instead of the target model's cost.
|
||||
// If null, the target model's normal cost applies.
|
||||
//
|
||||
// TODO: Integrate with billing system to apply this override during cost calculation.
|
||||
customCreditCost Int?
|
||||
|
||||
// Revert tracking
|
||||
isReverted Boolean @default(false)
|
||||
revertedAt DateTime?
|
||||
|
||||
@@index([sourceModelSlug])
|
||||
@@index([targetModelSlug])
|
||||
@@index([isReverted])
|
||||
}
|
||||
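The customCreditCost field described in the comments above is a per-migration pricing override that billing is expected to prefer over the target model's normal cost. A minimal sketch of that lookup, assuming the registry tables above and a placeholder slug; the exact join used by billing is not specified in this diff:

-- Illustrative only: resolve the effective per-run credit cost for a model,
-- preferring an active (non-reverted) migration override when one exists.
SELECT COALESCE(mig."customCreditCost", c."creditCost") AS effective_credit_cost
FROM "LlmModel" m
JOIN "LlmModelCost" c ON c."llmModelId" = m."id" AND c."unit" = 'RUN'
LEFT JOIN "LlmModelMigration" mig
  ON mig."targetModelSlug" = m."slug" AND mig."isReverted" = false
WHERE m."slug" = 'gpt-5.1';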
////////////// OAUTH PROVIDER TABLES //////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
@@ -37,7 +37,7 @@ services:
      context: ../
      dockerfile: autogpt_platform/backend/Dockerfile
      target: migrate
    command: ["sh", "-c", "prisma generate && python3 gen_prisma_types_stub.py && prisma migrate deploy"]
    command: ["sh", "-c", "poetry run prisma generate && poetry run gen-prisma-stub && poetry run prisma migrate deploy"]
    develop:
      watch:
        - path: ./
@@ -56,7 +56,7 @@ services:
      test:
        [
          "CMD-SHELL",
          "prisma migrate status | grep -q 'No pending migrations' || exit 1",
          "poetry run prisma migrate status | grep -q 'No pending migrations' || exit 1",
        ]
      interval: 30s
      timeout: 10s

@@ -32,6 +32,7 @@
  "dependencies": {
    "@ai-sdk/react": "3.0.61",
    "@faker-js/faker": "10.0.0",
    "@ferrucc-io/emoji-picker": "0.0.48",
    "@hookform/resolvers": "5.2.2",
    "@next/third-parties": "15.4.6",
    "@phosphor-icons/react": "2.1.10",

116
autogpt_platform/frontend/pnpm-lock.yaml
generated
@@ -18,6 +18,9 @@ importers:
|
||||
'@faker-js/faker':
|
||||
specifier: 10.0.0
|
||||
version: 10.0.0
|
||||
'@ferrucc-io/emoji-picker':
|
||||
specifier: 0.0.48
|
||||
version: 0.0.48(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tailwindcss@3.4.17)
|
||||
'@hookform/resolvers':
|
||||
specifier: 5.2.2
|
||||
version: 5.2.2(react-hook-form@7.66.0(react@18.3.1))
|
||||
@@ -1507,6 +1510,14 @@ packages:
|
||||
resolution: {integrity: sha512-UollFEUkVXutsaP+Vndjxar40Gs5JL2HeLcl8xO1QAjJgOdhc3OmBFWyEylS+RddWaaBiAzH+5/17PLQJwDiLw==}
|
||||
engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'}
|
||||
|
||||
'@ferrucc-io/emoji-picker@0.0.48':
|
||||
resolution: {integrity: sha512-DJ5u+6VLF9OK7x+S/luwrVb5CHC6W16jL5b8vBUYNpxKWSuFgyliDHVtw1SGe6+dr5RUbf8WQwPJdKZmU3Ittg==}
|
||||
engines: {node: '>=18'}
|
||||
peerDependencies:
|
||||
react: ^18.2.0 || ^19.0.0
|
||||
react-dom: ^18.2.0 || ^19.0.0
|
||||
tailwindcss: '>=3.0.0'
|
||||
|
||||
'@floating-ui/core@1.7.3':
|
||||
resolution: {integrity: sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==}
|
||||
|
||||
@@ -3114,6 +3125,10 @@ packages:
|
||||
'@shikijs/vscode-textmate@10.0.2':
|
||||
resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==}
|
||||
|
||||
'@sindresorhus/is@4.6.0':
|
||||
resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==}
|
||||
engines: {node: '>=10'}
|
||||
|
||||
'@standard-schema/spec@1.0.0':
|
||||
resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==}
|
||||
|
||||
@@ -3381,10 +3396,19 @@ packages:
|
||||
react: '>=16.8'
|
||||
react-dom: '>=16.8'
|
||||
|
||||
'@tanstack/react-virtual@3.13.18':
|
||||
resolution: {integrity: sha512-dZkhyfahpvlaV0rIKnvQiVoWPyURppl6w4m9IwMDpuIjcJ1sD9YGWrt0wISvgU7ewACXx2Ct46WPgI6qAD4v6A==}
|
||||
peerDependencies:
|
||||
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
|
||||
react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
|
||||
|
||||
'@tanstack/table-core@8.21.3':
|
||||
resolution: {integrity: sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
'@tanstack/virtual-core@3.13.18':
|
||||
resolution: {integrity: sha512-Mx86Hqu1k39icq2Zusq+Ey2J6dDWTjDvEv43PJtRCoEYTLyfaPnxIQ6iy7YAOK0NV/qOEmZQ/uCufrppZxTgcg==}
|
||||
|
||||
'@testing-library/dom@10.4.1':
|
||||
resolution: {integrity: sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==}
|
||||
engines: {node: '>=18'}
|
||||
@@ -4378,6 +4402,10 @@ packages:
|
||||
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
|
||||
engines: {node: '>=10'}
|
||||
|
||||
char-regex@1.0.2:
|
||||
resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==}
|
||||
engines: {node: '>=10'}
|
||||
|
||||
character-entities-html4@2.1.0:
|
||||
resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==}
|
||||
|
||||
@@ -4995,6 +5023,9 @@ packages:
|
||||
emoji-regex@9.2.2:
|
||||
resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
|
||||
|
||||
emojilib@2.4.0:
|
||||
resolution: {integrity: sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==}
|
||||
|
||||
emojis-list@3.0.0:
|
||||
resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==}
|
||||
engines: {node: '>= 4'}
|
||||
@@ -5975,6 +6006,24 @@ packages:
|
||||
resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
|
||||
hasBin: true
|
||||
|
||||
jotai@2.17.1:
|
||||
resolution: {integrity: sha512-TFNZZDa/0ewCLQyRC/Sq9crtixNj/Xdf/wmj9631xxMuKToVJZDbqcHIYN0OboH+7kh6P6tpIK7uKWClj86PKw==}
|
||||
engines: {node: '>=12.20.0'}
|
||||
peerDependencies:
|
||||
'@babel/core': '>=7.0.0'
|
||||
'@babel/template': '>=7.0.0'
|
||||
'@types/react': '>=17.0.0'
|
||||
react: '>=17.0.0'
|
||||
peerDependenciesMeta:
|
||||
'@babel/core':
|
||||
optional: true
|
||||
'@babel/template':
|
||||
optional: true
|
||||
'@types/react':
|
||||
optional: true
|
||||
react:
|
||||
optional: true
|
||||
|
||||
js-tokens@4.0.0:
|
||||
resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==}
|
||||
|
||||
@@ -6593,6 +6642,10 @@ packages:
|
||||
node-abort-controller@3.1.1:
|
||||
resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==}
|
||||
|
||||
node-emoji@2.2.0:
|
||||
resolution: {integrity: sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
node-fetch-h2@2.3.0:
|
||||
resolution: {integrity: sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==}
|
||||
engines: {node: 4.x || >=6.0.0}
|
||||
@@ -7691,6 +7744,10 @@ packages:
|
||||
resolution: {integrity: sha512-LH7FpTAkeD+y5xQC4fzS+tFtaNlvt3Ib1zKzvhjv/Y+cioV4zIuw4IZr2yhRLu67CWL7FR9/6KXKnjRoZTvGGQ==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
skin-tone@2.0.0:
|
||||
resolution: {integrity: sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
slash@3.0.0:
|
||||
resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
|
||||
engines: {node: '>=8'}
|
||||
@@ -8168,6 +8225,13 @@ packages:
|
||||
resolution: {integrity: sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==}
|
||||
engines: {node: '>=4'}
|
||||
|
||||
unicode-emoji-json@0.8.0:
|
||||
resolution: {integrity: sha512-3wDXXvp6YGoKGhS2O2H7+V+bYduOBydN1lnI0uVfr1cIdY02uFFiEH1i3kE5CCE4l6UqbLKVmEFW9USxTAMD1g==}
|
||||
|
||||
unicode-emoji-modifier-base@1.0.0:
|
||||
resolution: {integrity: sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==}
|
||||
engines: {node: '>=4'}
|
||||
|
||||
unicode-match-property-ecmascript@2.0.0:
|
||||
resolution: {integrity: sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==}
|
||||
engines: {node: '>=4'}
|
||||
@@ -9777,6 +9841,22 @@ snapshots:
|
||||
|
||||
'@faker-js/faker@10.0.0': {}
|
||||
|
||||
'@ferrucc-io/emoji-picker@0.0.48(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tailwindcss@3.4.17)':
|
||||
dependencies:
|
||||
'@tanstack/react-virtual': 3.13.18(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
clsx: 2.1.1
|
||||
jotai: 2.17.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react@18.3.1)
|
||||
node-emoji: 2.2.0
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
tailwind-merge: 2.6.0
|
||||
tailwindcss: 3.4.17
|
||||
unicode-emoji-json: 0.8.0
|
||||
transitivePeerDependencies:
|
||||
- '@babel/core'
|
||||
- '@babel/template'
|
||||
- '@types/react'
|
||||
|
||||
'@floating-ui/core@1.7.3':
|
||||
dependencies:
|
||||
'@floating-ui/utils': 0.2.10
|
||||
@@ -11538,6 +11618,8 @@ snapshots:
|
||||
|
||||
'@shikijs/vscode-textmate@10.0.2': {}
|
||||
|
||||
'@sindresorhus/is@4.6.0': {}
|
||||
|
||||
'@standard-schema/spec@1.0.0': {}
|
||||
|
||||
'@standard-schema/spec@1.1.0': {}
|
||||
@@ -12011,8 +12093,16 @@ snapshots:
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
|
||||
'@tanstack/react-virtual@3.13.18(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
|
||||
dependencies:
|
||||
'@tanstack/virtual-core': 3.13.18
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
|
||||
'@tanstack/table-core@8.21.3': {}
|
||||
|
||||
'@tanstack/virtual-core@3.13.18': {}
|
||||
|
||||
'@testing-library/dom@10.4.1':
|
||||
dependencies:
|
||||
'@babel/code-frame': 7.27.1
|
||||
@@ -13104,6 +13194,8 @@ snapshots:
|
||||
ansi-styles: 4.3.0
|
||||
supports-color: 7.2.0
|
||||
|
||||
char-regex@1.0.2: {}
|
||||
|
||||
character-entities-html4@2.1.0: {}
|
||||
|
||||
character-entities-legacy@3.0.0: {}
|
||||
@@ -13747,6 +13839,8 @@ snapshots:
|
||||
|
||||
emoji-regex@9.2.2: {}
|
||||
|
||||
emojilib@2.4.0: {}
|
||||
|
||||
emojis-list@3.0.0: {}
|
||||
|
||||
endent@2.1.0:
|
||||
@@ -15028,6 +15122,13 @@ snapshots:
|
||||
|
||||
jiti@2.6.1: {}
|
||||
|
||||
jotai@2.17.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@18.3.17)(react@18.3.1):
|
||||
optionalDependencies:
|
||||
'@babel/core': 7.28.5
|
||||
'@babel/template': 7.27.2
|
||||
'@types/react': 18.3.17
|
||||
react: 18.3.1
|
||||
|
||||
js-tokens@4.0.0: {}
|
||||
|
||||
js-yaml@4.1.0:
|
||||
@@ -15896,6 +15997,13 @@ snapshots:
|
||||
|
||||
node-abort-controller@3.1.1: {}
|
||||
|
||||
node-emoji@2.2.0:
|
||||
dependencies:
|
||||
'@sindresorhus/is': 4.6.0
|
||||
char-regex: 1.0.2
|
||||
emojilib: 2.4.0
|
||||
skin-tone: 2.0.0
|
||||
|
||||
node-fetch-h2@2.3.0:
|
||||
dependencies:
|
||||
http2-client: 1.3.5
|
||||
@@ -17196,6 +17304,10 @@ snapshots:
|
||||
dependencies:
|
||||
jsep: 1.4.0
|
||||
|
||||
skin-tone@2.0.0:
|
||||
dependencies:
|
||||
unicode-emoji-modifier-base: 1.0.0
|
||||
|
||||
slash@3.0.0: {}
|
||||
|
||||
sonner@2.0.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
@@ -17711,6 +17823,10 @@ snapshots:
|
||||
|
||||
unicode-canonical-property-names-ecmascript@2.0.1: {}
|
||||
|
||||
unicode-emoji-json@0.8.0: {}
|
||||
|
||||
unicode-emoji-modifier-base@1.0.0: {}
|
||||
|
||||
unicode-match-property-ecmascript@2.0.0:
|
||||
dependencies:
|
||||
unicode-canonical-property-names-ecmascript: 2.0.1
|
||||
|
||||
@@ -1,8 +1,5 @@
"use client";

import { Sidebar } from "@/components/__legacy__/Sidebar";
import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
import { Cpu } from "@phosphor-icons/react";

import { IconSliders } from "@/components/__legacy__/ui/icons";

@@ -29,11 +26,6 @@ const sidebarLinkGroups = [
        href: "/admin/execution-analytics",
        icon: <FileText className="h-6 w-6" />,
      },
      {
        text: "LLM Registry",
        href: "/admin/llms",
        icon: <Cpu size={24} />,
      },
      {
        text: "Admin User Management",
        href: "/admin/settings",

@@ -1,493 +0,0 @@
|
||||
"use server";
|
||||
|
||||
import { revalidatePath } from "next/cache";
|
||||
|
||||
// Generated API functions
|
||||
import {
|
||||
getV2ListLlmProviders,
|
||||
postV2CreateLlmProvider,
|
||||
patchV2UpdateLlmProvider,
|
||||
deleteV2DeleteLlmProvider,
|
||||
getV2ListLlmModels,
|
||||
postV2CreateLlmModel,
|
||||
patchV2UpdateLlmModel,
|
||||
patchV2ToggleLlmModelAvailability,
|
||||
deleteV2DeleteLlmModelAndMigrateWorkflows,
|
||||
getV2GetModelUsageCount,
|
||||
getV2ListModelMigrations,
|
||||
postV2RevertAModelMigration,
|
||||
getV2ListModelCreators,
|
||||
postV2CreateModelCreator,
|
||||
patchV2UpdateModelCreator,
|
||||
deleteV2DeleteModelCreator,
|
||||
postV2SetRecommendedModel,
|
||||
} from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
|
||||
// Generated types
|
||||
import type { LlmProvidersResponse } from "@/app/api/__generated__/models/llmProvidersResponse";
|
||||
import type { LlmModelsResponse } from "@/app/api/__generated__/models/llmModelsResponse";
|
||||
import type { UpsertLlmProviderRequest } from "@/app/api/__generated__/models/upsertLlmProviderRequest";
|
||||
import type { CreateLlmModelRequest } from "@/app/api/__generated__/models/createLlmModelRequest";
|
||||
import type { UpdateLlmModelRequest } from "@/app/api/__generated__/models/updateLlmModelRequest";
|
||||
import type { ToggleLlmModelRequest } from "@/app/api/__generated__/models/toggleLlmModelRequest";
|
||||
import type { LlmMigrationsResponse } from "@/app/api/__generated__/models/llmMigrationsResponse";
|
||||
import type { LlmCreatorsResponse } from "@/app/api/__generated__/models/llmCreatorsResponse";
|
||||
import type { UpsertLlmCreatorRequest } from "@/app/api/__generated__/models/upsertLlmCreatorRequest";
|
||||
import type { LlmModelUsageResponse } from "@/app/api/__generated__/models/llmModelUsageResponse";
|
||||
import { LlmCostUnit } from "@/app/api/__generated__/models/llmCostUnit";
|
||||
|
||||
const ADMIN_LLM_PATH = "/admin/llms";
|
||||
|
||||
// =============================================================================
|
||||
// Utilities
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Extracts and validates a required string field from FormData.
|
||||
* Throws an error if the field is missing or empty.
|
||||
*/
|
||||
function getRequiredFormField(
|
||||
formData: FormData,
|
||||
fieldName: string,
|
||||
displayName?: string,
|
||||
): string {
|
||||
const raw = formData.get(fieldName);
|
||||
const value = raw ? String(raw).trim() : "";
|
||||
if (!value) {
|
||||
throw new Error(`${displayName || fieldName} is required`);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts and validates a required positive number field from FormData.
|
||||
* Throws an error if the field is missing, empty, or not a positive number.
|
||||
*/
|
||||
function getRequiredPositiveNumber(
|
||||
formData: FormData,
|
||||
fieldName: string,
|
||||
displayName?: string,
|
||||
): number {
|
||||
const raw = formData.get(fieldName);
|
||||
const value = Number(raw);
|
||||
if (raw === null || raw === "" || !Number.isFinite(value) || value <= 0) {
|
||||
throw new Error(`${displayName || fieldName} must be a positive number`);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts and validates a required number field from FormData.
|
||||
* Throws an error if the field is missing, empty, or not a finite number.
|
||||
*/
|
||||
function getRequiredNumber(
|
||||
formData: FormData,
|
||||
fieldName: string,
|
||||
displayName?: string,
|
||||
): number {
|
||||
const raw = formData.get(fieldName);
|
||||
const value = Number(raw);
|
||||
if (raw === null || raw === "" || !Number.isFinite(value)) {
|
||||
throw new Error(`${displayName || fieldName} is required`);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Provider Actions
|
||||
// =============================================================================
|
||||
|
||||
export async function fetchLlmProviders(): Promise<LlmProvidersResponse> {
|
||||
const response = await getV2ListLlmProviders({ include_models: true });
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to fetch LLM providers");
|
||||
}
|
||||
return response.data;
|
||||
}
|
||||
|
||||
export async function createLlmProviderAction(formData: FormData) {
|
||||
const payload: UpsertLlmProviderRequest = {
|
||||
name: String(formData.get("name") || "").trim(),
|
||||
display_name: String(formData.get("display_name") || "").trim(),
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
default_credential_provider: formData.get("default_credential_provider")
|
||||
? String(formData.get("default_credential_provider")).trim()
|
||||
: undefined,
|
||||
default_credential_id: formData.get("default_credential_id")
|
||||
? String(formData.get("default_credential_id")).trim()
|
||||
: undefined,
|
||||
default_credential_type: formData.get("default_credential_type")
|
||||
? String(formData.get("default_credential_type")).trim()
|
||||
: "api_key",
|
||||
supports_tools: formData.getAll("supports_tools").includes("on"),
|
||||
supports_json_output: formData
|
||||
.getAll("supports_json_output")
|
||||
.includes("on"),
|
||||
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
|
||||
supports_parallel_tool: formData
|
||||
.getAll("supports_parallel_tool")
|
||||
.includes("on"),
|
||||
metadata: {},
|
||||
};
|
||||
|
||||
const response = await postV2CreateLlmProvider(payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to create LLM provider");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function deleteLlmProviderAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const providerId = getRequiredFormField(
|
||||
formData,
|
||||
"provider_id",
|
||||
"Provider id",
|
||||
);
|
||||
|
||||
const response = await deleteV2DeleteLlmProvider(providerId);
|
||||
if (response.status !== 200) {
|
||||
const errorData = response.data as { detail?: string };
|
||||
throw new Error(errorData?.detail || "Failed to delete provider");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function updateLlmProviderAction(formData: FormData) {
|
||||
const providerId = getRequiredFormField(
|
||||
formData,
|
||||
"provider_id",
|
||||
"Provider id",
|
||||
);
|
||||
|
||||
const payload: UpsertLlmProviderRequest = {
|
||||
name: String(formData.get("name") || "").trim(),
|
||||
display_name: String(formData.get("display_name") || "").trim(),
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
default_credential_provider: formData.get("default_credential_provider")
|
||||
? String(formData.get("default_credential_provider")).trim()
|
||||
: undefined,
|
||||
default_credential_id: formData.get("default_credential_id")
|
||||
? String(formData.get("default_credential_id")).trim()
|
||||
: undefined,
|
||||
default_credential_type: formData.get("default_credential_type")
|
||||
? String(formData.get("default_credential_type")).trim()
|
||||
: "api_key",
|
||||
supports_tools: formData.getAll("supports_tools").includes("on"),
|
||||
supports_json_output: formData
|
||||
.getAll("supports_json_output")
|
||||
.includes("on"),
|
||||
supports_reasoning: formData.getAll("supports_reasoning").includes("on"),
|
||||
supports_parallel_tool: formData
|
||||
.getAll("supports_parallel_tool")
|
||||
.includes("on"),
|
||||
metadata: {},
|
||||
};
|
||||
|
||||
const response = await patchV2UpdateLlmProvider(providerId, payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to update LLM provider");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Model Actions
|
||||
// =============================================================================
|
||||
|
||||
export async function fetchLlmModels(): Promise<LlmModelsResponse> {
|
||||
const response = await getV2ListLlmModels();
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to fetch LLM models");
|
||||
}
|
||||
return response.data;
|
||||
}
|
||||
|
||||
export async function createLlmModelAction(formData: FormData) {
|
||||
const providerId = getRequiredFormField(formData, "provider_id", "Provider");
|
||||
const creatorId = formData.get("creator_id");
|
||||
const contextWindow = getRequiredPositiveNumber(
|
||||
formData,
|
||||
"context_window",
|
||||
"Context window",
|
||||
);
|
||||
const creditCost = getRequiredNumber(formData, "credit_cost", "Credit cost");
|
||||
|
||||
// Fetch provider to get default credentials
|
||||
const providersResponse = await getV2ListLlmProviders({
|
||||
include_models: false,
|
||||
});
|
||||
if (providersResponse.status !== 200) {
|
||||
throw new Error("Failed to fetch providers");
|
||||
}
|
||||
const provider = providersResponse.data.providers.find(
|
||||
(p) => p.id === providerId,
|
||||
);
|
||||
|
||||
if (!provider) {
|
||||
throw new Error("Provider not found");
|
||||
}
|
||||
|
||||
const payload: CreateLlmModelRequest = {
|
||||
slug: String(formData.get("slug") || "").trim(),
|
||||
display_name: String(formData.get("display_name") || "").trim(),
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
provider_id: providerId,
|
||||
creator_id: creatorId ? String(creatorId) : undefined,
|
||||
context_window: contextWindow,
|
||||
max_output_tokens: formData.get("max_output_tokens")
|
||||
? Number(formData.get("max_output_tokens"))
|
||||
: undefined,
|
||||
is_enabled: formData.getAll("is_enabled").includes("on"),
|
||||
capabilities: {},
|
||||
metadata: {},
|
||||
costs: [
|
||||
{
|
||||
unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
|
||||
credit_cost: creditCost,
|
||||
credential_provider:
|
||||
provider.default_credential_provider || provider.name,
|
||||
credential_id: provider.default_credential_id || undefined,
|
||||
credential_type: provider.default_credential_type || "api_key",
|
||||
metadata: {},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const response = await postV2CreateLlmModel(payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to create LLM model");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function updateLlmModelAction(formData: FormData) {
|
||||
const modelId = getRequiredFormField(formData, "model_id", "Model id");
|
||||
const creatorId = formData.get("creator_id");
|
||||
|
||||
const payload: UpdateLlmModelRequest = {
|
||||
display_name: formData.get("display_name")
|
||||
? String(formData.get("display_name"))
|
||||
: undefined,
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
provider_id: formData.get("provider_id")
|
||||
? String(formData.get("provider_id"))
|
||||
: undefined,
|
||||
creator_id: creatorId ? String(creatorId) : undefined,
|
||||
context_window: formData.get("context_window")
|
||||
? Number(formData.get("context_window"))
|
||||
: undefined,
|
||||
max_output_tokens: formData.get("max_output_tokens")
|
||||
? Number(formData.get("max_output_tokens"))
|
||||
: undefined,
|
||||
is_enabled: formData.has("is_enabled")
|
||||
? formData.getAll("is_enabled").includes("on")
|
||||
: undefined,
|
||||
costs: formData.get("credit_cost")
|
||||
? [
|
||||
{
|
||||
unit: (formData.get("unit") as LlmCostUnit) || LlmCostUnit.RUN,
|
||||
credit_cost: Number(formData.get("credit_cost")),
|
||||
credential_provider: String(
|
||||
formData.get("credential_provider") || "",
|
||||
).trim(),
|
||||
credential_id: formData.get("credential_id")
|
||||
? String(formData.get("credential_id"))
|
||||
: undefined,
|
||||
credential_type: formData.get("credential_type")
|
||||
? String(formData.get("credential_type"))
|
||||
: undefined,
|
||||
metadata: {},
|
||||
},
|
||||
]
|
||||
: undefined,
|
||||
};
|
||||
|
||||
const response = await patchV2UpdateLlmModel(modelId, payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to update LLM model");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function toggleLlmModelAction(formData: FormData): Promise<void> {
|
||||
const modelId = getRequiredFormField(formData, "model_id", "Model id");
|
||||
const shouldEnable = formData.get("is_enabled") === "true";
|
||||
const migrateToSlug = formData.get("migrate_to_slug");
|
||||
const migrationReason = formData.get("migration_reason");
|
||||
const customCreditCost = formData.get("custom_credit_cost");
|
||||
|
||||
const payload: ToggleLlmModelRequest = {
|
||||
is_enabled: shouldEnable,
|
||||
migrate_to_slug: migrateToSlug ? String(migrateToSlug) : undefined,
|
||||
migration_reason: migrationReason ? String(migrationReason) : undefined,
|
||||
custom_credit_cost: customCreditCost ? Number(customCreditCost) : undefined,
|
||||
};
|
||||
|
||||
const response = await patchV2ToggleLlmModelAvailability(modelId, payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to toggle LLM model");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function deleteLlmModelAction(formData: FormData): Promise<void> {
|
||||
const modelId = getRequiredFormField(formData, "model_id", "Model id");
|
||||
const rawReplacement = formData.get("replacement_model_slug");
|
||||
const replacementModelSlug =
|
||||
rawReplacement && String(rawReplacement).trim()
|
||||
? String(rawReplacement).trim()
|
||||
: undefined;
|
||||
|
||||
const response = await deleteV2DeleteLlmModelAndMigrateWorkflows(modelId, {
|
||||
replacement_model_slug: replacementModelSlug,
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to delete model");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function fetchLlmModelUsage(
|
||||
modelId: string,
|
||||
): Promise<LlmModelUsageResponse> {
|
||||
const response = await getV2GetModelUsageCount(modelId);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to fetch model usage");
|
||||
}
|
||||
return response.data;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Migration Actions
|
||||
// =============================================================================
|
||||
|
||||
export async function fetchLlmMigrations(
|
||||
includeReverted: boolean = false,
|
||||
): Promise<LlmMigrationsResponse> {
|
||||
const response = await getV2ListModelMigrations({
|
||||
include_reverted: includeReverted,
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to fetch migrations");
|
||||
}
|
||||
return response.data;
|
||||
}
|
||||
|
||||
export async function revertLlmMigrationAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const migrationId = getRequiredFormField(
|
||||
formData,
|
||||
"migration_id",
|
||||
"Migration id",
|
||||
);
|
||||
|
||||
const response = await postV2RevertAModelMigration(migrationId, null);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to revert migration");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Creator Actions
|
||||
// =============================================================================
|
||||
|
||||
export async function fetchLlmCreators(): Promise<LlmCreatorsResponse> {
|
||||
const response = await getV2ListModelCreators();
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to fetch creators");
|
||||
}
|
||||
return response.data;
|
||||
}
|
||||
|
||||
export async function createLlmCreatorAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const payload: UpsertLlmCreatorRequest = {
|
||||
name: String(formData.get("name") || "").trim(),
|
||||
display_name: String(formData.get("display_name") || "").trim(),
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
website_url: formData.get("website_url")
|
||||
? String(formData.get("website_url")).trim()
|
||||
: undefined,
|
||||
logo_url: formData.get("logo_url")
|
||||
? String(formData.get("logo_url")).trim()
|
||||
: undefined,
|
||||
metadata: {},
|
||||
};
|
||||
|
||||
const response = await postV2CreateModelCreator(payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to create creator");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function updateLlmCreatorAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");
|
||||
|
||||
const payload: UpsertLlmCreatorRequest = {
|
||||
name: String(formData.get("name") || "").trim(),
|
||||
display_name: String(formData.get("display_name") || "").trim(),
|
||||
description: formData.get("description")
|
||||
? String(formData.get("description"))
|
||||
: undefined,
|
||||
website_url: formData.get("website_url")
|
||||
? String(formData.get("website_url")).trim()
|
||||
: undefined,
|
||||
logo_url: formData.get("logo_url")
|
||||
? String(formData.get("logo_url")).trim()
|
||||
: undefined,
|
||||
metadata: {},
|
||||
};
|
||||
|
||||
const response = await patchV2UpdateModelCreator(creatorId, payload);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to update creator");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
export async function deleteLlmCreatorAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const creatorId = getRequiredFormField(formData, "creator_id", "Creator id");
|
||||
|
||||
const response = await deleteV2DeleteModelCreator(creatorId);
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to delete creator");
|
||||
}
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Recommended Model Actions
|
||||
// =============================================================================
|
||||
|
||||
export async function setRecommendedModelAction(
|
||||
formData: FormData,
|
||||
): Promise<void> {
|
||||
const modelId = getRequiredFormField(formData, "model_id", "Model id");
|
||||
|
||||
const response = await postV2SetRecommendedModel({ model_id: modelId });
|
||||
if (response.status !== 200) {
|
||||
throw new Error("Failed to set recommended model");
|
||||
}
|
||||
|
||||
revalidatePath(ADMIN_LLM_PATH);
|
||||
}
|
||||
@@ -1,147 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { createLlmCreatorAction } from "../actions";
|
||||
import { useRouter } from "next/navigation";
|
||||
|
||||
export function AddCreatorModal() {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await createLlmCreatorAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to create creator");
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Add Creator"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "512px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="primary" size="small">
|
||||
Add Creator
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Add a new model creator (the organization that made/trained the
|
||||
model).
|
||||
</div>
|
||||
|
||||
<form action={handleSubmit} className="space-y-4">
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Name (slug) <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="name"
|
||||
required
|
||||
name="name"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="openai"
|
||||
/>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Lowercase identifier (e.g., openai, meta, anthropic)
|
||||
</p>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="display_name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Display Name <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="display_name"
|
||||
required
|
||||
name="display_name"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="OpenAI"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="description"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Description
|
||||
</label>
|
||||
<textarea
|
||||
id="description"
|
||||
name="description"
|
||||
rows={2}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="Creator of GPT models..."
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="website_url"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Website URL
|
||||
</label>
|
||||
<input
|
||||
id="website_url"
|
||||
name="website_url"
|
||||
type="url"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="https://openai.com"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Creating..." : "Add Creator"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,314 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import { createLlmModelAction } from "../actions";
|
||||
import { useRouter } from "next/navigation";
|
||||
|
||||
interface Props {
|
||||
providers: LlmProvider[];
|
||||
creators: LlmModelCreator[];
|
||||
}
|
||||
|
||||
export function AddModelModal({ providers, creators }: Props) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [selectedCreatorId, setSelectedCreatorId] = useState("");
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await createLlmModelAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to create model");
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
// When provider changes, auto-select matching creator if one exists
|
||||
function handleProviderChange(providerId: string) {
|
||||
const provider = providers.find((p) => p.id === providerId);
|
||||
if (provider) {
|
||||
// Find creator with same name as provider (e.g., "openai" -> "openai")
|
||||
const matchingCreator = creators.find((c) => c.name === provider.name);
|
||||
if (matchingCreator) {
|
||||
setSelectedCreatorId(matchingCreator.id);
|
||||
} else {
|
||||
// No matching creator (e.g., OpenRouter hosts other creators' models)
|
||||
setSelectedCreatorId("");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Add Model"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="primary" size="small">
|
||||
Add Model
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Register a new model slug, metadata, and pricing.
|
||||
</div>
|
||||
|
||||
<form action={handleSubmit} className="space-y-6">
|
||||
{/* Basic Information */}
|
||||
<div className="space-y-4">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Basic Information
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Core model details
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="slug"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Model Slug <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="slug"
|
||||
required
|
||||
name="slug"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="gpt-4.1-mini-2025-04-14"
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="display_name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Display Name <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="display_name"
|
||||
required
|
||||
name="display_name"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="GPT 4.1 Mini"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="description"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Description
|
||||
</label>
|
||||
<textarea
|
||||
id="description"
|
||||
name="description"
|
||||
rows={3}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="Optional description..."
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Model Configuration */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Model Configuration
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Model capabilities and limits
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="provider_id"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Provider <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<select
|
||||
id="provider_id"
|
||||
required
|
||||
name="provider_id"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
defaultValue=""
|
||||
onChange={(e) => handleProviderChange(e.target.value)}
|
||||
>
|
||||
<option value="" disabled>
|
||||
Select provider
|
||||
</option>
|
||||
{providers.map((provider) => (
|
||||
<option key={provider.id} value={provider.id}>
|
||||
{provider.display_name} ({provider.name})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Who hosts/serves the model
|
||||
</p>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="creator_id"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Creator
|
||||
</label>
|
||||
<select
|
||||
id="creator_id"
|
||||
name="creator_id"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
value={selectedCreatorId}
|
||||
onChange={(e) => setSelectedCreatorId(e.target.value)}
|
||||
>
|
||||
<option value="">No creator selected</option>
|
||||
{creators.map((creator) => (
|
||||
<option key={creator.id} value={creator.id}>
|
||||
{creator.display_name} ({creator.name})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Who made/trained the model (e.g., OpenAI, Meta)
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="context_window"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Context Window <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="context_window"
|
||||
required
|
||||
type="number"
|
||||
name="context_window"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="128000"
|
||||
min={1}
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="max_output_tokens"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Max Output Tokens
|
||||
</label>
|
||||
<input
|
||||
id="max_output_tokens"
|
||||
type="number"
|
||||
name="max_output_tokens"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="16384"
|
||||
min={1}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Pricing */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">Pricing</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Credit cost per run (credentials are managed via the provider)
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-1">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="credit_cost"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Credit Cost <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="credit_cost"
|
||||
required
|
||||
type="number"
|
||||
name="credit_cost"
|
||||
step="1"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="5"
|
||||
min={0}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Credit cost is always in platform credits. Credentials are
|
||||
inherited from the selected provider.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Enabled Toggle */}
|
||||
<div className="flex items-center gap-3 border-t border-border pt-6">
|
||||
<input type="hidden" name="is_enabled" value="off" />
|
||||
<input
|
||||
id="is_enabled"
|
||||
type="checkbox"
|
||||
name="is_enabled"
|
||||
defaultChecked
|
||||
className="h-4 w-4 rounded border-input"
|
||||
/>
|
||||
<label
|
||||
htmlFor="is_enabled"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Enabled by default
|
||||
</label>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Creating..." : "Save Model"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,268 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { createLlmProviderAction } from "../actions";
|
||||
import { useRouter } from "next/navigation";
|
||||
|
||||
export function AddProviderModal() {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await createLlmProviderAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(
|
||||
err instanceof Error ? err.message : "Failed to create provider",
|
||||
);
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Add Provider"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="primary" size="small">
|
||||
Add Provider
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Define a new upstream provider and default credential information.
|
||||
</div>
|
||||
|
||||
{/* Setup Instructions */}
|
||||
<div className="mb-6 rounded-lg border border-primary/30 bg-primary/5 p-4">
|
||||
<div className="space-y-2">
|
||||
<h4 className="text-sm font-semibold text-foreground">
|
||||
Before Adding a Provider
|
||||
</h4>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
To use a new provider, you must first configure its credentials in
|
||||
the backend:
|
||||
</p>
|
||||
<ol className="list-inside list-decimal space-y-1 text-xs text-muted-foreground">
|
||||
<li>
|
||||
Add the credential to{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono">
|
||||
backend/integrations/credentials_store.py
|
||||
</code>{" "}
|
||||
with a UUID, provider name, and settings secret reference
|
||||
</li>
|
||||
<li>
|
||||
Add it to the{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono">
|
||||
PROVIDER_CREDENTIALS
|
||||
</code>{" "}
|
||||
dictionary in{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono">
|
||||
backend/data/block_cost_config.py
|
||||
</code>
|
||||
</li>
|
||||
<li>
|
||||
Use the <strong>same provider name</strong> in the
|
||||
"Credential Provider" field below that matches the key
|
||||
in{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono">
|
||||
PROVIDER_CREDENTIALS
|
||||
</code>
|
||||
</li>
|
||||
</ol>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<form action={handleSubmit} className="space-y-6">
|
||||
{/* Basic Information */}
|
||||
<div className="space-y-4">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Basic Information
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Core provider details
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Provider Slug <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="name"
|
||||
required
|
||||
name="name"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="e.g. openai"
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="display_name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Display Name <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="display_name"
|
||||
required
|
||||
name="display_name"
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="OpenAI"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="description"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Description
|
||||
</label>
|
||||
<textarea
|
||||
id="description"
|
||||
name="description"
|
||||
rows={3}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="Optional description..."
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Default Credentials */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Default Credentials
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Credential provider name that matches the key in{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
|
||||
PROVIDER_CREDENTIALS
|
||||
</code>
|
||||
</p>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="default_credential_provider"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Credential Provider <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="default_credential_provider"
|
||||
name="default_credential_provider"
|
||||
required
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="openai"
|
||||
/>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
<strong>Important:</strong> This must exactly match the key in
|
||||
the{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
|
||||
PROVIDER_CREDENTIALS
|
||||
</code>{" "}
|
||||
dictionary in{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
|
||||
block_cost_config.py
|
||||
</code>
|
||||
. Common values: "openai", "anthropic",
|
||||
"groq", "open_router", etc.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Capabilities */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Capabilities
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Provider feature flags
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-3 sm:grid-cols-2">
|
||||
{[
|
||||
{ name: "supports_tools", label: "Supports tools" },
|
||||
{ name: "supports_json_output", label: "Supports JSON output" },
|
||||
{ name: "supports_reasoning", label: "Supports reasoning" },
|
||||
{
|
||||
name: "supports_parallel_tool",
|
||||
label: "Supports parallel tool calls",
|
||||
},
|
||||
].map(({ name, label }) => (
|
||||
<div
|
||||
key={name}
|
||||
className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
|
||||
>
|
||||
<input type="hidden" name={name} value="off" />
|
||||
<input
|
||||
id={name}
|
||||
type="checkbox"
|
||||
name={name}
|
||||
defaultChecked={
|
||||
name !== "supports_reasoning" &&
|
||||
name !== "supports_parallel_tool"
|
||||
}
|
||||
className="h-4 w-4 rounded border-input"
|
||||
/>
|
||||
<label
|
||||
htmlFor={name}
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
{label}
|
||||
</label>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Creating..." : "Save Provider"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,195 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/atoms/Table/Table";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { updateLlmCreatorAction } from "../actions";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { DeleteCreatorModal } from "./DeleteCreatorModal";
|
||||
|
||||
export function CreatorsTable({ creators }: { creators: LlmModelCreator[] }) {
|
||||
if (!creators.length) {
|
||||
return (
|
||||
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
|
||||
No creators registered yet.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="rounded-lg border">
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Creator</TableHead>
|
||||
<TableHead>Description</TableHead>
|
||||
<TableHead>Website</TableHead>
|
||||
<TableHead>Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{creators.map((creator) => (
|
||||
<TableRow key={creator.id}>
|
||||
<TableCell>
|
||||
<div className="font-medium">{creator.display_name}</div>
|
||||
<div className="text-xs text-muted-foreground">
|
||||
{creator.name}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<span className="text-sm text-muted-foreground">
|
||||
{creator.description || "—"}
|
||||
</span>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{creator.website_url ? (
|
||||
<a
|
||||
href={creator.website_url}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="text-sm text-primary hover:underline"
|
||||
>
|
||||
{(() => {
|
||||
try {
|
||||
return new URL(creator.website_url).hostname;
|
||||
} catch {
|
||||
return creator.website_url;
|
||||
}
|
||||
})()}
|
||||
</a>
|
||||
) : (
|
||||
<span className="text-muted-foreground">—</span>
|
||||
)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex items-center justify-end gap-2">
|
||||
<EditCreatorModal creator={creator} />
|
||||
<DeleteCreatorModal creator={creator} />
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function EditCreatorModal({ creator }: { creator: LlmModelCreator }) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await updateLlmCreatorAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to update creator");
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Edit Creator"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "512px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="outline" size="small" className="min-w-0">
|
||||
Edit
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<form action={handleSubmit} className="space-y-4">
|
||||
<input type="hidden" name="creator_id" value={creator.id} />
|
||||
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium">Name (slug)</label>
|
||||
<input
|
||||
required
|
||||
name="name"
|
||||
defaultValue={creator.name}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium">Display Name</label>
|
||||
<input
|
||||
required
|
||||
name="display_name"
|
||||
defaultValue={creator.display_name}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium">Description</label>
|
||||
<textarea
|
||||
name="description"
|
||||
rows={2}
|
||||
defaultValue={creator.description ?? ""}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium">Website URL</label>
|
||||
<input
|
||||
name="website_url"
|
||||
type="url"
|
||||
defaultValue={creator.website_url ?? ""}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Updating..." : "Update"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,107 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import { deleteLlmCreatorAction } from "../actions";
|
||||
|
||||
export function DeleteCreatorModal({ creator }: { creator: LlmModelCreator }) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleDelete(formData: FormData) {
|
||||
setIsDeleting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await deleteLlmCreatorAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to delete creator");
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Delete Creator"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "480px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="min-w-0 text-destructive hover:bg-destructive/10"
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="space-y-4">
|
||||
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
|
||||
<div className="flex items-start gap-3">
|
||||
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
|
||||
⚠️
|
||||
</div>
|
||||
<div className="text-sm text-foreground">
|
||||
<p className="font-semibold">You are about to delete:</p>
|
||||
<p className="mt-1">
|
||||
<span className="font-medium">{creator.display_name}</span>{" "}
|
||||
<span className="text-muted-foreground">
|
||||
({creator.name})
|
||||
</span>
|
||||
</p>
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
Models using this creator will have their creator field
|
||||
cleared. This is safe and won't affect model
|
||||
functionality.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<form action={handleDelete} className="space-y-4">
|
||||
<input type="hidden" name="creator_id" value={creator.id} />
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isDeleting}
|
||||
type="button"
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
variant="primary"
|
||||
size="small"
|
||||
disabled={isDeleting}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
|
||||
>
|
||||
{isDeleting ? "Deleting..." : "Delete Creator"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,224 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import { deleteLlmModelAction, fetchLlmModelUsage } from "../actions";
|
||||
|
||||
export function DeleteModelModal({
|
||||
model,
|
||||
availableModels,
|
||||
}: {
|
||||
model: LlmModel;
|
||||
availableModels: LlmModel[];
|
||||
}) {
|
||||
const router = useRouter();
|
||||
const [open, setOpen] = useState(false);
|
||||
const [selectedReplacement, setSelectedReplacement] = useState<string>("");
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [usageCount, setUsageCount] = useState<number | null>(null);
|
||||
const [usageLoading, setUsageLoading] = useState(false);
|
||||
const [usageError, setUsageError] = useState<string | null>(null);
|
||||
|
||||
// Filter out the current model and disabled models from replacement options
|
||||
const replacementOptions = availableModels.filter(
|
||||
(m) => m.id !== model.id && m.is_enabled,
|
||||
);
|
||||
|
||||
// Check if migration is required (has blocks using this model)
|
||||
const requiresMigration = usageCount !== null && usageCount > 0;
|
||||
|
||||
async function fetchUsage() {
|
||||
setUsageLoading(true);
|
||||
setUsageError(null);
|
||||
try {
|
||||
const usage = await fetchLlmModelUsage(model.id);
|
||||
setUsageCount(usage.node_count);
|
||||
} catch (err) {
|
||||
console.error("Failed to fetch model usage:", err);
|
||||
setUsageError("Failed to load usage count");
|
||||
setUsageCount(null);
|
||||
} finally {
|
||||
setUsageLoading(false);
|
||||
}
|
||||
}
|
||||
|
||||
async function handleDelete(formData: FormData) {
|
||||
setIsDeleting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await deleteLlmModelAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to delete model");
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if delete button should be enabled
|
||||
const canDelete =
|
||||
!isDeleting &&
|
||||
!usageLoading &&
|
||||
usageCount !== null &&
|
||||
(requiresMigration
|
||||
? selectedReplacement && replacementOptions.length > 0
|
||||
: true);
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Delete Model"
|
||||
controlled={{
|
||||
isOpen: open,
|
||||
set: async (isOpen) => {
|
||||
setOpen(isOpen);
|
||||
if (isOpen) {
|
||||
setUsageCount(null);
|
||||
setUsageError(null);
|
||||
setError(null);
|
||||
setSelectedReplacement("");
|
||||
await fetchUsage();
|
||||
}
|
||||
},
|
||||
}}
|
||||
styling={{ maxWidth: "600px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="min-w-0 text-destructive hover:bg-destructive/10"
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
{requiresMigration
|
||||
? "This action cannot be undone. All workflows using this model will be migrated to the replacement model you select."
|
||||
: "This action cannot be undone."}
|
||||
</div>
|
||||
|
||||
<div className="space-y-4">
|
||||
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
|
||||
<div className="flex items-start gap-3">
|
||||
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
|
||||
⚠️
|
||||
</div>
|
||||
<div className="text-sm text-foreground">
|
||||
<p className="font-semibold">You are about to delete:</p>
|
||||
<p className="mt-1">
|
||||
<span className="font-medium">{model.display_name}</span>{" "}
|
||||
<span className="text-muted-foreground">({model.slug})</span>
|
||||
</p>
|
||||
{usageLoading && (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
Loading usage count...
|
||||
</p>
|
||||
)}
|
||||
{usageError && (
|
||||
<p className="mt-2 text-destructive">{usageError}</p>
|
||||
)}
|
||||
{!usageLoading && !usageError && usageCount !== null && (
|
||||
<p className="mt-2 font-semibold">
|
||||
Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
|
||||
currently use this model
|
||||
</p>
|
||||
)}
|
||||
{requiresMigration && (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
All workflows currently using this model will be
|
||||
automatically updated to use the replacement model you
|
||||
choose below.
|
||||
</p>
|
||||
)}
|
||||
{!usageLoading && usageCount === 0 && (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
No workflows are using this model. It can be safely deleted.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<form action={handleDelete} className="space-y-4">
|
||||
<input type="hidden" name="model_id" value={model.id} />
|
||||
<input
|
||||
type="hidden"
|
||||
name="replacement_model_slug"
|
||||
value={selectedReplacement}
|
||||
/>
|
||||
|
||||
{requiresMigration && (
|
||||
<label className="text-sm font-medium">
|
||||
<span className="mb-2 block">
|
||||
Select Replacement Model{" "}
|
||||
<span className="text-destructive">*</span>
|
||||
</span>
|
||||
<select
|
||||
required
|
||||
value={selectedReplacement}
|
||||
onChange={(e) => setSelectedReplacement(e.target.value)}
|
||||
className="w-full rounded border border-input bg-background p-2 text-sm"
|
||||
>
|
||||
<option value="">-- Choose a replacement model --</option>
|
||||
{replacementOptions.map((m) => (
|
||||
<option key={m.id} value={m.slug}>
|
||||
{m.display_name} ({m.slug})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{replacementOptions.length === 0 && (
|
||||
<p className="mt-2 text-xs text-destructive">
|
||||
No replacement models available. You must have at least one
|
||||
other enabled model before deleting this one.
|
||||
</p>
|
||||
)}
|
||||
</label>
|
||||
)}
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setSelectedReplacement("");
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isDeleting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
variant="primary"
|
||||
size="small"
|
||||
disabled={!canDelete}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
|
||||
>
|
||||
{isDeleting
|
||||
? "Deleting..."
|
||||
: requiresMigration
|
||||
? "Delete and Migrate"
|
||||
: "Delete"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import { deleteLlmProviderAction } from "../actions";
|
||||
|
||||
export function DeleteProviderModal({ provider }: { provider: LlmProvider }) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
const modelCount = provider.models?.length ?? 0;
|
||||
const hasModels = modelCount > 0;
|
||||
|
||||
async function handleDelete(formData: FormData) {
|
||||
setIsDeleting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await deleteLlmProviderAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(
|
||||
err instanceof Error ? err.message : "Failed to delete provider",
|
||||
);
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Delete Provider"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "480px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="min-w-0 text-destructive hover:bg-destructive/10"
|
||||
>
|
||||
Delete
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="space-y-4">
|
||||
<div
|
||||
className={`rounded-lg border p-4 ${
|
||||
hasModels
|
||||
? "border-destructive/30 bg-destructive/10"
|
||||
: "border-amber-500/30 bg-amber-500/10 dark:border-amber-400/30 dark:bg-amber-400/10"
|
||||
}`}
|
||||
>
|
||||
<div className="flex items-start gap-3">
|
||||
<div
|
||||
className={`flex-shrink-0 ${
|
||||
hasModels
|
||||
? "text-destructive"
|
||||
: "text-amber-600 dark:text-amber-400"
|
||||
}`}
|
||||
>
|
||||
{hasModels ? "🚫" : "⚠️"}
|
||||
</div>
|
||||
<div className="text-sm text-foreground">
|
||||
<p className="font-semibold">You are about to delete:</p>
|
||||
<p className="mt-1">
|
||||
<span className="font-medium">{provider.display_name}</span>{" "}
|
||||
<span className="text-muted-foreground">
|
||||
({provider.name})
|
||||
</span>
|
||||
</p>
|
||||
{hasModels ? (
|
||||
<p className="mt-2 text-destructive">
|
||||
This provider has {modelCount} model(s). You must delete all
|
||||
models before you can delete this provider.
|
||||
</p>
|
||||
) : (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
This provider has no models and can be safely deleted.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<form action={handleDelete} className="space-y-4">
|
||||
<input type="hidden" name="provider_id" value={provider.id} />
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isDeleting}
|
||||
type="button"
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
variant="primary"
|
||||
size="small"
|
||||
disabled={isDeleting || hasModels}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90 disabled:opacity-50"
|
||||
>
|
||||
{isDeleting ? "Deleting..." : "Delete Provider"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,288 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import { toggleLlmModelAction, fetchLlmModelUsage } from "../actions";
|
||||
|
||||
export function DisableModelModal({
|
||||
model,
|
||||
availableModels,
|
||||
}: {
|
||||
model: LlmModel;
|
||||
availableModels: LlmModel[];
|
||||
}) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isDisabling, setIsDisabling] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [usageCount, setUsageCount] = useState<number | null>(null);
|
||||
const [selectedMigration, setSelectedMigration] = useState<string>("");
|
||||
const [wantsMigration, setWantsMigration] = useState(false);
|
||||
const [migrationReason, setMigrationReason] = useState("");
|
||||
const [customCreditCost, setCustomCreditCost] = useState<string>("");
|
||||
|
||||
// Filter out the current model and disabled models from replacement options
|
||||
const migrationOptions = availableModels.filter(
|
||||
(m) => m.id !== model.id && m.is_enabled,
|
||||
);
|
||||
|
||||
async function fetchUsage() {
|
||||
try {
|
||||
const usage = await fetchLlmModelUsage(model.id);
|
||||
setUsageCount(usage.node_count);
|
||||
} catch {
|
||||
setUsageCount(null);
|
||||
}
|
||||
}
|
||||
|
||||
async function handleDisable(formData: FormData) {
|
||||
setIsDisabling(true);
|
||||
setError(null);
|
||||
try {
|
||||
await toggleLlmModelAction(formData);
|
||||
setOpen(false);
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to disable model");
|
||||
} finally {
|
||||
setIsDisabling(false);
|
||||
}
|
||||
}
|
||||
|
||||
function resetState() {
|
||||
setError(null);
|
||||
setSelectedMigration("");
|
||||
setWantsMigration(false);
|
||||
setMigrationReason("");
|
||||
setCustomCreditCost("");
|
||||
}
|
||||
|
||||
const hasUsage = usageCount !== null && usageCount > 0;
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Disable Model"
|
||||
controlled={{
|
||||
isOpen: open,
|
||||
set: async (isOpen) => {
|
||||
setOpen(isOpen);
|
||||
if (isOpen) {
|
||||
setUsageCount(null);
|
||||
resetState();
|
||||
await fetchUsage();
|
||||
}
|
||||
},
|
||||
}}
|
||||
styling={{ maxWidth: "600px" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="min-w-0"
|
||||
>
|
||||
Disable
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Disabling a model will hide it from users when creating new workflows.
|
||||
</div>
|
||||
|
||||
<div className="space-y-4">
|
||||
<div className="rounded-lg border border-amber-500/30 bg-amber-500/10 p-4 dark:border-amber-400/30 dark:bg-amber-400/10">
|
||||
<div className="flex items-start gap-3">
|
||||
<div className="flex-shrink-0 text-amber-600 dark:text-amber-400">
|
||||
⚠️
|
||||
</div>
|
||||
<div className="text-sm text-foreground">
|
||||
<p className="font-semibold">You are about to disable:</p>
|
||||
<p className="mt-1">
|
||||
<span className="font-medium">{model.display_name}</span>{" "}
|
||||
<span className="text-muted-foreground">({model.slug})</span>
|
||||
</p>
|
||||
{usageCount === null ? (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
Loading usage data...
|
||||
</p>
|
||||
) : usageCount > 0 ? (
|
||||
<p className="mt-2 font-semibold">
|
||||
Impact: {usageCount} block{usageCount !== 1 ? "s" : ""}{" "}
|
||||
currently use this model
|
||||
</p>
|
||||
) : (
|
||||
<p className="mt-2 text-muted-foreground">
|
||||
No workflows are currently using this model.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{hasUsage && (
|
||||
<div className="space-y-4 rounded-lg border border-border bg-muted/50 p-4">
|
||||
<label className="flex items-start gap-3">
|
||||
<input
|
||||
type="checkbox"
|
||||
checked={wantsMigration}
|
||||
onChange={(e) => {
|
||||
setWantsMigration(e.target.checked);
|
||||
if (!e.target.checked) {
|
||||
setSelectedMigration("");
|
||||
}
|
||||
}}
|
||||
className="mt-1"
|
||||
/>
|
||||
<div className="text-sm">
|
||||
<span className="font-medium">
|
||||
Migrate existing workflows to another model
|
||||
</span>
|
||||
<p className="mt-1 text-muted-foreground">
|
||||
Creates a revertible migration record. If unchecked,
|
||||
existing workflows will use automatic fallback to an enabled
|
||||
model from the same provider.
|
||||
</p>
|
||||
</div>
|
||||
</label>
|
||||
|
||||
{wantsMigration && (
|
||||
<div className="space-y-4 border-t border-border pt-4">
|
||||
<label className="block text-sm font-medium">
|
||||
<span className="mb-2 block">
|
||||
Replacement Model{" "}
|
||||
<span className="text-destructive">*</span>
|
||||
</span>
|
||||
<select
|
||||
required
|
||||
value={selectedMigration}
|
||||
onChange={(e) => setSelectedMigration(e.target.value)}
|
||||
className="w-full rounded border border-input bg-background p-2 text-sm"
|
||||
>
|
||||
<option value="">-- Choose a replacement model --</option>
|
||||
{migrationOptions.map((m) => (
|
||||
<option key={m.id} value={m.slug}>
|
||||
{m.display_name} ({m.slug})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{migrationOptions.length === 0 && (
|
||||
<p className="mt-2 text-xs text-destructive">
|
||||
No other enabled models available for migration.
|
||||
</p>
|
||||
)}
|
||||
</label>
|
||||
|
||||
<label className="block text-sm font-medium">
|
||||
<span className="mb-2 block">
|
||||
Migration Reason{" "}
|
||||
<span className="font-normal text-muted-foreground">
|
||||
(optional)
|
||||
</span>
|
||||
</span>
|
||||
<input
|
||||
type="text"
|
||||
value={migrationReason}
|
||||
onChange={(e) => setMigrationReason(e.target.value)}
|
||||
placeholder="e.g., Provider outage, Cost reduction"
|
||||
className="w-full rounded border border-input bg-background p-2 text-sm"
|
||||
/>
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Helps track why the migration was made
|
||||
</p>
|
||||
</label>
|
||||
|
||||
<label className="block text-sm font-medium">
|
||||
<span className="mb-2 block">
|
||||
Custom Credit Cost{" "}
|
||||
<span className="font-normal text-muted-foreground">
|
||||
(optional)
|
||||
</span>
|
||||
</span>
|
||||
<input
|
||||
type="number"
|
||||
min="0"
|
||||
value={customCreditCost}
|
||||
onChange={(e) => setCustomCreditCost(e.target.value)}
|
||||
placeholder="Leave blank to use target model's cost"
|
||||
className="w-full rounded border border-input bg-background p-2 text-sm"
|
||||
/>
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Override pricing for migrated workflows. When set, billing
|
||||
will use this cost instead of the target model's
|
||||
cost.
|
||||
</p>
|
||||
</label>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<form action={handleDisable} className="space-y-4">
|
||||
<input type="hidden" name="model_id" value={model.id} />
|
||||
<input type="hidden" name="is_enabled" value="false" />
|
||||
{wantsMigration && selectedMigration && (
|
||||
<>
|
||||
<input
|
||||
type="hidden"
|
||||
name="migrate_to_slug"
|
||||
value={selectedMigration}
|
||||
/>
|
||||
{migrationReason && (
|
||||
<input
|
||||
type="hidden"
|
||||
name="migration_reason"
|
||||
value={migrationReason}
|
||||
/>
|
||||
)}
|
||||
{customCreditCost && (
|
||||
<input
|
||||
type="hidden"
|
||||
name="custom_credit_cost"
|
||||
value={customCreditCost}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
resetState();
|
||||
}}
|
||||
disabled={isDisabling}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
variant="primary"
|
||||
size="small"
|
||||
disabled={
|
||||
isDisabling ||
|
||||
(wantsMigration && !selectedMigration) ||
|
||||
usageCount === null
|
||||
}
|
||||
>
|
||||
{isDisabling
|
||||
? "Disabling..."
|
||||
: wantsMigration && selectedMigration
|
||||
? "Disable & Migrate"
|
||||
: "Disable Model"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</div>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,223 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import { updateLlmModelAction } from "../actions";
|
||||
|
||||
export function EditModelModal({
|
||||
model,
|
||||
providers,
|
||||
creators,
|
||||
}: {
|
||||
model: LlmModel;
|
||||
providers: LlmProvider[];
|
||||
creators: LlmModelCreator[];
|
||||
}) {
|
||||
const router = useRouter();
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const cost = model.costs?.[0];
|
||||
const provider = providers.find((p) => p.id === model.provider_id);
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await updateLlmModelAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to update model");
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Edit Model"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="outline" size="small" className="min-w-0">
|
||||
Edit
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Update model metadata and pricing information.
|
||||
</div>
|
||||
{error && (
|
||||
<div className="mb-4 rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
<form action={handleSubmit} className="space-y-4">
|
||||
<input type="hidden" name="model_id" value={model.id} />
|
||||
|
||||
<div className="grid gap-4 md:grid-cols-2">
|
||||
<label className="text-sm font-medium">
|
||||
Display Name
|
||||
<input
|
||||
required
|
||||
name="display_name"
|
||||
defaultValue={model.display_name}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
/>
|
||||
</label>
|
||||
<label className="text-sm font-medium">
|
||||
Provider
|
||||
<select
|
||||
required
|
||||
name="provider_id"
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
defaultValue={model.provider_id}
|
||||
>
|
||||
{providers.map((p) => (
|
||||
<option key={p.id} value={p.id}>
|
||||
{p.display_name} ({p.name})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<span className="text-xs text-muted-foreground">
|
||||
Who hosts/serves the model
|
||||
</span>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div className="grid gap-4 md:grid-cols-2">
|
||||
<label className="text-sm font-medium">
|
||||
Creator
|
||||
<select
|
||||
name="creator_id"
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
defaultValue={model.creator_id ?? ""}
|
||||
>
|
||||
<option value="">No creator selected</option>
|
||||
{creators.map((c) => (
|
||||
<option key={c.id} value={c.id}>
|
||||
{c.display_name} ({c.name})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<span className="text-xs text-muted-foreground">
|
||||
Who made/trained the model (e.g., OpenAI, Meta)
|
||||
</span>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<label className="text-sm font-medium">
|
||||
Description
|
||||
<textarea
|
||||
name="description"
|
||||
rows={2}
|
||||
defaultValue={model.description ?? ""}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
placeholder="Optional description..."
|
||||
/>
|
||||
</label>
|
||||
|
||||
<div className="grid gap-4 md:grid-cols-2">
|
||||
<label className="text-sm font-medium">
|
||||
Context Window
|
||||
<input
|
||||
required
|
||||
type="number"
|
||||
name="context_window"
|
||||
defaultValue={model.context_window}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
min={1}
|
||||
/>
|
||||
</label>
|
||||
<label className="text-sm font-medium">
|
||||
Max Output Tokens
|
||||
<input
|
||||
type="number"
|
||||
name="max_output_tokens"
|
||||
defaultValue={model.max_output_tokens ?? undefined}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
min={1}
|
||||
/>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div className="grid gap-4 md:grid-cols-2">
|
||||
<label className="text-sm font-medium">
|
||||
Credit Cost
|
||||
<input
|
||||
required
|
||||
type="number"
|
||||
name="credit_cost"
|
||||
defaultValue={cost?.credit_cost ?? 0}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
min={0}
|
||||
/>
|
||||
<span className="text-xs text-muted-foreground">
|
||||
Credits charged per run
|
||||
</span>
|
||||
</label>
|
||||
<label className="text-sm font-medium">
|
||||
Credential Provider
|
||||
<select
|
||||
required
|
||||
name="credential_provider"
|
||||
defaultValue={cost?.credential_provider ?? provider?.name ?? ""}
|
||||
className="mt-1 w-full rounded border border-input bg-background p-2 text-sm"
|
||||
>
|
||||
<option value="" disabled>
|
||||
Select provider
|
||||
</option>
|
||||
{providers.map((p) => (
|
||||
<option key={p.id} value={p.name}>
|
||||
{p.display_name} ({p.name})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<span className="text-xs text-muted-foreground">
|
||||
Must match a key in PROVIDER_CREDENTIALS
|
||||
</span>
|
||||
</label>
|
||||
</div>
|
||||
{/* Hidden defaults for credential_type and unit */}
|
||||
<input
|
||||
type="hidden"
|
||||
name="credential_type"
|
||||
value={
|
||||
cost?.credential_type ??
|
||||
provider?.default_credential_type ??
|
||||
"api_key"
|
||||
}
|
||||
/>
|
||||
<input type="hidden" name="unit" value={cost?.unit ?? "RUN"} />
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => setOpen(false)}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Updating..." : "Update Model"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,263 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { updateLlmProviderAction } from "../actions";
|
||||
import { useRouter } from "next/navigation";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
|
||||
export function EditProviderModal({ provider }: { provider: LlmProvider }) {
|
||||
const [open, setOpen] = useState(false);
|
||||
const [isSubmitting, setIsSubmitting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const router = useRouter();
|
||||
|
||||
async function handleSubmit(formData: FormData) {
|
||||
setIsSubmitting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await updateLlmProviderAction(formData);
|
||||
setOpen(false);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(
|
||||
err instanceof Error ? err.message : "Failed to update provider",
|
||||
);
|
||||
} finally {
|
||||
setIsSubmitting(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Edit Provider"
|
||||
controlled={{ isOpen: open, set: setOpen }}
|
||||
styling={{ maxWidth: "768px", maxHeight: "90vh", overflowY: "auto" }}
|
||||
>
|
||||
<Dialog.Trigger>
|
||||
<Button variant="outline" size="small">
|
||||
Edit
|
||||
</Button>
|
||||
</Dialog.Trigger>
|
||||
<Dialog.Content>
|
||||
<div className="mb-4 text-sm text-muted-foreground">
|
||||
Update provider configuration and capabilities.
|
||||
</div>
|
||||
|
||||
<form action={handleSubmit} className="space-y-6">
|
||||
<input type="hidden" name="provider_id" value={provider.id} />
|
||||
|
||||
{/* Basic Information */}
|
||||
<div className="space-y-4">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Basic Information
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Core provider details
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Provider Slug <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="name"
|
||||
required
|
||||
name="name"
|
||||
defaultValue={provider.name}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="e.g. openai"
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="display_name"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Display Name <span className="text-destructive">*</span>
|
||||
</label>
|
||||
<input
|
||||
id="display_name"
|
||||
required
|
||||
name="display_name"
|
||||
defaultValue={provider.display_name}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="OpenAI"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="description"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Description
|
||||
</label>
|
||||
<textarea
|
||||
id="description"
|
||||
name="description"
|
||||
rows={3}
|
||||
defaultValue={provider.description ?? ""}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="Optional description..."
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Default Credentials */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Default Credentials
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Credential provider name that matches the key in{" "}
|
||||
<code className="rounded bg-muted px-1 py-0.5 font-mono text-xs">
|
||||
PROVIDER_CREDENTIALS
|
||||
</code>
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-4 sm:grid-cols-2">
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="default_credential_provider"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Credential Provider
|
||||
</label>
|
||||
<input
|
||||
id="default_credential_provider"
|
||||
name="default_credential_provider"
|
||||
defaultValue={provider.default_credential_provider ?? ""}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="openai"
|
||||
/>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="default_credential_id"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Credential ID
|
||||
</label>
|
||||
<input
|
||||
id="default_credential_id"
|
||||
name="default_credential_id"
|
||||
defaultValue={provider.default_credential_id ?? ""}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="Optional credential ID"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<label
|
||||
htmlFor="default_credential_type"
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
Credential Type
|
||||
</label>
|
||||
<input
|
||||
id="default_credential_type"
|
||||
name="default_credential_type"
|
||||
defaultValue={provider.default_credential_type ?? "api_key"}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm transition-colors placeholder:text-muted-foreground focus:border-primary focus:outline-none focus:ring-2 focus:ring-primary/20"
|
||||
placeholder="api_key"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Capabilities */}
|
||||
<div className="space-y-4 border-t border-border pt-6">
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-semibold text-foreground">
|
||||
Capabilities
|
||||
</h3>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Provider feature flags
|
||||
</p>
|
||||
</div>
|
||||
<div className="grid gap-3 sm:grid-cols-2">
|
||||
{[
|
||||
{
|
||||
name: "supports_tools",
|
||||
label: "Supports tools",
|
||||
checked: provider.supports_tools,
|
||||
},
|
||||
{
|
||||
name: "supports_json_output",
|
||||
label: "Supports JSON output",
|
||||
checked: provider.supports_json_output,
|
||||
},
|
||||
{
|
||||
name: "supports_reasoning",
|
||||
label: "Supports reasoning",
|
||||
checked: provider.supports_reasoning,
|
||||
},
|
||||
{
|
||||
name: "supports_parallel_tool",
|
||||
label: "Supports parallel tool calls",
|
||||
checked: provider.supports_parallel_tool,
|
||||
},
|
||||
].map(({ name, label, checked }) => (
|
||||
<div
|
||||
key={name}
|
||||
className="flex items-center gap-3 rounded-md border border-border bg-muted/30 px-4 py-3 transition-colors hover:bg-muted/50"
|
||||
>
|
||||
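{/* Hidden fallback so an unchecked capability box still submits a value for this flag */}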
<input type="hidden" name={name} value="off" />
|
||||
<input
|
||||
id={name}
|
||||
type="checkbox"
|
||||
name={name}
|
||||
defaultChecked={checked}
|
||||
className="h-4 w-4 rounded border-input"
|
||||
/>
|
||||
<label
|
||||
htmlFor={name}
|
||||
className="text-sm font-medium text-foreground"
|
||||
>
|
||||
{label}
|
||||
</label>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="rounded-lg border border-destructive/30 bg-destructive/10 p-3 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Dialog.Footer>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
setOpen(false);
|
||||
setError(null);
|
||||
}}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
type="submit"
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
{isSubmitting ? "Saving..." : "Save Changes"}
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</form>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import { ErrorBoundary } from "@/components/molecules/ErrorBoundary/ErrorBoundary";
|
||||
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
|
||||
import { AddProviderModal } from "./AddProviderModal";
|
||||
import { AddModelModal } from "./AddModelModal";
|
||||
import { AddCreatorModal } from "./AddCreatorModal";
|
||||
import { ProviderList } from "./ProviderList";
|
||||
import { ModelsTable } from "./ModelsTable";
|
||||
import { MigrationsTable } from "./MigrationsTable";
|
||||
import { CreatorsTable } from "./CreatorsTable";
|
||||
import { RecommendedModelSelector } from "./RecommendedModelSelector";
|
||||
|
||||
interface Props {
|
||||
providers: LlmProvider[];
|
||||
models: LlmModel[];
|
||||
migrations: LlmModelMigration[];
|
||||
creators: LlmModelCreator[];
|
||||
}
|
||||
|
||||
function AdminErrorFallback() {
|
||||
return (
|
||||
<div className="mx-auto max-w-xl p-6">
|
||||
<ErrorCard
|
||||
responseError={{
|
||||
message:
|
||||
"An error occurred while loading the LLM Registry. Please refresh the page.",
|
||||
}}
|
||||
context="llm-registry"
|
||||
onRetry={() => window.location.reload()}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export function LlmRegistryDashboard({
|
||||
providers,
|
||||
models,
|
||||
migrations,
|
||||
creators,
|
||||
}: Props) {
|
||||
return (
|
||||
<ErrorBoundary fallback={<AdminErrorFallback />} context="llm-registry">
|
||||
<div className="mx-auto p-6">
|
||||
<div className="flex flex-col gap-6">
|
||||
{/* Header */}
|
||||
<div>
|
||||
<h1 className="text-3xl font-bold">LLM Registry</h1>
|
||||
<p className="text-muted-foreground">
|
||||
Manage providers, creators, models, and credit pricing
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Active Migrations Section - Only show if there are migrations */}
|
||||
{migrations.length > 0 && (
|
||||
<div className="rounded-lg border border-primary/30 bg-primary/5 p-6 shadow-sm">
|
||||
<div className="mb-4">
|
||||
<h2 className="text-xl font-semibold">Active Migrations</h2>
|
||||
<p className="mt-1 text-sm text-muted-foreground">
|
||||
These migrations can be reverted to restore workflows to their
|
||||
original model
|
||||
</p>
|
||||
</div>
|
||||
<MigrationsTable migrations={migrations} />
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Providers & Creators Section - Side by Side */}
|
||||
<div className="grid gap-6 lg:grid-cols-2">
|
||||
{/* Providers */}
|
||||
<div className="rounded-lg border bg-card p-6 shadow-sm">
|
||||
<div className="mb-4 flex items-center justify-between">
|
||||
<div>
|
||||
<h2 className="text-xl font-semibold">Providers</h2>
|
||||
<p className="mt-1 text-sm text-muted-foreground">
|
||||
Who hosts/serves the models
|
||||
</p>
|
||||
</div>
|
||||
<AddProviderModal />
|
||||
</div>
|
||||
<ProviderList providers={providers} />
|
||||
</div>
|
||||
|
||||
{/* Creators */}
|
||||
<div className="rounded-lg border bg-card p-6 shadow-sm">
|
||||
<div className="mb-4 flex items-center justify-between">
|
||||
<div>
|
||||
<h2 className="text-xl font-semibold">Creators</h2>
|
||||
<p className="mt-1 text-sm text-muted-foreground">
|
||||
Who made/trained the models
|
||||
</p>
|
||||
</div>
|
||||
<AddCreatorModal />
|
||||
</div>
|
||||
<CreatorsTable creators={creators} />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Models Section */}
|
||||
<div className="rounded-lg border bg-card p-6 shadow-sm">
|
||||
<div className="mb-4 flex items-center justify-between">
|
||||
<div>
|
||||
<h2 className="text-xl font-semibold">Models</h2>
|
||||
<p className="mt-1 text-sm text-muted-foreground">
|
||||
Toggle availability, adjust context windows, and update credit
|
||||
pricing
|
||||
</p>
|
||||
</div>
|
||||
<AddModelModal providers={providers} creators={creators} />
|
||||
</div>
|
||||
|
||||
{/* Recommended Model Selector */}
|
||||
<div className="mb-6">
|
||||
<RecommendedModelSelector models={models} />
|
||||
</div>
|
||||
|
||||
<ModelsTable
|
||||
models={models}
|
||||
providers={providers}
|
||||
creators={creators}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</ErrorBoundary>
|
||||
);
|
||||
}
|
||||
@@ -1,133 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import type { LlmModelMigration } from "@/app/api/__generated__/models/llmModelMigration";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/atoms/Table/Table";
|
||||
import { revertLlmMigrationAction } from "../actions";
|
||||
|
||||
export function MigrationsTable({
|
||||
migrations,
|
||||
}: {
|
||||
migrations: LlmModelMigration[];
|
||||
}) {
|
||||
if (!migrations.length) {
|
||||
return (
|
||||
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
|
||||
No active migrations. Migrations are created when you disable a model
|
||||
with the "Migrate existing workflows" option.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="rounded-lg border">
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Migration</TableHead>
|
||||
<TableHead>Reason</TableHead>
|
||||
<TableHead>Nodes Affected</TableHead>
|
||||
<TableHead>Custom Cost</TableHead>
|
||||
<TableHead>Created</TableHead>
|
||||
<TableHead className="text-right">Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{migrations.map((migration) => (
|
||||
<MigrationRow key={migration.id} migration={migration} />
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function MigrationRow({ migration }: { migration: LlmModelMigration }) {
|
||||
const [isReverting, setIsReverting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
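// Runs the revert server action; any failure is surfaced in an extra row below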
async function handleRevert(formData: FormData) {
|
||||
setIsReverting(true);
|
||||
setError(null);
|
||||
try {
|
||||
await revertLlmMigrationAction(formData);
|
||||
} catch (err) {
|
||||
setError(
|
||||
err instanceof Error ? err.message : "Failed to revert migration",
|
||||
);
|
||||
} finally {
|
||||
setIsReverting(false);
|
||||
}
|
||||
}
|
||||
|
||||
const createdDate = new Date(migration.created_at);
|
||||
|
||||
return (
|
||||
<>
|
||||
<TableRow>
|
||||
<TableCell>
|
||||
<div className="text-sm">
|
||||
<span className="font-medium">{migration.source_model_slug}</span>
|
||||
<span className="mx-2 text-muted-foreground">→</span>
|
||||
<span className="font-medium">{migration.target_model_slug}</span>
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="text-sm text-muted-foreground">
|
||||
{migration.reason || "—"}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="text-sm">{migration.node_count}</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="text-sm">
|
||||
{migration.custom_credit_cost !== null &&
|
||||
migration.custom_credit_cost !== undefined
|
||||
? `${migration.custom_credit_cost} credits`
|
||||
: "—"}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="text-sm text-muted-foreground">
|
||||
{createdDate.toLocaleDateString()}{" "}
|
||||
{createdDate.toLocaleTimeString([], {
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
})}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell className="text-right">
|
||||
<form action={handleRevert} className="inline">
|
||||
<input type="hidden" name="migration_id" value={migration.id} />
|
||||
<Button
|
||||
type="submit"
|
||||
variant="outline"
|
||||
size="small"
|
||||
disabled={isReverting}
|
||||
>
|
||||
{isReverting ? "Reverting..." : "Revert"}
|
||||
</Button>
|
||||
</form>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
{error && (
|
||||
<TableRow>
|
||||
<TableCell colSpan={6}>
|
||||
<div className="rounded border border-destructive/30 bg-destructive/10 p-2 text-sm text-destructive">
|
||||
{error}
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,265 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import type { LlmModelCreator } from "@/app/api/__generated__/models/llmModelCreator";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/atoms/Table/Table";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { toggleLlmModelAction } from "../actions";
|
||||
import { DeleteModelModal } from "./DeleteModelModal";
|
||||
import { DisableModelModal } from "./DisableModelModal";
|
||||
import { EditModelModal } from "./EditModelModal";
|
||||
import { Star, Spinner } from "@phosphor-icons/react";
|
||||
import { getV2ListLlmModels } from "@/app/api/__generated__/endpoints/admin/admin";
|
||||
|
||||
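// Number of models fetched per request when paginating the admin models list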
const PAGE_SIZE = 50;
|
||||
|
||||
export function ModelsTable({
|
||||
models: initialModels,
|
||||
providers,
|
||||
creators,
|
||||
}: {
|
||||
models: LlmModel[];
|
||||
providers: LlmProvider[];
|
||||
creators: LlmModelCreator[];
|
||||
}) {
|
||||
const [models, setModels] = useState<LlmModel[]>(initialModels);
|
||||
const [currentPage, setCurrentPage] = useState(1);
|
||||
const [hasMore, setHasMore] = useState(initialModels.length === PAGE_SIZE);
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
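// Tracks how many pages are currently loaded so the sync effect can re-fetch them all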
const loadedPagesRef = useRef(1);
|
||||
|
||||
// Sync with parent when initialModels changes (e.g., after enable/disable)
|
||||
// Re-fetch all loaded pages to preserve expanded state
|
||||
useEffect(() => {
|
||||
async function refetchAllPages() {
|
||||
const pagesToLoad = loadedPagesRef.current;
|
||||
|
||||
if (pagesToLoad === 1) {
|
||||
// Only first page loaded, just use initialModels
|
||||
setModels(initialModels);
|
||||
setHasMore(initialModels.length === PAGE_SIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
// Re-fetch all pages we had loaded
|
||||
const allModels: LlmModel[] = [...initialModels];
|
||||
let lastPageHadFullResults = initialModels.length === PAGE_SIZE;
|
||||
|
||||
for (let page = 2; page <= pagesToLoad; page++) {
|
||||
try {
|
||||
const response = await getV2ListLlmModels({
|
||||
page,
|
||||
page_size: PAGE_SIZE,
|
||||
});
|
||||
if (response.status === 200) {
|
||||
allModels.push(...response.data.models);
|
||||
lastPageHadFullResults = response.data.models.length === PAGE_SIZE;
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`Error refetching page ${page}:`, err);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
setModels(allModels);
|
||||
setHasMore(lastPageHadFullResults);
|
||||
}
|
||||
|
||||
refetchAllPages();
|
||||
}, [initialModels]);
|
||||
|
||||
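// Fetches the next page of models and appends it to the current list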
async function loadMore() {
|
||||
if (isLoading) return;
|
||||
setIsLoading(true);
|
||||
|
||||
try {
|
||||
const nextPage = currentPage + 1;
|
||||
const response = await getV2ListLlmModels({
|
||||
page: nextPage,
|
||||
page_size: PAGE_SIZE,
|
||||
});
|
||||
|
||||
if (response.status === 200) {
|
||||
setModels((prev) => [...prev, ...response.data.models]);
|
||||
setCurrentPage(nextPage);
|
||||
loadedPagesRef.current = nextPage;
|
||||
setHasMore(response.data.models.length === PAGE_SIZE);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error("Error loading more models:", err);
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
}
|
||||
if (!models.length) {
|
||||
return (
|
||||
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
|
||||
No models registered yet.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
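// Index providers by id so each model row can resolve its provider without scanning the array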
const providerLookup = new Map(
|
||||
providers.map((provider) => [provider.id, provider]),
|
||||
);
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="rounded-lg border">
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Model</TableHead>
|
||||
<TableHead>Provider</TableHead>
|
||||
<TableHead>Creator</TableHead>
|
||||
<TableHead>Context Window</TableHead>
|
||||
<TableHead>Max Output</TableHead>
|
||||
<TableHead>Cost</TableHead>
|
||||
<TableHead>Status</TableHead>
|
||||
<TableHead>Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{models.map((model) => {
|
||||
const cost = model.costs?.[0];
|
||||
const provider = providerLookup.get(model.provider_id);
|
||||
return (
|
||||
<TableRow
|
||||
key={model.id}
|
||||
className={model.is_enabled ? "" : "opacity-60"}
|
||||
>
|
||||
<TableCell>
|
||||
<div className="font-medium">{model.display_name}</div>
|
||||
<div className="text-xs text-muted-foreground">
|
||||
{model.slug}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{provider ? (
|
||||
<>
|
||||
<div>{provider.display_name}</div>
|
||||
<div className="text-xs text-muted-foreground">
|
||||
{provider.name}
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
model.provider_id
|
||||
)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{model.creator ? (
|
||||
<>
|
||||
<div>{model.creator.display_name}</div>
|
||||
<div className="text-xs text-muted-foreground">
|
||||
{model.creator.name}
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
<span className="text-muted-foreground">—</span>
|
||||
)}
|
||||
</TableCell>
|
||||
<TableCell>{model.context_window.toLocaleString()}</TableCell>
|
||||
<TableCell>
|
||||
{model.max_output_tokens
|
||||
? model.max_output_tokens.toLocaleString()
|
||||
: "—"}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{cost ? (
|
||||
<>
|
||||
<div className="font-medium">
|
||||
{cost.credit_cost} credits
|
||||
</div>
|
||||
<div className="text-xs text-muted-foreground">
|
||||
{cost.credential_provider}
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
"—"
|
||||
)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex flex-col gap-1">
|
||||
<span
|
||||
className={`inline-flex rounded-full px-2.5 py-1 text-xs font-semibold ${
|
||||
model.is_enabled
|
||||
? "bg-primary/10 text-primary"
|
||||
: "bg-muted text-muted-foreground"
|
||||
}`}
|
||||
>
|
||||
{model.is_enabled ? "Enabled" : "Disabled"}
|
||||
</span>
|
||||
{model.is_recommended && (
|
||||
<span className="inline-flex items-center gap-1 rounded-full bg-amber-500/10 px-2.5 py-1 text-xs font-semibold text-amber-600 dark:text-amber-400">
|
||||
<Star size={12} weight="fill" />
|
||||
Recommended
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex items-center justify-end gap-2">
|
||||
{model.is_enabled ? (
|
||||
<DisableModelModal
|
||||
model={model}
|
||||
availableModels={models}
|
||||
/>
|
||||
) : (
|
||||
<EnableModelButton modelId={model.id} />
|
||||
)}
|
||||
<EditModelModal
|
||||
model={model}
|
||||
providers={providers}
|
||||
creators={creators}
|
||||
/>
|
||||
<DeleteModelModal
|
||||
model={model}
|
||||
availableModels={models}
|
||||
/>
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
|
||||
{hasMore && (
|
||||
<div className="mt-4 flex justify-center">
|
||||
<Button onClick={loadMore} disabled={isLoading} variant="outline">
|
||||
{isLoading ? (
|
||||
<>
|
||||
<Spinner className="mr-2 h-4 w-4 animate-spin" />
|
||||
Loading...
|
||||
</>
|
||||
) : (
|
||||
"Load More"
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function EnableModelButton({ modelId }: { modelId: string }) {
|
||||
return (
|
||||
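{/* Plain form post to the toggle server action with is_enabled preset to true */}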
<form action={toggleLlmModelAction} className="inline">
|
||||
<input type="hidden" name="model_id" value={modelId} />
|
||||
<input type="hidden" name="is_enabled" value="true" />
|
||||
<Button type="submit" variant="outline" size="small" className="min-w-0">
|
||||
Enable
|
||||
</Button>
|
||||
</form>
|
||||
);
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from "@/components/atoms/Table/Table";
|
||||
import type { LlmProvider } from "@/app/api/__generated__/models/llmProvider";
|
||||
import { DeleteProviderModal } from "./DeleteProviderModal";
|
||||
import { EditProviderModal } from "./EditProviderModal";
|
||||
|
||||
export function ProviderList({ providers }: { providers: LlmProvider[] }) {
|
||||
if (!providers.length) {
|
||||
return (
|
||||
<div className="rounded-lg border border-dashed border-border p-6 text-center text-sm text-muted-foreground">
|
||||
No providers configured yet.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="rounded-lg border">
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Name</TableHead>
|
||||
<TableHead>Display Name</TableHead>
|
||||
<TableHead>Default Credential</TableHead>
|
||||
<TableHead>Capabilities</TableHead>
|
||||
<TableHead>Models</TableHead>
|
||||
<TableHead className="w-[100px]">Actions</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{providers.map((provider) => (
|
||||
<TableRow key={provider.id}>
|
||||
<TableCell className="font-medium">{provider.name}</TableCell>
|
||||
<TableCell>{provider.display_name}</TableCell>
|
||||
<TableCell>
|
||||
{provider.default_credential_provider
|
||||
? `${provider.default_credential_provider} (${provider.default_credential_id ?? "id?"})`
|
||||
: "—"}
|
||||
</TableCell>
|
||||
<TableCell className="text-sm text-muted-foreground">
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{provider.supports_tools && (
|
||||
<span className="rounded bg-muted px-2 py-0.5 text-xs">
|
||||
Tools
|
||||
</span>
|
||||
)}
|
||||
{provider.supports_json_output && (
|
||||
<span className="rounded bg-muted px-2 py-0.5 text-xs">
|
||||
JSON
|
||||
</span>
|
||||
)}
|
||||
{provider.supports_reasoning && (
|
||||
<span className="rounded bg-muted px-2 py-0.5 text-xs">
|
||||
Reasoning
|
||||
</span>
|
||||
)}
|
||||
{provider.supports_parallel_tool && (
|
||||
<span className="rounded bg-muted px-2 py-0.5 text-xs">
|
||||
Parallel Tools
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
</TableCell>
|
||||
<TableCell className="text-sm">
|
||||
<span
|
||||
className={
|
||||
(provider.models?.length ?? 0) > 0
|
||||
? "text-foreground"
|
||||
: "text-muted-foreground"
|
||||
}
|
||||
>
|
||||
{provider.models?.length ?? 0}
|
||||
</span>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex gap-2">
|
||||
<EditProviderModal provider={provider} />
|
||||
<DeleteProviderModal provider={provider} />
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState } from "react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import type { LlmModel } from "@/app/api/__generated__/models/llmModel";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { setRecommendedModelAction } from "../actions";
|
||||
import { Star } from "@phosphor-icons/react";
|
||||
|
||||
export function RecommendedModelSelector({ models }: { models: LlmModel[] }) {
|
||||
const router = useRouter();
|
||||
const enabledModels = models.filter((m) => m.is_enabled);
|
||||
const currentRecommended = models.find((m) => m.is_recommended);
|
||||
|
||||
const [selectedModelId, setSelectedModelId] = useState<string>(
|
||||
currentRecommended?.id || "",
|
||||
);
|
||||
const [isSaving, setIsSaving] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
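// Enable Save only when the selection differs from the stored recommendation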
const hasChanges = selectedModelId !== (currentRecommended?.id || "");
|
||||
|
||||
async function handleSave() {
|
||||
if (!selectedModelId) return;
|
||||
|
||||
setIsSaving(true);
|
||||
setError(null);
|
||||
try {
|
||||
const formData = new FormData();
|
||||
formData.set("model_id", selectedModelId);
|
||||
await setRecommendedModelAction(formData);
|
||||
router.refresh();
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : "Failed to save");
|
||||
} finally {
|
||||
setIsSaving(false);
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="rounded-lg border border-border bg-card p-4">
|
||||
<div className="mb-3 flex items-center gap-2">
|
||||
<Star size={20} weight="fill" className="text-amber-500" />
|
||||
<h3 className="text-sm font-semibold">Recommended Model</h3>
|
||||
</div>
|
||||
<p className="mb-3 text-xs text-muted-foreground">
|
||||
The recommended model is shown as the default suggestion in model
|
||||
selection dropdowns throughout the platform.
|
||||
</p>
|
||||
|
||||
<div className="flex items-center gap-3">
|
||||
<select
|
||||
value={selectedModelId}
|
||||
onChange={(e) => setSelectedModelId(e.target.value)}
|
||||
className="flex-1 rounded-md border border-input bg-background px-3 py-2 text-sm"
|
||||
disabled={isSaving}
|
||||
>
|
||||
<option value="">-- Select a model --</option>
|
||||
{enabledModels.map((model) => (
|
||||
<option key={model.id} value={model.id}>
|
||||
{model.display_name} ({model.slug})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
|
||||
<Button
|
||||
type="button"
|
||||
variant="primary"
|
||||
size="small"
|
||||
onClick={handleSave}
|
||||
disabled={!hasChanges || !selectedModelId || isSaving}
|
||||
>
|
||||
{isSaving ? "Saving..." : "Save"}
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{error && <p className="mt-2 text-xs text-destructive">{error}</p>}
|
||||
|
||||
{currentRecommended && !hasChanges && (
|
||||
<p className="mt-2 text-xs text-muted-foreground">
|
||||
Currently set to:{" "}
|
||||
<span className="font-medium">{currentRecommended.display_name}</span>
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,46 +0,0 @@
/**
 * Server-side data fetching for LLM Registry page.
 */

import {
  fetchLlmCreators,
  fetchLlmMigrations,
  fetchLlmModels,
  fetchLlmProviders,
} from "./actions";

export async function getLlmRegistryPageData() {
  // Fetch providers and models (required)
  const [providersResponse, modelsResponse] = await Promise.all([
    fetchLlmProviders(),
    fetchLlmModels(),
  ]);

  // Fetch migrations separately with fallback (table might not exist yet)
  let migrations: Awaited<ReturnType<typeof fetchLlmMigrations>>["migrations"] =
    [];
  try {
    const migrationsResponse = await fetchLlmMigrations(false);
    migrations = migrationsResponse.migrations;
  } catch {
    // Migrations table might not exist yet - that's ok, just show empty list
    console.warn("Could not fetch migrations - table may not exist yet");
  }

  // Fetch creators separately with fallback (table might not exist yet)
  let creators: Awaited<ReturnType<typeof fetchLlmCreators>>["creators"] = [];
  try {
    const creatorsResponse = await fetchLlmCreators();
    creators = creatorsResponse.creators;
  } catch {
    // Creators table might not exist yet - that's ok, just show empty list
    console.warn("Could not fetch creators - table may not exist yet");
  }

  return {
    providers: providersResponse.providers,
    models: modelsResponse.models,
    migrations,
    creators,
  };
}
@@ -1,14 +0,0 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { getLlmRegistryPageData } from "./getLlmRegistryPage";
import { LlmRegistryDashboard } from "./components/LlmRegistryDashboard";

async function LlmRegistryPage() {
  const data = await getLlmRegistryPageData();
  return <LlmRegistryDashboard {...data} />;
}

export default async function AdminLlmRegistryPage() {
  const withAdminAccess = await withRoleAccess(["admin"]);
  const ProtectedLlmRegistryPage = await withAdminAccess(LlmRegistryPage);
  return <ProtectedLlmRegistryPage />;
}
@@ -7,9 +7,8 @@ import { BlockCategoryResponse } from "@/app/api/__generated__/models/blockCateg
|
||||
import { BlockResponse } from "@/app/api/__generated__/models/blockResponse";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
import { getQueryClient } from "@/lib/react-query/queryClient";
|
||||
import { useState, useEffect } from "react";
|
||||
import { useState } from "react";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import BackendApi from "@/lib/autogpt-server-api";
|
||||
|
||||
export const useAllBlockContent = () => {
|
||||
const { toast } = useToast();
|
||||
@@ -94,32 +93,6 @@ export const useAllBlockContent = () => {
|
||||
const isErrorOnLoadingMore = (categoryName: string) =>
|
||||
errorLoadingCategories.has(categoryName);
|
||||
|
||||
// Listen for LLM registry refresh notifications
|
||||
useEffect(() => {
|
||||
const api = new BackendApi();
|
||||
const queryClient = getQueryClient();
|
||||
|
||||
const handleNotification = (notification: any) => {
|
||||
if (
|
||||
notification?.type === "LLM_REGISTRY_REFRESH" ||
|
||||
notification?.event === "registry_updated"
|
||||
) {
|
||||
// Invalidate all block-related queries to force refresh
|
||||
const categoriesQueryKey = getGetV2GetBuilderBlockCategoriesQueryKey();
|
||||
queryClient.invalidateQueries({ queryKey: categoriesQueryKey });
|
||||
}
|
||||
};
|
||||
|
||||
const unsubscribe = api.onWebSocketMessage(
|
||||
"notification",
|
||||
handleNotification,
|
||||
);
|
||||
|
||||
return () => {
|
||||
unsubscribe();
|
||||
};
|
||||
}, []);
|
||||
|
||||
return {
|
||||
data,
|
||||
isLoading,
|
||||
|
||||
@@ -610,11 +610,8 @@ const NodeOneOfDiscriminatorField: FC<{
|
||||
|
||||
return oneOfVariants
|
||||
.map((variant) => {
|
||||
const discProperty = variant.properties?.[discriminatorProperty];
|
||||
const variantDiscValue =
|
||||
discProperty && "const" in discProperty
|
||||
? (discProperty.const as string)
|
||||
: undefined; // NOTE: can discriminators only be strings?
|
||||
const variantDiscValue = variant.properties?.[discriminatorProperty]
|
||||
?.const as string; // NOTE: can discriminators only be strings?
|
||||
|
||||
return {
|
||||
value: variantDiscValue,
|
||||
@@ -1127,47 +1124,9 @@ const NodeStringInput: FC<{
|
||||
displayName,
|
||||
}) => {
|
||||
value ||= schema.default || "";
|
||||
|
||||
// Check if we have options with labels (e.g., LLM model picker)
|
||||
const hasOptions = schema.options && schema.options.length > 0;
|
||||
const hasEnum = schema.enum && schema.enum.length > 0;
|
||||
|
||||
// Helper to get display label for a value
|
||||
const getDisplayLabel = (val: string) => {
|
||||
if (hasOptions) {
|
||||
const option = schema.options!.find((opt) => opt.value === val);
|
||||
return option?.label || beautifyString(val);
|
||||
}
|
||||
return beautifyString(val);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={className}>
|
||||
{hasOptions ? (
|
||||
// Render options with proper labels (used by LLM model picker)
|
||||
<Select
|
||||
defaultValue={value}
|
||||
onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
|
||||
>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={schema.placeholder || displayName}>
|
||||
{value ? getDisplayLabel(value) : undefined}
|
||||
</SelectValue>
|
||||
</SelectTrigger>
|
||||
<SelectContent className="nodrag">
|
||||
{schema.options!.map((option, index) => (
|
||||
<SelectItem
|
||||
key={index}
|
||||
value={option.value}
|
||||
title={option.description}
|
||||
>
|
||||
{option.label || beautifyString(option.value)}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
) : hasEnum ? (
|
||||
// Fallback to enum with beautified strings
|
||||
{schema.enum && schema.enum.length > 0 ? (
|
||||
<Select
|
||||
defaultValue={value}
|
||||
onValueChange={(newValue) => handleInputChange(selfKey, newValue)}
|
||||
@@ -1176,8 +1135,8 @@ const NodeStringInput: FC<{
|
||||
<SelectValue placeholder={schema.placeholder || displayName} />
|
||||
</SelectTrigger>
|
||||
<SelectContent className="nodrag">
|
||||
{schema
|
||||
.enum!.filter((option) => option)
|
||||
{schema.enum
|
||||
.filter((option) => option)
|
||||
.map((option, index) => (
|
||||
<SelectItem key={index} value={option}>
|
||||
{beautifyString(option)}
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
import type { ToolUIPart } from "ai";
|
||||
import { MorphingTextAnimation } from "../../components/MorphingTextAnimation/MorphingTextAnimation";
|
||||
import { ToolAccordion } from "../../components/ToolAccordion/ToolAccordion";
|
||||
import { BlockDetailsCard } from "./components/BlockDetailsCard/BlockDetailsCard";
|
||||
import { BlockOutputCard } from "./components/BlockOutputCard/BlockOutputCard";
|
||||
import { ErrorCard } from "./components/ErrorCard/ErrorCard";
|
||||
import { SetupRequirementsCard } from "./components/SetupRequirementsCard/SetupRequirementsCard";
|
||||
@@ -12,7 +11,6 @@ import {
|
||||
getAnimationText,
|
||||
getRunBlockToolOutput,
|
||||
isRunBlockBlockOutput,
|
||||
isRunBlockDetailsOutput,
|
||||
isRunBlockErrorOutput,
|
||||
isRunBlockSetupRequirementsOutput,
|
||||
ToolIcon,
|
||||
@@ -43,7 +41,6 @@ export function RunBlockTool({ part }: Props) {
|
||||
part.state === "output-available" &&
|
||||
!!output &&
|
||||
(isRunBlockBlockOutput(output) ||
|
||||
isRunBlockDetailsOutput(output) ||
|
||||
isRunBlockSetupRequirementsOutput(output) ||
|
||||
isRunBlockErrorOutput(output));
|
||||
|
||||
@@ -61,10 +58,6 @@ export function RunBlockTool({ part }: Props) {
|
||||
<ToolAccordion {...getAccordionMeta(output)}>
|
||||
{isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}
|
||||
|
||||
{isRunBlockDetailsOutput(output) && (
|
||||
<BlockDetailsCard output={output} />
|
||||
)}
|
||||
|
||||
{isRunBlockSetupRequirementsOutput(output) && (
|
||||
<SetupRequirementsCard output={output} />
|
||||
)}
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
import type { Meta, StoryObj } from "@storybook/nextjs";
|
||||
import { ResponseType } from "@/app/api/__generated__/models/responseType";
|
||||
import type { BlockDetailsResponse } from "../../helpers";
|
||||
import { BlockDetailsCard } from "./BlockDetailsCard";
|
||||
|
||||
const meta: Meta<typeof BlockDetailsCard> = {
|
||||
title: "Copilot/RunBlock/BlockDetailsCard",
|
||||
component: BlockDetailsCard,
|
||||
parameters: {
|
||||
layout: "centered",
|
||||
},
|
||||
tags: ["autodocs"],
|
||||
decorators: [
|
||||
(Story) => (
|
||||
<div style={{ maxWidth: 480 }}>
|
||||
<Story />
|
||||
</div>
|
||||
),
|
||||
],
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj<typeof meta>;
|
||||
|
||||
const baseBlock: BlockDetailsResponse = {
|
||||
type: ResponseType.block_details,
|
||||
message:
|
||||
"Here are the details for the GetWeather block. Provide the required inputs to run it.",
|
||||
session_id: "session-123",
|
||||
user_authenticated: true,
|
||||
block: {
|
||||
id: "block-abc-123",
|
||||
name: "GetWeather",
|
||||
description: "Fetches current weather data for a given location.",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: {
|
||||
title: "Location",
|
||||
type: "string",
|
||||
description:
|
||||
"City name or coordinates (e.g. 'London' or '51.5,-0.1')",
|
||||
},
|
||||
units: {
|
||||
title: "Units",
|
||||
type: "string",
|
||||
description: "Temperature units: 'metric' or 'imperial'",
|
||||
},
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
temperature: {
|
||||
title: "Temperature",
|
||||
type: "number",
|
||||
description: "Current temperature in the requested units",
|
||||
},
|
||||
condition: {
|
||||
title: "Condition",
|
||||
type: "string",
|
||||
description: "Weather condition description (e.g. 'Sunny', 'Rain')",
|
||||
},
|
||||
},
|
||||
},
|
||||
credentials: [],
|
||||
},
|
||||
};
|
||||
|
||||
export const Default: Story = {
|
||||
args: {
|
||||
output: baseBlock,
|
||||
},
|
||||
};
|
||||
|
||||
export const InputsOnly: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "This block requires inputs. No outputs are defined.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
outputs: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const OutputsOnly: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "This block has no required inputs.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
inputs: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const ManyFields: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "Block with many input and output fields.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
name: "SendEmail",
|
||||
description: "Sends an email via SMTP.",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
to: {
|
||||
title: "To",
|
||||
type: "string",
|
||||
description: "Recipient email address",
|
||||
},
|
||||
subject: {
|
||||
title: "Subject",
|
||||
type: "string",
|
||||
description: "Email subject line",
|
||||
},
|
||||
body: {
|
||||
title: "Body",
|
||||
type: "string",
|
||||
description: "Email body content",
|
||||
},
|
||||
cc: {
|
||||
title: "CC",
|
||||
type: "string",
|
||||
description: "CC recipients (comma-separated)",
|
||||
},
|
||||
bcc: {
|
||||
title: "BCC",
|
||||
type: "string",
|
||||
description: "BCC recipients (comma-separated)",
|
||||
},
|
||||
},
|
||||
required: ["to", "subject", "body"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
message_id: {
|
||||
title: "Message ID",
|
||||
type: "string",
|
||||
description: "Unique ID of the sent email",
|
||||
},
|
||||
status: {
|
||||
title: "Status",
|
||||
type: "string",
|
||||
description: "Delivery status",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
export const NoFieldDescriptions: Story = {
|
||||
args: {
|
||||
output: {
|
||||
...baseBlock,
|
||||
message: "Fields without descriptions.",
|
||||
block: {
|
||||
...baseBlock.block,
|
||||
name: "SimpleBlock",
|
||||
inputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
input_a: { title: "Input A", type: "string" },
|
||||
input_b: { title: "Input B", type: "number" },
|
||||
},
|
||||
required: ["input_a"],
|
||||
},
|
||||
outputs: {
|
||||
type: "object",
|
||||
properties: {
|
||||
result: { title: "Result", type: "string" },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -1,103 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import type { BlockDetailsResponse } from "../../helpers";
|
||||
import {
|
||||
ContentBadge,
|
||||
ContentCard,
|
||||
ContentCardDescription,
|
||||
ContentCardTitle,
|
||||
ContentGrid,
|
||||
ContentMessage,
|
||||
} from "../../../../components/ToolAccordion/AccordionContent";
|
||||
|
||||
interface Props {
|
||||
output: BlockDetailsResponse;
|
||||
}
|
||||
|
||||
function SchemaFieldList({
|
||||
title,
|
||||
properties,
|
||||
required,
|
||||
}: {
|
||||
title: string;
|
||||
properties: Record<string, unknown>;
|
||||
required?: string[];
|
||||
}) {
|
||||
const entries = Object.entries(properties);
|
||||
if (entries.length === 0) return null;
|
||||
|
||||
const requiredSet = new Set(required ?? []);
|
||||
|
||||
return (
|
||||
<ContentCard>
|
||||
<ContentCardTitle className="text-xs">{title}</ContentCardTitle>
|
||||
<div className="mt-2 grid gap-2">
|
||||
{entries.map(([name, schema]) => {
|
||||
const field = schema as Record<string, unknown> | undefined;
|
||||
const fieldTitle =
|
||||
typeof field?.title === "string" ? field.title : name;
|
||||
const fieldType =
|
||||
typeof field?.type === "string" ? field.type : "unknown";
|
||||
const description =
|
||||
typeof field?.description === "string"
|
||||
? field.description
|
||||
: undefined;
|
||||
|
||||
return (
|
||||
<div key={name} className="rounded-xl border p-2">
|
||||
<div className="flex items-center justify-between gap-2">
|
||||
<ContentCardTitle className="text-xs">
|
||||
{fieldTitle}
|
||||
</ContentCardTitle>
|
||||
<div className="flex gap-1">
|
||||
<ContentBadge>{fieldType}</ContentBadge>
|
||||
{requiredSet.has(name) && (
|
||||
<ContentBadge>Required</ContentBadge>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{description && (
|
||||
<ContentCardDescription className="mt-1 text-xs">
|
||||
{description}
|
||||
</ContentCardDescription>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</ContentCard>
|
||||
);
|
||||
}
|
||||
|
||||
export function BlockDetailsCard({ output }: Props) {
|
||||
const inputs = output.block.inputs as {
|
||||
properties?: Record<string, unknown>;
|
||||
required?: string[];
|
||||
} | null;
|
||||
const outputs = output.block.outputs as {
|
||||
properties?: Record<string, unknown>;
|
||||
required?: string[];
|
||||
} | null;
|
||||
|
||||
return (
|
||||
<ContentGrid>
|
||||
<ContentMessage>{output.message}</ContentMessage>
|
||||
|
||||
{inputs?.properties && Object.keys(inputs.properties).length > 0 && (
|
||||
<SchemaFieldList
|
||||
title="Inputs"
|
||||
properties={inputs.properties}
|
||||
required={inputs.required}
|
||||
/>
|
||||
)}
|
||||
|
||||
{outputs?.properties && Object.keys(outputs.properties).length > 0 && (
|
||||
<SchemaFieldList
|
||||
title="Outputs"
|
||||
properties={outputs.properties}
|
||||
required={outputs.required}
|
||||
/>
|
||||
)}
|
||||
</ContentGrid>
|
||||
);
|
||||
}
|
||||
@@ -10,37 +10,18 @@ import {
|
||||
import type { ToolUIPart } from "ai";
|
||||
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
|
||||
|
||||
/** Block details returned on first run_block attempt (before input_data provided). */
|
||||
export interface BlockDetailsResponse {
|
||||
type: typeof ResponseType.block_details;
|
||||
message: string;
|
||||
session_id?: string | null;
|
||||
block: {
|
||||
id: string;
|
||||
name: string;
|
||||
description: string;
|
||||
inputs: Record<string, unknown>;
|
||||
outputs: Record<string, unknown>;
|
||||
credentials: unknown[];
|
||||
};
|
||||
user_authenticated: boolean;
|
||||
}
|
||||
|
||||
export interface RunBlockInput {
|
||||
block_id?: string;
|
||||
block_name?: string;
|
||||
input_data?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export type RunBlockToolOutput =
|
||||
| SetupRequirementsResponse
|
||||
| BlockDetailsResponse
|
||||
| BlockOutputResponse
|
||||
| ErrorResponse;
|
||||
|
||||
const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
|
||||
ResponseType.setup_requirements,
|
||||
ResponseType.block_details,
|
||||
ResponseType.block_output,
|
||||
ResponseType.error,
|
||||
]);
|
||||
@@ -54,15 +35,6 @@ export function isRunBlockSetupRequirementsOutput(
|
||||
);
|
||||
}
|
||||
|
||||
export function isRunBlockDetailsOutput(
|
||||
output: RunBlockToolOutput,
|
||||
): output is BlockDetailsResponse {
|
||||
return (
|
||||
output.type === ResponseType.block_details ||
|
||||
("block" in output && typeof output.block === "object")
|
||||
);
|
||||
}
|
||||
|
||||
export function isRunBlockBlockOutput(
|
||||
output: RunBlockToolOutput,
|
||||
): output is BlockOutputResponse {
|
||||
@@ -92,7 +64,6 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
|
||||
return output as RunBlockToolOutput;
|
||||
}
|
||||
if ("block_id" in output) return output as BlockOutputResponse;
|
||||
if ("block" in output) return output as BlockDetailsResponse;
|
||||
if ("setup_info" in output) return output as SetupRequirementsResponse;
|
||||
if ("error" in output || "details" in output)
|
||||
return output as ErrorResponse;
|
||||
@@ -113,25 +84,17 @@ export function getAnimationText(part: {
|
||||
output?: unknown;
|
||||
}): string {
|
||||
const input = part.input as RunBlockInput | undefined;
|
||||
const blockName = input?.block_name?.trim();
|
||||
const blockId = input?.block_id?.trim();
|
||||
// Prefer block_name if available, otherwise fall back to block_id
|
||||
const blockText = blockName
|
||||
? ` "${blockName}"`
|
||||
: blockId
|
||||
? ` "${blockId}"`
|
||||
: "";
|
||||
const blockText = blockId ? ` "${blockId}"` : "";
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available":
|
||||
return `Running${blockText}`;
|
||||
return `Running the block${blockText}`;
|
||||
case "output-available": {
|
||||
const output = parseOutput(part.output);
|
||||
if (!output) return `Running${blockText}`;
|
||||
if (!output) return `Running the block${blockText}`;
|
||||
if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
|
||||
if (isRunBlockDetailsOutput(output))
|
||||
return `Details for "${output.block.name}"`;
|
||||
if (isRunBlockSetupRequirementsOutput(output)) {
|
||||
return `Setup needed for "${output.setup_info.agent_name}"`;
|
||||
}
|
||||
@@ -195,21 +158,6 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
|
||||
};
|
||||
}
|
||||
|
||||
if (isRunBlockDetailsOutput(output)) {
|
||||
const inputKeys = Object.keys(
|
||||
(output.block.inputs as { properties?: Record<string, unknown> })
|
||||
?.properties ?? {},
|
||||
);
|
||||
return {
|
||||
icon,
|
||||
title: output.block.name,
|
||||
description:
|
||||
inputKeys.length > 0
|
||||
? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
|
||||
: output.message,
|
||||
};
|
||||
}
|
||||
|
||||
if (isRunBlockSetupRequirementsOutput(output)) {
|
||||
const missingCredsCount = Object.keys(
|
||||
(output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
|
||||
|
||||
@@ -1,17 +1,25 @@
|
||||
"use client";
|
||||
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
||||
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
|
||||
import { HeartIcon } from "@phosphor-icons/react";
|
||||
import { useFavoriteAgents } from "../../hooks/useFavoriteAgents";
|
||||
import { LibraryAgentCard } from "../LibraryAgentCard/LibraryAgentCard";
|
||||
import { LibraryTabs, Tab } from "../LibraryTabs/LibraryTabs";
|
||||
import { LibraryActionSubHeader } from "../LibraryActionSubHeader/LibraryActionSubHeader";
|
||||
|
||||
interface Props {
|
||||
searchTerm: string;
|
||||
tabs: Tab[];
|
||||
activeTab: string;
|
||||
onTabChange: (tabId: string) => void;
|
||||
setLibrarySort: (value: LibraryAgentSort) => void;
|
||||
}
|
||||
|
||||
export function FavoritesSection({ searchTerm }: Props) {
|
||||
export function FavoritesSection({
  searchTerm,
  tabs,
  activeTab,
  onTabChange,
  setLibrarySort,
}: Props) {
|
||||
const {
|
||||
allAgents: favoriteAgents,
|
||||
agentLoading: isLoading,
|
||||
@@ -21,38 +29,26 @@ export function FavoritesSection({ searchTerm }: Props) {
|
||||
isFetchingNextPage,
|
||||
} = useFavoriteAgents({ searchTerm });
|
||||
|
||||
if (isLoading || favoriteAgents.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="!mb-8">
|
||||
<div className="mb-3 flex items-center gap-2 p-2">
|
||||
<HeartIcon className="h-5 w-5" weight="fill" />
|
||||
<div className="flex items-baseline gap-2">
|
||||
<Text variant="h4">Favorites</Text>
|
||||
{!isLoading && (
|
||||
<Text
|
||||
variant="body"
|
||||
data-testid="agents-count"
|
||||
className="relative bottom-px text-zinc-500"
|
||||
>
|
||||
{agentCount}
|
||||
</Text>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
<>
|
||||
<LibraryActionSubHeader
  agentCount={agentCount}
  setLibrarySort={setLibrarySort}
/>
|
||||
<LibraryTabs tabs={tabs} activeTab={activeTab} onTabChange={onTabChange} />
|
||||
|
||||
<div className="relative">
|
||||
{isLoading ? (
|
||||
<div className="flex h-[200px] items-center justify-center">
|
||||
<LoadingSpinner size="large" />
|
||||
</div>
|
||||
) : favoriteAgents.length === 0 ? (
|
||||
<div className="flex h-[200px] flex-col items-center justify-center gap-2 text-zinc-500">
|
||||
<HeartIcon className="h-10 w-10" />
|
||||
<Text variant="body">No favorite agents yet</Text>
|
||||
</div>
|
||||
) : (
|
||||
<InfiniteScroll
|
||||
isFetchingNextPage={isFetchingNextPage}
|
||||
fetchNextPage={fetchNextPage}
|
||||
hasNextPage={hasNextPage}
|
||||
loader={
|
||||
<div className="flex h-8 w-full items-center justify-center">
|
||||
<div className="h-6 w-6 animate-spin rounded-full border-b-2 border-t-2 border-neutral-800" />
|
||||
</div>
|
||||
}
|
||||
loader={<LoadingSpinner size="medium" />}
|
||||
>
|
||||
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4">
|
||||
{favoriteAgents.map((agent: LibraryAgent) => (
|
||||
@@ -60,9 +56,7 @@ export function FavoritesSection({ searchTerm }: Props) {
|
||||
))}
|
||||
</div>
|
||||
</InfiniteScroll>
|
||||
</div>
|
||||
|
||||
{favoriteAgents.length > 0 && <div className="!mt-10 border-t" />}
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,66 @@
"use client";

import { motion, AnimatePresence } from "framer-motion";
import { HeartIcon } from "@phosphor-icons/react";
import { useEffect, useState } from "react";

interface FlyingHeartProps {
startPosition: { x: number; y: number } | null;
targetPosition: { x: number; y: number } | null;
onAnimationComplete: () => void;
}

export function FlyingHeart({
startPosition,
targetPosition,
onAnimationComplete,
}: FlyingHeartProps) {
const [isVisible, setIsVisible] = useState(false);

useEffect(() => {
if (startPosition && targetPosition) {
setIsVisible(true);
}
}, [startPosition, targetPosition]);

if (!startPosition || !targetPosition) return null;

return (
<AnimatePresence>
{isVisible && (
<motion.div
className="pointer-events-none fixed z-50"
initial={{
x: startPosition.x,
y: startPosition.y,
scale: 1,
opacity: 1,
}}
animate={{
x: targetPosition.x,
y: targetPosition.y,
scale: 0.5,
opacity: 0,
}}
exit={{ opacity: 0 }}
transition={{
type: "spring",
damping: 20,
stiffness: 200,
duration: 0.5,
}}
onAnimationComplete={() => {
setIsVisible(false);
onAnimationComplete();
}}
>
<HeartIcon
size={24}
weight="fill"
className="text-red-500 drop-shadow-md"
/>
</motion.div>
)}
</AnimatePresence>
);
}

@@ -13,7 +13,7 @@ export function LibraryActionSubHeader({ agentCount, setLibrarySort }: Props) {
return (
<div className="flex items-baseline justify-between">
<div className="flex items-baseline gap-4">
<Text variant="h4">My agents</Text>
<Text variant="h5">My agents</Text>
<Text
variant="body"
data-testid="agents-count"

@@ -4,6 +4,7 @@ import { Text } from "@/components/atoms/Text/Text";
import { CaretCircleRightIcon } from "@phosphor-icons/react";
import Image from "next/image";
import NextLink from "next/link";
import { motion } from "framer-motion";

import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import Avatar, {
@@ -14,13 +15,24 @@ import { Link } from "@/components/atoms/Link/Link";
import { AgentCardMenu } from "./components/AgentCardMenu";
import { FavoriteButton } from "./components/FavoriteButton";
import { useLibraryAgentCard } from "./useLibraryAgentCard";
import { useFavoriteAnimation } from "../../context/FavoriteAnimationContext";

interface Props {
agent: LibraryAgent;
draggable?: boolean;
}

export function LibraryAgentCard({ agent }: Props) {
export function LibraryAgentCard({
agent,
draggable = true,
}: Props) {
const { id, name, graph_id, can_access_graph, image_url } = agent;
const { triggerFavoriteAnimation } = useFavoriteAnimation();

function handleDragStart(e: React.DragEvent<HTMLDivElement>) {
e.dataTransfer.setData("application/agent-id", id);
e.dataTransfer.effectAllowed = "move";
}

const {
isFromMarketplace,
@@ -28,14 +40,29 @@ export function LibraryAgentCard({ agent }: Props) {
profile,
creator_image_url,
handleToggleFavorite,
} = useLibraryAgentCard({ agent });
} = useLibraryAgentCard({
agent,
onFavoriteAdd: triggerFavoriteAnimation,
});

return (
<div
data-testid="library-agent-card"
data-agent-id={id}
className="group relative inline-flex h-[10.625rem] w-full max-w-[25rem] flex-col items-start justify-start gap-2.5 rounded-medium border border-zinc-100 bg-white transition-all duration-300 hover:shadow-md"
draggable={draggable}
onDragStart={handleDragStart}
className="cursor-grab active:cursor-grabbing"
>
<motion.div
layoutId={`agent-card-${id}`}
data-testid="library-agent-card"
data-agent-id={id}
className="group relative inline-flex h-[10.625rem] w-full max-w-[25rem] flex-col items-start justify-start gap-2.5 rounded-medium border border-zinc-100 bg-white hover:shadow-md"
transition={{
type: "spring",
damping: 25,
stiffness: 300,
}}
style={{ willChange: "transform" }}
>
<NextLink href={`/library/agents/${id}`} className="flex-shrink-0">
<div className="relative flex items-center gap-2 px-4 pt-3">
<Avatar className="h-4 w-4 rounded-full">
@@ -125,6 +152,7 @@ export function LibraryAgentCard({ agent }: Props) {
)}
</div>
</div>
</motion.div>
</div>
);
}

@@ -5,6 +5,10 @@ import {
useDeleteV2DeleteLibraryAgent,
usePostV2ForkLibraryAgent,
} from "@/app/api/__generated__/endpoints/library/library";
import {
usePostV2BulkMoveAgents,
getGetV2ListLibraryFoldersQueryKey,
} from "@/app/api/__generated__/endpoints/folders/folders";
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
@@ -22,6 +26,7 @@ import { useQueryClient } from "@tanstack/react-query";
import Link from "next/link";
import { useRouter } from "next/navigation";
import { useState } from "react";
import { MoveToFolderDialog } from "../../MoveToFolderDialog/MoveToFolderDialog";

interface AgentCardMenuProps {
agent: LibraryAgent;
@@ -32,11 +37,25 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
const queryClient = useQueryClient();
const router = useRouter();
const [showDeleteDialog, setShowDeleteDialog] = useState(false);
const [showMoveDialog, setShowMoveDialog] = useState(false);
const [isDeletingAgent, setIsDeletingAgent] = useState(false);
const [isDuplicatingAgent, setIsDuplicatingAgent] = useState(false);
const [isRemovingFromFolder, setIsRemovingFromFolder] = useState(false);

const { mutateAsync: deleteAgent } = useDeleteV2DeleteLibraryAgent();
const { mutateAsync: forkAgent } = usePostV2ForkLibraryAgent();
const { mutateAsync: bulkMoveAgents } = usePostV2BulkMoveAgents({
mutation: {
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
},
},
});

async function handleDuplicateAgent() {
if (!agent.id) return;
@@ -70,6 +89,37 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
}
}

async function handleRemoveFromFolder() {
if (!agent.id) return;

setIsRemovingFromFolder(true);

try {
await bulkMoveAgents({
data: {
agent_ids: [agent.id],
folder_id: undefined,
},
});

toast({
title: "Removed from folder",
description: "Agent has been moved back to your library.",
});
} catch (error: unknown) {
toast({
title: "Failed to remove from folder",
description:
error instanceof Error
? error.message
: "An unexpected error occurred.",
variant: "destructive",
});
} finally {
setIsRemovingFromFolder(false);
}
}

async function handleDeleteAgent() {
if (!agent.id) return;

@@ -138,6 +188,31 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
Duplicate agent
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
setShowMoveDialog(true);
}}
className="flex items-center gap-2"
>
Move to folder
</DropdownMenuItem>
{agent.folder_id && (
<>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
handleRemoveFromFolder();
}}
disabled={isRemovingFromFolder}
className="flex items-center gap-2"
>
Remove from folder
</DropdownMenuItem>
</>
)}
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={(e) => {
e.stopPropagation();
@@ -183,6 +258,14 @@ export function AgentCardMenu({ agent }: AgentCardMenuProps) {
</div>
</Dialog.Content>
</Dialog>

<MoveToFolderDialog
agentId={agent.id}
agentName={agent.name}
currentFolderId={agent.folder_id}
isOpen={showMoveDialog}
setIsOpen={setShowMoveDialog}
/>
</>
);
}

@@ -3,10 +3,12 @@
import { cn } from "@/lib/utils";
import { HeartIcon } from "@phosphor-icons/react";
import type { MouseEvent } from "react";
import { useRef } from "react";
import { motion, AnimatePresence } from "framer-motion";

interface FavoriteButtonProps {
isFavorite: boolean;
onClick: (e: MouseEvent<HTMLButtonElement>) => void;
onClick: (e: MouseEvent<HTMLButtonElement>, position: { x: number; y: number }) => void;
className?: string;
}

@@ -15,25 +17,46 @@ export function FavoriteButton({
onClick,
className,
}: FavoriteButtonProps) {
const buttonRef = useRef<HTMLButtonElement>(null);

function handleClick(e: MouseEvent<HTMLButtonElement>) {
const rect = buttonRef.current?.getBoundingClientRect();
const position = rect
? { x: rect.left + rect.width / 2 - 12, y: rect.top + rect.height / 2 - 12 }
: { x: 0, y: 0 };
onClick(e, position);
}

return (
<button
onClick={onClick}
ref={buttonRef}
onClick={handleClick}
className={cn(
"rounded-full p-2 transition-all duration-200",
"hover:scale-110",
"hover:scale-110 active:scale-95",
!isFavorite && "opacity-0 group-hover:opacity-100",
className,
)}
aria-label={isFavorite ? "Remove from favorites" : "Add to favorites"}
>
<HeartIcon
size={20}
weight={isFavorite ? "fill" : "regular"}
className={cn(
"transition-colors duration-200",
isFavorite ? "text-red-500" : "text-gray-600 hover:text-red-500",
)}
/>
<AnimatePresence mode="wait" initial={false}>
<motion.div
key={isFavorite ? "filled" : "empty"}
initial={{ scale: 0.5, opacity: 0 }}
animate={{ scale: 1, opacity: 1 }}
exit={{ scale: 0.5, opacity: 0 }}
transition={{ type: "spring", damping: 15, stiffness: 300 }}
>
<HeartIcon
size={20}
weight={isFavorite ? "fill" : "regular"}
className={cn(
"transition-colors duration-200",
isFavorite ? "text-red-500" : "text-gray-600 hover:text-red-500",
)}
/>
</motion.div>
</AnimatePresence>
</button>
);
}

@@ -14,11 +14,11 @@ import { updateFavoriteInQueries } from "./helpers";

interface Props {
agent: LibraryAgent;
onFavoriteAdd?: (position: { x: number; y: number }) => void;
}

export function useLibraryAgentCard({ agent }: Props) {
const { id, name, is_favorite, creator_image_url, marketplace_listing } =
agent;
export function useLibraryAgentCard({ agent, onFavoriteAdd }: Props) {
const { id, is_favorite, creator_image_url, marketplace_listing } = agent;

const isFromMarketplace = Boolean(marketplace_listing);
const [isFavorite, setIsFavorite] = useState(is_favorite);
@@ -49,26 +49,31 @@ export function useLibraryAgentCard({ agent }: Props) {
});
}

async function handleToggleFavorite(e: React.MouseEvent) {
async function handleToggleFavorite(
e: React.MouseEvent,
position: { x: number; y: number }
) {
e.preventDefault();
e.stopPropagation();

const newIsFavorite = !isFavorite;

// Optimistic update - update UI immediately
setIsFavorite(newIsFavorite);
updateQueryData(newIsFavorite);

// Trigger animation immediately for adding to favorites
if (newIsFavorite && onFavoriteAdd) {
onFavoriteAdd(position);
}

try {
await updateLibraryAgent({
libraryAgentId: id,
data: { is_favorite: newIsFavorite },
});

toast({
title: newIsFavorite ? "Added to favorites" : "Removed from favorites",
description: `${name} has been ${newIsFavorite ? "added to" : "removed from"} your favorites.`,
});
} catch {
// Revert on failure
setIsFavorite(!newIsFavorite);
updateQueryData(!newIsFavorite);

@@ -1,30 +1,66 @@
"use client";

import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { LibraryActionSubHeader } from "../LibraryActionSubHeader/LibraryActionSubHeader";
import { LibraryAgentCard } from "../LibraryAgentCard/LibraryAgentCard";
import { LibraryFolder } from "../LibraryFolder/LibraryFolder";
import { LibrarySubSection } from "../LibrarySubSection/LibrarySubSection";
import { Button } from "@/components/atoms/Button/Button";
import { ArrowLeftIcon, HeartIcon } from "@phosphor-icons/react";
import { Text } from "@/components/atoms/Text/Text";
import { Tab } from "../LibraryTabs/LibraryTabs";
import { LayoutGroup } from "framer-motion";
import { LibraryFolderEditDialog } from "../LibraryFolderEditDialog/LibraryFolderEditDialog";
import { LibraryFolderDeleteDialog } from "../LibraryFolderDeleteDialog/LibraryFolderDeleteDialog";
import { useLibraryAgentList } from "./useLibraryAgentList";

interface Props {
searchTerm: string;
librarySort: LibraryAgentSort;
setLibrarySort: (value: LibraryAgentSort) => void;
selectedFolderId: string | null;
onFolderSelect: (folderId: string | null) => void;
tabs: Tab[];
activeTab: string;
onTabChange: (tabId: string) => void;
}

export function LibraryAgentList({
searchTerm,
librarySort,
setLibrarySort,
selectedFolderId,
onFolderSelect,
tabs,
activeTab,
onTabChange,
}: Props) {
const {
isFavoritesTab,
agentLoading,
agentCount,
allAgents: agents,
agents,
hasNextPage,
isFetchingNextPage,
fetchNextPage,
} = useLibraryAgentList({ searchTerm, librarySort });
foldersData,
currentFolder,
showFolders,
editingFolder,
setEditingFolder,
deletingFolder,
setDeletingFolder,
handleAgentDrop,
handleFolderDeleted,
} = useLibraryAgentList({
searchTerm,
librarySort,
selectedFolderId,
onFolderSelect,
activeTab,
});

return (
<>
@@ -32,11 +68,42 @@ export function LibraryAgentList({
agentCount={agentCount}
setLibrarySort={setLibrarySort}
/>
<div className="px-2">
{!selectedFolderId && (
<LibrarySubSection
tabs={tabs}
activeTab={activeTab}
onTabChange={onTabChange}
/>
)}

<div>
{selectedFolderId && (
<div className="mb-4 flex items-center gap-3">
<Button
variant="ghost"
size="small"
onClick={() => onFolderSelect(null)}
className="gap-2"
>
<ArrowLeftIcon className="h-4 w-4" />
Back to Library
</Button>
{currentFolder && (
<Text variant="h4" className="text-zinc-700">
{currentFolder.icon} {currentFolder.name}
</Text>
)}
</div>
)}
{agentLoading ? (
<div className="flex h-[200px] items-center justify-center">
<LoadingSpinner size="large" />
</div>
) : isFavoritesTab && agents.length === 0 ? (
<div className="flex h-[200px] flex-col items-center justify-center gap-2 text-zinc-500">
<HeartIcon className="h-10 w-10" />
<Text variant="body">No favorite agents yet</Text>
</div>
) : (
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
@@ -44,14 +111,52 @@ export function LibraryAgentList({
hasNextPage={hasNextPage}
loader={<LoadingSpinner size="medium" />}
>
<div className="grid grid-cols-1 gap-6 sm:grid-cols-2 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4">
{agents.map((agent) => (
<LibraryAgentCard key={agent.id} agent={agent} />
))}
</div>
<LayoutGroup>
<div className="grid grid-cols-1 gap-6 sm:grid-cols-2 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4">
{showFolders &&
foldersData?.folders.map((folder) => (
<LibraryFolder
key={folder.id}
id={folder.id}
name={folder.name}
agentCount={folder.agent_count ?? 0}
color={folder.color ?? undefined}
icon={folder.icon ?? "📁"}
onAgentDrop={handleAgentDrop}
onClick={() => onFolderSelect(folder.id)}
onEdit={() => setEditingFolder(folder)}
onDelete={() => setDeletingFolder(folder)}
/>
))}
{agents.map((agent) => (
<LibraryAgentCard key={agent.id} agent={agent} />
))}
</div>
</LayoutGroup>
</InfiniteScroll>
)}
</div>

{editingFolder && (
<LibraryFolderEditDialog
folder={editingFolder}
isOpen={!!editingFolder}
setIsOpen={(open) => {
if (!open) setEditingFolder(null);
}}
/>
)}

{deletingFolder && (
<LibraryFolderDeleteDialog
folder={deletingFolder}
isOpen={!!deletingFolder}
setIsOpen={(open) => {
if (!open) setDeletingFolder(null);
}}
onDeleted={handleFolderDeleted}
/>
)}
</>
);
}

@@ -1,36 +1,71 @@
"use client";

import { useGetV2ListLibraryAgentsInfinite } from "@/app/api/__generated__/endpoints/library/library";
import { getGetV2ListLibraryAgentsQueryKey } from "@/app/api/__generated__/endpoints/library/library";
import {
useGetV2ListLibraryFolders,
usePostV2BulkMoveAgents,
getGetV2ListLibraryFoldersQueryKey,
} from "@/app/api/__generated__/endpoints/folders/folders";
import type { getV2ListLibraryFoldersResponseSuccess } from "@/app/api/__generated__/endpoints/folders/folders";
import type { LibraryFolder } from "@/app/api/__generated__/models/libraryFolder";
import { LibraryAgentSort } from "@/app/api/__generated__/models/libraryAgentSort";
import {
okData,
getPaginatedTotalCount,
getPaginationNextPageNumber,
unpaginate,
} from "@/app/api/helpers";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useFavoriteAgents } from "../../hooks/useFavoriteAgents";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useEffect, useRef } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { useEffect, useRef, useState } from "react";

interface Props {
searchTerm: string;
librarySort: LibraryAgentSort;
selectedFolderId: string | null;
onFolderSelect: (folderId: string | null) => void;
activeTab: string;
}

export function useLibraryAgentList({ searchTerm, librarySort }: Props) {
const queryClient = getQueryClient();
export function useLibraryAgentList({
searchTerm,
librarySort,
selectedFolderId,
onFolderSelect,
activeTab,
}: Props) {
const isFavoritesTab = activeTab === "favorites";
const { toast } = useToast();
const stableQueryClient = getQueryClient();
const queryClient = useQueryClient();
const prevSortRef = useRef<LibraryAgentSort | null>(null);

const [editingFolder, setEditingFolder] = useState<LibraryFolder | null>(
null,
);
const [deletingFolder, setDeletingFolder] = useState<LibraryFolder | null>(
null,
);

// --- Agent list fetching ---

const {
data: agentsQueryData,
fetchNextPage,
hasNextPage,
isFetchingNextPage,
isLoading: agentLoading,
isLoading: allAgentsLoading,
} = useGetV2ListLibraryAgentsInfinite(
{
page: 1,
page_size: 20,
search_term: searchTerm || undefined,
sort_by: librarySort,
folder_id: selectedFolderId ?? undefined,
include_root_only: selectedFolderId === null ? true : undefined,
},
{
query: {
@@ -39,28 +74,148 @@ export function useLibraryAgentList({ searchTerm, librarySort }: Props) {
},
);

// Reset queries when sort changes to ensure fresh data with correct sorting
useEffect(() => {
if (prevSortRef.current !== null && prevSortRef.current !== librarySort) {
// Reset all library agent queries to ensure fresh fetch with new sort
queryClient.resetQueries({
stableQueryClient.resetQueries({
queryKey: ["/api/library/agents"],
});
}
prevSortRef.current = librarySort;
}, [librarySort, queryClient]);
}, [librarySort, stableQueryClient]);

const allAgents = agentsQueryData
const allAgentsList = agentsQueryData
? unpaginate(agentsQueryData, "agents")
: [];
const agentCount = getPaginatedTotalCount(agentsQueryData);
const allAgentsCount = getPaginatedTotalCount(agentsQueryData);

// --- Favorites ---

const favoriteAgentsData = useFavoriteAgents({ searchTerm });

const {
agentLoading,
agentCount,
allAgents: agents,
hasNextPage: agentsHasNextPage,
isFetchingNextPage: agentsIsFetchingNextPage,
fetchNextPage: agentsFetchNextPage,
} = isFavoritesTab
? favoriteAgentsData
: {
agentLoading: allAgentsLoading,
agentCount: allAgentsCount,
allAgents: allAgentsList,
hasNextPage: hasNextPage,
isFetchingNextPage: isFetchingNextPage,
fetchNextPage: fetchNextPage,
};

// --- Folders ---

const { data: foldersData } = useGetV2ListLibraryFolders(undefined, {
query: { select: okData },
});

const { mutate: moveAgentToFolder } = usePostV2BulkMoveAgents({
mutation: {
onMutate: async ({ data }) => {
await queryClient.cancelQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
await queryClient.cancelQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});

const previousFolders = queryClient.getQueriesData<
getV2ListLibraryFoldersResponseSuccess
>({ queryKey: getGetV2ListLibraryFoldersQueryKey() });

if (data.folder_id) {
queryClient.setQueriesData<getV2ListLibraryFoldersResponseSuccess>(
{ queryKey: getGetV2ListLibraryFoldersQueryKey() },
(old) => {
if (!old?.data?.folders) return old;
return {
...old,
data: {
...old.data,
folders: old.data.folders.map((f) =>
f.id === data.folder_id
? {
...f,
agent_count:
(f.agent_count ?? 0) + data.agent_ids.length,
}
: f,
),
},
};
},
);
}

return { previousFolders };
},
onError: (_error, _variables, context) => {
if (context?.previousFolders) {
for (const [queryKey, data] of context.previousFolders) {
queryClient.setQueryData(queryKey, data);
}
}
toast({
title: "Error",
description: "Failed to move agent. Please try again.",
variant: "destructive",
});
},
onSettled: () => {
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryFoldersQueryKey(),
});
queryClient.invalidateQueries({
queryKey: getGetV2ListLibraryAgentsQueryKey(),
});
},
},
});

function handleAgentDrop(agentId: string, folderId: string) {
moveAgentToFolder({
data: {
agent_ids: [agentId],
folder_id: folderId,
},
});
}

const currentFolder = selectedFolderId
? foldersData?.folders.find((f) => f.id === selectedFolderId)
: null;

const showFolders = !isFavoritesTab && !selectedFolderId;

function handleFolderDeleted() {
if (selectedFolderId === deletingFolder?.id) {
onFolderSelect(null);
}
}

return {
allAgents,
isFavoritesTab,
agentLoading,
hasNextPage,
agentCount,
isFetchingNextPage,
fetchNextPage,
agents,
hasNextPage: agentsHasNextPage,
isFetchingNextPage: agentsIsFetchingNextPage,
fetchNextPage: agentsFetchNextPage,
foldersData,
currentFolder,
showFolders,
editingFolder,
setEditingFolder,
deletingFolder,
setDeletingFolder,
handleAgentDrop,
handleFolderDeleted,
};
}

@@ -0,0 +1,353 @@
import { useState } from "react";
import { motion } from "framer-motion";
import { Text } from "@/components/atoms/Text/Text";

type FolderSize = "xs" | "sm" | "md" | "lg" | "xl";
export type FolderColorName =
| "neutral"
| "slate"
| "zinc"
| "stone"
| "red"
| "orange"
| "amber"
| "yellow"
| "lime"
| "green"
| "emerald"
| "teal"
| "cyan"
| "sky"
| "blue"
| "indigo"
| "violet"
| "purple"
| "fuchsia"
| "pink"
| "rose";

export type FolderColor = FolderColorName | (string & {});

const hexToColorName: Record<string, FolderColorName> = {
"#3B82F6": "blue",
"#3b82f6": "blue",
"#A855F7": "purple",
"#a855f7": "purple",
"#10B981": "emerald",
"#10b981": "emerald",
"#F97316": "orange",
"#f97316": "orange",
"#EC4899": "pink",
"#ec4899": "pink",
};

function resolveColor(color: FolderColor | undefined): FolderColorName {
if (!color) return "blue";
if (color in hexToColorName) return hexToColorName[color];
if (color in colorMap) return color as FolderColorName;
return "blue";
}

interface Props {
className?: string;
size?: FolderSize | number;
color?: FolderColor;
icon?: string;
isOpen?: boolean;
}

const sizeMap: Record<FolderSize, number> = {
xs: 0.4,
sm: 0.75,
md: 1,
lg: 1.25,
xl: 1.5,
};

const colorMap: Record<
FolderColorName,
{
bg: string;
border: string;
borderLight: string;
fill: string;
stroke: string;
}
> = {
neutral: {
bg: "bg-neutral-300",
border: "border-neutral-300",
borderLight: "border-neutral-200",
fill: "fill-neutral-300",
stroke: "stroke-neutral-400",
},
slate: {
bg: "bg-slate-300",
border: "border-slate-300",
borderLight: "border-slate-200",
fill: "fill-slate-300",
stroke: "stroke-slate-400",
},
zinc: {
bg: "bg-zinc-300",
border: "border-zinc-300",
borderLight: "border-zinc-200",
fill: "fill-zinc-300",
stroke: "stroke-zinc-400",
},
stone: {
bg: "bg-stone-300",
border: "border-stone-300",
borderLight: "border-stone-200",
fill: "fill-stone-300",
stroke: "stroke-stone-400",
},
red: {
bg: "bg-red-300",
border: "border-red-300",
borderLight: "border-red-200",
fill: "fill-red-300",
stroke: "stroke-red-400",
},
orange: {
bg: "bg-orange-200",
border: "border-orange-200",
borderLight: "border-orange-200",
fill: "fill-orange-200",
stroke: "stroke-orange-400",
},
amber: {
bg: "bg-amber-200",
border: "border-amber-200",
borderLight: "border-amber-200",
fill: "fill-amber-200",
stroke: "stroke-amber-400",
},
yellow: {
bg: "bg-yellow-200",
border: "border-yellow-200",
borderLight: "border-yellow-200",
fill: "fill-yellow-200",
stroke: "stroke-yellow-400",
},
lime: {
bg: "bg-lime-300",
border: "border-lime-300",
borderLight: "border-lime-200",
fill: "fill-lime-300",
stroke: "stroke-lime-400",
},
green: {
bg: "bg-green-200",
border: "border-green-200",
borderLight: "border-green-200",
fill: "fill-green-200",
stroke: "stroke-green-400",
},
emerald: {
bg: "bg-emerald-300",
border: "border-emerald-300",
borderLight: "border-emerald-200",
fill: "fill-emerald-300",
stroke: "stroke-emerald-400",
},
teal: {
bg: "bg-teal-300",
border: "border-teal-300",
borderLight: "border-teal-200",
fill: "fill-teal-300",
stroke: "stroke-teal-400",
},
cyan: {
bg: "bg-cyan-300",
border: "border-cyan-300",
borderLight: "border-cyan-200",
fill: "fill-cyan-300",
stroke: "stroke-cyan-400",
},
sky: {
bg: "bg-sky-300",
border: "border-sky-300",
borderLight: "border-sky-200",
fill: "fill-sky-300",
stroke: "stroke-sky-400",
},
blue: {
bg: "bg-blue-300",
border: "border-blue-300",
borderLight: "border-blue-200",
fill: "fill-blue-300",
stroke: "stroke-blue-400",
},
indigo: {
bg: "bg-indigo-300",
border: "border-indigo-300",
borderLight: "border-indigo-200",
fill: "fill-indigo-300",
stroke: "stroke-indigo-400",
},
violet: {
bg: "bg-violet-300",
border: "border-violet-300",
borderLight: "border-violet-200",
fill: "fill-violet-300",
stroke: "stroke-violet-400",
},
purple: {
bg: "bg-purple-200",
border: "border-purple-200",
borderLight: "border-purple-200",
fill: "fill-purple-200",
stroke: "stroke-purple-400",
},
fuchsia: {
bg: "bg-fuchsia-300",
border: "border-fuchsia-300",
borderLight: "border-fuchsia-200",
fill: "fill-fuchsia-300",
stroke: "stroke-fuchsia-400",
},
pink: {
bg: "bg-pink-300",
border: "border-pink-300",
borderLight: "border-pink-200",
fill: "fill-pink-300",
stroke: "stroke-pink-400",
},
rose: {
bg: "bg-rose-300",
border: "border-rose-300",
borderLight: "border-rose-200",
fill: "fill-rose-300",
stroke: "stroke-rose-400",
},
};

export function FolderIcon({
className = "",
size = "xs",
color = "blue",
icon,
isOpen = false,
}: Props) {
const scale = typeof size === "number" ? size : sizeMap[size];
const resolvedColor = resolveColor(color);
const colors = colorMap[resolvedColor];

return (
<div
className={`group relative cursor-pointer ${className}`}
style={{
width: 320 * scale,
height: 208 * scale,
}}
>
<div
className="h-52 w-80 origin-top-left"
style={{ transform: `scale(${scale})`, perspective: "500px" }}
>
<div
className={`folder-back relative mx-auto flex h-full w-[87.5%] justify-center overflow-visible rounded-3xl ${colors.bg} ${colors.border}`}
>
{[
{
initial: { rotate: -3, x: -38, y: 2 },
open: { rotate: -8, x: -70, y: -75 },
transition: {
type: "spring" as const,
bounce: 0.15,
stiffness: 160,
damping: 22,
},
className: "z-10",
},
{
initial: { rotate: 0, x: 0, y: 0 },
open: { rotate: 1, x: 2, y: -95 },
transition: {
type: "spring" as const,
duration: 0.55,
bounce: 0.12,
stiffness: 190,
damping: 24,
},
className: "z-20",
},
{
initial: { rotate: 3.5, x: 42, y: 1 },
open: { rotate: 9, x: 75, y: -80 },
transition: {
type: "spring" as const,
duration: 0.58,
bounce: 0.17,
stiffness: 170,
damping: 21,
},
className: "z-10",
},
].map((page, i) => (
<motion.div
key={i}
initial={page.initial}
animate={isOpen ? page.open : page.initial}
transition={page.transition}
className={`absolute top-2 h-fit w-32 rounded-xl shadow-lg ${page.className}`}
>
<Page color={resolvedColor} />
</motion.div>
))}
</div>

<motion.div
animate={{
rotateX: isOpen ? -15 : 0,
}}
transition={{ type: "spring", duration: 0.5, bounce: 0.25 }}
className="absolute inset-x-0 -bottom-px z-30 mx-auto flex h-44 w-[87.5%] origin-bottom items-end justify-center overflow-visible"
style={{ transformStyle: "preserve-3d" }}
>
<svg
className="h-auto w-full"
viewBox="0 0 173 109"
fill="none"
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="none"
>
<path
className={`${colors.fill} ${colors.stroke}`}
d="M15.0423 0.500003C0.5 0.500009 0.5 14.2547 0.5 14.2547V92.5C0.5 101.337 7.66344 108.5 16.5 108.5H156.5C165.337 108.5 172.5 101.337 172.5 92.5V34.3302C172.5 25.4936 165.355 18.3302 156.519 18.3302H108.211C98.1341 18.3302 91.2921 5.57144 82.0156 1.63525C80.3338 0.921645 78.2634 0.500002 75.7187 0.500003H15.0423Z"
/>
</svg>
<div className="absolute inset-0 flex items-center justify-center text-7xl">
{icon}
</div>
</motion.div>
</div>
</div>
);
}

interface PageProps {
color: FolderColorName;
}

function Page({ color = "blue" }: PageProps) {
const colors = colorMap[color];
return (
<div
className={`h-full w-full rounded-xl border bg-white p-4 ${colors.borderLight}`}
>
<div className="flex flex-col gap-2">
<Text variant="h5" className="text-black">
agent.json
</Text>
{Array.from({ length: 8 }).map((_, i) => (
<div key={i} className="flex gap-2">
<div className="h-1.5 flex-1 rounded-full bg-neutral-100" />
<div className="h-1.5 flex-1 rounded-full bg-neutral-100" />
</div>
))}
</div>
</div>
);
}